/*
 * Cache management
 *
 * Copyright 2017 HAProxy Technologies
 * William Lallemand <wlallemand@haproxy.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <import/eb32tree.h>
#include <import/sha1.h>

#include <haproxy/action-t.h>
#include <haproxy/api.h>
#include <haproxy/cfgparse.h>
#include <haproxy/channel.h>
#include <haproxy/cli.h>
#include <haproxy/errors.h>
#include <haproxy/filters.h>
#include <haproxy/hash.h>
#include <haproxy/http.h>
#include <haproxy/http_ana.h>
#include <haproxy/http_htx.h>
#include <haproxy/http_rules.h>
#include <haproxy/htx.h>
#include <haproxy/net_helper.h>
#include <haproxy/proxy.h>
#include <haproxy/shctx.h>
#include <haproxy/stream.h>
#include <haproxy/stream_interface.h>

#define CACHE_FLT_F_IMPLICIT_DECL  0x00000001 /* The cache filter was implicitly declared (ie without
                                               * the filter keyword) */
#define CACHE_FLT_INIT             0x00000002 /* Whether the cache name was freed. */

const char *cache_store_flt_id = "cache store filter";

extern struct applet http_cache_applet;

struct flt_ops cache_ops;

struct cache {
	struct list list;        /* cache linked list */
	struct eb_root entries;  /* head of cache entries based on keys */
	unsigned int maxage;     /* max-age */
	unsigned int maxblocks;
	unsigned int maxobjsz;   /* max-object-size (in bytes) */
	char id[33];             /* cache name */
};
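
/* For illustration only, not part of the build: a cache such as the one
 * described by this structure is typically declared in the configuration
 * with a "cache" section and wired to a proxy through the cache-use /
 * cache-store actions. A minimal sketch, assuming the usual keywords:
 *
 *   cache mycache
 *       total-max-size 64        # shared memory area, in megabytes
 *       max-object-size 10240    # largest cacheable object, in bytes
 *       max-age 60               # default freshness, in seconds
 *
 *   backend app
 *       http-request  cache-use   mycache
 *       http-response cache-store mycache
 */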

/* cache config for filters */
struct cache_flt_conf {
	union {
		struct cache *cache; /* cache used by the filter */
		char *name;          /* cache name used during conf parsing */
	} c;
	unsigned int flags;     /* CACHE_FLT_F_* */
};

/*
 * cache ctx for filters
 */
struct cache_st {
	struct shared_block *first_block;
};

struct cache_entry {
	unsigned int latest_validation; /* latest validation date */
	unsigned int expire;            /* expiration date */
	unsigned int age;               /* Origin server "Age" header value */

	struct eb32_node eb;            /* ebtree node used to hold the cache object */
	char hash[20];

	unsigned int etag_length;       /* Length of the ETag value (if one was found in the response). */
	unsigned int etag_offset;       /* Offset of the ETag value in the data buffer. */

	unsigned char data[0];
};

#define CACHE_BLOCKSIZE 1024
#define CACHE_ENTRY_MAX_AGE 2147483648U

static struct list caches = LIST_HEAD_INIT(caches);
static struct list caches_config = LIST_HEAD_INIT(caches_config); /* cache config to init */
static struct cache *tmp_cache_config = NULL;

DECLARE_STATIC_POOL(pool_head_cache_st, "cache_st", sizeof(struct cache_st));

struct cache_entry *entry_exist(struct cache *cache, char *hash)
{
	struct eb32_node *node;
	struct cache_entry *entry;

	node = eb32_lookup(&cache->entries, read_u32(hash));
	if (!node)
		return NULL;

	entry = eb32_entry(node, struct cache_entry, eb);

	/* if that's not the right node */
	if (memcmp(entry->hash, hash, sizeof(entry->hash)))
		return NULL;

	if (entry->expire > now.tv_sec) {
		return entry;
	} else {
		eb32_delete(node);
		entry->eb.key = 0;
	}
	return NULL;

}
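
/* Note on the lookup above: the ebtree is keyed on only the first 32 bits of
 * the 20-byte SHA1 hash, so a successful eb32_lookup() may still be a
 * collision; the full memcmp() on entry->hash is what actually validates the
 * match, and expired entries are pruned from the tree on the fly. */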

static inline struct shared_context *shctx_ptr(struct cache *cache)
{
	return (struct shared_context *)((unsigned char *)cache - ((struct shared_context *)NULL)->data);
}

static inline struct shared_block *block_ptr(struct cache_entry *entry)
{
	return (struct shared_block *)((unsigned char *)entry - ((struct shared_block *)NULL)->data);
}
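
/* The two helpers above rely on the classic "container of" idiom: a cache
 * (resp. a cache_entry) lives in the flexible data[] member of its enclosing
 * shared_context (resp. shared_block), so subtracting the offset of data[]
 * from the payload pointer recovers the container. An equivalent sketch
 * using offsetof(), shown for clarity only:
 *
 *   return (struct shared_context *)((unsigned char *)cache -
 *                                    offsetof(struct shared_context, data));
 */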



static int
cache_store_init(struct proxy *px, struct flt_conf *fconf)
{
	fconf->flags |= FLT_CFG_FL_HTX;
	return 0;
}

static void
cache_store_deinit(struct proxy *px, struct flt_conf *fconf)
{
	struct cache_flt_conf *cconf = fconf->conf;

	if (!(cconf->flags & CACHE_FLT_INIT))
		free(cconf->c.name);
	free(cconf);
}

static int
cache_store_check(struct proxy *px, struct flt_conf *fconf)
{
	struct cache_flt_conf *cconf = fconf->conf;
	struct flt_conf *f;
	struct cache *cache;
	int comp = 0;

	/* Find the cache corresponding to the name in the filter config. The
	 * cache will not be referenced now in the filter config because it is
	 * not fully allocated. This step will be performed during the cache
	 * post_check.
	 */
	list_for_each_entry(cache, &caches_config, list) {
		if (!strcmp(cache->id, cconf->c.name))
			goto found;
	}

	ha_alert("config: %s '%s': unable to find the cache '%s' referenced by the filter 'cache'.\n",
		 proxy_type_str(px), px->id, (char *)cconf->c.name);
	return 1;

  found:
	/* Here <cache> points on the cache the filter must use and <cconf>
	 * points on the cache filter configuration. */

	/* Check all filters for proxy <px> to know if the compression is
	 * enabled and if it is after the cache. When the compression is before
	 * the cache, an error is returned. Also check if the cache filter must
	 * be explicitly declared or not. */
	list_for_each_entry(f, &px->filter_configs, list) {
		if (f == fconf) {
			/* The compression filter must be evaluated after the cache. */
			if (comp) {
				ha_alert("config: %s '%s': unable to enable the compression filter before "
					 "the cache '%s'.\n", proxy_type_str(px), px->id, cache->id);
				return 1;
			}
		}
		else if (f->id == http_comp_flt_id)
			comp = 1;
		else if (f->id == fcgi_flt_id)
			continue;
		else if ((f->id != fconf->id) && (cconf->flags & CACHE_FLT_F_IMPLICIT_DECL)) {
			/* Implicit declaration is only allowed with the
			 * compression and fcgi. For other filters, an explicit
			 * declaration is required. */
			ha_alert("config: %s '%s': require an explicit filter declaration "
				 "to use the cache '%s'.\n", proxy_type_str(px), px->id, cache->id);
			return 1;
		}

	}
	return 0;
}

static int
cache_store_strm_init(struct stream *s, struct filter *filter)
{
	struct cache_st *st;

	st = pool_alloc_dirty(pool_head_cache_st);
	if (st == NULL)
		return -1;

	st->first_block = NULL;
	filter->ctx = st;

	/* Register post-analyzer on AN_RES_WAIT_HTTP */
	filter->post_analyzers |= AN_RES_WAIT_HTTP;
	return 1;
}

static void
cache_store_strm_deinit(struct stream *s, struct filter *filter)
{
	struct cache_st *st = filter->ctx;
	struct cache_flt_conf *cconf = FLT_CONF(filter);
	struct cache *cache = cconf->c.cache;
	struct shared_context *shctx = shctx_ptr(cache);

	/* Everything should be released in the http_end filter, but we need to do it
	 * here too, in case of errors */
	if (st && st->first_block) {
		shctx_lock(shctx);
		shctx_row_dec_hot(shctx, st->first_block);
		shctx_unlock(shctx);
	}
	if (st) {
		pool_free(pool_head_cache_st, st);
		filter->ctx = NULL;
	}
}

static int
cache_store_post_analyze(struct stream *s, struct filter *filter, struct channel *chn,
                         unsigned an_bit)
{
	struct http_txn *txn = s->txn;
	struct http_msg *msg = &txn->rsp;
	struct cache_st *st = filter->ctx;

	if (an_bit != AN_RES_WAIT_HTTP)
		goto end;

	/* Here we need to check if any compression filter precedes the cache
	 * filter. This is only possible when the compression is configured in
	 * the frontend while the cache filter is configured on the
	 * backend. This case cannot be detected during HAProxy startup. So in
	 * such cases, the cache is disabled.
	 */
	if (st && (msg->flags & HTTP_MSGF_COMPRESSING)) {
		pool_free(pool_head_cache_st, st);
		filter->ctx = NULL;
	}

  end:
	return 1;
}

static int
cache_store_http_headers(struct stream *s, struct filter *filter, struct http_msg *msg)
{
	struct cache_st *st = filter->ctx;

	if (!(msg->chn->flags & CF_ISRESP) || !st)
		return 1;

	if (st->first_block)
		register_data_filter(s, msg->chn, filter);
	return 1;
}

static inline void disable_cache_entry(struct cache_st *st,
                                       struct filter *filter, struct shared_context *shctx)
{
	struct cache_entry *object;

	object = (struct cache_entry *)st->first_block->data;
	filter->ctx = NULL; /* disable cache */
	shctx_lock(shctx);
	shctx_row_dec_hot(shctx, st->first_block);
	object->eb.key = 0;
	shctx_unlock(shctx);
	pool_free(pool_head_cache_st, st);
}

static int
cache_store_http_payload(struct stream *s, struct filter *filter, struct http_msg *msg,
                         unsigned int offset, unsigned int len)
{
	struct cache_flt_conf *cconf = FLT_CONF(filter);
	struct shared_context *shctx = shctx_ptr(cconf->c.cache);
	struct cache_st *st = filter->ctx;
	struct htx *htx = htxbuf(&msg->chn->buf);
	struct htx_blk *blk;
	struct shared_block *fb;
	struct htx_ret htxret;
	unsigned int orig_len, to_forward;
	int ret;

	if (!len)
		return len;

	if (!st->first_block) {
		unregister_data_filter(s, msg->chn, filter);
		return len;
	}

	chunk_reset(&trash);
	orig_len = len;
	to_forward = 0;

	htxret = htx_find_offset(htx, offset);
	blk = htxret.blk;
	offset = htxret.ret;
	for (; blk && len; blk = htx_get_next_blk(htx, blk)) {
		enum htx_blk_type type = htx_get_blk_type(blk);
		uint32_t info, sz = htx_get_blksz(blk);
		struct ist v;

		switch (type) {
			case HTX_BLK_UNUSED:
				break;

			case HTX_BLK_DATA:
				v = htx_get_blk_value(htx, blk);
				v.ptr += offset;
				v.len -= offset;
				if (v.len > len)
					v.len = len;

				info = (type << 28) + v.len;
				chunk_memcat(&trash, (char *)&info, sizeof(info));
				chunk_memcat(&trash, v.ptr, v.len);
				to_forward += v.len;
				len -= v.len;
				break;

			default:
				/* Here offset must always be 0 because only
				 * DATA blocks can be partially transferred. */
				if (offset)
					goto no_cache;
				if (sz > len)
					goto end;

				chunk_memcat(&trash, (char *)&blk->info, sizeof(blk->info));
				chunk_memcat(&trash, htx_get_blk_ptr(htx, blk), sz);
				to_forward += sz;
				len -= sz;
				break;
		}

		offset = 0;
	}

  end:
	shctx_lock(shctx);
	fb = shctx_row_reserve_hot(shctx, st->first_block, trash.data);
	if (!fb) {
		shctx_unlock(shctx);
		goto no_cache;
	}
	shctx_unlock(shctx);

	ret = shctx_row_data_append(shctx, st->first_block, st->first_block->last_append,
	                            (unsigned char *)b_head(&trash), b_data(&trash));
	if (ret < 0)
		goto no_cache;

	return to_forward;

  no_cache:
	disable_cache_entry(st, filter, shctx);
	unregister_data_filter(s, msg->chn, filter);
	return orig_len;
}
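
/* Storage format sketch, as produced by the function above: each HTX block
 * is serialized into the shared row as its 4-byte <info> word (the HTX type
 * in the top 4 bits, the encoded size below) immediately followed by its
 * payload:
 *
 *   [info][hdr block][info][hdr block]...[info][data chunk]...
 *
 * htx_cache_dump_msg() below performs the reverse operation when the entry
 * is replayed by the cache applet.
 */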

static int
cache_store_http_end(struct stream *s, struct filter *filter,
                     struct http_msg *msg)
{
	struct cache_st *st = filter->ctx;
	struct cache_flt_conf *cconf = FLT_CONF(filter);
	struct cache *cache = cconf->c.cache;
	struct shared_context *shctx = shctx_ptr(cache);
	struct cache_entry *object;

	if (!(msg->chn->flags & CF_ISRESP))
		return 1;

	if (st && st->first_block) {

		object = (struct cache_entry *)st->first_block->data;

		/* no need to check whether the insertion worked: if it
		 * fails, the blocks will simply be reused anyway */

		shctx_lock(shctx);
		if (eb32_insert(&cache->entries, &object->eb) != &object->eb) {
			object->eb.key = 0;
		}
		/* remove from the hotlist */
		shctx_row_dec_hot(shctx, st->first_block);
		shctx_unlock(shctx);

	}
	if (st) {
		pool_free(pool_head_cache_st, st);
		filter->ctx = NULL;
	}

	return 1;
}

 /*
  * This is intended to be used when checking HTTP headers for some
  * word=value directive. Returns a pointer to the first character of <value>,
  * or NULL if the word was not found or if no value was assigned to it.
  */
char *directive_value(const char *sample, int slen, const char *word, int wlen)
{
	int st = 0;

	if (slen < wlen)
		return 0;

	while (wlen) {
		char c = *sample ^ *word;
		if (c && c != ('A' ^ 'a'))
			return NULL;
		sample++;
		word++;
		slen--;
		wlen--;
	}

	while (slen) {
		if (st == 0) {
			if (*sample != '=')
				return NULL;
			sample++;
			slen--;
			st = 1;
			continue;
		} else {
			return (char *)sample;
		}
	}

	return NULL;
}
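
/* Usage example (illustrative): for a Cache-Control token "max-age=60",
 * directive_value(tok, 10, "max-age", 7) returns a pointer to "60". NULL is
 * returned when the word does not match (case-insensitively, via the XOR
 * trick above) or when no '=' directly follows it. */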

/*
 * Return the maxage in seconds of an HTTP response.
 * Compute the maxage using either:
 *  - the assigned max-age of the cache
 *  - the s-maxage directive
 *  - the max-age directive
 *  - (Expires - Date) headers
 *  - the default-max-age of the cache
 *
 */
int http_calc_maxage(struct stream *s, struct cache *cache)
{
	struct htx *htx = htxbuf(&s->res.buf);
	struct http_hdr_ctx ctx = { .blk = NULL };
	int smaxage = -1;
	int maxage = -1;

	while (http_find_header(htx, ist("cache-control"), &ctx, 0)) {
		char *value;

		value = directive_value(ctx.value.ptr, ctx.value.len, "s-maxage", 8);
		if (value) {
			struct buffer *chk = get_trash_chunk();

			chunk_strncat(chk, value, ctx.value.len - 8 + 1);
			chunk_strncat(chk, "", 1);
			smaxage = atoi(chk->area);
		}

		value = directive_value(ctx.value.ptr, ctx.value.len, "max-age", 7);
		if (value) {
			struct buffer *chk = get_trash_chunk();

			chunk_strncat(chk, value, ctx.value.len - 7 + 1);
			chunk_strncat(chk, "", 1);
			maxage = atoi(chk->area);
		}
	}

	/* TODO: Expires - Date */


	if (smaxage > 0)
		return MIN(smaxage, cache->maxage);

	if (maxage > 0)
		return MIN(maxage, cache->maxage);

	return cache->maxage;

}
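
/* Worked example (illustrative): for a response carrying
 * "Cache-Control: s-maxage=300, max-age=60" served through a cache declared
 * with "max-age 120", s-maxage takes precedence over max-age and the result
 * is capped by the cache setting: MIN(300, 120) = 120 seconds. */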


static void cache_free_blocks(struct shared_block *first, struct shared_block *block)
{
	struct cache_entry *object = (struct cache_entry *)block->data;

	if (first == block && object->eb.key)
		eb32_delete(&object->eb);
	object->eb.key = 0;
}

/*
 * This function will store the headers of the response in a buffer and then
 * register a filter to store the data
 */
enum act_return http_action_store_cache(struct act_rule *rule, struct proxy *px,
                                        struct session *sess, struct stream *s, int flags)
{
	unsigned int age;
	long long hdr_age;
	struct http_txn *txn = s->txn;
	struct http_msg *msg = &txn->rsp;
	struct filter *filter;
	struct shared_block *first = NULL;
	struct cache_flt_conf *cconf = rule->arg.act.p[0];
	struct shared_context *shctx = shctx_ptr(cconf->c.cache);
	struct cache_st *cache_ctx = NULL;
	struct cache_entry *object, *old;
	unsigned int key = read_u32(txn->cache_hash);
	struct htx *htx;
	struct http_hdr_ctx ctx;
	size_t hdrs_len = 0;
	int32_t pos;
	unsigned int etag_length = 0;
	unsigned int etag_offset = 0;
	struct ist header_name = IST_NULL;

	/* Don't cache if the response came from a cache */
	if ((obj_type(s->target) == OBJ_TYPE_APPLET) &&
	    s->target == &http_cache_applet.obj_type) {
		goto out;
	}

	/* cache only HTTP/1.1 */
	if (!(txn->req.flags & HTTP_MSGF_VER_11))
		goto out;

	/* cache only GET method */
	if (txn->meth != HTTP_METH_GET)
		goto out;

	/* cache key was not computed */
	if (!key)
		goto out;

	/* cache only 200 status code */
	if (txn->status != 200)
		goto out;

	/* Find the corresponding filter instance for the current stream */
	list_for_each_entry(filter, &s->strm_flt.filters, list) {
		if (FLT_ID(filter) == cache_store_flt_id  && FLT_CONF(filter) == cconf) {
			/* No filter ctx, don't cache anything */
			if (!filter->ctx)
				goto out;
			cache_ctx = filter->ctx;
			break;
		}
	}

	/* from there, cache_ctx is always defined */
	htx = htxbuf(&s->res.buf);

	/* Do not cache too big objects. */
	if ((msg->flags & HTTP_MSGF_CNT_LEN) && shctx->max_obj_size > 0 &&
	    htx->data + htx->extra > shctx->max_obj_size)
		goto out;

	/* Does not manage Vary at the moment. We will need a secondary key later for that */
	ctx.blk = NULL;
	if (http_find_header(htx, ist("Vary"), &ctx, 0))
		goto out;

	http_check_response_for_cacheability(s, &s->res);

	if (!(txn->flags & TX_CACHEABLE) || !(txn->flags & TX_CACHE_COOK))
		goto out;

	age = 0;
	ctx.blk = NULL;
	if (http_find_header(htx, ist("Age"), &ctx, 0)) {
		if (!strl2llrc(ctx.value.ptr, ctx.value.len, &hdr_age) && hdr_age > 0) {
			if (unlikely(hdr_age > CACHE_ENTRY_MAX_AGE))
				hdr_age = CACHE_ENTRY_MAX_AGE;
			age = hdr_age;
		}
		http_remove_header(htx, &ctx);
	}

	chunk_reset(&trash);
	for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
		struct htx_blk *blk = htx_get_blk(htx, pos);
		enum htx_blk_type type = htx_get_blk_type(blk);
		uint32_t sz = htx_get_blksz(blk);

		hdrs_len += sizeof(*blk) + sz;
		chunk_memcat(&trash, (char *)&blk->info, sizeof(blk->info));
		chunk_memcat(&trash, htx_get_blk_ptr(htx, blk), sz);

		/* Look for optional ETag header.
		 * We need to store the offset of the ETag value in order for
		 * future conditional requests to be able to perform ETag
		 * comparisons. */
		if (type == HTX_BLK_HDR) {
			header_name = htx_get_blk_name(htx, blk);
			if (isteq(header_name, ist("etag"))) {
				etag_length = sz - istlen(header_name);
				etag_offset = sizeof(struct cache_entry) + b_data(&trash) - sz + istlen(header_name);
			}
		}
		if (type == HTX_BLK_EOH)
			break;
	}

	/* Do not cache objects if the headers are too big. */
	if (hdrs_len > htx->size - global.tune.maxrewrite)
		goto out;

	shctx_lock(shctx);
	first = shctx_row_reserve_hot(shctx, NULL, sizeof(struct cache_entry) + trash.data);
	if (!first) {
		shctx_unlock(shctx);
		goto out;
	}
	shctx_unlock(shctx);

	/* the received memory is not initialized, we need at least to mark
	 * the object as not indexed yet.
	 */
	object = (struct cache_entry *)first->data;
	object->eb.node.leaf_p = NULL;
	object->eb.key = 0;
	object->age = age;

	/* reserve space for the cache_entry structure */
	first->len = sizeof(struct cache_entry);
	first->last_append = NULL;
	/* cache the headers in an http action because it allows choosing what
	 * to cache, for example you might want to cache a response before
	 * modifying some HTTP headers, or on the contrary after modifying
	 * those headers.
	 */

	/* Write the ETag information in the cache_entry if needed. */
	object->etag_length = etag_length;
	object->etag_offset = etag_offset;

	/* does not need to be locked because it's in the "hot" list,
	 * copy the headers */
	if (shctx_row_data_append(shctx, first, NULL, (unsigned char *)trash.area, trash.data) < 0)
		goto out;

	/* register the buffer in the filter ctx for filling it with data */
	if (cache_ctx) {
		cache_ctx->first_block = first;

		object->eb.key = key;

		memcpy(object->hash, txn->cache_hash, sizeof(object->hash));
		/* Insert the node later on caching success */

		shctx_lock(shctx);

		old = entry_exist(cconf->c.cache, txn->cache_hash);
		if (old) {
			eb32_delete(&old->eb);
			old->eb.key = 0;
		}
		shctx_unlock(shctx);

		/* store latest value and expiration time */
		object->latest_validation = now.tv_sec;
		object->expire = now.tv_sec + http_calc_maxage(s, cconf->c.cache);
		return ACT_RET_CONT;
	}

out:
	/* if we do not cache the response */
	if (first) {
		shctx_lock(shctx);
		first->len = 0;
		object->eb.key = 0;
		shctx_row_dec_hot(shctx, first);
		shctx_unlock(shctx);
	}

	return ACT_RET_CONT;
}

#define HTX_CACHE_INIT   0  /* Initial state. */
#define HTX_CACHE_HEADER 1  /* Cache entry headers forwarding */
#define HTX_CACHE_DATA   2  /* Cache entry data forwarding */
#define HTX_CACHE_EOM    3  /* Cache entry completely forwarded. Finish the HTX message */
#define HTX_CACHE_END    4  /* Cache entry treatment terminated */

static void http_cache_applet_release(struct appctx *appctx)
{
	struct cache_flt_conf *cconf = appctx->rule->arg.act.p[0];
	struct cache_entry *cache_ptr = appctx->ctx.cache.entry;
	struct cache *cache = cconf->c.cache;
	struct shared_block *first = block_ptr(cache_ptr);

	shctx_lock(shctx_ptr(cache));
	shctx_row_dec_hot(shctx_ptr(cache), first);
	shctx_unlock(shctx_ptr(cache));
}


static unsigned int htx_cache_dump_blk(struct appctx *appctx, struct htx *htx, enum htx_blk_type type,
                                       uint32_t info, struct shared_block *shblk, unsigned int offset)
{
	struct cache_flt_conf *cconf = appctx->rule->arg.act.p[0];
	struct shared_context *shctx = shctx_ptr(cconf->c.cache);
	struct htx_blk *blk;
	char *ptr;
	unsigned int max, total;
	uint32_t blksz;

	max = htx_get_max_blksz(htx, channel_htx_recv_max(si_ic(appctx->owner), htx));
	if (!max)
		return 0;
	blksz = ((type == HTX_BLK_HDR || type == HTX_BLK_TLR)
	         ? (info & 0xff) + ((info >> 8) & 0xfffff)
	         : info & 0xfffffff);
	if (blksz > max)
		return 0;

	blk = htx_add_blk(htx, type, blksz);
	if (!blk)
		return 0;

	blk->info = info;
	total = 4;
	ptr = htx_get_blk_ptr(htx, blk);
	while (blksz) {
		max = MIN(blksz, shctx->block_size - offset);
		memcpy(ptr, (const char *)shblk->data + offset, max);
		offset += max;
		blksz  -= max;
		total  += max;
		ptr    += max;
		if (blksz || offset == shctx->block_size) {
			shblk = LIST_NEXT(&shblk->list, typeof(shblk), list);
			offset = 0;
		}
	}
	appctx->ctx.cache.offset = offset;
	appctx->ctx.cache.next   = shblk;
	appctx->ctx.cache.sent  += total;
	return total;
}

static unsigned int htx_cache_dump_data_blk(struct appctx *appctx, struct htx *htx,
                                            uint32_t info, struct shared_block *shblk, unsigned int offset)
{

	struct cache_flt_conf *cconf = appctx->rule->arg.act.p[0];
	struct shared_context *shctx = shctx_ptr(cconf->c.cache);
	unsigned int max, total, rem_data;
	uint32_t blksz;

	max = htx_get_max_blksz(htx, channel_htx_recv_max(si_ic(appctx->owner), htx));
	if (!max)
		return 0;

	rem_data = 0;
	if (appctx->ctx.cache.rem_data) {
		blksz = appctx->ctx.cache.rem_data;
		total = 0;
	}
	else {
		blksz = (info & 0xfffffff);
		total = 4;
	}
	if (blksz > max) {
		rem_data = blksz - max;
		blksz = max;
	}

	while (blksz) {
		size_t sz;

		max = MIN(blksz, shctx->block_size - offset);
		sz  = htx_add_data(htx, ist2(shblk->data + offset, max));
		offset += sz;
		blksz  -= sz;
		total  += sz;
		if (sz < max)
			break;
		if (blksz || offset == shctx->block_size) {
			shblk = LIST_NEXT(&shblk->list, typeof(shblk), list);
			offset = 0;
		}
	}

	appctx->ctx.cache.offset   = offset;
	appctx->ctx.cache.next     = shblk;
	appctx->ctx.cache.sent    += total;
	appctx->ctx.cache.rem_data = rem_data + blksz;
	return total;
}

static size_t htx_cache_dump_msg(struct appctx *appctx, struct htx *htx, unsigned int len,
                                 enum htx_blk_type mark)
{
	struct cache_flt_conf *cconf = appctx->rule->arg.act.p[0];
	struct shared_context *shctx = shctx_ptr(cconf->c.cache);
	struct shared_block   *shblk;
	unsigned int offset, sz;
	unsigned int ret, total = 0;

	while (len) {
		enum htx_blk_type type;
		uint32_t info;

		shblk  = appctx->ctx.cache.next;
		offset = appctx->ctx.cache.offset;
		if (appctx->ctx.cache.rem_data) {
			type = HTX_BLK_DATA;
			info = 0;
			goto add_data_blk;
		}

		/* Get info of the next HTX block. It may be split across 2 shblks */
		sz = MIN(4, shctx->block_size - offset);
		memcpy((char *)&info, (const char *)shblk->data + offset, sz);
		offset += sz;
		if (sz < 4) {
			shblk = LIST_NEXT(&shblk->list, typeof(shblk), list);
			memcpy(((char *)&info)+sz, (const char *)shblk->data, 4 - sz);
			offset = (4 - sz);
		}

		/* Get payload of the next HTX block and insert it. */
		type = (info >> 28);
		if (type != HTX_BLK_DATA)
			ret = htx_cache_dump_blk(appctx, htx, type, info, shblk, offset);
		else {
		  add_data_blk:
			ret = htx_cache_dump_data_blk(appctx, htx, info, shblk, offset);
		}

		if (!ret)
			break;
		total += ret;
		len   -= ret;

		if (appctx->ctx.cache.rem_data || type == mark)
			break;
	}

	return total;
}

static int htx_cache_add_age_hdr(struct appctx *appctx, struct htx *htx)
{
	struct cache_entry *cache_ptr = appctx->ctx.cache.entry;
	unsigned int age;
	char *end;

	chunk_reset(&trash);
	age = MAX(0, (int)(now.tv_sec - cache_ptr->latest_validation)) + cache_ptr->age;
	if (unlikely(age > CACHE_ENTRY_MAX_AGE))
		age = CACHE_ENTRY_MAX_AGE;
	end = ultoa_o(age, b_head(&trash), b_size(&trash));
	b_set_data(&trash, end - b_head(&trash));
	if (!http_add_header(htx, ist("Age"), ist2(b_head(&trash), b_data(&trash))))
		return 0;
	return 1;
}
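
/* Worked example (illustrative): an entry validated 10 seconds ago
 * (latest_validation == now - 10) whose origin response already carried
 * "Age: 5" is served with "Age: 15", capped at CACHE_ENTRY_MAX_AGE. */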

static void http_cache_io_handler(struct appctx *appctx)
{
	struct cache_entry *cache_ptr = appctx->ctx.cache.entry;
	struct shared_block *first = block_ptr(cache_ptr);
	struct stream_interface *si = appctx->owner;
	struct channel *req = si_oc(si);
	struct channel *res = si_ic(si);
	struct htx *req_htx, *res_htx;
	struct buffer *errmsg;
	unsigned int len;
	size_t ret, total = 0;

	res_htx = htxbuf(&res->buf);
	total = res_htx->data;

	if (unlikely(si->state == SI_ST_DIS || si->state == SI_ST_CLO))
		goto out;

	/* Check if the input buffer is available. */
	if (!b_size(&res->buf)) {
		si_rx_room_blk(si);
		goto out;
	}

	if (res->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTW_NOW))
		appctx->st0 = HTX_CACHE_END;

	if (appctx->st0 == HTX_CACHE_INIT) {
		appctx->ctx.cache.next = block_ptr(cache_ptr);
		appctx->ctx.cache.offset = sizeof(*cache_ptr);
		appctx->ctx.cache.sent = 0;
		appctx->ctx.cache.rem_data = 0;
		appctx->st0 = HTX_CACHE_HEADER;
	}

	if (appctx->st0 == HTX_CACHE_HEADER) {
		/* Headers must be dumped all at once. Otherwise it is an error */
		len = first->len - sizeof(*cache_ptr) - appctx->ctx.cache.sent;
		ret = htx_cache_dump_msg(appctx, res_htx, len, HTX_BLK_EOH);
		if (!ret || (htx_get_tail_type(res_htx) != HTX_BLK_EOH) ||
		    !htx_cache_add_age_hdr(appctx, res_htx))
			goto error;

		/* In case of a conditional request, we might want to send a
		 * "304 Not Modified" response instead of the stored data. */
		if (appctx->ctx.cache.send_notmodified)
			http_replace_res_status(res_htx, ist("304"), ist("Not Modified"));

		/* Skip response body for HEAD requests or in case of "304 Not
		 * Modified" response. */
		if (si_strm(si)->txn->meth == HTTP_METH_HEAD || appctx->ctx.cache.send_notmodified)
			appctx->st0 = HTX_CACHE_EOM;
		else
			appctx->st0 = HTX_CACHE_DATA;
	}

	if (appctx->st0 == HTX_CACHE_DATA) {
		len = first->len - sizeof(*cache_ptr) - appctx->ctx.cache.sent;
		if (len) {
			ret = htx_cache_dump_msg(appctx, res_htx, len, HTX_BLK_EOM);
			if (ret < len) {
				si_rx_room_blk(si);
				goto out;
			}
		}
		appctx->st0 = HTX_CACHE_END;
	}

	if (appctx->st0 == HTX_CACHE_EOM) {
		res_htx->flags |= HTX_FL_EOI; /* no more data are expected. Only EOM remains to add now */
		if (!htx_add_endof(res_htx, HTX_BLK_EOM)) {
			si_rx_room_blk(si);
			goto out;
		}
		appctx->st0 = HTX_CACHE_END;
	}

  end:
	if (!(res->flags & CF_SHUTR) && appctx->st0 == HTX_CACHE_END) {
		res->flags |= CF_READ_NULL;
		si_shutr(si);
	}

  out:
	total = res_htx->data - total;
	if (total)
		channel_add_input(res, total);
	htx_to_buf(res_htx, &res->buf);

	/* eat the whole request */
	if (co_data(req)) {
		req_htx = htx_from_buf(&req->buf);
		co_htx_skip(req, req_htx, co_data(req));
		htx_to_buf(req_htx, &req->buf);
	}
	return;

  error:
	/* Send an HTTP error 500 */
	b_reset(&res->buf);
	errmsg = &http_err_chunks[HTTP_ERR_500];
	res->buf.data = b_data(errmsg);
	memcpy(res->buf.area, b_head(errmsg), b_data(errmsg));
	res_htx = htx_from_buf(&res->buf);

	total = 0;
	appctx->st0 = HTX_CACHE_END;
	goto end;
}


static int parse_cache_rule(struct proxy *proxy, const char *name, struct act_rule *rule, char **err)
{
	struct flt_conf *fconf;
	struct cache_flt_conf *cconf = NULL;

	if (!*name || strcmp(name, "if") == 0 || strcmp(name, "unless") == 0) {
		memprintf(err, "expects a cache name");
		goto err;
	}

	/* check if a cache filter was already registered with this cache
	 * name; if that's the case, we must use it. */
	list_for_each_entry(fconf, &proxy->filter_configs, list) {
		if (fconf->id == cache_store_flt_id) {
			cconf = fconf->conf;
			if (cconf && !strcmp((char *)cconf->c.name, name)) {
				rule->arg.act.p[0] = cconf;
				return 1;
			}
		}
	}

	/* Create the filter cache config */
	cconf = calloc(1, sizeof(*cconf));
	if (!cconf) {
		memprintf(err, "out of memory\n");
		goto err;
	}
	cconf->flags = CACHE_FLT_F_IMPLICIT_DECL;
	cconf->c.name = strdup(name);
	if (!cconf->c.name) {
		memprintf(err, "out of memory\n");
		goto err;
	}

	/* register a filter to fill the cache buffer */
	fconf = calloc(1, sizeof(*fconf));
	if (!fconf) {
		memprintf(err, "out of memory\n");
		goto err;
	}
	fconf->id   = cache_store_flt_id;
	fconf->conf = cconf;
	fconf->ops  = &cache_ops;
	LIST_ADDQ(&proxy->filter_configs, &fconf->list);

	rule->arg.act.p[0] = cconf;
	return 1;

  err:
	free(cconf);
	return 0;
}

enum act_parse_ret parse_cache_store(const char **args, int *orig_arg, struct proxy *proxy,
                                     struct act_rule *rule, char **err)
{
	rule->action     = ACT_CUSTOM;
	rule->action_ptr = http_action_store_cache;

	if (!parse_cache_rule(proxy, args[*orig_arg], rule, err))
		return ACT_RET_PRS_ERR;

	(*orig_arg)++;
	return ACT_RET_PRS_OK;
}

/* This produces a sha1 hash of the concatenation of the HTTP method and the
 * URI: either the absolute URI when the request carries an authority, or
 * "https://" + the first occurrence of the Host header + the relative URI
 * otherwise. */
int sha1_hosturi(struct stream *s)
{
	struct http_txn *txn = s->txn;
	struct htx *htx = htxbuf(&s->req.buf);
	struct htx_sl *sl;
	struct http_hdr_ctx ctx;
	struct ist uri;
	blk_SHA_CTX sha1_ctx;
	struct buffer *trash;

	trash = get_trash_chunk();
	ctx.blk = NULL;

	switch (txn->meth) {
	case HTTP_METH_HEAD:
	case HTTP_METH_GET:
		chunk_memcat(trash, "GET", 3);
		break;
	default:
		return 0;
	}

	sl = http_get_stline(htx);
	uri = htx_sl_req_uri(sl); // whole uri
	if (!uri.len)
		return 0;

	/* In HTTP/1, most URIs are seen in origin form ('/path/to/resource'),
	 * unless haproxy is deployed in front of an outbound cache. In HTTP/2,
	 * URIs are almost always sent in absolute form with their scheme. In
	 * this case, the scheme is almost always "https". In order to support
	 * sharing of cache objects between H1 and H2, we'll hash the absolute
	 * URI whenever known, or prepend "https://" + the Host header for
	 * relative URIs. The difference will only appear on absolute HTTP/1
	 * requests sent to an origin server, which practically is never met in
	 * the real world so we don't care about the ability to share the same
	 * key here. URIs are normalized from the absolute URI to an origin
	 * form as well.
	 */
	if (!(sl->flags & HTX_SL_F_HAS_AUTHORITY)) {
		chunk_istcat(trash, ist("https://"));
		if (!http_find_header(htx, ist("Host"), &ctx, 0))
			return 0;
		chunk_istcat(trash, ctx.value);
	}

	chunk_memcat(trash, uri.ptr, uri.len);

	/* hash everything */
	blk_SHA1_Init(&sha1_ctx);
	blk_SHA1_Update(&sha1_ctx, trash->area, trash->data);
	blk_SHA1_Final((unsigned char *)txn->cache_hash, &sha1_ctx);

	return 1;
}
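
/* Example (illustrative): for "GET /img/logo.png HTTP/1.1" with
 * "Host: www.example.com", the hashed text is
 * "GEThttps://www.example.com/img/logo.png". entry_exist() later uses the
 * first 32 bits of the resulting SHA1 digest as the ebtree key. */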

/* Looks for "If-None-Match" headers in the request and compares their value
 * with the one that might have been stored in the cache_entry. If any of them
 * matches, a "304 Not Modified" response should be sent instead of the cached
 * data.
 * Although unlikely in a GET/HEAD request, the "If-None-Match: *" syntax is
 * valid and should receive a "304 Not Modified" response (RFC 7234#4.3.2).
 * Returns 1 if "304 Not Modified" should be sent, 0 otherwise.
 */
static int should_send_notmodified_response(struct cache *cache, struct htx *htx,
                                            struct cache_entry *entry)
{
	int retval = 0;

	struct http_hdr_ctx ctx = { .blk = NULL };
	struct ist cache_entry_etag = IST_NULL;
	struct buffer *etag_buffer = NULL;

	if (entry->etag_length == 0)
		return 0;

	/* If we find a "If-None-Match" header in the request, rebuild the
	 * cache_entry's ETag in order to perform comparisons. */
	/* There could be multiple "if-none-match" header lines. */
	while (http_find_header(htx, ist("if-none-match"), &ctx, 0)) {

		/* A '*' matches everything. */
		if (isteq(ctx.value, ist("*")) != 0) {
			retval = 1;
			break;
		}

		/* Rebuild the stored ETag. */
		if (etag_buffer == NULL) {
			etag_buffer = get_trash_chunk();

			if (shctx_row_data_get(shctx_ptr(cache), block_ptr(entry),
			                       (unsigned char*)b_orig(etag_buffer),
			                       entry->etag_offset, entry->etag_length) == 0) {
				cache_entry_etag = ist2(b_orig(etag_buffer), entry->etag_length);
			} else {
				/* We could not rebuild the ETag in one go, we
				 * won't send a "304 Not Modified" response. */
				break;
			}
		}

		if (http_compare_etags(cache_entry_etag, ctx.value) == 1) {
			retval = 1;
			break;
		}
	}

	return retval;
}
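
/* Example (illustrative): if the stored response carried 'ETag: "abc"' and a
 * later request sends 'If-None-Match: "xyz", "abc"', the second candidate
 * matches and the applet will emit a "304 Not Modified" instead of the
 * cached body. */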

enum act_return http_action_req_cache_use(struct act_rule *rule, struct proxy *px,
					   struct session *sess, struct stream *s, int flags)
{

	struct http_txn *txn = s->txn;
	struct cache_entry *res;
	struct cache_flt_conf *cconf = rule->arg.act.p[0];
	struct cache *cache = cconf->c.cache;

	/* Ignore cache for HTTP/1.0 requests and for requests other than GET
	 * and HEAD */
	if (!(txn->req.flags & HTTP_MSGF_VER_11) ||
	    (txn->meth != HTTP_METH_GET && txn->meth != HTTP_METH_HEAD))
		txn->flags |= TX_CACHE_IGNORE;

	http_check_request_for_cacheability(s, &s->req);

	if ((s->txn->flags & (TX_CACHE_IGNORE|TX_CACHEABLE)) == TX_CACHE_IGNORE)
		return ACT_RET_CONT;

	if (!sha1_hosturi(s))
		return ACT_RET_CONT;

	if (s->txn->flags & TX_CACHE_IGNORE)
		return ACT_RET_CONT;

	if (px == strm_fe(s))
		_HA_ATOMIC_ADD(&px->fe_counters.p.http.cache_lookups, 1);
	else
		_HA_ATOMIC_ADD(&px->be_counters.p.http.cache_lookups, 1);

	shctx_lock(shctx_ptr(cache));
	res = entry_exist(cache, s->txn->cache_hash);
	if (res) {
		struct appctx *appctx;
		shctx_row_inc_hot(shctx_ptr(cache), block_ptr(res));
		shctx_unlock(shctx_ptr(cache));
		s->target = &http_cache_applet.obj_type;
		if ((appctx = si_register_handler(&s->si[1], objt_applet(s->target)))) {
			appctx->st0 = HTX_CACHE_INIT;
			appctx->rule = rule;
			appctx->ctx.cache.entry = res;
			appctx->ctx.cache.next = NULL;
			appctx->ctx.cache.sent = 0;
			appctx->ctx.cache.send_notmodified =
				should_send_notmodified_response(cache, htxbuf(&s->req.buf), res);

			if (px == strm_fe(s))
				_HA_ATOMIC_ADD(&px->fe_counters.p.http.cache_hits, 1);
			else
				_HA_ATOMIC_ADD(&px->be_counters.p.http.cache_hits, 1);
			return ACT_RET_CONT;
		} else {
			shctx_lock(shctx_ptr(cache));
			shctx_row_dec_hot(shctx_ptr(cache), block_ptr(res));
			shctx_unlock(shctx_ptr(cache));
			return ACT_RET_YIELD;
		}
	}
	shctx_unlock(shctx_ptr(cache));
	return ACT_RET_CONT;
}
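
/* Note on the lookup above: on a hit, the entry is moved to the "hot" list
 * so it cannot be evicted while being served. If the applet cannot be
 * registered yet, the refcount is dropped again and ACT_RET_YIELD is
 * returned, which is expected to make the rule engine call this action
 * again later and redo the lookup from scratch.
 */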


enum act_parse_ret parse_cache_use(const char **args, int *orig_arg, struct proxy *proxy,
				   struct act_rule *rule, char **err)
{
	rule->action = ACT_CUSTOM;
	rule->action_ptr = http_action_req_cache_use;

	if (!parse_cache_rule(proxy, args[*orig_arg], rule, err))
		return ACT_RET_PRS_ERR;

	(*orig_arg)++;
	return ACT_RET_PRS_OK;
}
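
/* Illustrative configuration: the "cache-use" action parsed above is
 * normally paired with a "cache-store" response action pointing at the
 * same cache section, e.g.:
 *
 *   frontend fe
 *       bind :8080
 *       http-request cache-use mycache
 *       http-response cache-store mycache
 *       default_backend be
 */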

int cfg_parse_cache(const char *file, int linenum, char **args, int kwm)
{
	int err_code = 0;

	if (strcmp(args[0], "cache") == 0) { /* new cache section */

		if (!*args[1]) {
			ha_alert("parsing [%s:%d] : '%s' expects a <name> argument\n",
				 file, linenum, args[0]);
			err_code |= ERR_ALERT | ERR_ABORT;
			goto out;
		}

		if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
			err_code |= ERR_ABORT;
			goto out;
		}

		if (tmp_cache_config == NULL) {
			struct cache *cache_config;

			tmp_cache_config = calloc(1, sizeof(*tmp_cache_config));
			if (!tmp_cache_config) {
				ha_alert("parsing [%s:%d]: out of memory.\n", file, linenum);
				err_code |= ERR_ALERT | ERR_ABORT;
				goto out;
			}

			strlcpy2(tmp_cache_config->id, args[1], 33);
			if (strlen(args[1]) > 32) {
				ha_warning("parsing [%s:%d]: cache name is limited to 32 characters, truncated to '%s'.\n",
					   file, linenum, tmp_cache_config->id);
				err_code |= ERR_WARN;
			}

			list_for_each_entry(cache_config, &caches_config, list) {
				if (strcmp(tmp_cache_config->id, cache_config->id) == 0) {
					ha_alert("parsing [%s:%d]: Duplicate cache name '%s'.\n",
						 file, linenum, tmp_cache_config->id);
					err_code |= ERR_ALERT | ERR_ABORT;
					goto out;
				}
			}

			tmp_cache_config->maxage = 60;
			tmp_cache_config->maxblocks = 0;
			tmp_cache_config->maxobjsz = 0;
		}
	} else if (strcmp(args[0], "total-max-size") == 0) {
		unsigned long int maxsize;
		char *err;

		if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
			err_code |= ERR_ABORT;
			goto out;
		}

		maxsize = strtoul(args[1], &err, 10);
		if (err == args[1] || *err != '\0') {
			ha_warning("parsing [%s:%d]: invalid value '%s' for total-max-size\n",
				   file, linenum, args[1]);
			err_code |= ERR_ABORT;
			goto out;
		}

		if (maxsize > (UINT_MAX >> 20)) {
			ha_warning("parsing [%s:%d]: \"total-max-size\" (%s) must not be greater than %u\n",
				   file, linenum, args[1], UINT_MAX >> 20);
			err_code |= ERR_ABORT;
			goto out;
		}

		/* size in megabytes */
		maxsize *= 1024 * 1024 / CACHE_BLOCKSIZE;
		tmp_cache_config->maxblocks = maxsize;
	} else if (strcmp(args[0], "max-age") == 0) {
		if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
			err_code |= ERR_ABORT;
			goto out;
		}

		if (!*args[1]) {
			ha_warning("parsing [%s:%d]: '%s' expects an age parameter in seconds.\n",
				   file, linenum, args[0]);
			err_code |= ERR_WARN;
		}

		tmp_cache_config->maxage = atoi(args[1]);
	} else if (strcmp(args[0], "max-object-size") == 0) {
		unsigned int maxobjsz;
		char *err;

		if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
			err_code |= ERR_ABORT;
			goto out;
		}

		if (!*args[1]) {
			ha_warning("parsing [%s:%d]: '%s' expects a maximum file size parameter in bytes.\n",
				   file, linenum, args[0]);
			err_code |= ERR_WARN;
		}

		maxobjsz = strtoul(args[1], &err, 10);
		if (err == args[1] || *err != '\0') {
			ha_warning("parsing [%s:%d]: invalid value '%s' for max-object-size\n",
				   file, linenum, args[1]);
			err_code |= ERR_ABORT;
			goto out;
		}
		tmp_cache_config->maxobjsz = maxobjsz;
	}
	else if (*args[0] != 0) {
		ha_alert("parsing [%s:%d] : unknown keyword '%s' in 'cache' section\n", file, linenum, args[0]);
		err_code |= ERR_ALERT | ERR_FATAL;
		goto out;
	}
out:
	return err_code;
}
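
/* Illustrative "cache" section covering the keywords parsed above:
 *
 *   cache mycache
 *       total-max-size 4        # shared memory size, in megabytes
 *       max-age 60              # cached objects validity, in seconds
 *       max-object-size 16384   # largest cacheable object, in bytes
 */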

/* once the cache section is parsed */

int cfg_post_parse_section_cache()
{
	int err_code = 0;

	if (tmp_cache_config) {

		if (tmp_cache_config->maxblocks <= 0) {
			ha_alert("Size not specified for cache '%s'\n", tmp_cache_config->id);
			err_code |= ERR_FATAL | ERR_ALERT;
			goto out;
		}

		if (!tmp_cache_config->maxobjsz) {
			/* Default max. file size is a 256th of the cache size. */
			tmp_cache_config->maxobjsz =
				(tmp_cache_config->maxblocks * CACHE_BLOCKSIZE) >> 8;
		}
		else if (tmp_cache_config->maxobjsz > tmp_cache_config->maxblocks * CACHE_BLOCKSIZE / 2) {
			ha_alert("\"max-object-size\" is limited to half of \"total-max-size\" => %u\n", tmp_cache_config->maxblocks * CACHE_BLOCKSIZE / 2);
			err_code |= ERR_FATAL | ERR_ALERT;
			goto out;
		}

		/* Add to the list of caches to init and reinit tmp_cache_config
		 * for the next cache section, if any.
		 */
		LIST_ADDQ(&caches_config, &tmp_cache_config->list);
		tmp_cache_config = NULL;
		return err_code;
	}
out:
	free(tmp_cache_config);
	tmp_cache_config = NULL;
	return err_code;
}
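
/* Worked example for the sizing rules above: with "total-max-size 4"
 * (4 MiB of shared memory), the default "max-object-size" is
 * 4 MiB / 256 = 16384 bytes, and an explicit value may not exceed
 * 4 MiB / 2 = 2 MiB.
 */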

int post_check_cache()
{
	struct proxy *px;
	struct cache *back, *cache_config, *cache;
	struct shared_context *shctx;
	int ret_shctx;
	int err_code = 0;

	list_for_each_entry_safe(cache_config, back, &caches_config, list) {

		ret_shctx = shctx_init(&shctx, cache_config->maxblocks, CACHE_BLOCKSIZE,
				       cache_config->maxobjsz, sizeof(struct cache), 1);

		if (ret_shctx <= 0) {
			if (ret_shctx == SHCTX_E_INIT_LOCK)
				ha_alert("Unable to initialize the lock for the cache.\n");
			else
				ha_alert("Unable to allocate cache.\n");

			err_code |= ERR_FATAL | ERR_ALERT;
			goto out;
		}
		shctx->free_block = cache_free_blocks;
		/* The cache structure is stored in the shctx and added to the
		 * caches list, so the entry can be removed from the
		 * caches_config list. */
		memcpy(shctx->data, cache_config, sizeof(struct cache));
		cache = (struct cache *)shctx->data;
		cache->entries = EB_ROOT_UNIQUE;
		LIST_ADDQ(&caches, &cache->list);
		LIST_DEL(&cache_config->list);
		free(cache_config);

		/* Find all references for this cache in the existing filters
		 * (over all proxies) and reference it in matching filters.
		 */
		for (px = proxies_list; px; px = px->next) {
			struct flt_conf *fconf;
			struct cache_flt_conf *cconf;

			list_for_each_entry(fconf, &px->filter_configs, list) {
				if (fconf->id != cache_store_flt_id)
					continue;

				cconf = fconf->conf;
				if (!strcmp(cache->id, cconf->c.name)) {
					free(cconf->c.name);
					cconf->flags |= CACHE_FLT_INIT;
					cconf->c.cache = cache;
					break;
				}
			}
		}
	}

out:
	return err_code;
}
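
/* Memory layout sketch for the shctx_init() call above (assuming the usual
 * shctx layout, where <extra> bytes are reserved right after the header):
 *
 *   [ struct shared_context | struct cache (shctx->data) | blocks ... ]
 *
 * This is why the cache descriptor itself lives in shared memory and can
 * simply be memcpy'd into shctx->data.
 */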

struct flt_ops cache_ops = {
	.init = cache_store_init,
	.check = cache_store_check,
	.deinit = cache_store_deinit,

	/* Handle stream init/deinit */
	.attach = cache_store_strm_init,
	.detach = cache_store_strm_deinit,

	/* Handle channels activity */
	.channel_post_analyze = cache_store_post_analyze,

	/* Filter HTTP requests and responses */
	.http_headers = cache_store_http_headers,
	.http_payload = cache_store_http_payload,
	.http_end = cache_store_http_end,
};


static int
parse_cache_flt(char **args, int *cur_arg, struct proxy *px,
		struct flt_conf *fconf, char **err, void *private)
{
	struct flt_conf *f, *back;
	struct cache_flt_conf *cconf = NULL;
	char *name = NULL;
	int pos = *cur_arg;

	/* Get the cache filter name. <pos> points to the "cache" keyword */
	if (!*args[pos + 1]) {
		memprintf(err, "%s : expects a <name> argument", args[pos]);
		goto error;
	}
	name = strdup(args[pos + 1]);
	if (!name) {
		memprintf(err, "%s '%s' : out of memory", args[pos], args[pos + 1]);
		goto error;
	}
	pos += 2;

	/* Check if an implicit filter with the same name already exists. If so,
	 * remove the implicit filter and use the explicit one. */
	list_for_each_entry_safe(f, back, &px->filter_configs, list) {
		if (f->id != cache_store_flt_id)
			continue;

		cconf = f->conf;
		if (strcmp(name, cconf->c.name)) {
			cconf = NULL;
			continue;
		}

		if (!(cconf->flags & CACHE_FLT_F_IMPLICIT_DECL)) {
			cconf = NULL;
			memprintf(err, "%s: multiple explicit declarations of the cache filter '%s'",
				  px->id, name);
			goto error;
		}

		/* Remove the implicit filter. <cconf> is kept for the explicit one */
		LIST_DEL(&f->list);
		free(f);
		free(name);
		break;
	}

	/* No implicit cache filter found, create configuration for the explicit one */
	if (!cconf) {
		cconf = calloc(1, sizeof(*cconf));
		if (!cconf) {
			memprintf(err, "%s: out of memory", args[*cur_arg]);
			goto error;
		}
		cconf->c.name = name;
	}

	cconf->flags = 0;
	fconf->id = cache_store_flt_id;
	fconf->conf = cconf;
	fconf->ops = &cache_ops;

	*cur_arg = pos;
	return 0;

 error:
	free(name);
	free(cconf);
	return -1;
}
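
/* Illustrative use of the parser above: it handles an explicit filter
 * declaration such as
 *
 *   backend be
 *       filter cache mycache
 *       http-request cache-use mycache
 *       http-response cache-store mycache
 *
 * Without the "filter cache" line, an implicit filter (flagged
 * CACHE_FLT_F_IMPLICIT_DECL) is presumably created when the cache actions
 * are parsed, and it is replaced here when an explicit declaration shows up.
 */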
1572
Aurélien Nephtaliabbf6072018-04-18 13:26:46 +02001573static int cli_parse_show_cache(char **args, char *payload, struct appctx *appctx, void *private)
William Lallemand1f49a362017-11-21 20:01:26 +01001574{
1575 if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
1576 return 1;
1577
1578 return 0;
1579}

static int cli_io_handler_show_cache(struct appctx *appctx)
{
	struct cache* cache = appctx->ctx.cli.p0;
	struct stream_interface *si = appctx->owner;

	if (cache == NULL) {
		cache = LIST_ELEM((caches).n, typeof(struct cache *), list);
	}

	list_for_each_entry_from(cache, &caches, list) {
		struct eb32_node *node = NULL;
		unsigned int next_key;
		struct cache_entry *entry;

		next_key = appctx->ctx.cli.i0;
		if (!next_key) {
			chunk_printf(&trash, "%p: %s (shctx:%p, available blocks:%d)\n", cache, cache->id, shctx_ptr(cache), shctx_ptr(cache)->nbav);
			if (ci_putchk(si_ic(si), &trash) == -1) {
				si_rx_room_blk(si);
				return 0;
			}
		}

		appctx->ctx.cli.p0 = cache;

		while (1) {

			shctx_lock(shctx_ptr(cache));
			node = eb32_lookup_ge(&cache->entries, next_key);
			if (!node) {
				shctx_unlock(shctx_ptr(cache));
				appctx->ctx.cli.i0 = 0;
				break;
			}

			entry = container_of(node, struct cache_entry, eb);
			chunk_printf(&trash, "%p hash:%u size:%u (%u blocks), refcount:%u, expire:%d\n", entry, read_u32(entry->hash), block_ptr(entry)->len, block_ptr(entry)->block_count, block_ptr(entry)->refcount, entry->expire - (int)now.tv_sec);

			next_key = node->key + 1;
			appctx->ctx.cli.i0 = next_key;

			shctx_unlock(shctx_ptr(cache));

			if (ci_putchk(si_ic(si), &trash) == -1) {
				si_rx_room_blk(si);
				return 0;
			}
		}

	}

	return 1;
}
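
/* Sample "show cache" output (illustrative values), built from the two
 * chunk_printf() formats above:
 *
 *   0x7f0e40000e00: mycache (shctx:0x7f0e40000a00, available blocks:3996)
 *   0x7f0e40011200 hash:3932558457 size:1764 (2 blocks), refcount:0, expire:42
 */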

/* Declare the filter parser for "cache" keyword */
static struct flt_kw_list filter_kws = { "CACHE", { }, {
		{ "cache", parse_cache_flt, NULL },
		{ NULL, NULL, NULL },
	}
};

INITCALL1(STG_REGISTER, flt_register_keywords, &filter_kws);

static struct cli_kw_list cli_kws = {{},{
	{ { "show", "cache", NULL }, "show cache : show cache status", cli_parse_show_cache, cli_io_handler_show_cache, NULL, NULL },
	{{},}
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);

static struct action_kw_list http_res_actions = {
	.kw = {
		{ "cache-store", parse_cache_store },
		{ NULL, NULL }
	}
};

INITCALL1(STG_REGISTER, http_res_keywords_register, &http_res_actions);

static struct action_kw_list http_req_actions = {
	.kw = {
		{ "cache-use", parse_cache_use },
		{ NULL, NULL }
	}
};

INITCALL1(STG_REGISTER, http_req_keywords_register, &http_req_actions);

struct applet http_cache_applet = {
	.obj_type = OBJ_TYPE_APPLET,
	.name = "<CACHE>", /* used for logging */
	.fct = http_cache_io_handler,
	.release = http_cache_applet_release,
};

/* config parsers for this section */
REGISTER_CONFIG_SECTION("cache", cfg_parse_cache, cfg_post_parse_section_cache);
REGISTER_POST_CHECK(post_check_cache);