/*
 * Cache management
 *
 * Copyright 2017 HAProxy Technologies
 * William Lallemand <wlallemand@haproxy.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <eb32tree.h>
#include <import/sha1.h>

#include <types/action.h>
#include <types/cli.h>
#include <types/filters.h>
#include <types/proxy.h>
#include <types/shctx.h>

#include <proto/channel.h>
#include <proto/cli.h>
#include <proto/proxy.h>
#include <proto/hdr_idx.h>
#include <proto/http_htx.h>
#include <proto/filters.h>
#include <proto/http_rules.h>
#include <proto/proto_http.h>
#include <proto/log.h>
#include <proto/stream.h>
#include <proto/stream_interface.h>
#include <proto/shctx.h>


#include <common/cfgparse.h>
#include <common/hash.h>
#include <common/htx.h>
#include <common/initcall.h>

#define CACHE_FLT_F_IMPLICIT_DECL  0x00000001 /* The cache filter was implicitly declared (ie without
                                               * the filter keyword) */

const char *cache_store_flt_id = "cache store filter";

extern struct applet http_cache_applet;

struct flt_ops cache_ops;

struct cache {
	struct list list;        /* cache linked list */
	struct eb_root entries;  /* head of cache entries based on keys */
	unsigned int maxage;     /* max-age */
	unsigned int maxblocks;
	unsigned int maxobjsz;   /* max-object-size (in bytes) */
	char id[33];             /* cache name */
};

/* cache config for filters */
struct cache_flt_conf {
	union {
		struct cache *cache; /* cache used by the filter */
		char *name;          /* cache name used during conf parsing */
	} c;
	unsigned int flags;   /* CACHE_FLT_F_* */
};

/*
 * cache ctx for filters
 */
struct cache_st {
	int hdrs_len; // field used in legacy mode only
	struct shared_block *first_block;
};

struct cache_entry {
	unsigned int latest_validation;     /* latest validation date */
	unsigned int expire;      /* expiration date */
	unsigned int age;         /* Origin server "Age" header value */
	unsigned int eoh;         /* Origin server end of headers offset. */ // field used in legacy mode only

	struct eb32_node eb;     /* ebtree node used to hold the cache object */
	char hash[20];
	unsigned char data[0];
};

#define CACHE_BLOCKSIZE 1024
#define CACHE_ENTRY_MAX_AGE 2147483648U

static struct list caches = LIST_HEAD_INIT(caches);
static struct cache *tmp_cache_config = NULL;

DECLARE_STATIC_POOL(pool_head_cache_st, "cache_st", sizeof(struct cache_st));

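/* Look up a cache entry by its hash in <cache>. Returns the entry if it is
 * found and not expired. An expired entry is removed from the tree and NULL
 * is returned.
 */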
struct cache_entry *entry_exist(struct cache *cache, char *hash)
{
	struct eb32_node *node;
	struct cache_entry *entry;

	node = eb32_lookup(&cache->entries, (*(unsigned int *)hash));
	if (!node)
		return NULL;

	entry = eb32_entry(node, struct cache_entry, eb);

	/* if that's not the right node */
	if (memcmp(entry->hash, hash, sizeof(entry->hash)))
		return NULL;

	if (entry->expire > now.tv_sec) {
		return entry;
	} else {
		eb32_delete(node);
		entry->eb.key = 0;
	}
	return NULL;

}

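/* Map a cache back to the shared_context that embeds it in its <data> area. */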
static inline struct shared_context *shctx_ptr(struct cache *cache)
{
	return (struct shared_context *)((unsigned char *)cache - ((struct shared_context *)NULL)->data);
}

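/* Map a cache entry back to the shared_block that carries it in its <data> area. */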
static inline struct shared_block *block_ptr(struct cache_entry *entry)
{
	return (struct shared_block *)((unsigned char *)entry - ((struct shared_block *)NULL)->data);
}



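/* Initialize the cache store filter for proxy <px>: mark the filter config as
 * HTX-capable. Always succeeds.
 */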
static int
cache_store_init(struct proxy *px, struct flt_conf *fconf)
{
	fconf->flags |= FLT_CFG_FL_HTX;
	return 0;
}

static void
cache_store_deinit(struct proxy *px, struct flt_conf *fconf)
{
	struct cache_flt_conf *cconf = fconf->conf;

	free(cconf);
}

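/* Post-parsing check for the cache store filter: resolve the cache name to a
 * cache pointer and verify the filter ordering with the compression filter.
 */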
static int
cache_store_check(struct proxy *px, struct flt_conf *fconf)
{
	struct cache_flt_conf *cconf = fconf->conf;
	struct flt_conf *f;
	struct cache *cache;
	int comp = 0;

	/* resolve the cache name to a ptr in the filter config */
	list_for_each_entry(cache, &caches, list) {
		if (!strcmp(cache->id, cconf->c.name)) {
			free(cconf->c.name);
			cconf->c.cache = cache;
			goto found;
		}
	}

	ha_alert("config: %s '%s': unable to find the cache '%s' referenced by the filter 'cache'.\n",
		 proxy_type_str(px), px->id, (char *)cconf->c.name);
	return 1;

  found:
	/* Here <cache> points on the cache the filter must use and <cconf>
	 * points on the cache filter configuration. */

	/* Check all filters for proxy <px> to know if the compression is
	 * enabled and if it is after the cache. When the compression is before
	 * the cache, an error is returned. Also check if the cache filter must
	 * be explicitly declared or not. */
	list_for_each_entry(f, &px->filter_configs, list) {
		if (f == fconf) {
			/* The compression filter must be evaluated after the cache. */
			if (comp) {
				ha_alert("config: %s '%s': unable to enable the compression filter before "
					 "the cache '%s'.\n", proxy_type_str(px), px->id, cache->id);
				return 1;
			}
		}
		else if (f->id == http_comp_flt_id)
			comp = 1;
		else if ((f->id != fconf->id) && (cconf->flags & CACHE_FLT_F_IMPLICIT_DECL)) {
			/* Implicit declaration is only allowed with the
			 * compression. For other filters, an explicit
			 * declaration is required. */
			ha_alert("config: %s '%s': require an explicit filter declaration "
				 "to use the cache '%s'.\n", proxy_type_str(px), px->id, cache->id);
			return 1;
		}

	}
	return 0;
}

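/* Called when analysis starts on a channel. On the response channel, allocate
 * the filter context and register the post-analyzer on AN_RES_WAIT_HTTP.
 */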
static int
cache_store_chn_start_analyze(struct stream *s, struct filter *filter, struct channel *chn)
{
	if (!(chn->flags & CF_ISRESP))
		return 1;

	if (filter->ctx == NULL) {
		struct cache_st *st;

		st = pool_alloc_dirty(pool_head_cache_st);
		if (st == NULL)
			return -1;

		st->hdrs_len    = 0;
		st->first_block = NULL;
		filter->ctx     = st;

		/* Register post-analyzer on AN_RES_WAIT_HTTP */
		filter->post_analyzers |= AN_RES_WAIT_HTTP;
	}

	return 1;
}

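/* Called when analysis ends on a channel. Release the hot shared block, if
 * any, and free the filter context.
 */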
static int
cache_store_chn_end_analyze(struct stream *s, struct filter *filter, struct channel *chn)
{
	struct cache_st *st = filter->ctx;
	struct cache_flt_conf *cconf = FLT_CONF(filter);
	struct cache *cache = cconf->c.cache;
	struct shared_context *shctx = shctx_ptr(cache);

	if (!(chn->flags & CF_ISRESP))
		return 1;

	/* Everything should be released in the http_end filter, but we need to do it
	 * there too, in case of errors */

	if (st && st->first_block) {

		shctx_lock(shctx);
		shctx_row_dec_hot(shctx, st->first_block);
		shctx_unlock(shctx);

	}
	if (st) {
		pool_free(pool_head_cache_st, st);
		filter->ctx = NULL;
	}

	return 1;
}

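/* Post-analyzer on AN_RES_WAIT_HTTP: disable caching when the response is
 * being compressed by a filter placed before the cache.
 */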
static int
cache_store_post_analyze(struct stream *s, struct filter *filter, struct channel *chn,
			 unsigned an_bit)
{
	struct http_txn *txn = s->txn;
	struct http_msg *msg = &txn->rsp;
	struct cache_st *st = filter->ctx;

	if (an_bit != AN_RES_WAIT_HTTP)
		goto end;

	/* Here we need to check if any compression filter precedes the cache
	 * filter. This is only possible when the compression is configured in
	 * the frontend while the cache filter is configured on the
	 * backend. This case cannot be detected during HAProxy startup. So in
	 * such cases, the cache is disabled.
	 */
	if (st && (msg->flags & HTTP_MSGF_COMPRESSING)) {
		pool_free(pool_head_cache_st, st);
		filter->ctx = NULL;
	}

  end:
	return 1;
}

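/* Called once response headers are parsed: register the data filter and, in
 * legacy mode, remember the headers length to skip when storing the body.
 */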
static int
cache_store_http_headers(struct stream *s, struct filter *filter, struct http_msg *msg)
{
	struct cache_st *st = filter->ctx;

	if (!(msg->chn->flags & CF_ISRESP) || !st)
		return 1;

	if (st->first_block) {
		register_data_filter(s, msg->chn, filter);
		if (!IS_HTX_STRM(s))
			st->hdrs_len = msg->sov;
	}
	return 1;
}

static inline void disable_cache_entry(struct cache_st *st,
                                       struct filter *filter, struct shared_context *shctx)
{
	struct cache_entry *object;

	object = (struct cache_entry *)st->first_block->data;
	filter->ctx = NULL; /* disable cache */
	shctx_lock(shctx);
	shctx_row_dec_hot(shctx, st->first_block);
	object->eb.key = 0;
	shctx_unlock(shctx);
	pool_free(pool_head_cache_st, st);
}

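/* HTX data callback: copy up to <len> bytes of payload starting at <offset>
 * into the shared cache row. Returns the number of bytes considered as
 * forwarded, or disables the cache entry on failure.
 */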
static int
cache_store_http_payload(struct stream *s, struct filter *filter, struct http_msg *msg,
			 unsigned int offset, unsigned int len)
{
	struct cache_flt_conf *cconf = FLT_CONF(filter);
	struct shared_context *shctx = shctx_ptr(cconf->c.cache);
	struct cache_st *st = filter->ctx;
	struct htx *htx = htxbuf(&msg->chn->buf);
	struct htx_blk *blk;
	struct shared_block *fb;
	unsigned int orig_len, to_forward;
	int ret;

	if (!len)
		return len;

	if (!st->first_block) {
		unregister_data_filter(s, msg->chn, filter);
		return len;
	}

	chunk_reset(&trash);
	orig_len = len;
	to_forward = 0;
	for (blk = htx_get_first_blk(htx); blk && len; blk = htx_get_next_blk(htx, blk)) {
		enum htx_blk_type type = htx_get_blk_type(blk);
		uint32_t info, sz = htx_get_blksz(blk);
		struct ist v;

		if (offset >= sz) {
			offset -= sz;
			continue;
		}

		switch (type) {
			case HTX_BLK_UNUSED:
				break;

			case HTX_BLK_DATA:
				v = htx_get_blk_value(htx, blk);
				v.ptr += offset;
				v.len -= offset;
				if (v.len > len)
					v.len = len;

				info = (type << 28) + v.len;
				chunk_memcat(&trash, (char *)&info, sizeof(info));
				chunk_memcat(&trash, v.ptr, v.len);
				to_forward += v.len;
				len -= v.len;
				break;

			default:
				/* Here offset must always be 0 because only
				 * DATA blocks can be partially transferred. */
				if (offset)
					goto no_cache;
				if (sz > len)
					goto end;

				chunk_memcat(&trash, (char *)&blk->info, sizeof(blk->info));
				chunk_memcat(&trash, htx_get_blk_ptr(htx, blk), sz);
				to_forward += sz;
				len -= sz;
				break;
		}

		offset = 0;
	}

  end:
	shctx_lock(shctx);
	fb = shctx_row_reserve_hot(shctx, st->first_block, trash.data);
	if (!fb) {
		shctx_unlock(shctx);
		goto no_cache;
	}
	shctx_unlock(shctx);

	ret = shctx_row_data_append(shctx, st->first_block, st->first_block->last_append,
				    (unsigned char *)b_head(&trash), b_data(&trash));
	if (ret < 0)
		goto no_cache;

	return to_forward;

  no_cache:
	disable_cache_entry(st, filter, shctx);
	unregister_data_filter(s, msg->chn, filter);
	return orig_len;
}

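/* Legacy data callback: skip the already-saved headers then append the
 * forwarded body to the shared cache row, disabling the entry on error.
 */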
static int
cache_store_http_forward_data(struct stream *s, struct filter *filter,
		       struct http_msg *msg, unsigned int len)
{
	struct cache_st *st = filter->ctx;
	struct cache_flt_conf *cconf = FLT_CONF(filter);
	struct shared_context *shctx = shctx_ptr(cconf->c.cache);
	int ret;

	ret = 0;

	/*
	 * We need to skip the HTTP headers first, because we saved them in the
	 * http-response action.
	 */
	if (!(msg->chn->flags & CF_ISRESP) || !st) {
		/* should never happen */
		unregister_data_filter(s, msg->chn, filter);
		return len;
	}

	if (!len) {
		/* Nothing to forward */
		ret = len;
	}
	else if (st->hdrs_len >= len) {
		/* Forward part of headers */
		ret = len;
		st->hdrs_len -= len;
	}
	else {
		/* Forward data */
		if (st->first_block) {
			int to_append, append;
			struct shared_block *fb;

			to_append = MIN(ci_contig_data(msg->chn), len - st->hdrs_len);

			shctx_lock(shctx);
			fb = shctx_row_reserve_hot(shctx, st->first_block, to_append);
			if (!fb) {
				shctx_unlock(shctx);
				disable_cache_entry(st, filter, shctx);
				unregister_data_filter(s, msg->chn, filter);
				return len;
			}
			shctx_unlock(shctx);

			/* Skip remaining headers to fill the cache */
			c_adv(msg->chn, st->hdrs_len);
			append = shctx_row_data_append(shctx, st->first_block, st->first_block->last_append,
						       (unsigned char *)ci_head(msg->chn), to_append);
			ret = st->hdrs_len + to_append - append;
			/* Rewind the buffer to forward all data */
			c_rew(msg->chn, st->hdrs_len);
			st->hdrs_len = 0;
			if (ret < 0) {
				disable_cache_entry(st, filter, shctx);
				unregister_data_filter(s, msg->chn, filter);
			}
		}
		else {
			/* should never happen */
			unregister_data_filter(s, msg->chn, filter);
			ret = len;
		}
	}

	if ((ret != len) ||
	    (FLT_NXT(filter, msg->chn) != FLT_FWD(filter, msg->chn) + ret))
		task_wakeup(s->task, TASK_WOKEN_MSG);

	return ret;
}

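/* Called at the end of the response: insert the completed entry into the
 * cache tree, release the hot block and free the filter context.
 */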
static int
cache_store_http_end(struct stream *s, struct filter *filter,
                     struct http_msg *msg)
{
	struct cache_st *st = filter->ctx;
	struct cache_flt_conf *cconf = FLT_CONF(filter);
	struct cache *cache = cconf->c.cache;
	struct shared_context *shctx = shctx_ptr(cache);
	struct cache_entry *object;

	if (!(msg->chn->flags & CF_ISRESP))
		return 1;

	if (st && st->first_block) {

		object = (struct cache_entry *)st->first_block->data;

		/* does not need to test if the insertion worked, if it
		 * doesn't, the blocks will be reused anyway */

		shctx_lock(shctx);
		if (eb32_insert(&cache->entries, &object->eb) != &object->eb) {
			object->eb.key = 0;
		}
		/* remove from the hotlist */
		shctx_row_dec_hot(shctx, st->first_block);
		shctx_unlock(shctx);

	}
	if (st) {
		pool_free(pool_head_cache_st, st);
		filter->ctx = NULL;
	}

	return 1;
}

 /*
  * This intends to be used when checking HTTP headers for some
  * word=value directive. Returns a pointer to the first character of the
  * value; returns NULL if the word was not found or if no value was assigned to it.
  */
char *directive_value(const char *sample, int slen, const char *word, int wlen)
{
	int st = 0;

	if (slen < wlen)
		return 0;

	while (wlen) {
		char c = *sample ^ *word;
		if (c && c != ('A' ^ 'a'))
			return NULL;
		sample++;
		word++;
		slen--;
		wlen--;
	}

	while (slen) {
		if (st == 0) {
			if (*sample != '=')
				return NULL;
			sample++;
			slen--;
			st = 1;
			continue;
		} else {
			return (char *)sample;
		}
	}

	return NULL;
}

/*
 * Return the maxage in seconds of an HTTP response.
 * Compute the maxage using either:
 *  - the assigned max-age of the cache
 *  - the s-maxage directive
 *  - the max-age directive
 *  - (Expires - Date) headers
 *  - the default-max-age of the cache
 *
 */
int http_calc_maxage(struct stream *s, struct cache *cache)
{
	int smaxage = -1;
	int maxage = -1;


	if (IS_HTX_STRM(s)) {
		/* HTX mode */
		struct htx *htx = htxbuf(&s->res.buf);
		struct http_hdr_ctx ctx = { .blk = NULL };

		while (http_find_header(htx, ist("cache-control"), &ctx, 0)) {
			char *value;

			value = directive_value(ctx.value.ptr, ctx.value.len, "s-maxage", 8);
			if (value) {
				struct buffer *chk = get_trash_chunk();

				chunk_strncat(chk, value, ctx.value.len - 8 + 1);
				chunk_strncat(chk, "", 1);
				smaxage = atoi(chk->area);
			}

			value = directive_value(ctx.value.ptr, ctx.value.len, "max-age", 7);
			if (value) {
				struct buffer *chk = get_trash_chunk();

				chunk_strncat(chk, value, ctx.value.len - 7 + 1);
				chunk_strncat(chk, "", 1);
				maxage = atoi(chk->area);
			}
		}
	}
	else {
		/* Legacy mode */
		struct http_txn *txn = s->txn;
		struct hdr_ctx ctx;

		ctx.idx = 0;

		/* loop on the Cache-Control values */
		while (http_find_header2("Cache-Control", 13, ci_head(&s->res), &txn->hdr_idx, &ctx)) {
			char *directive = ctx.line + ctx.val;
			char *value;

			value = directive_value(directive, ctx.vlen, "s-maxage", 8);
			if (value) {
				struct buffer *chk = get_trash_chunk();

				chunk_strncat(chk, value, ctx.vlen - 8 + 1);
				chunk_strncat(chk, "", 1);
				smaxage = atoi(chk->area);
			}

			value = directive_value(ctx.line + ctx.val, ctx.vlen, "max-age", 7);
			if (value) {
				struct buffer *chk = get_trash_chunk();

				chunk_strncat(chk, value, ctx.vlen - 7 + 1);
				chunk_strncat(chk, "", 1);
				maxage = atoi(chk->area);
			}
		}
	}

	/* TODO: Expires - Date */


	if (smaxage > 0)
		return MIN(smaxage, cache->maxage);

	if (maxage > 0)
		return MIN(maxage, cache->maxage);

	return cache->maxage;

}

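/* shctx callback used when blocks are reclaimed: unlink the cache entry
 * carried by the first block so it cannot be looked up anymore.
 */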
static void cache_free_blocks(struct shared_block *first, struct shared_block *block)
{
	struct cache_entry *object = (struct cache_entry *)block->data;

	if (first == block && object->eb.key)
		eb32_delete(&object->eb);
	object->eb.key = 0;
}

/*
 * This function stores the headers of the response in a buffer and then
 * registers a filter to store the data
 */
enum act_return http_action_store_cache(struct act_rule *rule, struct proxy *px,
                                        struct session *sess, struct stream *s, int flags)
{
	unsigned int age;
	long long hdr_age;
	struct http_txn *txn = s->txn;
	struct http_msg *msg = &txn->rsp;
	struct filter *filter;
	struct shared_block *first = NULL;
	struct cache_flt_conf *cconf = rule->arg.act.p[0];
	struct shared_context *shctx = shctx_ptr(cconf->c.cache);
	struct cache_st *cache_ctx = NULL;
	struct cache_entry *object, *old;
	unsigned int key = *(unsigned int *)txn->cache_hash;

	/* Don't cache if the response came from a cache */
	if ((obj_type(s->target) == OBJ_TYPE_APPLET) &&
	    s->target == &http_cache_applet.obj_type) {
		goto out;
	}

	/* cache only HTTP/1.1 */
	if (!(txn->req.flags & HTTP_MSGF_VER_11))
		goto out;

	/* cache only GET method */
	if (txn->meth != HTTP_METH_GET)
		goto out;

	/* cache key was not computed */
	if (!key)
		goto out;

	/* cache only 200 status code */
	if (txn->status != 200)
		goto out;

	/* Find the corresponding filter instance for the current stream */
	list_for_each_entry(filter, &s->strm_flt.filters, list) {
		if (FLT_ID(filter) == cache_store_flt_id  && FLT_CONF(filter) == cconf) {
			/* No filter ctx, don't cache anything */
			if (!filter->ctx)
				goto out;
			cache_ctx = filter->ctx;
			break;
		}
	}

	/* from there, cache_ctx is always defined */

	if (IS_HTX_STRM(s)) {
		struct htx *htx = htxbuf(&s->res.buf);
		struct http_hdr_ctx ctx;
		int32_t pos;

		/* Do not cache too big objects. */
		if ((msg->flags & HTTP_MSGF_CNT_LEN) && shctx->max_obj_size > 0 &&
		    htx->data + htx->extra > shctx->max_obj_size)
			goto out;

		/* Does not manage Vary at the moment. We will need a secondary key later for that */
		ctx.blk = NULL;
		if (http_find_header(htx, ist("Vary"), &ctx, 0))
			goto out;

		htx_check_response_for_cacheability(s, &s->res);

		if (!(txn->flags & TX_CACHEABLE) || !(txn->flags & TX_CACHE_COOK))
			goto out;

		age = 0;
		ctx.blk = NULL;
		if (http_find_header(htx, ist("Age"), &ctx, 0)) {
			if (!strl2llrc(ctx.value.ptr, ctx.value.len, &hdr_age) && hdr_age > 0) {
				if (unlikely(hdr_age > CACHE_ENTRY_MAX_AGE))
					hdr_age = CACHE_ENTRY_MAX_AGE;
				age = hdr_age;
			}
			http_remove_header(htx, &ctx);
		}

		chunk_reset(&trash);
		for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
			struct htx_blk *blk = htx_get_blk(htx, pos);
			enum htx_blk_type type = htx_get_blk_type(blk);
			uint32_t sz = htx_get_blksz(blk);

			chunk_memcat(&trash, (char *)&blk->info, sizeof(blk->info));
			chunk_memcat(&trash, htx_get_blk_ptr(htx, blk), sz);
			if (type == HTX_BLK_EOH)
				break;
		}
	}
	else {
		struct hdr_ctx ctx;

		/* Do not cache too big objects. */
		if ((msg->flags & HTTP_MSGF_CNT_LEN) && shctx->max_obj_size > 0 &&
		    msg->sov + msg->body_len > shctx->max_obj_size)
			goto out;

		/* Does not manage Vary at the moment. We will need a secondary key later for that */
		ctx.idx = 0;
		if (http_find_header2("Vary", 4, ci_head(txn->rsp.chn), &txn->hdr_idx, &ctx))
			goto out;

		check_response_for_cacheability(s, &s->res);

		if (!(txn->flags & TX_CACHEABLE) || !(txn->flags & TX_CACHE_COOK))
			goto out;

		age = 0;
		ctx.idx = 0;
		if (http_find_header2("Age", 3, ci_head(txn->rsp.chn), &txn->hdr_idx, &ctx)) {
			if (!strl2llrc(ctx.line + ctx.val, ctx.vlen, &hdr_age) && hdr_age > 0) {
				if (unlikely(hdr_age > CACHE_ENTRY_MAX_AGE))
					hdr_age = CACHE_ENTRY_MAX_AGE;
				age = hdr_age;
			}
			http_remove_header2(msg, &txn->hdr_idx, &ctx);
		}
	}

	shctx_lock(shctx);
	if (IS_HTX_STRM(s))
		first = shctx_row_reserve_hot(shctx, NULL, sizeof(struct cache_entry) + trash.data);
	else
		first = shctx_row_reserve_hot(shctx, NULL, sizeof(struct cache_entry) + msg->sov);
	if (!first) {
		shctx_unlock(shctx);
		goto out;
	}
	shctx_unlock(shctx);

	/* the received memory is not initialized, we need at least to mark
	 * the object as not indexed yet.
	 */
	object = (struct cache_entry *)first->data;
	object->eb.node.leaf_p = NULL;
	object->eb.key = 0;
	object->age = age;
	if (!IS_HTX_STRM(s))
		object->eoh = msg->eoh;

	/* reserve space for the cache_entry structure */
	first->len = sizeof(struct cache_entry);
	first->last_append = NULL;
	/* cache the headers in an http action because it allows choosing what
	 * to cache, for example you might want to cache a response before
	 * modifying some HTTP headers, or on the contrary after modifying
	 * those headers.
	 */

	/* does not need to be locked because it's in the "hot" list,
	 * copy the headers */
	if (IS_HTX_STRM(s)) {
		if (shctx_row_data_append(shctx, first, NULL, (unsigned char *)trash.area, trash.data) < 0)
			goto out;
	}
	else {
		if (shctx_row_data_append(shctx, first, NULL, (unsigned char *)ci_head(&s->res), msg->sov) < 0)
			goto out;
	}

	/* register the buffer in the filter ctx for filling it with data */
	if (cache_ctx) {
		cache_ctx->first_block = first;

		object->eb.key = key;

		memcpy(object->hash, txn->cache_hash, sizeof(object->hash));
		/* Insert the node later on caching success */

		shctx_lock(shctx);

		old = entry_exist(cconf->c.cache, txn->cache_hash);
		if (old) {
			eb32_delete(&old->eb);
			old->eb.key = 0;
		}
		shctx_unlock(shctx);

		/* store latest value and expiration time */
		object->latest_validation = now.tv_sec;
		object->expire = now.tv_sec + http_calc_maxage(s, cconf->c.cache);
		return ACT_RET_CONT;
	}

out:
	/* if does not cache */
	if (first) {
		shctx_lock(shctx);
		first->len = 0;
		object->eb.key = 0;
		shctx_row_dec_hot(shctx, first);
		shctx_unlock(shctx);
	}

	return ACT_RET_CONT;
}

#define HTTP_CACHE_INIT   0  /* Initial state. */
#define HTTP_CACHE_HEADER 1  /* Cache entry headers forwarded. */
#define HTTP_CACHE_FWD    2  /* Cache entry completely forwarded. */
#define HTTP_CACHE_END    3  /* Cache entry treatment terminated. */

#define HTX_CACHE_INIT   0  /* Initial state. */
#define HTX_CACHE_HEADER 1  /* Cache entry headers forwarding */
#define HTX_CACHE_DATA   2  /* Cache entry data forwarding */
#define HTX_CACHE_EOM    3  /* Cache entry completely forwarded. Finish the HTX message */
#define HTX_CACHE_END    4  /* Cache entry treatment terminated */

static void http_cache_applet_release(struct appctx *appctx)
{
	struct cache_flt_conf *cconf = appctx->rule->arg.act.p[0];
	struct cache_entry *cache_ptr = appctx->ctx.cache.entry;
	struct cache *cache = cconf->c.cache;
	struct shared_block *first = block_ptr(cache_ptr);

	shctx_lock(shctx_ptr(cache));
	shctx_row_dec_hot(shctx_ptr(cache), first);
	shctx_unlock(shctx_ptr(cache));
}

Christopher Faulet8f3c2562019-06-03 22:19:18 +0200877
878static unsigned int htx_cache_dump_blk(struct appctx *appctx, struct htx *htx, enum htx_blk_type type,
879 uint32_t info, struct shared_block *shblk, unsigned int offset)
Christopher Faulet54a8d5a2018-12-07 12:21:11 +0100880{
Christopher Faulet95220e22018-12-07 17:34:39 +0100881 struct cache_flt_conf *cconf = appctx->rule->arg.act.p[0];
882 struct shared_context *shctx = shctx_ptr(cconf->c.cache);
Christopher Faulet8f3c2562019-06-03 22:19:18 +0200883 struct htx_blk *blk;
884 unsigned int max, total;
885 uint32_t blksz;
Christopher Faulet54a8d5a2018-12-07 12:21:11 +0100886
Christopher Faulet8f3c2562019-06-03 22:19:18 +0200887 max = htx_get_max_blksz(htx, channel_htx_recv_max(si_ic(appctx->owner), htx));
888 if (!max)
889 return 0;
Christopher Faulet2d7c5392019-06-03 10:41:26 +0200890 blksz = ((type == HTX_BLK_HDR || type == HTX_BLK_TLR)
Christopher Faulet8f3c2562019-06-03 22:19:18 +0200891 ? (info & 0xff) + ((info >> 8) & 0xfffff)
892 : info & 0xfffffff);
893 if (blksz > max)
894 return 0;
Christopher Faulet54a8d5a2018-12-07 12:21:11 +0100895
Christopher Faulet8f3c2562019-06-03 22:19:18 +0200896 blk = htx_add_blk(htx, type, blksz);
897 if (!blk)
898 return 0;
Christopher Faulet54a8d5a2018-12-07 12:21:11 +0100899
Christopher Faulet8f3c2562019-06-03 22:19:18 +0200900 blk->info = info;
901 total = 4;
902 while (blksz) {
903 max = MIN(blksz, shctx->block_size - offset);
904 memcpy(htx_get_blk_ptr(htx, blk), (const char *)shblk->data + offset, max);
905 offset += max;
906 blksz -= max;
907 total += max;
908 if (blksz || offset == shctx->block_size) {
909 shblk = LIST_NEXT(&shblk->list, typeof(shblk), list);
910 offset = 0;
911 }
Christopher Faulet54a8d5a2018-12-07 12:21:11 +0100912 }
Christopher Faulet8f3c2562019-06-03 22:19:18 +0200913 appctx->ctx.cache.offset = offset;
914 appctx->ctx.cache.next = shblk;
915 appctx->ctx.cache.sent += total;
916 return total;
917}
Christopher Faulet54a8d5a2018-12-07 12:21:11 +0100918
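/* Dump a DATA block of the cached object into <htx>, possibly in several
 * passes; the amount still to emit is kept in appctx->ctx.cache.rem_data.
 */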
static unsigned int htx_cache_dump_data_blk(struct appctx *appctx, struct htx *htx,
					    uint32_t info, struct shared_block *shblk, unsigned int offset)
{

	struct cache_flt_conf *cconf = appctx->rule->arg.act.p[0];
	struct shared_context *shctx = shctx_ptr(cconf->c.cache);
	unsigned int max, total, rem_data;
	uint32_t blksz;

	max = htx_get_max_blksz(htx, channel_htx_recv_max(si_ic(appctx->owner), htx));
	if (!max)
		return 0;

	rem_data = 0;
	if (appctx->ctx.cache.rem_data) {
		blksz = appctx->ctx.cache.rem_data;
		total = 0;
	}
	else {
		blksz = (info & 0xfffffff);
		total = 4;
	}
	if (blksz > max) {
		rem_data = blksz - max;
		blksz = max;
	}

	while (blksz) {
		size_t sz;

		max = MIN(blksz, shctx->block_size - offset);
		sz  = htx_add_data(htx, ist2(shblk->data + offset, max));
		offset += sz;
		blksz  -= sz;
		total  += sz;
		if (sz < max)
			break;
		if (blksz || offset == shctx->block_size) {
			shblk = LIST_NEXT(&shblk->list, typeof(shblk), list);
			offset = 0;
		}
	}

	appctx->ctx.cache.offset = offset;
	appctx->ctx.cache.next   = shblk;
	appctx->ctx.cache.sent  += total;
	appctx->ctx.cache.rem_data = rem_data + blksz;
	return total;
}

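/* Dump up to <len> bytes of the cached HTX message into <htx>, stopping after
 * a block of type <mark>. Returns the amount of data dumped.
 */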
static size_t htx_cache_dump_msg(struct appctx *appctx, struct htx *htx, unsigned int len,
				 enum htx_blk_type mark)
{
	struct cache_flt_conf *cconf = appctx->rule->arg.act.p[0];
	struct shared_context *shctx = shctx_ptr(cconf->c.cache);
	struct shared_block   *shblk;
	unsigned int offset, sz;
	unsigned int ret, total = 0;

	while (len) {
		enum htx_blk_type type;
		uint32_t info;

		shblk  = appctx->ctx.cache.next;
		offset = appctx->ctx.cache.offset;
		if (appctx->ctx.cache.rem_data) {
			type = HTX_BLK_DATA;
			info = 0;
			goto add_data_blk;
		}

		/* Get info of the next HTX block. It may be split across 2 shblks */
		sz = MIN(4, shctx->block_size - offset);
		memcpy((char *)&info, (const char *)shblk->data + offset, sz);
		offset += sz;
		if (sz < 4) {
			shblk = LIST_NEXT(&shblk->list, typeof(shblk), list);
			memcpy(((char *)&info)+sz, (const char *)shblk->data, 4 - sz);
			offset = (4 - sz);
		}

		/* Get payload of the next HTX block and insert it. */
		type = (info >> 28);
		if (type != HTX_BLK_DATA)
			ret = htx_cache_dump_blk(appctx, htx, type, info, shblk, offset);
		else {
		  add_data_blk:
			ret = htx_cache_dump_data_blk(appctx, htx, info, shblk, offset);
		}

		if (!ret)
			break;
		total += ret;
		len   -= ret;

		if (appctx->ctx.cache.rem_data || type == mark)
			break;
	}

	return total;
}

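/* Add an "Age" header to <htx>, computed from the cache entry's last
 * validation date. Returns 1 on success, 0 on failure.
 */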
static int htx_cache_add_age_hdr(struct appctx *appctx, struct htx *htx)
{
	struct cache_entry *cache_ptr = appctx->ctx.cache.entry;
	unsigned int age;
	char *end;

	chunk_reset(&trash);
	age = MAX(0, (int)(now.tv_sec - cache_ptr->latest_validation)) + cache_ptr->age;
	if (unlikely(age > CACHE_ENTRY_MAX_AGE))
		age = CACHE_ENTRY_MAX_AGE;
	end = ultoa_o(age, b_head(&trash), b_size(&trash));
	b_set_data(&trash, end - b_head(&trash));
	if (!http_add_header(htx, ist("Age"), ist2(b_head(&trash), b_data(&trash))))
		return 0;
	return 1;
}

static void htx_cache_io_handler(struct appctx *appctx)
{
	struct cache_entry *cache_ptr = appctx->ctx.cache.entry;
	struct shared_block *first = block_ptr(cache_ptr);
	struct stream_interface *si = appctx->owner;
	struct channel *req = si_oc(si);
	struct channel *res = si_ic(si);
	struct htx *req_htx, *res_htx;
	struct buffer *errmsg;
	unsigned int len;
	size_t ret, total = 0;

	res_htx = htxbuf(&res->buf);
	total = res_htx->data;

	if (unlikely(si->state == SI_ST_DIS || si->state == SI_ST_CLO))
		goto out;

	/* Check if the input buffer is available. */
	if (!b_size(&res->buf)) {
		si_rx_room_blk(si);
		goto out;
	}

	if (res->flags & (CF_SHUTW|CF_SHUTR|CF_SHUTW_NOW))
		appctx->st0 = HTX_CACHE_END;

	if (appctx->st0 == HTX_CACHE_INIT) {
		appctx->ctx.cache.next = block_ptr(cache_ptr);
		appctx->ctx.cache.offset = sizeof(*cache_ptr);
		appctx->ctx.cache.sent = 0;
		appctx->ctx.cache.rem_data = 0;
		appctx->st0 = HTX_CACHE_HEADER;
	}

	if (appctx->st0 == HTX_CACHE_HEADER) {
		/* Headers must be dumped at once. Otherwise it is an error */
		len = first->len - sizeof(*cache_ptr) - appctx->ctx.cache.sent;
		ret = htx_cache_dump_msg(appctx, res_htx, len, HTX_BLK_EOH);
		if (!ret || (htx_get_tail_type(res_htx) != HTX_BLK_EOH) ||
		    !htx_cache_add_age_hdr(appctx, res_htx))
			goto error;

		/* Skip response body for HEAD requests */
		if (si_strm(si)->txn->meth == HTTP_METH_HEAD)
			appctx->st0 = HTX_CACHE_EOM;
		else
			appctx->st0 = HTX_CACHE_DATA;
	}

	if (appctx->st0 == HTX_CACHE_DATA) {
		len = first->len - sizeof(*cache_ptr) - appctx->ctx.cache.sent;
		if (len) {
			ret = htx_cache_dump_msg(appctx, res_htx, len, HTX_BLK_EOM);
			if (ret < len) {
				si_rx_room_blk(si);
				goto out;
			}
		}
		appctx->st0 = HTX_CACHE_END;
	}

	if (appctx->st0 == HTX_CACHE_EOM) {
		if (!htx_add_endof(res_htx, HTX_BLK_EOM)) {
			si_rx_room_blk(si);
			goto out;
		}
		appctx->st0 = HTX_CACHE_END;
	}

  end:
	if (!(res->flags & CF_SHUTR) && appctx->st0 == HTX_CACHE_END) {
		res->flags |= CF_READ_NULL;
		si_shutr(si);
	}

  out:
	total = res_htx->data - total;
	if (total)
		channel_add_input(res, total);
	htx_to_buf(res_htx, &res->buf);

	/* eat the whole request */
	if (co_data(req)) {
		req_htx = htx_from_buf(&req->buf);
		co_htx_skip(req, req_htx, co_data(req));
		htx_to_buf(req_htx, &req->buf);
	}
	return;

  error:
	/* Send an HTTP error 500 */
	b_reset(&res->buf);
	errmsg = &htx_err_chunks[HTTP_ERR_500];
	res->buf.data = b_data(errmsg);
	memcpy(res->buf.area, b_head(errmsg), b_data(errmsg));
	res_htx = htx_from_buf(&res->buf);

	total = 0;
	appctx->st0 = HTX_CACHE_END;
	goto end;
}


/*
 * Append an "Age" header into <chn> channel for this <ce> cache entry.
 * It is the responsibility of the caller to ensure there is enough
 * data in the channel.
 *
 * Returns the number of bytes inserted if succeeded, 0 if failed.
 */
static int cache_channel_append_age_header(struct cache_entry *ce, struct channel *chn)
{
	unsigned int age;

	age = MAX(0, (int)(now.tv_sec - ce->latest_validation)) + ce->age;
	if (unlikely(age > CACHE_ENTRY_MAX_AGE))
		age = CACHE_ENTRY_MAX_AGE;

	chunk_reset(&trash);
	chunk_printf(&trash, "Age: %u", age);

	return ci_insert_line2(chn, ce->eoh, trash.area, trash.data);
}

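/* Copy up to <len> bytes of the cached row into the response channel (legacy
 * mode). Returns the number of bytes sent, or -1 on unrecoverable error.
 */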
static int cache_channel_row_data_get(struct appctx *appctx, int len)
{
	int ret, total;
	struct stream_interface *si = appctx->owner;
	struct channel *res = si_ic(si);
	struct cache_flt_conf *cconf = appctx->rule->arg.act.p[0];
	struct cache *cache = cconf->c.cache;
	struct shared_context *shctx = shctx_ptr(cache);
	struct cache_entry *cache_ptr = appctx->ctx.cache.entry;
	struct shared_block *blk, *next = appctx->ctx.cache.next;
	int offset;

	total = 0;
	offset = 0;

	if (!next) {
		offset = sizeof(struct cache_entry);
		next = block_ptr(cache_ptr);
	}

	blk = next;
	list_for_each_entry_from(blk, &shctx->hot, list) {
		int sz;

		if (len <= 0)
			break;

		sz = MIN(len, shctx->block_size - offset);

		ret = ci_putblk(res, (const char *)blk->data + offset, sz);
		if (unlikely(offset))
			offset = 0;
		if (ret <= 0) {
			if (ret == -3 || ret == -1) {
				si_rx_room_blk(si);
				break;
			}
			return -1;
		}

		total += sz;
		len -= sz;
	}
	appctx->ctx.cache.next = blk;

	return total;
}

static void http_cache_io_handler(struct appctx *appctx)
{
	struct stream_interface *si = appctx->owner;
	struct stream *s = si_strm(si);
	struct channel *res = si_ic(si);
	struct cache_entry *cache_ptr = appctx->ctx.cache.entry;
	struct shared_block *first = block_ptr(cache_ptr);
	unsigned int *sent = &appctx->ctx.cache.sent;

	if (IS_HTX_STRM(s))
		return htx_cache_io_handler(appctx);

	if (unlikely(si->state == SI_ST_DIS || si->state == SI_ST_CLO))
		goto out;

	/* Check if the input buffer is available. */
	if (res->buf.size == 0) {
		/* buf.size==0 means we failed to get a buffer and were
		 * already subscribed to a wait list to get a buffer.
		 */
		goto out;
	}

	if (res->flags & (CF_SHUTW|CF_SHUTW_NOW|CF_SHUTR))
		appctx->st0 = HTTP_CACHE_END;

	/* buffers are aligned there, should be fine */
	if (appctx->st0 == HTTP_CACHE_HEADER || appctx->st0 == HTTP_CACHE_INIT) {
		int len = first->len - *sent - sizeof(struct cache_entry);
		if (len > 0) {
			int ret;

			ret = cache_channel_row_data_get(appctx, len);
			if (ret == -1)
				appctx->st0 = HTTP_CACHE_END;
			else
				*sent += ret;
			if (appctx->st0 == HTTP_CACHE_INIT && *sent > cache_ptr->eoh &&
			    cache_channel_append_age_header(cache_ptr, res))
				appctx->st0 = HTTP_CACHE_HEADER;
			else if (ret == len) {
				*sent = 0;
				appctx->st0 = HTTP_CACHE_FWD;
			}
		}
		else {
			*sent = 0;
			appctx->st0 = HTTP_CACHE_FWD;
		}
	}

	if (appctx->st0 == HTTP_CACHE_FWD)
		appctx->st0 = HTTP_CACHE_END;

	if (!(res->flags & CF_SHUTR) && appctx->st0 == HTTP_CACHE_END) {
		res->flags |= CF_READ_NULL;
		si_shutr(si);
	}
out:
	/* eat the whole request */
	if (co_data(si_oc(si)))
		co_skip(si_oc(si), co_data(si_oc(si)));
}

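/* Resolve the cache <name> for an http-request/response cache rule and attach
 * (or create) the corresponding cache store filter config to the rule.
 */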
Christopher Faulet95220e22018-12-07 17:34:39 +01001275static int parse_cache_rule(struct proxy *proxy, const char *name, struct act_rule *rule, char **err)
William Lallemand41db4602017-10-30 11:15:51 +01001276{
1277 struct flt_conf *fconf;
Christopher Faulet95220e22018-12-07 17:34:39 +01001278 struct cache_flt_conf *cconf = NULL;
William Lallemand41db4602017-10-30 11:15:51 +01001279
Christopher Faulet95220e22018-12-07 17:34:39 +01001280 if (!*name || strcmp(name, "if") == 0 || strcmp(name, "unless") == 0) {
William Lallemand41db4602017-10-30 11:15:51 +01001281 memprintf(err, "expects a cache name");
Christopher Faulet95220e22018-12-07 17:34:39 +01001282 goto err;
William Lallemand41db4602017-10-30 11:15:51 +01001283 }
1284
1285 /* check if a cache filter was already registered with this cache
1286 * name, if that's the case, must use it. */
1287 list_for_each_entry(fconf, &proxy->filter_configs, list) {
Christopher Faulet95220e22018-12-07 17:34:39 +01001288 if (fconf->id == cache_store_flt_id) {
1289 cconf = fconf->conf;
1290 if (cconf && !strcmp((char *)cconf->c.name, name)) {
1291 rule->arg.act.p[0] = cconf;
1292 return 1;
1293 }
William Lallemand41db4602017-10-30 11:15:51 +01001294 }
1295 }
1296
Christopher Faulet95220e22018-12-07 17:34:39 +01001297 /* Create the filter cache config */
1298 cconf = calloc(1, sizeof(*cconf));
1299 if (!cconf) {
1300 memprintf(err, "out of memory\n");
1301 goto err;
1302 }
Christopher Faulet99a17a22018-12-11 09:18:27 +01001303 cconf->flags = CACHE_FLT_F_IMPLICIT_DECL;
Christopher Faulet95220e22018-12-07 17:34:39 +01001304 cconf->c.name = strdup(name);
1305 if (!cconf->c.name) {
1306 memprintf(err, "out of memory\n");
William Lallemand41db4602017-10-30 11:15:51 +01001307 goto err;
1308 }
Christopher Faulet95220e22018-12-07 17:34:39 +01001309
William Lallemand41db4602017-10-30 11:15:51 +01001310 /* register a filter to fill the cache buffer */
1311 fconf = calloc(1, sizeof(*fconf));
1312 if (!fconf) {
Christopher Faulet95220e22018-12-07 17:34:39 +01001313 memprintf(err, "out of memory\n");
William Lallemand41db4602017-10-30 11:15:51 +01001314 goto err;
1315 }
Christopher Faulet95220e22018-12-07 17:34:39 +01001316 fconf->id = cache_store_flt_id;
1317 fconf->conf = cconf;
William Lallemand41db4602017-10-30 11:15:51 +01001318 fconf->ops = &cache_ops;
1319 LIST_ADDQ(&proxy->filter_configs, &fconf->list);
1320
Christopher Faulet95220e22018-12-07 17:34:39 +01001321 rule->arg.act.p[0] = cconf;
1322 return 1;
William Lallemand41db4602017-10-30 11:15:51 +01001323
Christopher Faulet95220e22018-12-07 17:34:39 +01001324 err:
1325 free(cconf);
1326 return 0;
1327}
1328
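/* Parses the "cache-store" http-response action: the rule is bound to
 * http_action_store_cache() and to the cache filter named in its argument. */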
1329enum act_parse_ret parse_cache_store(const char **args, int *orig_arg, struct proxy *proxy,
1330 struct act_rule *rule, char **err)
1331{
1332 rule->action = ACT_CUSTOM;
1333 rule->action_ptr = http_action_store_cache;
1334
1335 if (!parse_cache_rule(proxy, args[*orig_arg], rule, err))
1336 return ACT_RET_PRS_ERR;
William Lallemand41db4602017-10-30 11:15:51 +01001337
Christopher Faulet95220e22018-12-07 17:34:39 +01001338 (*orig_arg)++;
1339 return ACT_RET_PRS_OK;
William Lallemand41db4602017-10-30 11:15:51 +01001340}
1341
William Lallemandf528fff2017-11-23 19:43:17 +01001342/* Produces a SHA1 hash of the concatenation of the first occurrence of the
 1343 * Host header and the path component of the request URI (which must begin
 1344 * with a slash '/'). Returns 1 on success, 0 if either of them is missing. */
Christopher Faulet54a8d5a2018-12-07 12:21:11 +01001345int sha1_hosturi(struct stream *s)
William Lallemandf528fff2017-11-23 19:43:17 +01001346{
Christopher Faulet54a8d5a2018-12-07 12:21:11 +01001347 struct http_txn *txn = s->txn;
William Lallemandf528fff2017-11-23 19:43:17 +01001348 blk_SHA_CTX sha1_ctx;
Willy Tarreau83061a82018-07-13 11:56:34 +02001349 struct buffer *trash;
Christopher Faulet54a8d5a2018-12-07 12:21:11 +01001350
William Lallemandf528fff2017-11-23 19:43:17 +01001351 trash = get_trash_chunk();
1352
Christopher Faulet54a8d5a2018-12-07 12:21:11 +01001353 if (IS_HTX_STRM(s)) {
1354 struct htx *htx = htxbuf(&s->req.buf);
1355 struct htx_sl *sl;
1356 struct http_hdr_ctx ctx;
1357 struct ist path;
William Lallemandf528fff2017-11-23 19:43:17 +01001358
Christopher Faulet54a8d5a2018-12-07 12:21:11 +01001359 ctx.blk = NULL;
1360 if (!http_find_header(htx, ist("Host"), &ctx, 0))
1361 return 0;
1362 chunk_memcat(trash, ctx.value.ptr, ctx.value.len);
1363
Christopher Faulet297fbb42019-05-13 14:41:27 +02001364 sl = http_get_stline(htx);
Christopher Faulet54a8d5a2018-12-07 12:21:11 +01001365 path = http_get_path(htx_sl_req_uri(sl));
1366 if (!path.ptr)
1367 return 0;
1368 chunk_memcat(trash, path.ptr, path.len);
1369 }
1370 else {
1371 struct hdr_ctx ctx;
1372 char *path;
1373 char *end;
1374
1375		/* retrieve the host */
1376 ctx.idx = 0;
1377 if (!http_find_header2("Host", 4, ci_head(txn->req.chn), &txn->hdr_idx, &ctx))
1378 return 0;
1379 chunk_strncat(trash, ctx.line + ctx.val, ctx.vlen);
1380
1381 /* now retrieve the path */
1382 end = ci_head(txn->req.chn) + txn->req.sl.rq.u + txn->req.sl.rq.u_l;
1383 path = http_txn_get_path(txn);
1384 if (!path)
1385 return 0;
1386 chunk_strncat(trash, path, end - path);
1387 }
William Lallemandf528fff2017-11-23 19:43:17 +01001388
1389 /* hash everything */
1390 blk_SHA1_Init(&sha1_ctx);
Willy Tarreau843b7cb2018-07-13 10:54:26 +02001391 blk_SHA1_Update(&sha1_ctx, trash->area, trash->data);
William Lallemandf528fff2017-11-23 19:43:17 +01001392 blk_SHA1_Final((unsigned char *)txn->cache_hash, &sha1_ctx);
1393
1394 return 1;
1395}
1396
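/* "http-request cache-use" action: the cache is ignored for HTTP/1.0 requests
 * and for methods other than GET/HEAD. Otherwise the Host + path hash is
 * computed and looked up in the shared cache; on a hit an applet is set up on
 * the stream to deliver the stored response, on a miss the request simply
 * continues to the server. */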
William Lallemand41db4602017-10-30 11:15:51 +01001397enum act_return http_action_req_cache_use(struct act_rule *rule, struct proxy *px,
1398 struct session *sess, struct stream *s, int flags)
1399{
William Lallemand77c11972017-10-31 20:43:01 +01001400
Christopher Fauletb3d4bca2019-02-25 10:59:33 +01001401 struct http_txn *txn = s->txn;
William Lallemand77c11972017-10-31 20:43:01 +01001402 struct cache_entry *res;
Christopher Faulet95220e22018-12-07 17:34:39 +01001403 struct cache_flt_conf *cconf = rule->arg.act.p[0];
1404 struct cache *cache = cconf->c.cache;
William Lallemand77c11972017-10-31 20:43:01 +01001405
Christopher Fauletb3d4bca2019-02-25 10:59:33 +01001406 /* Ignore cache for HTTP/1.0 requests and for requests other than GET
1407 * and HEAD */
1408 if (!(txn->req.flags & HTTP_MSGF_VER_11) ||
1409 (txn->meth != HTTP_METH_GET && txn->meth != HTTP_METH_HEAD))
1410 txn->flags |= TX_CACHE_IGNORE;
1411
Christopher Faulet54a8d5a2018-12-07 12:21:11 +01001412 if (IS_HTX_STRM(s))
1413 htx_check_request_for_cacheability(s, &s->req);
1414 else
1415 check_request_for_cacheability(s, &s->req);
1416
Willy Tarreau504455c2017-12-22 17:47:35 +01001417 if ((s->txn->flags & (TX_CACHE_IGNORE|TX_CACHEABLE)) == TX_CACHE_IGNORE)
1418 return ACT_RET_CONT;
1419
Christopher Faulet54a8d5a2018-12-07 12:21:11 +01001420 if (!sha1_hosturi(s))
Willy Tarreau7704b1e2017-12-22 16:32:43 +01001421 return ACT_RET_CONT;
William Lallemandf528fff2017-11-23 19:43:17 +01001422
Willy Tarreau504455c2017-12-22 17:47:35 +01001423 if (s->txn->flags & TX_CACHE_IGNORE)
1424 return ACT_RET_CONT;
1425
Willy Tarreaua1214a52018-12-14 14:00:25 +01001426 if (px == strm_fe(s))
Olivier Houchardaa090d42019-03-08 18:49:24 +01001427 _HA_ATOMIC_ADD(&px->fe_counters.p.http.cache_lookups, 1);
Willy Tarreaua1214a52018-12-14 14:00:25 +01001428 else
Olivier Houchardaa090d42019-03-08 18:49:24 +01001429 _HA_ATOMIC_ADD(&px->be_counters.p.http.cache_lookups, 1);
Willy Tarreaua1214a52018-12-14 14:00:25 +01001430
William Lallemanda400a3a2017-11-20 19:13:12 +01001431 shctx_lock(shctx_ptr(cache));
William Lallemandf528fff2017-11-23 19:43:17 +01001432 res = entry_exist(cache, s->txn->cache_hash);
William Lallemand77c11972017-10-31 20:43:01 +01001433 if (res) {
1434 struct appctx *appctx;
William Lallemanda400a3a2017-11-20 19:13:12 +01001435 shctx_row_inc_hot(shctx_ptr(cache), block_ptr(res));
1436 shctx_unlock(shctx_ptr(cache));
William Lallemand77c11972017-10-31 20:43:01 +01001437 s->target = &http_cache_applet.obj_type;
Willy Tarreau14bfe9a2018-12-19 15:19:27 +01001438 if ((appctx = si_register_handler(&s->si[1], objt_applet(s->target)))) {
William Lallemand77c11972017-10-31 20:43:01 +01001439 appctx->st0 = HTTP_CACHE_INIT;
1440 appctx->rule = rule;
1441 appctx->ctx.cache.entry = res;
Frédéric Lécaille8df65ae2018-10-22 18:01:48 +02001442 appctx->ctx.cache.next = NULL;
1443 appctx->ctx.cache.sent = 0;
Willy Tarreaua1214a52018-12-14 14:00:25 +01001444
1445 if (px == strm_fe(s))
Olivier Houchardaa090d42019-03-08 18:49:24 +01001446 _HA_ATOMIC_ADD(&px->fe_counters.p.http.cache_hits, 1);
Willy Tarreaua1214a52018-12-14 14:00:25 +01001447 else
Olivier Houchardaa090d42019-03-08 18:49:24 +01001448 _HA_ATOMIC_ADD(&px->be_counters.p.http.cache_hits, 1);
Olivier Houchardfccf8402017-11-01 14:04:02 +01001449 return ACT_RET_CONT;
William Lallemand77c11972017-10-31 20:43:01 +01001450 } else {
William Lallemand55e76742017-11-21 20:01:28 +01001451 shctx_lock(shctx_ptr(cache));
1452 shctx_row_dec_hot(shctx_ptr(cache), block_ptr(res));
1453 shctx_unlock(shctx_ptr(cache));
Olivier Houchardfccf8402017-11-01 14:04:02 +01001454 return ACT_RET_YIELD;
William Lallemand77c11972017-10-31 20:43:01 +01001455 }
1456 }
William Lallemanda400a3a2017-11-20 19:13:12 +01001457 shctx_unlock(shctx_ptr(cache));
Olivier Houchardfccf8402017-11-01 14:04:02 +01001458 return ACT_RET_CONT;
William Lallemand41db4602017-10-30 11:15:51 +01001459}
1460
1461
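/* Parses the "cache-use" http-request action: the rule is bound to
 * http_action_req_cache_use() and to the cache filter named in its argument. */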
1462enum act_parse_ret parse_cache_use(const char **args, int *orig_arg, struct proxy *proxy,
1463 struct act_rule *rule, char **err)
1464{
William Lallemand41db4602017-10-30 11:15:51 +01001465 rule->action = ACT_CUSTOM;
1466 rule->action_ptr = http_action_req_cache_use;
1467
Christopher Faulet95220e22018-12-07 17:34:39 +01001468 if (!parse_cache_rule(proxy, args[*orig_arg], rule, err))
William Lallemand41db4602017-10-30 11:15:51 +01001469 return ACT_RET_PRS_ERR;
William Lallemand41db4602017-10-30 11:15:51 +01001470
1471 (*orig_arg)++;
1472 return ACT_RET_PRS_OK;
William Lallemand41db4602017-10-30 11:15:51 +01001473}
1474
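/* Parses a "cache" section: "cache <id>" opens the section, then
 * "total-max-size" (megabytes), "max-age" (seconds) and "max-object-size"
 * (bytes) tune it. Purely illustrative sketch of how such a section is used,
 * with invented names and values (not taken from this file):
 *
 *   cache my-cache
 *       total-max-size 64        # shared memory size, in megabytes
 *       max-age 120              # expiration delay, in seconds
 *       max-object-size 16384    # largest cacheable object, in bytes
 *
 *   backend my-backend
 *       http-request cache-use my-cache
 *       http-response cache-store my-cache
 */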
1475int cfg_parse_cache(const char *file, int linenum, char **args, int kwm)
1476{
1477 int err_code = 0;
1478
1479 if (strcmp(args[0], "cache") == 0) { /* new cache section */
1480
1481 if (!*args[1]) {
Christopher Faulet767a84b2017-11-24 16:50:31 +01001482 ha_alert("parsing [%s:%d] : '%s' expects an <id> argument\n",
1483 file, linenum, args[0]);
William Lallemand41db4602017-10-30 11:15:51 +01001484 err_code |= ERR_ALERT | ERR_ABORT;
1485 goto out;
1486 }
1487
1488 if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
1489 err_code |= ERR_ABORT;
1490 goto out;
1491 }
1492
1493 if (tmp_cache_config == NULL) {
1494 tmp_cache_config = calloc(1, sizeof(*tmp_cache_config));
1495 if (!tmp_cache_config) {
Christopher Faulet767a84b2017-11-24 16:50:31 +01001496 ha_alert("parsing [%s:%d]: out of memory.\n", file, linenum);
William Lallemand41db4602017-10-30 11:15:51 +01001497 err_code |= ERR_ALERT | ERR_ABORT;
1498 goto out;
1499 }
1500
1501 strlcpy2(tmp_cache_config->id, args[1], 33);
1502 if (strlen(args[1]) > 32) {
Christopher Faulet767a84b2017-11-24 16:50:31 +01001503			ha_warning("parsing [%s:%d]: cache id is limited to 32 characters, truncated to '%s'.\n",
1504 file, linenum, tmp_cache_config->id);
William Lallemand41db4602017-10-30 11:15:51 +01001505 err_code |= ERR_WARN;
1506 }
William Lallemand49b44532017-11-24 18:53:43 +01001507 tmp_cache_config->maxage = 60;
William Lallemand41db4602017-10-30 11:15:51 +01001508 tmp_cache_config->maxblocks = 0;
Frédéric Lécaillea2219f52018-10-22 16:59:13 +02001509 tmp_cache_config->maxobjsz = 0;
William Lallemand41db4602017-10-30 11:15:51 +01001510 }
1511 } else if (strcmp(args[0], "total-max-size") == 0) {
Frédéric Lécailleb9b8b6b2018-10-25 20:17:45 +02001512 unsigned long int maxsize;
1513 char *err;
William Lallemand41db4602017-10-30 11:15:51 +01001514
1515 if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
1516 err_code |= ERR_ABORT;
1517 goto out;
1518 }
1519
Frédéric Lécailleb9b8b6b2018-10-25 20:17:45 +02001520 maxsize = strtoul(args[1], &err, 10);
1521 if (err == args[1] || *err != '\0') {
1522			ha_warning("parsing [%s:%d]: invalid value '%s' for 'total-max-size'\n",
1523 file, linenum, args[1]);
1524 err_code |= ERR_ABORT;
1525 goto out;
1526 }
1527
1528 if (maxsize > (UINT_MAX >> 20)) {
1529 ha_warning("parsing [%s:%d]: \"total-max-size\" (%s) must not be greater than %u\n",
1530 file, linenum, args[1], UINT_MAX >> 20);
1531 err_code |= ERR_ABORT;
1532 goto out;
1533 }
1534
William Lallemand41db4602017-10-30 11:15:51 +01001535 /* size in megabytes */
Frédéric Lécailleb9b8b6b2018-10-25 20:17:45 +02001536 maxsize *= 1024 * 1024 / CACHE_BLOCKSIZE;
William Lallemand41db4602017-10-30 11:15:51 +01001537 tmp_cache_config->maxblocks = maxsize;
William Lallemand49b44532017-11-24 18:53:43 +01001538 } else if (strcmp(args[0], "max-age") == 0) {
1539 if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
1540 err_code |= ERR_ABORT;
1541 goto out;
1542 }
1543
1544 if (!*args[1]) {
1545 ha_warning("parsing [%s:%d]: '%s' expects an age parameter in seconds.\n",
1546 file, linenum, args[0]);
1547 err_code |= ERR_WARN;
1548 }
1549
1550 tmp_cache_config->maxage = atoi(args[1]);
Frédéric Lécaillea2219f52018-10-22 16:59:13 +02001551 } else if (strcmp(args[0], "max-object-size") == 0) {
Frédéric Lécaille4eba5442018-10-25 20:29:31 +02001552 unsigned int maxobjsz;
1553 char *err;
1554
Frédéric Lécaillea2219f52018-10-22 16:59:13 +02001555 if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
1556 err_code |= ERR_ABORT;
1557 goto out;
1558 }
1559
1560 if (!*args[1]) {
1561			ha_warning("parsing [%s:%d]: '%s' expects a maximum object size parameter in bytes.\n",
1562 file, linenum, args[0]);
1563 err_code |= ERR_WARN;
1564 }
1565
Frédéric Lécaille4eba5442018-10-25 20:29:31 +02001566 maxobjsz = strtoul(args[1], &err, 10);
1567 if (err == args[1] || *err != '\0') {
1568			ha_warning("parsing [%s:%d]: invalid value '%s' for 'max-object-size'\n",
1569 file, linenum, args[1]);
1570 err_code |= ERR_ABORT;
1571 goto out;
1572 }
1573 tmp_cache_config->maxobjsz = maxobjsz;
Frédéric Lécaillea2219f52018-10-22 16:59:13 +02001574 }
1575 else if (*args[0] != 0) {
Christopher Faulet767a84b2017-11-24 16:50:31 +01001576 ha_alert("parsing [%s:%d] : unknown keyword '%s' in 'cache' section\n", file, linenum, args[0]);
William Lallemand41db4602017-10-30 11:15:51 +01001577 err_code |= ERR_ALERT | ERR_FATAL;
1578 goto out;
1579 }
1580out:
1581 return err_code;
1582}
1583
1584/* Called once a "cache" section has been fully parsed: allocates and initializes the shared context backing the cache and links it into the global cache list. */
1585
1586int cfg_post_parse_section_cache()
1587{
1588 struct shared_context *shctx;
1589 int err_code = 0;
1590 int ret_shctx;
1591
1592 if (tmp_cache_config) {
1593 struct cache *cache;
1594
1595 if (tmp_cache_config->maxblocks <= 0) {
Christopher Faulet767a84b2017-11-24 16:50:31 +01001596 ha_alert("Size not specified for cache '%s'\n", tmp_cache_config->id);
William Lallemand41db4602017-10-30 11:15:51 +01001597 err_code |= ERR_FATAL | ERR_ALERT;
1598 goto out;
1599 }
1600
Frédéric Lécaille4eba5442018-10-25 20:29:31 +02001601 if (!tmp_cache_config->maxobjsz) {
Frédéric Lécaillea2219f52018-10-22 16:59:13 +02001602			/* By default, the maximum object size is 1/256th of the cache size. */
1603 tmp_cache_config->maxobjsz =
1604 (tmp_cache_config->maxblocks * CACHE_BLOCKSIZE) >> 8;
Frédéric Lécaille4eba5442018-10-25 20:29:31 +02001605 }
1606 else if (tmp_cache_config->maxobjsz > tmp_cache_config->maxblocks * CACHE_BLOCKSIZE / 2) {
1607			ha_alert("\"max-object-size\" is limited to half of \"total-max-size\" => %u\n", tmp_cache_config->maxblocks * CACHE_BLOCKSIZE / 2);
1608 err_code |= ERR_FATAL | ERR_ALERT;
1609 goto out;
1610 }
Frédéric Lécaillea2219f52018-10-22 16:59:13 +02001611
1612 ret_shctx = shctx_init(&shctx, tmp_cache_config->maxblocks, CACHE_BLOCKSIZE,
1613 tmp_cache_config->maxobjsz, sizeof(struct cache), 1);
William Lallemand4da3f8a2017-10-31 14:33:34 +01001614
Frédéric Lécaillebc584492018-10-25 20:18:59 +02001615 if (ret_shctx <= 0) {
William Lallemand41db4602017-10-30 11:15:51 +01001616 if (ret_shctx == SHCTX_E_INIT_LOCK)
Christopher Faulet767a84b2017-11-24 16:50:31 +01001617 ha_alert("Unable to initialize the lock for the cache.\n");
William Lallemand41db4602017-10-30 11:15:51 +01001618 else
Christopher Faulet767a84b2017-11-24 16:50:31 +01001619 ha_alert("Unable to allocate cache.\n");
William Lallemand41db4602017-10-30 11:15:51 +01001620
1621 err_code |= ERR_FATAL | ERR_ALERT;
1622 goto out;
1623 }
William Lallemanda400a3a2017-11-20 19:13:12 +01001624 shctx->free_block = cache_free_blocks;
William Lallemand41db4602017-10-30 11:15:51 +01001625 memcpy(shctx->data, tmp_cache_config, sizeof(struct cache));
1626 cache = (struct cache *)shctx->data;
1627 cache->entries = EB_ROOT_UNIQUE;
William Lallemand41db4602017-10-30 11:15:51 +01001628 LIST_ADDQ(&caches, &cache->list);
1629 }
1630out:
1631 free(tmp_cache_config);
1632 tmp_cache_config = NULL;
1633 return err_code;
1634
William Lallemand41db4602017-10-30 11:15:51 +01001635}
1636
William Lallemand41db4602017-10-30 11:15:51 +01001637struct flt_ops cache_ops = {
1638 .init = cache_store_init,
Christopher Faulet95220e22018-12-07 17:34:39 +01001639 .check = cache_store_check,
1640 .deinit = cache_store_deinit,
William Lallemand41db4602017-10-30 11:15:51 +01001641
William Lallemand4da3f8a2017-10-31 14:33:34 +01001642 /* Handle channels activity */
1643 .channel_start_analyze = cache_store_chn_start_analyze,
William Lallemand49dc0482017-11-24 14:33:54 +01001644 .channel_end_analyze = cache_store_chn_end_analyze,
Christopher Faulet839791a2019-01-07 16:12:07 +01001645 .channel_post_analyze = cache_store_post_analyze,
William Lallemand4da3f8a2017-10-31 14:33:34 +01001646
1647 /* Filter HTTP requests and responses */
1648 .http_headers = cache_store_http_headers,
Christopher Faulet54a8d5a2018-12-07 12:21:11 +01001649 .http_payload = cache_store_http_payload,
William Lallemand4da3f8a2017-10-31 14:33:34 +01001650 .http_end = cache_store_http_end,
1651
1652 .http_forward_data = cache_store_http_forward_data,
1653
William Lallemand41db4602017-10-30 11:15:51 +01001654};
1655
Christopher Faulet99a17a22018-12-11 09:18:27 +01001656
1657
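/* Parses the "cache" filter keyword ("filter cache <name>"). If a cache action
 * already implicitly declared a filter with the same name on this proxy, that
 * implicit declaration is replaced by this explicit one; a second explicit
 * declaration of the same cache on a proxy is rejected. */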
1658static int
1659parse_cache_flt(char **args, int *cur_arg, struct proxy *px,
1660 struct flt_conf *fconf, char **err, void *private)
1661{
1662 struct flt_conf *f, *back;
Willy Tarreaua73da1e2018-12-14 10:19:28 +01001663 struct cache_flt_conf *cconf = NULL;
Christopher Faulet99a17a22018-12-11 09:18:27 +01001664 char *name = NULL;
1665 int pos = *cur_arg;
1666
1667	/* Get the cache filter name */
1668 if (!strcmp(args[pos], "cache")) {
1669 if (!*args[pos + 1]) {
1670 memprintf(err, "%s : expects an <id> argument", args[pos]);
1671 goto error;
1672 }
1673 name = strdup(args[pos + 1]);
1674 if (!name) {
1675 memprintf(err, "%s '%s' : out of memory", args[pos], args[pos + 1]);
1676 goto error;
1677 }
1678 pos += 2;
1679 }
1680
1681 /* Check if an implicit filter with the same name already exists. If so,
1682 * we remove the implicit filter to use the explicit one. */
1683 list_for_each_entry_safe(f, back, &px->filter_configs, list) {
1684 if (f->id != cache_store_flt_id)
1685 continue;
1686
1687 cconf = f->conf;
1688 if (strcmp(name, cconf->c.name)) {
1689 cconf = NULL;
1690 continue;
1691 }
1692
1693 if (!(cconf->flags & CACHE_FLT_F_IMPLICIT_DECL)) {
1694 cconf = NULL;
1695 memprintf(err, "%s: multiple explicit declarations of the cache filter '%s'",
1696 px->id, name);
1697			goto error; /* frees <name> allocated above */
1698 }
1699
1700 /* Remove the implicit filter. <cconf> is kept for the explicit one */
1701 LIST_DEL(&f->list);
1702 free(f);
1703 free(name);
1704 break;
1705 }
1706
1707 /* No implicit cache filter found, create configuration for the explicit one */
1708 if (!cconf) {
1709 cconf = calloc(1, sizeof(*cconf));
1710 if (!cconf) {
1711 memprintf(err, "%s: out of memory", args[*cur_arg]);
1712 goto error;
1713 }
1714 cconf->c.name = name;
1715 }
1716
1717 cconf->flags = 0;
1718 fconf->id = cache_store_flt_id;
1719 fconf->conf = cconf;
1720 fconf->ops = &cache_ops;
1721
1722 *cur_arg = pos;
1723 return 0;
1724
1725 error:
1726 free(name);
1727 free(cconf);
1728 return -1;
1729}
1730
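/* "show cache" CLI parser: the command is restricted to the admin level. */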
Aurélien Nephtaliabbf6072018-04-18 13:26:46 +02001731static int cli_parse_show_cache(char **args, char *payload, struct appctx *appctx, void *private)
William Lallemand1f49a362017-11-21 20:01:26 +01001732{
1733 if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
1734 return 1;
1735
1736 return 0;
1737}
1738
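/* "show cache" CLI I/O handler: iterates over all caches and dumps, for each
 * stored entry, its hash, size, block count, refcount and time before expiry.
 * The current cache and the next key are kept in the appctx context so the
 * dump can resume after the output buffer fills up. */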
1739static int cli_io_handler_show_cache(struct appctx *appctx)
1740{
1741 struct cache* cache = appctx->ctx.cli.p0;
1742 struct stream_interface *si = appctx->owner;
1743
William Lallemand1f49a362017-11-21 20:01:26 +01001744 if (cache == NULL) {
1745 cache = LIST_ELEM((caches).n, typeof(struct cache *), list);
1746 }
1747
1748 list_for_each_entry_from(cache, &caches, list) {
1749 struct eb32_node *node = NULL;
1750 unsigned int next_key;
1751 struct cache_entry *entry;
1752
William Lallemand1f49a362017-11-21 20:01:26 +01001753 next_key = appctx->ctx.cli.i0;
Willy Tarreauafe1de52018-04-04 11:56:43 +02001754 if (!next_key) {
1755 chunk_printf(&trash, "%p: %s (shctx:%p, available blocks:%d)\n", cache, cache->id, shctx_ptr(cache), shctx_ptr(cache)->nbav);
1756 if (ci_putchk(si_ic(si), &trash) == -1) {
Willy Tarreaudb398432018-11-15 11:08:52 +01001757 si_rx_room_blk(si);
Willy Tarreauafe1de52018-04-04 11:56:43 +02001758 return 0;
1759 }
1760 }
William Lallemand1f49a362017-11-21 20:01:26 +01001761
1762 appctx->ctx.cli.p0 = cache;
1763
1764 while (1) {
1765
1766 shctx_lock(shctx_ptr(cache));
1767 node = eb32_lookup_ge(&cache->entries, next_key);
1768 if (!node) {
1769 shctx_unlock(shctx_ptr(cache));
Willy Tarreauafe1de52018-04-04 11:56:43 +02001770 appctx->ctx.cli.i0 = 0;
William Lallemand1f49a362017-11-21 20:01:26 +01001771 break;
1772 }
1773
1774 entry = container_of(node, struct cache_entry, eb);
Willy Tarreauafe1de52018-04-04 11:56:43 +02001775 chunk_printf(&trash, "%p hash:%u size:%u (%u blocks), refcount:%u, expire:%d\n", entry, (*(unsigned int *)entry->hash), block_ptr(entry)->len, block_ptr(entry)->block_count, block_ptr(entry)->refcount, entry->expire - (int)now.tv_sec);
William Lallemand1f49a362017-11-21 20:01:26 +01001776
1777 next_key = node->key + 1;
1778 appctx->ctx.cli.i0 = next_key;
1779
1780 shctx_unlock(shctx_ptr(cache));
1781
1782 if (ci_putchk(si_ic(si), &trash) == -1) {
Willy Tarreaudb398432018-11-15 11:08:52 +01001783 si_rx_room_blk(si);
William Lallemand1f49a362017-11-21 20:01:26 +01001784 return 0;
1785 }
1786 }
1787
1788 }
1789
1790 return 1;
1791
1792}
1793
Christopher Faulet99a17a22018-12-11 09:18:27 +01001794/* Declare the filter parser for the "cache" keyword */
1795static struct flt_kw_list filter_kws = { "CACHE", { }, {
1796 { "cache", parse_cache_flt, NULL },
1797 { NULL, NULL, NULL },
1798 }
1799};
1800
1801INITCALL1(STG_REGISTER, flt_register_keywords, &filter_kws);
1802
William Lallemand1f49a362017-11-21 20:01:26 +01001803static struct cli_kw_list cli_kws = {{},{
William Lallemande899af82017-11-22 16:41:26 +01001804 { { "show", "cache", NULL }, "show cache : show cache status", cli_parse_show_cache, cli_io_handler_show_cache, NULL, NULL },
1805 {{},}
William Lallemand1f49a362017-11-21 20:01:26 +01001806}};
1807
Willy Tarreau0108d902018-11-25 19:14:37 +01001808INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
William Lallemand1f49a362017-11-21 20:01:26 +01001809
William Lallemand41db4602017-10-30 11:15:51 +01001810static struct action_kw_list http_res_actions = {
1811 .kw = {
1812 { "cache-store", parse_cache_store },
1813 { NULL, NULL }
1814 }
1815};
1816
Willy Tarreau0108d902018-11-25 19:14:37 +01001817INITCALL1(STG_REGISTER, http_res_keywords_register, &http_res_actions);
1818
William Lallemand41db4602017-10-30 11:15:51 +01001819static struct action_kw_list http_req_actions = {
1820 .kw = {
1821 { "cache-use", parse_cache_use },
1822 { NULL, NULL }
1823 }
1824};
1825
Willy Tarreau0108d902018-11-25 19:14:37 +01001826INITCALL1(STG_REGISTER, http_req_keywords_register, &http_req_actions);
1827
Willy Tarreau2231b632019-03-29 18:26:52 +01001828struct applet http_cache_applet = {
William Lallemand41db4602017-10-30 11:15:51 +01001829 .obj_type = OBJ_TYPE_APPLET,
1830 .name = "<CACHE>", /* used for logging */
William Lallemand77c11972017-10-31 20:43:01 +01001831 .fct = http_cache_io_handler,
William Lallemandecb73b12017-11-24 14:33:55 +01001832 .release = http_cache_applet_release,
William Lallemand41db4602017-10-30 11:15:51 +01001833};
1834
Willy Tarreaue6552512018-11-26 11:33:13 +01001835/* config parsers for this section */
1836REGISTER_CONFIG_SECTION("cache", cfg_parse_cache, cfg_post_parse_section_cache);