/*
 * Cache management
 *
 * Copyright 2017 HAProxy Technologies
 * William Lallemand <wlallemand@haproxy.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <eb32tree.h>
#include <import/sha1.h>

#include <types/action.h>
#include <types/cli.h>
#include <types/filters.h>
#include <types/proxy.h>
#include <types/shctx.h>

#include <proto/channel.h>
#include <proto/cli.h>
#include <proto/proxy.h>
#include <proto/hdr_idx.h>
#include <proto/filters.h>
#include <proto/proto_http.h>
#include <proto/log.h>
#include <proto/stream.h>
#include <proto/stream_interface.h>
#include <proto/shctx.h>


#include <common/cfgparse.h>
#include <common/hash.h>

/* flt_cache_store */

static const char *cache_store_flt_id = "cache store filter";

static struct pool_head *pool2_cache_st = NULL;

struct applet http_cache_applet;

struct flt_ops cache_ops;

struct cache {
	char id[33];             /* cache name */
	unsigned int maxage;     /* max-age */
	unsigned int maxblocks;
	struct list list;        /* cache linked list */
	struct eb_root entries;  /* head of cache entries based on keys */
};

/*
 * cache ctx for filters
 */
struct cache_st {
	int hdrs_len;
	struct shared_block *first_block;
};

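/* An object is stored in a row of shared blocks: this structure sits at the
 * beginning of the first block, immediately followed by the HTTP headers and
 * body in <data>. The eb32 key holds the first 32 bits of the SHA-1 hash of
 * the request's Host + path (see sha1_hosturi()), and the full 20-byte hash
 * is kept so that entry_exist() can reject key collisions.
 */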
struct cache_entry {
	unsigned int latest_validation;	/* latest validation date */
	unsigned int expire;		/* expiration date */
	struct eb32_node eb;		/* ebtree node used to hold the cache object */
	char hash[20];
	unsigned char data[0];
};

#define CACHE_BLOCKSIZE 1024

static struct list caches = LIST_HEAD_INIT(caches);
static struct cache *tmp_cache_config = NULL;

struct cache_entry *entry_exist(struct cache *cache, char *hash)
{
	struct eb32_node *node;
	struct cache_entry *entry;

	node = eb32_lookup(&cache->entries, (*(unsigned int *)hash));
	if (!node)
		return NULL;

	entry = eb32_entry(node, struct cache_entry, eb);

	/* if that's not the right node */
	if (memcmp(entry->hash, hash, sizeof(entry->hash)))
		return NULL;

	if (entry->expire > now.tv_sec) {
		return entry;
	} else {
		eb32_delete(node);
		entry->eb.key = 0;
	}
	return NULL;

}

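/* The cache descriptor is copied into shctx->data at init time (see
 * cfg_post_parse_section_cache()), and each cache_entry lives in the <data>
 * area of the first shared block of its row, so the two helpers below
 * recover the enclosing structure with container_of-style arithmetic.
 */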
static inline struct shared_context *shctx_ptr(struct cache *cache)
{
	return (struct shared_context *)((unsigned char *)cache - ((struct shared_context *)NULL)->data);
}

static inline struct shared_block *block_ptr(struct cache_entry *entry)
{
	return (struct shared_block *)((unsigned char *)entry - ((struct shared_block *)NULL)->data);
}



static int
cache_store_init(struct proxy *px, struct flt_conf *fconf)
{
	return 0;
}

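/* Allocate a per-stream filter context on the response channel and register
 * as a data filter so that the http_headers and http_forward_data callbacks
 * below are invoked for this stream.
 */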
static int
cache_store_chn_start_analyze(struct stream *s, struct filter *filter, struct channel *chn)
{
	if (!(chn->flags & CF_ISRESP))
		return 1;

	if (filter->ctx == NULL) {
		struct cache_st *st;

		st = pool_alloc_dirty(pool2_cache_st);
		if (st == NULL)
			return -1;

		st->hdrs_len = 0;
		st->first_block = NULL;
		filter->ctx = st;
	}

	register_data_filter(s, chn, filter);

	return 1;
}

static int
cache_store_chn_end_analyze(struct stream *s, struct filter *filter, struct channel *chn)
{
	struct cache_st *st = filter->ctx;
	struct cache *cache = filter->config->conf;
	struct shared_context *shctx = shctx_ptr(cache);

	if (!(chn->flags & CF_ISRESP))
		return 1;

	/* Everything should be released in the http_end filter, but we need to do it
	 * here too, in case of errors */

	if (st && st->first_block) {

		shctx_lock(shctx);
		shctx_row_dec_hot(shctx, st->first_block);
		shctx_unlock(shctx);

	}
	if (st) {
		pool_free2(pool2_cache_st, st);
		filter->ctx = NULL;
	}

	return 1;
}


static int
cache_store_http_headers(struct stream *s, struct filter *filter, struct http_msg *msg)
{
	struct cache_st *st = filter->ctx;

	if (!(msg->chn->flags & CF_ISRESP) || !st)
		return 1;

	st->hdrs_len = msg->sov;

	return 1;
}

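/* Append forwarded response data to the shared-memory row reserved by
 * http_action_store_cache(). The first st->hdrs_len bytes (the headers) were
 * already copied by that action, so they are skipped here; if the object
 * cannot fit in the row, caching is disabled and the row is released.
 */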
static int
cache_store_http_forward_data(struct stream *s, struct filter *filter,
		       struct http_msg *msg, unsigned int len)
{
	struct cache_st *st = filter->ctx;
	struct shared_context *shctx = shctx_ptr((struct cache *)filter->config->conf);
	struct cache_entry *object;
	int ret;

	/*
	 * We need to skip the HTTP headers first, because we saved them in the
	 * http-response action.
	 */
	if (!(msg->chn->flags & CF_ISRESP) || !st)
		return len;

	if (!len) {
		/* Nothing to forward */
		ret = len;
	}
	else if (st->hdrs_len >= len) {
		/* Forward part of headers */
		ret = len;
		st->hdrs_len -= len;
	}
	else {
		/* Forward data */
		if (filter->ctx && st->first_block) {
			/* disable buffering if too much data (never greater than a buffer size) */
			if (len - st->hdrs_len > global.tune.bufsize - global.tune.maxrewrite - st->first_block->len) {
			  disable_cache:
				object = (struct cache_entry *)st->first_block->data;
				filter->ctx = NULL; /* disable cache */
				shctx_lock(shctx);
				shctx_row_dec_hot(shctx, st->first_block);
				object->eb.key = 0;
				shctx_unlock(shctx);
				pool_free2(pool2_cache_st, st);
			} else {
				/* Skip remaining headers to fill the cache */
				b_adv(msg->chn->buf, st->hdrs_len);
				ret = shctx_row_data_append(shctx,
							    st->first_block,
							    (unsigned char *)bi_ptr(msg->chn->buf),
							    MIN(bi_contig_data(msg->chn->buf), len - st->hdrs_len));
				/* Rewind the buffer to forward all data */
				b_rew(msg->chn->buf, st->hdrs_len);
				if (ret)
					goto disable_cache;
			}
		}
		ret = len;
	}

	if ((ret != len) ||
	    (FLT_NXT(filter, msg->chn) != FLT_FWD(filter, msg->chn) + ret))
		task_wakeup(s->task, TASK_WOKEN_MSG);

	return ret;
}

static int
cache_store_http_end(struct stream *s, struct filter *filter,
                     struct http_msg *msg)
{
	struct cache_st *st = filter->ctx;
	struct cache *cache = filter->config->conf;
	struct shared_context *shctx = shctx_ptr(cache);
	struct cache_entry *object;

	if (!(msg->chn->flags & CF_ISRESP))
		return 1;

	if (st && st->first_block) {

		object = (struct cache_entry *)st->first_block->data;

		/* does not need to test if the insertion worked, if it
		 * doesn't, the blocks will be reused anyway */

		shctx_lock(shctx);
		if (eb32_insert(&cache->entries, &object->eb) != &object->eb) {
			object->eb.key = 0;
		}
		/* remove from the hotlist */
		shctx_row_dec_hot(shctx, st->first_block);
		shctx_unlock(shctx);

	}
	if (st) {
		pool_free2(pool2_cache_st, st);
		filter->ctx = NULL;
	}

	return 1;
}

/*
 * This is intended to be used when checking HTTP headers for some
 * word=value directive. Returns a pointer to the first character of the
 * value; if the word was not found or no value was assigned to it, returns
 * NULL.
 */
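/* Illustrative example (hypothetical values): with sample = "max-age=3600",
 * slen = 12, word = "max-age" and wlen = 7, a pointer to the '3' of "3600"
 * is returned; with sample = "no-store" and the same word, NULL is returned
 * because the word does not match.
 */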
char *directive_value(const char *sample, int slen, const char *word, int wlen)
{
	int st = 0;

	if (slen < wlen)
		return 0;

	while (wlen) {
		char c = *sample ^ *word;
		if (c && c != ('A' ^ 'a'))
			return NULL;
		sample++;
		word++;
		slen--;
		wlen--;
	}

	while (slen) {
		if (st == 0) {
			if (*sample != '=')
				return NULL;
			sample++;
			slen--;
			st = 1;
			continue;
		} else {
			return (char *)sample;
		}
	}

	return NULL;
}

/*
 * Return the maxage in seconds of an HTTP response.
 * Compute the maxage using either:
 *  - the assigned max-age of the cache
 *  - the s-maxage directive
 *  - the max-age directive
 *  - (Expires - Date) headers
 *  - the default-max-age of the cache
 *
 */
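/* For example (illustrative values), a response carrying
 * "Cache-Control: max-age=300, s-maxage=600" yields 600 since s-maxage is
 * preferred over max-age; when neither directive is present, the hardcoded
 * default of 60 seconds is returned for now.
 */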
int http_calc_maxage(struct stream *s)
{
	struct http_txn *txn = s->txn;
	struct hdr_ctx ctx;

	int smaxage = -1;
	int maxage = -1;


	/* TODO: forced maxage configuration */

	ctx.idx = 0;

	/* loop on the Cache-Control values */
	while (http_find_header2("Cache-Control", 13, s->res.buf->p, &txn->hdr_idx, &ctx)) {
		char *directive = ctx.line + ctx.val;
		char *value;

		value = directive_value(directive, ctx.vlen, "s-maxage", 8);
		if (value) {
			struct chunk *chk = get_trash_chunk();

			chunk_strncat(chk, value, ctx.vlen - 8 + 1);
			chunk_strncat(chk, "", 1);
			smaxage = atoi(chk->str);
		}

		value = directive_value(ctx.line + ctx.val, ctx.vlen, "max-age", 7);
		if (value) {
			struct chunk *chk = get_trash_chunk();

			chunk_strncat(chk, value, ctx.vlen - 7 + 1);
			chunk_strncat(chk, "", 1);
			maxage = atoi(chk->str);
		}
	}

	/* TODO: Expires - Date */


	if (smaxage > 0)
		return smaxage;

	if (maxage > 0)
		return maxage;

	/* TODO: return default value */

	return 60;

}


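/* Callback installed as shctx->free_block: when the shared-memory allocator
 * reclaims the blocks of a row, detach the corresponding entry from the
 * lookup tree as soon as its first block is released, so that lookups can no
 * longer return an object whose storage is being recycled.
 */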
static void cache_free_blocks(struct shared_block *first, struct shared_block *block)
{
	if (first == block) {
		struct cache_entry *object = (struct cache_entry *)first->data;
		if (object->eb.key) {
			eb32_delete(&object->eb);
			object->eb.key = 0;
		}
	}
}

/*
 * This function will store the headers of the response in a buffer and then
 * register a filter to store the data
 */
enum act_return http_action_store_cache(struct act_rule *rule, struct proxy *px,
                                        struct session *sess, struct stream *s, int flags)
{
	struct http_txn *txn = s->txn;
	struct http_msg *msg = &txn->rsp;
	struct filter *filter;
	struct hdr_ctx ctx;
	struct shared_block *first = NULL;
	struct shared_context *shctx = shctx_ptr((struct cache *)rule->arg.act.p[0]);
	struct cache_entry *object;


	/* Don't cache if the response came from a cache */
	if ((obj_type(s->target) == OBJ_TYPE_APPLET) &&
	    s->target == &http_cache_applet.obj_type) {
		goto out;
	}

	/* cache only HTTP/1.1 */
	if (!(txn->req.flags & HTTP_MSGF_VER_11))
		goto out;

	/* do not cache if the Content-Length is unknown */
	if (!(msg->flags & HTTP_MSGF_CNT_LEN))
		goto out;

	/* cache only GET method */
	if (txn->meth != HTTP_METH_GET)
		goto out;

	/* cache only 200 status code */
	if (txn->status != 200)
		goto out;

	/* Does not manage Vary at the moment. We will need a secondary key later for that */
	ctx.idx = 0;
	if (http_find_header2("Vary", 4, txn->rsp.chn->buf->p, &txn->hdr_idx, &ctx))
		goto out;

	/* we need to put this flag before using check_response_for_cacheability */
	txn->flags |= TX_CACHEABLE;

	if (txn->status != 101)
		check_response_for_cacheability(s, &s->res);

	if (!(txn->flags & TX_CACHEABLE))
		goto out;

	if ((msg->sov + msg->body_len) > (global.tune.bufsize - global.tune.maxrewrite))
		goto out;

	shctx_lock(shctx);

	first = shctx_row_reserve_hot(shctx, sizeof(struct cache_entry) + msg->sov + msg->body_len);
	if (!first) {
		shctx_unlock(shctx);
		goto out;
	}
	shctx_unlock(shctx);

	/* reserve space for the cache_entry structure */
	first->len = sizeof(struct cache_entry);

	/* cache the headers in an http action because it allows to choose what
	 * to cache, for example you might want to cache a response before
	 * modifying some HTTP headers, or on the contrary after modifying
	 * those headers.
	 */

	/* does not need to be locked because it's in the "hot" list,
	 * copy the headers */
	if (shctx_row_data_append(shctx, first, (unsigned char *)s->res.buf->p, msg->sov) < 0)
		goto out;

	/* register the buffer in the filter ctx for filling it with data */
	if (!LIST_ISEMPTY(&s->strm_flt.filters)) {
		list_for_each_entry(filter, &s->strm_flt.filters, list) {
			if (filter->config->id == cache_store_flt_id &&
			    filter->config->conf == rule->arg.act.p[0]) {
				if (filter->ctx) {
					struct cache_st *cache_ctx = filter->ctx;

					cache_ctx->first_block = first;
					object = (struct cache_entry *)first->data;

					object->eb.key = (*(unsigned int *)&txn->cache_hash);
					memcpy(object->hash, txn->cache_hash, sizeof(object->hash));
					/* Insert the node later on caching success */

					shctx_lock(shctx);
					if (entry_exist((struct cache *)rule->arg.act.p[0], txn->cache_hash)) {
						shctx_unlock(shctx);
						if (filter->ctx) {
							object->eb.key = 0;
							pool_free2(pool2_cache_st, filter->ctx);
							filter->ctx = NULL;
						}
						goto out;
					}
					shctx_unlock(shctx);

					/* store the latest validation date and the expiration time */
					object->latest_validation = now.tv_sec;
					object->expire = now.tv_sec + http_calc_maxage(s);

				}
				return ACT_RET_CONT;
			}
		}
	}

out:
	/* if we do not cache the response, release the reserved row */
	if (first) {
		object = (struct cache_entry *)first->data;

		shctx_lock(shctx);
		first->len = 0;
		object->eb.key = 0;
		shctx_row_dec_hot(shctx, first);
		shctx_unlock(shctx);
	}

	return ACT_RET_CONT;
}

#define HTTP_CACHE_INIT 0
#define HTTP_CACHE_FWD 1
#define HTTP_CACHE_END 2
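
/* The cache applet serves an object in three steps: HTTP_CACHE_INIT copies
 * the stored headers and body from shared memory into the response buffer,
 * HTTP_CACHE_FWD then eats the request and shuts the read side once
 * everything has been scheduled for forwarding, and HTTP_CACHE_END marks
 * completion (it is entered directly when the response channel is already
 * shut for writes).
 */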

static void http_cache_applet_release(struct appctx *appctx)
{
	struct cache *cache = (struct cache *)appctx->rule->arg.act.p[0];
	struct cache_entry *cache_ptr = appctx->ctx.cache.entry;
	struct shared_block *first = block_ptr(cache_ptr);

	shctx_lock(shctx_ptr(cache));
	shctx_row_dec_hot(shctx_ptr(cache), first);
	shctx_unlock(shctx_ptr(cache));
}

static void http_cache_io_handler(struct appctx *appctx)
{
	struct stream_interface *si = appctx->owner;
	struct channel *res = si_ic(si);
	struct cache *cache = (struct cache *)appctx->rule->arg.act.p[0];
	struct cache_entry *cache_ptr = appctx->ctx.cache.entry;
	struct shared_context *shctx = shctx_ptr(cache);
	struct shared_block *first = block_ptr(cache_ptr);

	if (unlikely(si->state == SI_ST_DIS || si->state == SI_ST_CLO))
		goto out;

	/* Check if the input buffer is available. */
	if (res->buf->size == 0) {
		si_applet_cant_put(si);
		goto out;
	}

	if (res->flags & (CF_SHUTW|CF_SHUTW_NOW))
		appctx->st0 = HTTP_CACHE_END;

	/* buffers are aligned there, should be fine */
	if (appctx->st0 == HTTP_CACHE_INIT) {
		int len = first->len - sizeof(struct cache_entry);
		if ((shctx_row_data_get(shctx, first, (unsigned char *)bi_end(res->buf), sizeof(struct cache_entry), len)) != 0) {
			fprintf(stderr, "cache error too big: %d\n", first->len - (int)sizeof(struct cache_entry));

			si_applet_cant_put(si);
			goto out;
		}
		res->buf->i += len;
		res->total += len;
		appctx->st0 = HTTP_CACHE_FWD;
	}

	if (appctx->st0 == HTTP_CACHE_FWD) {
		/* eat the whole request */
		co_skip(si_oc(si), si_ob(si)->o); // NOTE: when disabled, this does not report the correct status code
		res->flags |= CF_READ_NULL;
		si_shutr(si);
	}

	if ((res->flags & CF_SHUTR) && (si->state == SI_ST_EST))
		si_shutw(si);
out:
	;
}

enum act_parse_ret parse_cache_store(const char **args, int *orig_arg, struct proxy *proxy,
                                          struct act_rule *rule, char **err)
{
	struct flt_conf *fconf;
	int cur_arg = *orig_arg;
	rule->action = ACT_CUSTOM;
	rule->action_ptr = http_action_store_cache;

	if (!*args[cur_arg] || strcmp(args[cur_arg], "if") == 0 || strcmp(args[cur_arg], "unless") == 0) {
		memprintf(err, "expects a cache name");
		return ACT_RET_PRS_ERR;
	}

	/* check if a cache filter was already registered with this cache
	 * name, if that's the case, must use it. */
	list_for_each_entry(fconf, &proxy->filter_configs, list) {
		if (fconf->id == cache_store_flt_id && !strcmp((char *)fconf->conf, args[cur_arg])) {
			rule->arg.act.p[0] = fconf->conf;
			(*orig_arg)++;
			/* filter already registered */
			return ACT_RET_PRS_OK;
		}
	}

	rule->arg.act.p[0] = strdup(args[cur_arg]);
	if (!rule->arg.act.p[0]) {
		ha_alert("config: %s '%s': out of memory\n", proxy_type_str(proxy), proxy->id);
		err++;
		goto err;
	}
	/* register a filter to fill the cache buffer */
	fconf = calloc(1, sizeof(*fconf));
	if (!fconf) {
		ha_alert("config: %s '%s': out of memory\n",
			 proxy_type_str(proxy), proxy->id);
		err++;
		goto err;
	}
	fconf->id   = cache_store_flt_id;
	fconf->conf = rule->arg.act.p[0]; /* store the cache name */
	fconf->ops  = &cache_ops;
	LIST_ADDQ(&proxy->filter_configs, &fconf->list);

	(*orig_arg)++;

	return ACT_RET_PRS_OK;

err:
	return ACT_RET_PRS_ERR;
}

/* This produces a sha1 hash of the concatenation of the first
 * occurrence of the Host header followed by the path component if it
 * begins with a slash ('/'). */
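/* For example (hypothetical request), "GET /img/logo.png HTTP/1.1" with
 * "Host: www.example.com" hashes the string "www.example.com/img/logo.png";
 * the resulting 20 bytes are stored in txn->cache_hash.
 */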
int sha1_hosturi(struct http_txn *txn)
{
	struct hdr_ctx ctx;

	blk_SHA_CTX sha1_ctx;
	struct chunk *trash;
	char *path;
	char *end;
	trash = get_trash_chunk();

	/* retrieve the host */
	ctx.idx = 0;
	if (!http_find_header2("Host", 4, txn->req.chn->buf->p, &txn->hdr_idx, &ctx))
		return 0;
	chunk_strncat(trash, ctx.line + ctx.val, ctx.vlen);

	/* now retrieve the path */
	end = txn->req.chn->buf->p + txn->req.sl.rq.u + txn->req.sl.rq.u_l;
	path = http_get_path(txn);
	if (!path)
		return 0;
	chunk_strncat(trash, path, end - path);

	/* hash everything */
	blk_SHA1_Init(&sha1_ctx);
	blk_SHA1_Update(&sha1_ctx, trash->str, trash->len);
	blk_SHA1_Final((unsigned char *)txn->cache_hash, &sha1_ctx);

	return 1;
}



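/* Entry point of the http-request cache-use action: hash the request with
 * sha1_hosturi(), look the object up and, when it is found, take a reference
 * on its row and register the cache applet so that the response is served
 * from shared memory instead of contacting a server.
 */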
enum act_return http_action_req_cache_use(struct act_rule *rule, struct proxy *px,
                                         struct session *sess, struct stream *s, int flags)
{

	struct cache_entry *res;
	struct cache *cache = (struct cache *)rule->arg.act.p[0];

	sha1_hosturi(s->txn);

	shctx_lock(shctx_ptr(cache));
	res = entry_exist(cache, s->txn->cache_hash);
	if (res) {
		struct appctx *appctx;
		shctx_row_inc_hot(shctx_ptr(cache), block_ptr(res));
		shctx_unlock(shctx_ptr(cache));
		s->target = &http_cache_applet.obj_type;
		if ((appctx = stream_int_register_handler(&s->si[1], objt_applet(s->target)))) {
			appctx->st0 = HTTP_CACHE_INIT;
			appctx->rule = rule;
			appctx->ctx.cache.entry = res;
			return ACT_RET_CONT;
		} else {
			shctx_lock(shctx_ptr(cache));
			shctx_row_dec_hot(shctx_ptr(cache), block_ptr(res));
			shctx_unlock(shctx_ptr(cache));
			return ACT_RET_YIELD;
		}
	}
	shctx_unlock(shctx_ptr(cache));
	return ACT_RET_CONT;
}


enum act_parse_ret parse_cache_use(const char **args, int *orig_arg, struct proxy *proxy,
                                          struct act_rule *rule, char **err)
{
	int cur_arg = *orig_arg;

	rule->action = ACT_CUSTOM;
	rule->action_ptr = http_action_req_cache_use;

	if (!*args[cur_arg] || strcmp(args[cur_arg], "if") == 0 || strcmp(args[cur_arg], "unless") == 0) {
		memprintf(err, "expects a cache name");
		return ACT_RET_PRS_ERR;
	}

	rule->arg.act.p[0] = strdup(args[cur_arg]);
	if (!rule->arg.act.p[0]) {
		ha_alert("config: %s '%s': out of memory\n", proxy_type_str(proxy), proxy->id);
		err++;
		goto err;
	}

	(*orig_arg)++;
	return ACT_RET_PRS_OK;

err:
	return ACT_RET_PRS_ERR;

}

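/* Illustrative configuration using the section and action keywords parsed
 * below (names and sizes are examples only):
 *
 *   cache mycache
 *       total-max-size 4       # in megabytes
 *
 *   frontend fe
 *       http-request cache-use mycache
 *       http-response cache-store mycache
 */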
int cfg_parse_cache(const char *file, int linenum, char **args, int kwm)
{
	int err_code = 0;

	if (strcmp(args[0], "cache") == 0) { /* new cache section */

		if (!*args[1]) {
			ha_alert("parsing [%s:%d] : '%s' expects an <id> argument\n",
				 file, linenum, args[0]);
			err_code |= ERR_ALERT | ERR_ABORT;
			goto out;
		}

		if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
			err_code |= ERR_ABORT;
			goto out;
		}

		if (tmp_cache_config == NULL) {
			tmp_cache_config = calloc(1, sizeof(*tmp_cache_config));
			if (!tmp_cache_config) {
				ha_alert("parsing [%s:%d]: out of memory.\n", file, linenum);
				err_code |= ERR_ALERT | ERR_ABORT;
				goto out;
			}

			strlcpy2(tmp_cache_config->id, args[1], 33);
			if (strlen(args[1]) > 32) {
				ha_warning("parsing [%s:%d]: cache id is limited to 32 characters, truncated to '%s'.\n",
					   file, linenum, tmp_cache_config->id);
				err_code |= ERR_WARN;
			}

			tmp_cache_config->maxblocks = 0;
		}
	} else if (strcmp(args[0], "total-max-size") == 0) {
		int maxsize;

		if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
			err_code |= ERR_ABORT;
			goto out;
		}

		/* size in megabytes */
		maxsize = atoi(args[1]) * 1024 * 1024 / CACHE_BLOCKSIZE;
		tmp_cache_config->maxblocks = maxsize;

	} else if (*args[0] != 0) {
		ha_alert("parsing [%s:%d] : unknown keyword '%s' in 'cache' section\n", file, linenum, args[0]);
		err_code |= ERR_ALERT | ERR_FATAL;
		goto out;
	}
out:
	return err_code;
}


/* once the cache section is parsed */

int cfg_post_parse_section_cache()
{
	struct shared_context *shctx;
	int err_code = 0;
	int ret_shctx;

	if (tmp_cache_config) {
		struct cache *cache;

		if (tmp_cache_config->maxblocks <= 0) {
			ha_alert("Size not specified for cache '%s'\n", tmp_cache_config->id);
			err_code |= ERR_FATAL | ERR_ALERT;
			goto out;
		}

		ret_shctx = shctx_init(&shctx, tmp_cache_config->maxblocks, CACHE_BLOCKSIZE, sizeof(struct cache), 1);

		if (ret_shctx < 0) {
			if (ret_shctx == SHCTX_E_INIT_LOCK)
				ha_alert("Unable to initialize the lock for the cache.\n");
			else
				ha_alert("Unable to allocate cache.\n");

			err_code |= ERR_FATAL | ERR_ALERT;
			goto out;
		}
		shctx->free_block = cache_free_blocks;
		memcpy(shctx->data, tmp_cache_config, sizeof(struct cache));
		cache = (struct cache *)shctx->data;
		cache->entries = EB_ROOT_UNIQUE;
		LIST_ADDQ(&caches, &cache->list);
	}
out:
	free(tmp_cache_config);
	tmp_cache_config = NULL;
	return err_code;

}
830
831/*
832 * Resolve the cache name to a pointer once the file is completely read.
833 */
834int cfg_cache_postparser()
835{
836 struct act_rule *hresrule, *hrqrule;
837 void *cache_ptr;
838 struct cache *cache;
839 struct proxy *curproxy = NULL;
840 int err = 0;
841 struct flt_conf *fconf;
842
Olivier Houchardfbc74e82017-11-24 16:54:05 +0100843 for (curproxy = proxies_list; curproxy; curproxy = curproxy->next) {
William Lallemand41db4602017-10-30 11:15:51 +0100844
845 /* resolve the http response cache name to a ptr in the action rule */
846 list_for_each_entry(hresrule, &curproxy->http_res_rules, list) {
847 if (hresrule->action != ACT_CUSTOM ||
848 hresrule->action_ptr != http_action_store_cache)
849 continue;
850
851 cache_ptr = hresrule->arg.act.p[0];
852
853 list_for_each_entry(cache, &caches, list) {
854 if (!strcmp(cache->id, cache_ptr)) {
855 /* don't free there, it's still used in the filter conf */
856 cache_ptr = cache;
857 break;
858 }
859 }
860
861 if (cache_ptr == hresrule->arg.act.p[0]) {
Christopher Faulet767a84b2017-11-24 16:50:31 +0100862 ha_alert("Proxy '%s': unable to find the cache '%s' referenced by http-response cache-store rule.\n",
863 curproxy->id, (char *)hresrule->arg.act.p[0]);
William Lallemand41db4602017-10-30 11:15:51 +0100864 err++;
865 }
866
867 hresrule->arg.act.p[0] = cache_ptr;
868 }
869
870 /* resolve the http request cache name to a ptr in the action rule */
871 list_for_each_entry(hrqrule, &curproxy->http_req_rules, list) {
872 if (hrqrule->action != ACT_CUSTOM ||
873 hrqrule->action_ptr != http_action_req_cache_use)
874 continue;
875
876 cache_ptr = hrqrule->arg.act.p[0];
877
878 list_for_each_entry(cache, &caches, list) {
879 if (!strcmp(cache->id, cache_ptr)) {
880 free(cache_ptr);
881 cache_ptr = cache;
882 break;
883 }
884 }
885
886 if (cache_ptr == hrqrule->arg.act.p[0]) {
Christopher Faulet767a84b2017-11-24 16:50:31 +0100887 ha_alert("Proxy '%s': unable to find the cache '%s' referenced by http-request cache-use rule.\n",
888 curproxy->id, (char *)hrqrule->arg.act.p[0]);
William Lallemand41db4602017-10-30 11:15:51 +0100889 err++;
890 }
891
892 hrqrule->arg.act.p[0] = cache_ptr;
893 }
894
895 /* resolve the cache name to a ptr in the filter config */
896 list_for_each_entry(fconf, &curproxy->filter_configs, list) {
897
William Lallemand9c54c532017-11-02 16:38:42 +0100898 if (fconf->id != cache_store_flt_id)
899 continue;
900
William Lallemand41db4602017-10-30 11:15:51 +0100901 cache_ptr = fconf->conf;
902
903 list_for_each_entry(cache, &caches, list) {
904 if (!strcmp(cache->id, cache_ptr)) {
905 /* there can be only one filter per cache, so we free it there */
906 free(cache_ptr);
907 cache_ptr = cache;
908 break;
909 }
910 }
911
912 if (cache_ptr == fconf->conf) {
Christopher Faulet767a84b2017-11-24 16:50:31 +0100913 ha_alert("Proxy '%s': unable to find the cache '%s' referenced by the filter 'cache'.\n",
914 curproxy->id, (char *)fconf->conf);
William Lallemand41db4602017-10-30 11:15:51 +0100915 err++;
916 }
917 fconf->conf = cache_ptr;
918 }
919 }
920 return err;
921}
922

struct flt_ops cache_ops = {
	.init = cache_store_init,

	/* Handle channels activity */
	.channel_start_analyze = cache_store_chn_start_analyze,
	.channel_end_analyze = cache_store_chn_end_analyze,

	/* Filter HTTP requests and responses */
	.http_headers = cache_store_http_headers,
	.http_end = cache_store_http_end,

	.http_forward_data = cache_store_http_forward_data,

};

static int cli_parse_show_cache(char **args, struct appctx *appctx, void *private)
{
	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	return 0;
}

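/* Dump every cache and its entries on the CLI. Each cache is printed as
 *   <cache ptr>: <id> (shctx:<ptr>, available blocks:<n>)
 * followed by one line per entry:
 *   <entry ptr> hash:<key> size:<bytes> (<blocks> blocks), refcount:<n>, expire:<seconds>
 */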
static int cli_io_handler_show_cache(struct appctx *appctx)
{
	struct cache* cache = appctx->ctx.cli.p0;
	struct stream_interface *si = appctx->owner;

	chunk_reset(&trash);


	if (cache == NULL) {
		cache = LIST_ELEM((caches).n, typeof(struct cache *), list);
	}

	list_for_each_entry_from(cache, &caches, list) {
		struct eb32_node *node = NULL;
		unsigned int next_key;
		struct cache_entry *entry;

		chunk_appendf(&trash, "%p: %s (shctx:%p, available blocks:%d)\n", cache, cache->id, shctx_ptr(cache), shctx_ptr(cache)->nbav);

		next_key = appctx->ctx.cli.i0;

		appctx->ctx.cli.p0 = cache;

		while (1) {

			shctx_lock(shctx_ptr(cache));
			node = eb32_lookup_ge(&cache->entries, next_key);
			if (!node) {
				shctx_unlock(shctx_ptr(cache));
				break;
			}

			entry = container_of(node, struct cache_entry, eb);
			chunk_appendf(&trash, "%p hash:%u size:%u (%u blocks), refcount:%u, expire:%d\n", entry, (*(unsigned int *)entry->hash), block_ptr(entry)->len, block_ptr(entry)->block_count, block_ptr(entry)->refcount, entry->expire - (int)now.tv_sec);

			next_key = node->key + 1;
			appctx->ctx.cli.i0 = next_key;

			shctx_unlock(shctx_ptr(cache));

			if (ci_putchk(si_ic(si), &trash) == -1) {
				si_applet_cant_put(si);
				return 0;
			}
		}

	}

	return 1;

}

static struct cli_kw_list cli_kws = {{},{
	{ { "show", "cache", NULL }, "show cache : show cache status", cli_parse_show_cache, cli_io_handler_show_cache, NULL, NULL },
	{{},}
}};


static struct action_kw_list http_res_actions = {
	.kw = {
		{ "cache-store", parse_cache_store },
		{ NULL, NULL }
	}
};

static struct action_kw_list http_req_actions = {
	.kw = {
		{ "cache-use", parse_cache_use },
		{ NULL, NULL }
	}
};

struct applet http_cache_applet = {
	.obj_type = OBJ_TYPE_APPLET,
	.name = "<CACHE>", /* used for logging */
	.fct = http_cache_io_handler,
	.release = http_cache_applet_release,
};

__attribute__((constructor))
static void __cache_init(void)
{
	cfg_register_section("cache", cfg_parse_cache, cfg_post_parse_section_cache);
	cfg_register_postparser("cache", cfg_cache_postparser);
	cli_register_kw(&cli_kws);
	http_res_keywords_register(&http_res_actions);
	http_req_keywords_register(&http_req_actions);
	pool2_cache_st = create_pool("cache_st", sizeof(struct cache_st), MEM_F_SHARED);
}
