blob: 22f15f7041aed115d7cab803f4a978aef1551dcf [file] [log] [blame]
/*
 * Copyright (C) 2015 Willy Tarreau <w@1wt.eu>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24
25#include <import/lru.h>
26
/* Minimal list manipulation macros for lru64_list */
/* Insert <el> right after the list head <lh> (i.e. at the head of the LRU
 * list). The second statement first links the old head and the head's ->n
 * back to <el> before <el>->p is set, so the order must not be changed.
 */
#define LIST_INSERT(lh, el) ({ (el)->n = (lh)->n; (el)->n->p = (lh)->n = (el); (el)->p = (lh); })
/* Detach <el> from whatever list it is in. <el>'s own ->n/->p are left
 * dangling and must not be followed until it is re-inserted.
 */
#define LIST_DELETE(el) ({ (el)->n->p = (el)->p; (el)->p->n = (el)->n; })
Christopher Faulet92939d22015-06-11 13:33:13 +020031
32/* Lookup key <key> in LRU cache <lru> for use with domain <domain> whose data's
33 * current version is <revision>. It differs from lru64_get as it does not
34 * create missing keys. The function returns NULL if an error or a cache miss
35 * occurs. */
36struct lru64 *lru64_lookup(unsigned long long key, struct lru64_head *lru,
37 void *domain, unsigned long long revision)
38{
39 struct eb64_node *node;
40 struct lru64 *elem;
41
Christopher Faulet92939d22015-06-11 13:33:13 +020042 node = __eb64_lookup(&lru->keys, key);
43 elem = container_of(node, typeof(*elem), node);
44 if (elem) {
45 /* Existing entry found, check validity then move it at the
46 * head of the LRU list.
47 */
48 if (elem->domain == domain && elem->revision == revision) {
Willy Tarreau2b718102021-04-21 07:32:39 +020049 LIST_DELETE(&elem->lru);
50 LIST_INSERT(&lru->list, &elem->lru);
Christopher Faulet92939d22015-06-11 13:33:13 +020051 return elem;
52 }
53 }
54 return NULL;
55}
56
/* Get key <key> from LRU cache <lru> for use with domain <domain> whose data's
 * current revision is <revision>. If the key doesn't exist it's first created
 * with ->domain = NULL. The caller detects this situation by checking ->domain
 * and must perform the operation to be cached then call lru64_commit() to
 * complete the operation. A lock (mutex or spinlock) may be added around the
 * function to permit use in a multi-threaded environment. The function may
 * return NULL upon memory allocation failure.
 */
struct lru64 *lru64_get(unsigned long long key, struct lru64_head *lru,
			void *domain, unsigned long long revision)
{
	struct eb64_node *node;
	struct lru64 *elem;

	/* Make sure a pre-allocated spare entry is available before touching
	 * the tree, so that the insertion below cannot fail half-way.
	 */
	if (!lru->spare) {
		if (!lru->cache_size)
			return NULL;
		lru->spare = malloc(sizeof(*lru->spare));
		if (!lru->spare)
			return NULL;
		lru->spare->domain = NULL; /* marks the entry as locked */
	}

	/* Lookup or insert: __eb64_insert() returns the existing node when
	 * the key is already in the tree, otherwise it inserts and returns
	 * our spare node.
	 */
	lru->spare->node.key = key;
	node = __eb64_insert(&lru->keys, &lru->spare->node);
	elem = container_of(node, typeof(*elem), node);

	if (elem != lru->spare) {
		/* Existing entry found, check validity then move it at the
		 * head of the LRU list.
		 */
		if (elem->domain == domain && elem->revision == revision) {
			LIST_DELETE(&elem->lru);
			LIST_INSERT(&lru->list, &elem->lru);
			return elem;
		}

		if (!elem->domain)
			return NULL; // currently locked

		/* recycle this entry: same key but stale domain/revision */
		LIST_DELETE(&elem->lru);
	}
	else {
		/* New entry inserted, initialize and move to the head of the
		 * LRU list, and lock it until commit.
		 */
		lru->cache_usage++;
		lru->spare = NULL; // used, need a new one next time
	}

	elem->domain = NULL; /* lock the entry until lru64_commit() */
	LIST_INSERT(&lru->list, &elem->lru);

	if (lru->cache_usage > lru->cache_size) {
		/* try to kill oldest entry; the head's ->p is the tail of
		 * the LRU list, i.e. the least recently used entry.
		 */
		struct lru64 *old;

		old = container_of(lru->list.p, typeof(*old), lru);
		if (old->domain) {
			/* not locked */
			LIST_DELETE(&old->lru);
			__eb64_delete(&old->node);
			if (old->data && old->free)
				old->free(old->data);
			if (!lru->spare)
				lru->spare = old; /* recycle as next spare */
			else {
				free(old);
			}
			lru->cache_usage--;
		}
	}
	return elem;
}
133
134/* Commit element <elem> with data <data>, domain <domain> and revision
135 * <revision>. <elem> is checked for NULL so that it's possible to call it
136 * with the result from a call to lru64_get(). The caller might lock it using a
137 * spinlock or mutex shared with the one around lru64_get().
138 */
Christopher Fauletf90ac552015-06-09 17:06:17 +0200139void lru64_commit(struct lru64 *elem, void *data, void *domain,
140 unsigned long long revision, void (*free)(void *))
Willy Tarreau69c696c2015-04-28 10:18:09 +0200141{
142 if (!elem)
143 return;
144
145 elem->data = data;
146 elem->revision = revision;
147 elem->domain = domain;
Christopher Fauletf90ac552015-06-09 17:06:17 +0200148 elem->free = free;
Willy Tarreau69c696c2015-04-28 10:18:09 +0200149}
150
151/* Create a new LRU cache of <size> entries. Returns the new cache or NULL in
152 * case of allocation failure.
153 */
154struct lru64_head *lru64_new(int size)
155{
156 struct lru64_head *lru;
157
158 lru = malloc(sizeof(*lru));
159 if (lru) {
160 lru->list.p = lru->list.n = &lru->list;
161 lru->keys = EB_ROOT_UNIQUE;
162 lru->spare = NULL;
163 lru->cache_size = size;
164 lru->cache_usage = 0;
165 }
166 return lru;
167}
168
169/* Tries to destroy the LRU cache <lru>. Returns the number of locked entries
170 * that prevent it from being destroyed, or zero meaning everything was done.
171 */
172int lru64_destroy(struct lru64_head *lru)
173{
174 struct lru64 *elem, *next;
175
176 if (!lru)
177 return 0;
178
179 elem = container_of(lru->list.p, typeof(*elem), lru);
180 while (&elem->lru != &lru->list) {
181 next = container_of(elem->lru.p, typeof(*next), lru);
182 if (elem->domain) {
183 /* not locked */
Willy Tarreau2b718102021-04-21 07:32:39 +0200184 LIST_DELETE(&elem->lru);
Willy Tarreau69c696c2015-04-28 10:18:09 +0200185 eb64_delete(&elem->node);
Willy Tarreau7810ad72015-06-17 19:53:03 +0200186 if (elem->data && elem->free)
Christopher Fauletf90ac552015-06-09 17:06:17 +0200187 elem->free(elem->data);
Willy Tarreau69c696c2015-04-28 10:18:09 +0200188 free(elem);
189 lru->cache_usage--;
190 lru->cache_size--;
191 }
192 elem = next;
193 }
194
195 if (lru->cache_usage)
196 return lru->cache_usage;
197
198 free(lru);
199 return 0;
200}
201
/* kill the <nb> least used entries from the <lru> cache. Locked entries
 * (->domain == NULL, i.e. not yet committed) are skipped and do not count
 * towards <nb>. One killed entry may be kept as the cache's spare entry.
 */
void lru64_kill_oldest(struct lru64_head *lru, unsigned long int nb)
{
	struct lru64 *elem, *next;

	/* the head's ->p is the tail, i.e. the least recently used entry;
	 * walk backwards towards more recent entries.
	 */
	for (elem = container_of(lru->list.p, typeof(*elem), lru);
	     nb && (&elem->lru != &lru->list);
	     elem = next) {
		next = container_of(elem->lru.p, typeof(*next), lru);
		if (!elem->domain)
			continue; /* locked entry */

		LIST_DELETE(&elem->lru);
		eb64_delete(&elem->node);
		if (elem->data && elem->free)
			elem->free(elem->data);
		if (!lru->spare)
			lru->spare = elem; /* recycle as the next spare */
		else
			free(elem);
		lru->cache_usage--;
		nb--;
	}
}
226
Willy Tarreau69c696c2015-04-28 10:18:09 +0200227/* The code below is just for validation and performance testing. It's an
228 * example of a function taking some time to return results that could be
229 * cached.
230 */
231#ifdef STANDALONE
232
233#include <stdio.h>
234
235static unsigned int misses;
236
/* Deliberately expensive helper simulating a costly computation whose result
 * is worth caching. By default it recurses, shrinking <x> by about 1% per
 * step; defining TEST_LRU_FAST_OPERATION swaps in a cheap closed form so the
 * cache overhead itself can be measured.
 */
static unsigned long long sum(unsigned long long x)
{
#ifndef TEST_LRU_FAST_OPERATION
	return x < 1 ? 0 : x + sum(x * 99 / 100 - 1);
#else
	return (x << 16) - (x << 8) - 1;
#endif
}
247
248static long get_value(struct lru64_head *lru, long a)
249{
250 struct lru64 *item;
251
252 if (lru) {
253 item = lru64_get(a, lru, lru, 0);
254 if (item && item->domain)
255 return (long)item->data;
256 }
257 misses++;
258 /* do the painful work here */
259 a = sum(a);
260 if (item)
261 lru64_commit(item, (void *)a, lru, 0);
262 return a;
263}
264
265/* pass #of loops in argv[1] and set argv[2] to something to use the LRU */
266int main(int argc, char **argv)
267{
268 struct lru64_head *lru = NULL;
269 long long ret;
270 int total, loops;
271
272 if (argc < 2) {
273 printf("Need a number of rounds and optionally an LRU cache size (0..65536)\n");
274 exit(1);
275 }
276
277 total = atoi(argv[1]);
278
279 if (argc > 2) /* cache size */
280 lru = lru64_new(atoi(argv[2]));
281
282 ret = 0;
283 for (loops = 0; loops < total; loops++) {
284 ret += get_value(lru, rand() & 65535);
285 }
286 /* just for accuracy control */
287 printf("ret=%llx, hits=%d, misses=%d (%d %% hits)\n", ret, total-misses, misses, (int)((float)(total-misses) * 100.0 / total));
288
289 while (lru64_destroy(lru));
290
291 return 0;
292}
293
294#endif