/*
| 2 | * Copyright (C) 2015 Willy Tarreau <w@1wt.eu> |
| 3 | * |
| 4 | * Permission is hereby granted, free of charge, to any person obtaining |
| 5 | * a copy of this software and associated documentation files (the |
| 6 | * "Software"), to deal in the Software without restriction, including |
| 7 | * without limitation the rights to use, copy, modify, merge, publish, |
| 8 | * distribute, sublicense, and/or sell copies of the Software, and to |
| 9 | * permit persons to whom the Software is furnished to do so, subject to |
| 10 | * the following conditions: |
| 11 | * |
| 12 | * The above copyright notice and this permission notice shall be |
| 13 | * included in all copies or substantial portions of the Software. |
| 14 | * |
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
| 16 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES |
| 17 | * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
| 18 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT |
| 19 | * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, |
| 20 | * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
| 21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 22 | * OTHER DEALINGS IN THE SOFTWARE. |
| 23 | */ |
| 24 | |
#include <stdlib.h>

#include <import/lru.h>
| 26 | |
/* Minimal list manipulation macros for lru64_list */
/* Insert <el> right after the list head <lh>, i.e. at the head of the list.
 * The head position holds the most recently used entry; the tail (lh->p)
 * holds the oldest one, which is the eviction candidate.
 */
#define LIST_ADD(lh, el) ({ (el)->n = (lh)->n; (el)->n->p = (lh)->n = (el); (el)->p = (lh); })
/* Unlink <el> from its list. <el>'s own n/p pointers are left untouched and
 * still point to the old neighbors, so it must be re-added before reuse.
 */
#define LIST_DEL(el) ({ (el)->n->p = (el)->p; (el)->p->n = (el)->n; })
| 30 | |
Christopher Faulet | 92939d2 | 2015-06-11 13:33:13 +0200 | [diff] [blame] | 31 | |
| 32 | /* Lookup key <key> in LRU cache <lru> for use with domain <domain> whose data's |
| 33 | * current version is <revision>. It differs from lru64_get as it does not |
| 34 | * create missing keys. The function returns NULL if an error or a cache miss |
| 35 | * occurs. */ |
| 36 | struct lru64 *lru64_lookup(unsigned long long key, struct lru64_head *lru, |
| 37 | void *domain, unsigned long long revision) |
| 38 | { |
| 39 | struct eb64_node *node; |
| 40 | struct lru64 *elem; |
| 41 | |
Christopher Faulet | 92939d2 | 2015-06-11 13:33:13 +0200 | [diff] [blame] | 42 | node = __eb64_lookup(&lru->keys, key); |
| 43 | elem = container_of(node, typeof(*elem), node); |
| 44 | if (elem) { |
| 45 | /* Existing entry found, check validity then move it at the |
| 46 | * head of the LRU list. |
| 47 | */ |
| 48 | if (elem->domain == domain && elem->revision == revision) { |
| 49 | LIST_DEL(&elem->lru); |
| 50 | LIST_ADD(&lru->list, &elem->lru); |
| 51 | return elem; |
| 52 | } |
| 53 | } |
| 54 | return NULL; |
| 55 | } |
| 56 | |
Willy Tarreau | 69c696c | 2015-04-28 10:18:09 +0200 | [diff] [blame] | 57 | /* Get key <key> from LRU cache <lru> for use with domain <domain> whose data's |
| 58 | * current revision is <revision>. If the key doesn't exist it's first created |
| 59 | * with ->domain = NULL. The caller detects this situation by checking ->domain |
| 60 | * and must perform the operation to be cached then call lru64_commit() to |
| 61 | * complete the operation. A lock (mutex or spinlock) may be added around the |
| 62 | * function to permit use in a multi-threaded environment. The function may |
| 63 | * return NULL upon memory allocation failure. |
| 64 | */ |
| 65 | struct lru64 *lru64_get(unsigned long long key, struct lru64_head *lru, |
| 66 | void *domain, unsigned long long revision) |
| 67 | { |
| 68 | struct eb64_node *node; |
| 69 | struct lru64 *elem; |
| 70 | |
| 71 | if (!lru->spare) { |
| 72 | if (!lru->cache_size) |
| 73 | return NULL; |
| 74 | lru->spare = malloc(sizeof(*lru->spare)); |
| 75 | if (!lru->spare) |
| 76 | return NULL; |
| 77 | lru->spare->domain = NULL; |
| 78 | } |
| 79 | |
| 80 | /* Lookup or insert */ |
| 81 | lru->spare->node.key = key; |
| 82 | node = __eb64_insert(&lru->keys, &lru->spare->node); |
| 83 | elem = container_of(node, typeof(*elem), node); |
| 84 | |
| 85 | if (elem != lru->spare) { |
| 86 | /* Existing entry found, check validity then move it at the |
| 87 | * head of the LRU list. |
| 88 | */ |
| 89 | if (elem->domain == domain && elem->revision == revision) { |
| 90 | LIST_DEL(&elem->lru); |
| 91 | LIST_ADD(&lru->list, &elem->lru); |
| 92 | return elem; |
| 93 | } |
| 94 | |
| 95 | if (!elem->domain) |
| 96 | return NULL; // currently locked |
| 97 | |
| 98 | /* recycle this entry */ |
| 99 | LIST_DEL(&elem->lru); |
| 100 | } |
| 101 | else { |
| 102 | /* New entry inserted, initialize and move to the head of the |
| 103 | * LRU list, and lock it until commit. |
| 104 | */ |
| 105 | lru->cache_usage++; |
| 106 | lru->spare = NULL; // used, need a new one next time |
| 107 | } |
| 108 | |
| 109 | elem->domain = NULL; |
| 110 | LIST_ADD(&lru->list, &elem->lru); |
| 111 | |
| 112 | if (lru->cache_usage > lru->cache_size) { |
| 113 | /* try to kill oldest entry */ |
| 114 | struct lru64 *old; |
| 115 | |
| 116 | old = container_of(lru->list.p, typeof(*old), lru); |
| 117 | if (old->domain) { |
| 118 | /* not locked */ |
| 119 | LIST_DEL(&old->lru); |
| 120 | __eb64_delete(&old->node); |
Willy Tarreau | 57b8a53 | 2015-06-17 20:33:30 +0200 | [diff] [blame] | 121 | if (old->data && old->free) |
| 122 | old->free(old->data); |
Willy Tarreau | 69c696c | 2015-04-28 10:18:09 +0200 | [diff] [blame] | 123 | if (!lru->spare) |
| 124 | lru->spare = old; |
Christopher Faulet | f90ac55 | 2015-06-09 17:06:17 +0200 | [diff] [blame] | 125 | else { |
Willy Tarreau | 69c696c | 2015-04-28 10:18:09 +0200 | [diff] [blame] | 126 | free(old); |
Christopher Faulet | f90ac55 | 2015-06-09 17:06:17 +0200 | [diff] [blame] | 127 | } |
Willy Tarreau | 69c696c | 2015-04-28 10:18:09 +0200 | [diff] [blame] | 128 | lru->cache_usage--; |
| 129 | } |
| 130 | } |
| 131 | return elem; |
| 132 | } |
| 133 | |
| 134 | /* Commit element <elem> with data <data>, domain <domain> and revision |
| 135 | * <revision>. <elem> is checked for NULL so that it's possible to call it |
| 136 | * with the result from a call to lru64_get(). The caller might lock it using a |
| 137 | * spinlock or mutex shared with the one around lru64_get(). |
| 138 | */ |
Christopher Faulet | f90ac55 | 2015-06-09 17:06:17 +0200 | [diff] [blame] | 139 | void lru64_commit(struct lru64 *elem, void *data, void *domain, |
| 140 | unsigned long long revision, void (*free)(void *)) |
Willy Tarreau | 69c696c | 2015-04-28 10:18:09 +0200 | [diff] [blame] | 141 | { |
| 142 | if (!elem) |
| 143 | return; |
| 144 | |
| 145 | elem->data = data; |
| 146 | elem->revision = revision; |
| 147 | elem->domain = domain; |
Christopher Faulet | f90ac55 | 2015-06-09 17:06:17 +0200 | [diff] [blame] | 148 | elem->free = free; |
Willy Tarreau | 69c696c | 2015-04-28 10:18:09 +0200 | [diff] [blame] | 149 | } |
| 150 | |
| 151 | /* Create a new LRU cache of <size> entries. Returns the new cache or NULL in |
| 152 | * case of allocation failure. |
| 153 | */ |
| 154 | struct lru64_head *lru64_new(int size) |
| 155 | { |
| 156 | struct lru64_head *lru; |
| 157 | |
| 158 | lru = malloc(sizeof(*lru)); |
| 159 | if (lru) { |
| 160 | lru->list.p = lru->list.n = &lru->list; |
| 161 | lru->keys = EB_ROOT_UNIQUE; |
| 162 | lru->spare = NULL; |
| 163 | lru->cache_size = size; |
| 164 | lru->cache_usage = 0; |
| 165 | } |
| 166 | return lru; |
| 167 | } |
| 168 | |
| 169 | /* Tries to destroy the LRU cache <lru>. Returns the number of locked entries |
| 170 | * that prevent it from being destroyed, or zero meaning everything was done. |
| 171 | */ |
| 172 | int lru64_destroy(struct lru64_head *lru) |
| 173 | { |
| 174 | struct lru64 *elem, *next; |
| 175 | |
| 176 | if (!lru) |
| 177 | return 0; |
| 178 | |
| 179 | elem = container_of(lru->list.p, typeof(*elem), lru); |
| 180 | while (&elem->lru != &lru->list) { |
| 181 | next = container_of(elem->lru.p, typeof(*next), lru); |
| 182 | if (elem->domain) { |
| 183 | /* not locked */ |
| 184 | LIST_DEL(&elem->lru); |
| 185 | eb64_delete(&elem->node); |
Willy Tarreau | 7810ad7 | 2015-06-17 19:53:03 +0200 | [diff] [blame] | 186 | if (elem->data && elem->free) |
Christopher Faulet | f90ac55 | 2015-06-09 17:06:17 +0200 | [diff] [blame] | 187 | elem->free(elem->data); |
Willy Tarreau | 69c696c | 2015-04-28 10:18:09 +0200 | [diff] [blame] | 188 | free(elem); |
| 189 | lru->cache_usage--; |
| 190 | lru->cache_size--; |
| 191 | } |
| 192 | elem = next; |
| 193 | } |
| 194 | |
| 195 | if (lru->cache_usage) |
| 196 | return lru->cache_usage; |
| 197 | |
| 198 | free(lru); |
| 199 | return 0; |
| 200 | } |
| 201 | |
/* kill the <nb> least recently used entries from the <lru> cache */
void lru64_kill_oldest(struct lru64_head *lru, unsigned long int nb)
{
	struct lru64 *elem, *next;

	/* walk from the tail of the LRU list (oldest entry) towards the head,
	 * stopping once <nb> entries were killed or the list was exhausted
	 */
	for (elem = container_of(lru->list.p, typeof(*elem), lru);
	     nb && (&elem->lru != &lru->list);
	     elem = next) {
		/* grab the predecessor first since <elem> may be freed below */
		next = container_of(elem->lru.p, typeof(*next), lru);
		if (!elem->domain)
			continue; /* locked entry */

		LIST_DEL(&elem->lru);
		eb64_delete(&elem->node);
		if (elem->data && elem->free)
			elem->free(elem->data);
		/* keep the node as the spare one when none is present, saving
		 * a future allocation in lru64_get()
		 */
		if (!lru->spare)
			lru->spare = elem;
		else
			free(elem);
		lru->cache_usage--;
		nb--;
	}
}
| 226 | |
Willy Tarreau | 69c696c | 2015-04-28 10:18:09 +0200 | [diff] [blame] | 227 | /* The code below is just for validation and performance testing. It's an |
| 228 | * example of a function taking some time to return results that could be |
| 229 | * cached. |
| 230 | */ |
| 231 | #ifdef STANDALONE |
| 232 | |
| 233 | #include <stdio.h> |
| 234 | |
| 235 | static unsigned int misses; |
| 236 | |
/* Deliberately slow recursive function simulating an expensive operation
 * worth caching: returns x + sum(x * 99 / 100 - 1), with sum(x) = x for
 * x <= 1. The terminal case covers x == 1 because the unsigned expression
 * x * 99 / 100 - 1 would otherwise wrap around to ULLONG_MAX, triggering a
 * runaway recursion for any input whose reduction chain reaches 1.
 */
static unsigned long long sum(unsigned long long x)
{
#ifndef TEST_LRU_FAST_OPERATION
	if (x <= 1)
		return x;
	return x + sum(x * 99 / 100 - 1);
#else
	return (x << 16) - (x << 8) - 1;
#endif
}
| 247 | |
| 248 | static long get_value(struct lru64_head *lru, long a) |
| 249 | { |
| 250 | struct lru64 *item; |
| 251 | |
| 252 | if (lru) { |
| 253 | item = lru64_get(a, lru, lru, 0); |
| 254 | if (item && item->domain) |
| 255 | return (long)item->data; |
| 256 | } |
| 257 | misses++; |
| 258 | /* do the painful work here */ |
| 259 | a = sum(a); |
| 260 | if (item) |
| 261 | lru64_commit(item, (void *)a, lru, 0); |
| 262 | return a; |
| 263 | } |
| 264 | |
| 265 | /* pass #of loops in argv[1] and set argv[2] to something to use the LRU */ |
| 266 | int main(int argc, char **argv) |
| 267 | { |
| 268 | struct lru64_head *lru = NULL; |
| 269 | long long ret; |
| 270 | int total, loops; |
| 271 | |
| 272 | if (argc < 2) { |
| 273 | printf("Need a number of rounds and optionally an LRU cache size (0..65536)\n"); |
| 274 | exit(1); |
| 275 | } |
| 276 | |
| 277 | total = atoi(argv[1]); |
| 278 | |
| 279 | if (argc > 2) /* cache size */ |
| 280 | lru = lru64_new(atoi(argv[2])); |
| 281 | |
| 282 | ret = 0; |
| 283 | for (loops = 0; loops < total; loops++) { |
| 284 | ret += get_value(lru, rand() & 65535); |
| 285 | } |
| 286 | /* just for accuracy control */ |
| 287 | printf("ret=%llx, hits=%d, misses=%d (%d %% hits)\n", ret, total-misses, misses, (int)((float)(total-misses) * 100.0 / total)); |
| 288 | |
| 289 | while (lru64_destroy(lru)); |
| 290 | |
| 291 | return 0; |
| 292 | } |
| 293 | |
| 294 | #endif |