/*
 * include/common/mini-clist.h
 * Circular list manipulation macros and structures.
 *
 * Copyright (C) 2002-2014 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _COMMON_MINI_CLIST_H
#define _COMMON_MINI_CLIST_H

#include <common/config.h>

/* These are circular, bidirectional lists only. Each list pointer points to
 * another list pointer in a structure, and not to the structure itself. The
 * pointer to the next element MUST be the first one so that the list can
 * easily be cast as a singly linked list or pointer.
 */
struct list {
        struct list *n; /* next */
        struct list *p; /* prev */
};
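
/* Illustrative sketch (not part of this API): a structure becomes linkable by
 * embedding a struct list member; every macro below takes a pointer to that
 * member, never to the enclosing structure. The "struct task_item" name and
 * its fields are hypothetical.
 *
 *   struct task_item {
 *       struct list list;   // links task_items together
 *       int id;
 *   };
 */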

/* This is similar to struct list, but we want to be sure the compiler will
 * yell at you if you use the macros of one type on the other. You have to
 * explicitly cast if that is really what you want to do.
 */
struct mt_list {
        struct mt_list *next;
        struct mt_list *prev;
};

/* A back-ref is a pointer to a target list entry. It is used to detect when an
 * element being deleted is currently being tracked by another user. The best
 * example is a user dumping the session table: the table does not fit in the
 * output buffer, so we have to set a mark on a session and resume later. But if
 * that marked session gets deleted, we don't want the user's pointer to be left
 * dangling. So we simply link this user's request to the list of this session's
 * users, and put a pointer to the list element in <ref>, which will be used as
 * the mark for the next iteration.
 */
struct bref {
        struct list users;
        struct list *ref; /* pointer to the target's list entry */
};

/* a word list is a generic list with a pointer to a string in each element. */
struct wordlist {
        struct list list;
        char *s;
};

/* this is the same as above with an additional pointer to a condition. */
struct cond_wordlist {
        struct list list;
        void *cond;
        char *s;
};

/* First undefine some macros which happen to also be defined on OpenBSD,
 * in sys/queue.h, used by sys/event.h
 */
#undef LIST_HEAD
#undef LIST_INIT
#undef LIST_NEXT

/* ILH = Initialized List Head: used to prevent gcc from moving an empty
 * list to BSS. Some older versions tend to trim the whole array and cause
 * corruption.
 */
#define ILH          { .n = (struct list *)1, .p = (struct list *)2 }

#define LIST_HEAD(a) ((void *)(&(a)))

#define LIST_INIT(l) ((l)->n = (l)->p = (l))

#define LIST_HEAD_INIT(l) { &l, &l }

/* adds an element at the beginning of a list; returns the element */
#define LIST_ADD(lh, el) ({ (el)->n = (lh)->n; (el)->n->p = (lh)->n = (el); (el)->p = (lh); (el); })

/* adds an element at the end of a list; returns the element */
#define LIST_ADDQ(lh, el) ({ (el)->p = (lh)->p; (el)->p->n = (lh)->p = (el); (el)->n = (lh); (el); })
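
/* Usage sketch (illustrative only; "queue" and "struct task_item" are
 * hypothetical). Both macros expect <el> to point to the struct list member,
 * not to the enclosing structure; LIST_ADD works the same way but inserts at
 * the head instead of the tail.
 *
 *   struct list queue = LIST_HEAD_INIT(queue);
 *   struct task_item *t = malloc(sizeof(*t));
 *
 *   LIST_ADDQ(&queue, &t->list);   // append <t> at the tail of <queue>
 */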

/* adds the contents of a list <old> at the beginning of another list <new>.
 * The old list head remains untouched.
 */
#define LIST_SPLICE(new, old) do { \
		if (!LIST_ISEMPTY(old)) { \
			(old)->p->n = (new)->n; (old)->n->p = (new); \
			(new)->n->p = (old)->p; (new)->n = (old)->n; \
		} \
	} while (0)

/* adds the contents of a list whose first element is <old> and last one is
 * <old->prev> at the end of another list <new>. The old list DOES NOT have
 * any head here.
 */
#define LIST_SPLICE_END_DETACHED(new, old) do { \
		typeof(new) __t; \
		(new)->p->n = (old); \
		(old)->p->n = (new); \
		__t = (old)->p; \
		(old)->p = (new)->p; \
		(new)->p = __t; \
	} while (0)

/* removes an element from a list and returns it */
#define LIST_DEL(el) ({ typeof(el) __ret = (el); (el)->n->p = (el)->p; (el)->p->n = (el)->n; (__ret); })

/* removes an element from a list, initializes it and returns it.
 * This is faster than LIST_DEL+LIST_INIT as we avoid reloading the pointers.
 */
#define LIST_DEL_INIT(el) ({ \
	typeof(el)       __ret = (el); \
	typeof(__ret->n) __n   = __ret->n; \
	typeof(__ret->p) __p   = __ret->p; \
	__n->p = __p; __p->n = __n; \
	__ret->n = __ret->p = __ret; \
	__ret; \
})

/* returns a pointer of type <pt> to a structure containing a list head called
 * <el> at address <lh>. Note that <lh> can be the result of a function or macro
 * since it's used only once.
 * Example: LIST_ELEM(cur_node->args.next, struct node *, args)
 */
#define LIST_ELEM(lh, pt, el) ((pt)(((const char *)(lh)) - ((size_t)&((pt)NULL)->el)))

/* checks if the list head <lh> is empty or not */
#define LIST_ISEMPTY(lh) ((lh)->n == (lh))

/* checks if the list element <el> was added to a list or not. This only
 * works when detached elements are reinitialized (using LIST_DEL_INIT)
 */
#define LIST_ADDED(el) ((el)->n != (el))
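
/* Example sketch (hypothetical "t" embedding a struct list member named
 * "list"): detach an element only if it is currently in a list, assuming it
 * was initialized with LIST_INIT or a previous LIST_DEL_INIT.
 *
 *   if (LIST_ADDED(&t->list))
 *       LIST_DEL_INIT(&t->list);
 */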

/* returns a pointer of type <pt> to a structure following the element
 * which contains list head <lh>, which is known as element <el> in
 * struct pt.
 * Example: LIST_NEXT(args, struct node *, list)
 */
#define LIST_NEXT(lh, pt, el) (LIST_ELEM((lh)->n, pt, el))

/* returns a pointer of type <pt> to a structure preceding the element
 * which contains list head <lh>, which is known as element <el> in
 * struct pt.
 */
#undef LIST_PREV
#define LIST_PREV(lh, pt, el) (LIST_ELEM((lh)->p, pt, el))

/*
 * Simpler FOREACH_ITEM macro inspired from Linux sources.
 * Iterates <item> through a list of items of type "typeof(*item)" which are
 * linked via a "struct list" member named <member>. A pointer to the head of
 * the list is passed in <list_head>. No temporary variable is needed. Note
 * that <item> must not be modified during the loop.
 * Example: list_for_each_entry(cur_acl, known_acl, list) { ... };
 */
#define list_for_each_entry(item, list_head, member) \
	for (item = LIST_ELEM((list_head)->n, typeof(item), member); \
	     &item->member != (list_head); \
	     item = LIST_ELEM(item->member.n, typeof(item), member))

/*
 * Same as list_for_each_entry but starting from the current point.
 * Iterates <item> through the list starting from <item>.
 * It's basically the same macro but without initializing <item> to the head
 * of the list.
 */
#define list_for_each_entry_from(item, list_head, member) \
	for ( ; &item->member != (list_head); \
	     item = LIST_ELEM(item->member.n, typeof(item), member))

/*
 * Simpler FOREACH_ITEM_SAFE macro inspired from Linux sources.
 * Iterates <item> through a list of items of type "typeof(*item)" which are
 * linked via a "struct list" member named <member>. A pointer to the head of
 * the list is passed in <list_head>. A temporary variable <back> of same type
 * as <item> is needed so that <item> may safely be deleted if needed.
 * Example: list_for_each_entry_safe(cur_acl, tmp, known_acl, list) { ... };
 */
#define list_for_each_entry_safe(item, back, list_head, member) \
	for (item = LIST_ELEM((list_head)->n, typeof(item), member), \
	     back = LIST_ELEM(item->member.n, typeof(item), member); \
	     &item->member != (list_head); \
	     item = back, back = LIST_ELEM(back->member.n, typeof(back), member))
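
/* Deletion sketch (illustrative; "queue" and "struct task_item" are
 * hypothetical): the safe variant allows the current element to be unlinked
 * and freed while iterating.
 *
 *   struct task_item *t, *back;
 *
 *   list_for_each_entry_safe(t, back, &queue, list) {
 *       if (t->id < 0) {
 *           LIST_DEL(&t->list);
 *           free(t);
 *       }
 *   }
 */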

/*
 * Same as list_for_each_entry_safe but starting from the current point.
 * Iterates <item> through the list starting from <item>.
 * It's basically the same macro but without initializing <item> to the head
 * of the list.
 */
#define list_for_each_entry_safe_from(item, back, list_head, member) \
	for (back = LIST_ELEM(item->member.n, typeof(item), member); \
	     &item->member != (list_head); \
	     item = back, back = LIST_ELEM(back->member.n, typeof(back), member))

#include <common/hathreads.h>
#define MT_LIST_BUSY ((struct mt_list *)1)

/*
 * Locked version of list manipulation macros.
 * It is OK to use those concurrently from multiple threads, as long as the
 * list is only used with the locked variants.
 */

/*
 * Add an item at the beginning of a list.
 * Returns 1 if we added the item, 0 otherwise (because it was already in a
 * list).
 */
#define MT_LIST_ADD(_lh, _el) \
	({ \
		int _ret = 0; \
		struct mt_list *lh = (_lh), *el = (_el); \
		while (1) { \
			struct mt_list *n; \
			struct mt_list *p; \
			n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY); \
			if (n == MT_LIST_BUSY) \
				continue; \
			p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY); \
			if (p == MT_LIST_BUSY) { \
				(lh)->next = n; \
				__ha_barrier_store(); \
				continue; \
			} \
			if ((el)->next != (el) || (el)->prev != (el)) { \
				(n)->prev = p; \
				(lh)->next = n; \
				break; \
			} \
			(el)->next = n; \
			(el)->prev = p; \
			__ha_barrier_store(); \
			n->prev = (el); \
			__ha_barrier_store(); \
			p->next = (el); \
			__ha_barrier_store(); \
			_ret = 1; \
			break; \
		} \
		(_ret); \
	})

/*
 * Add an item at the end of a list.
 * Returns 1 if we added the item, 0 otherwise (because it was already in a
 * list).
 */
#define MT_LIST_ADDQ(_lh, _el) \
	({ \
		int _ret = 0; \
		struct mt_list *lh = (_lh), *el = (_el); \
		while (1) { \
			struct mt_list *n; \
			struct mt_list *p; \
			p = _HA_ATOMIC_XCHG(&(lh)->prev, MT_LIST_BUSY); \
			if (p == MT_LIST_BUSY) \
				continue; \
			n = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY); \
			if (n == MT_LIST_BUSY) { \
				(lh)->prev = p; \
				__ha_barrier_store(); \
				continue; \
			} \
			if ((el)->next != (el) || (el)->prev != (el)) { \
				p->next = n; \
				(lh)->prev = p; \
				break; \
			} \
			(el)->next = n; \
			(el)->prev = p; \
			__ha_barrier_store(); \
			p->next = (el); \
			__ha_barrier_store(); \
			n->prev = (el); \
			__ha_barrier_store(); \
			_ret = 1; \
			break; \
		} \
		(_ret); \
	})
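
/* Usage sketch (illustrative; "work", "t" and the "mt_list" member are
 * hypothetical). Elements should be initialized with MT_LIST_INIT() before
 * their first insertion, since the "already in a list" check relies on
 * detached elements pointing to themselves.
 *
 *   struct mt_list work = MT_LIST_HEAD_INIT(work);
 *
 *   MT_LIST_INIT(&t->mt_list);
 *   if (!MT_LIST_ADDQ(&work, &t->mt_list)) {
 *       // already queued somewhere, nothing to do
 *   }
 */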

/*
 * Detach a list from its head. A pointer to the first element is returned
 * and the list is closed. If the list was empty, NULL is returned. This may
 * exclusively be used with lists modified by MT_LIST_ADD/MT_LIST_ADDQ. This
 * is incompatible with MT_LIST_DEL run concurrently.
 * If there's at least one element, the next of the last element will always
 * be NULL.
 */
#define MT_LIST_BEHEAD(_lh) ({ \
	struct mt_list *lh = (_lh); \
	struct mt_list *_n; \
	struct mt_list *_p; \
	while (1) { \
		_p = _HA_ATOMIC_XCHG(&(lh)->prev, MT_LIST_BUSY); \
		if (_p == MT_LIST_BUSY) \
			continue; \
		if (_p == (lh)) { \
			(lh)->prev = _p; \
			_n = NULL; \
			break; \
		} \
		_n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY); \
		if (_n == MT_LIST_BUSY) { \
			(lh)->prev = _p; \
			__ha_barrier_store(); \
			continue; \
		} \
		if (_n == (lh)) { \
			(lh)->next = _n; \
			(lh)->prev = _p; \
			_n = NULL; \
			break; \
		} \
		(lh)->next = (lh); \
		(lh)->prev = (lh); \
		_n->prev = _p; \
		_p->next = NULL; \
		__ha_barrier_store(); \
		break; \
	} \
	(_n); \
})
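
/* Drain sketch (illustrative; names are hypothetical): the detached chain is
 * walked via ->next until NULL. MT_LIST_BEHEAD does not reinitialize the
 * elements themselves, so read ->next before reusing the element.
 *
 *   struct mt_list *elt = MT_LIST_BEHEAD(&work);
 *
 *   while (elt) {
 *       struct task_item *t = MT_LIST_ELEM(elt, struct task_item *, mt_list);
 *       elt = elt->next;
 *       process(t);          // hypothetical handler
 *   }
 */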

/* Remove an item from a list.
 * Returns 1 if we removed the item, 0 otherwise (because it was in no list).
 */
#define MT_LIST_DEL(_el) \
	({ \
		int _ret = 0; \
		struct mt_list *el = (_el); \
		while (1) { \
			struct mt_list *n, *n2; \
			struct mt_list *p, *p2 = NULL; \
			n = _HA_ATOMIC_XCHG(&(el)->next, MT_LIST_BUSY); \
			if (n == MT_LIST_BUSY) \
				continue; \
			p = _HA_ATOMIC_XCHG(&(el)->prev, MT_LIST_BUSY); \
			if (p == MT_LIST_BUSY) { \
				(el)->next = n; \
				__ha_barrier_store(); \
				continue; \
			} \
			if (p != (el)) { \
				p2 = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY); \
				if (p2 == MT_LIST_BUSY) { \
					(el)->prev = p; \
					(el)->next = n; \
					__ha_barrier_store(); \
					continue; \
				} \
			} \
			if (n != (el)) { \
				n2 = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY); \
				if (n2 == MT_LIST_BUSY) { \
					if (p2 != NULL) \
						p->next = p2; \
					(el)->prev = p; \
					(el)->next = n; \
					__ha_barrier_store(); \
					continue; \
				} \
			} \
			n->prev = p; \
			p->next = n; \
			if (p != (el) && n != (el)) \
				_ret = 1; \
			__ha_barrier_store(); \
			(el)->prev = (el); \
			(el)->next = (el); \
			__ha_barrier_store(); \
			break; \
		} \
		(_ret); \
	})


/* Remove the first element from the list, and return it */
#define MT_LIST_POP(_lh, pt, el) \
	({ \
		void *_ret; \
		struct mt_list *lh = (_lh); \
		while (1) { \
			struct mt_list *n, *n2; \
			struct mt_list *p, *p2; \
			n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY); \
			if (n == MT_LIST_BUSY) \
				continue; \
			if (n == (lh)) { \
				(lh)->next = lh; \
				__ha_barrier_store(); \
				_ret = NULL; \
				break; \
			} \
			p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY); \
			if (p == MT_LIST_BUSY) { \
				(lh)->next = n; \
				__ha_barrier_store(); \
				continue; \
			} \
			n2 = _HA_ATOMIC_XCHG(&n->next, MT_LIST_BUSY); \
			if (n2 == MT_LIST_BUSY) { \
				n->prev = p; \
				__ha_barrier_store(); \
				(lh)->next = n; \
				__ha_barrier_store(); \
				continue; \
			} \
			p2 = _HA_ATOMIC_XCHG(&n2->prev, MT_LIST_BUSY); \
			if (p2 == MT_LIST_BUSY) { \
				n->next = n2; \
				n->prev = p; \
				__ha_barrier_store(); \
				(lh)->next = n; \
				__ha_barrier_store(); \
				continue; \
			} \
			(lh)->next = n2; \
			(n2)->prev = (lh); \
			__ha_barrier_store(); \
			(n)->prev = (n); \
			(n)->next = (n); \
			__ha_barrier_store(); \
			_ret = MT_LIST_ELEM(n, pt, el); \
			break; \
		} \
		(_ret); \
	})
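
/* Consumer sketch (illustrative; names are hypothetical): pop entries one by
 * one until the list is empty. Each popped element is returned already
 * reinitialized (self-linked).
 *
 *   struct task_item *t;
 *
 *   while ((t = MT_LIST_POP(&work, struct task_item *, mt_list)) != NULL)
 *       process(t);
 */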

#define MT_LIST_HEAD(a) ((void *)(&(a)))

#define MT_LIST_INIT(l) ((l)->next = (l)->prev = (l))

#define MT_LIST_HEAD_INIT(l) { &l, &l }

/* returns a pointer of type <pt> to a structure containing a list head called
 * <el> at address <lh>. Note that <lh> can be the result of a function or macro
 * since it's used only once.
 * Example: MT_LIST_ELEM(cur_node->args.next, struct node *, args)
 */
#define MT_LIST_ELEM(lh, pt, el) ((pt)(((const char *)(lh)) - ((size_t)&((pt)NULL)->el)))

/* checks if the list head <lh> is empty or not */
#define MT_LIST_ISEMPTY(lh) ((lh)->next == (lh))

/* returns a pointer of type <pt> to a structure following the element
 * which contains list head <lh>, which is known as element <el> in
 * struct pt.
 * Example: MT_LIST_NEXT(args, struct node *, list)
 */
#define MT_LIST_NEXT(lh, pt, el) (MT_LIST_ELEM((lh)->next, pt, el))

/* returns a pointer of type <pt> to a structure preceding the element
 * which contains list head <lh>, which is known as element <el> in
 * struct pt.
 */
#undef MT_LIST_PREV
#define MT_LIST_PREV(lh, pt, el) (MT_LIST_ELEM((lh)->prev, pt, el))

/* checks if the list element <el> was added to a list or not. This only
 * works when detached elements are reinitialized (e.g. using MT_LIST_INIT,
 * or after MT_LIST_DEL which leaves the element self-linked).
 */
#define MT_LIST_ADDED(el) ((el)->next != (el))

/* Lock an element in the list, to be sure it won't be removed.
 * The caller needs some other form of synchronization to be sure the
 * element is not removed from the list in the meantime.
 * This returns a struct mt_list that will be needed at unlock time.
 */
#define MT_LIST_LOCK_ELT(_el) \
	({ \
		struct mt_list ret; \
		struct mt_list *el = (_el); \
		while (1) { \
			struct mt_list *n, *n2; \
			struct mt_list *p, *p2 = NULL; \
			n = _HA_ATOMIC_XCHG(&(el)->next, MT_LIST_BUSY); \
			if (n == MT_LIST_BUSY) \
				continue; \
			p = _HA_ATOMIC_XCHG(&(el)->prev, MT_LIST_BUSY); \
			if (p == MT_LIST_BUSY) { \
				(el)->next = n; \
				__ha_barrier_store(); \
				continue; \
			} \
			if (p != (el)) { \
				p2 = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY); \
				if (p2 == MT_LIST_BUSY) { \
					(el)->prev = p; \
					(el)->next = n; \
					__ha_barrier_store(); \
					continue; \
				} \
			} \
			if (n != (el)) { \
				n2 = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY); \
				if (n2 == MT_LIST_BUSY) { \
					if (p2 != NULL) \
						p->next = p2; \
					(el)->prev = p; \
					(el)->next = n; \
					__ha_barrier_store(); \
					continue; \
				} \
			} \
			ret.next = n; \
			ret.prev = p; \
			break; \
		} \
		ret; \
	})

/* Unlock an element previously locked by MT_LIST_LOCK_ELT. "np" is the
 * struct mt_list returned by MT_LIST_LOCK_ELT().
 */
#define MT_LIST_UNLOCK_ELT(_el, np) \
	do { \
		struct mt_list *n = (np).next, *p = (np).prev; \
		struct mt_list *el = (_el); \
		(el)->next = n; \
		(el)->prev = p; \
		if (n != (el)) \
			n->prev = (el); \
		if (p != (el)) \
			p->next = (el); \
	} while (0)
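
/* Lock/unlock sketch (illustrative; "t" and its "mt_list" member are
 * hypothetical): while the element is locked, other locked operations
 * touching its links will spin until it is released.
 *
 *   struct mt_list saved = MT_LIST_LOCK_ELT(&t->mt_list);
 *   // ... the element cannot be unlinked here ...
 *   MT_LIST_UNLOCK_ELT(&t->mt_list, saved);
 */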

/* Internal macros for the foreach macros */
#define _MT_LIST_UNLOCK_NEXT(el, np) \
	do { \
		struct mt_list *n = (np); \
		(el)->next = n; \
		if (n != (el)) \
			n->prev = (el); \
	} while (0)

/* Internal macros for the foreach macros */
#define _MT_LIST_UNLOCK_PREV(el, np) \
	do { \
		struct mt_list *p = (np); \
		(el)->prev = p; \
		if (p != (el)) \
			p->next = (el); \
	} while (0)

#define _MT_LIST_LOCK_NEXT(el) \
	({ \
		struct mt_list *n = NULL; \
		while (1) { \
			struct mt_list *n2; \
			n = _HA_ATOMIC_XCHG(&((el)->next), MT_LIST_BUSY); \
			if (n == MT_LIST_BUSY) \
				continue; \
			if (n != (el)) { \
				n2 = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY); \
				if (n2 == MT_LIST_BUSY) { \
					(el)->next = n; \
					__ha_barrier_store(); \
					continue; \
				} \
			} \
			break; \
		} \
		n; \
	})

#define _MT_LIST_LOCK_PREV(el) \
	({ \
		struct mt_list *p = NULL; \
		while (1) { \
			struct mt_list *p2; \
			p = _HA_ATOMIC_XCHG(&((el)->prev), MT_LIST_BUSY); \
			if (p == MT_LIST_BUSY) \
				continue; \
			if (p != (el)) { \
				p2 = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY); \
				if (p2 == MT_LIST_BUSY) { \
					(el)->prev = p; \
					__ha_barrier_store(); \
					continue; \
				} \
			} \
			break; \
		} \
		p; \
	})

#define _MT_LIST_RELINK_DELETED(elt2) \
	do { \
		struct mt_list *n = elt2.next, *p = elt2.prev; \
		ALREADY_CHECKED(p); \
		n->prev = p; \
		p->next = n; \
	} while (0);

/* Equivalent of MT_LIST_DEL(), to be used when iterating over the list with
 * mt_list_for_each_entry_safe(). The argument must be the temporary element
 * pointer currently used for the iteration (<tmpelt>).
 */
#define MT_LIST_DEL_SAFE(_el) \
	do { \
		struct mt_list *el = (_el); \
		(el)->prev = (el); \
		(el)->next = (el); \
		(_el) = NULL; \
	} while (0)

/* Simpler FOREACH_ITEM_SAFE macro inspired from Linux sources.
 * Iterates <item> through a list of items of type "typeof(*item)" which are
 * linked via a "struct mt_list" member named <member>. A pointer to the head
 * of the list is passed in <list_head>.
 * <tmpelt> is a temporary struct mt_list *, and <tmpelt2> is a temporary
 * struct mt_list; both are used internally and are needed by MT_LIST_DEL_SAFE.
 * Example: mt_list_for_each_entry_safe(cur_acl, known_acl, list, elt1, elt2)
 * { ... };
 * If you want to remove the current element, please use MT_LIST_DEL_SAFE.
 */
#define mt_list_for_each_entry_safe(item, list_head, member, tmpelt, tmpelt2) \
	for ((tmpelt) = NULL; (tmpelt) != MT_LIST_BUSY; ({ \
		if (tmpelt) { \
			if (tmpelt2.prev) \
				MT_LIST_UNLOCK_ELT(tmpelt, tmpelt2); \
			else \
				_MT_LIST_UNLOCK_NEXT(tmpelt, tmpelt2.next); \
		} else \
			_MT_LIST_RELINK_DELETED(tmpelt2); \
		(tmpelt) = MT_LIST_BUSY; \
	})) \
	for ((tmpelt) = (list_head), (tmpelt2).prev = NULL, (tmpelt2).next = _MT_LIST_LOCK_NEXT(tmpelt); ({ \
		(item) = MT_LIST_ELEM((tmpelt2.next), typeof(item), member); \
		if (&item->member != (list_head)) { \
			if (tmpelt2.prev != &item->member) \
				tmpelt2.next = _MT_LIST_LOCK_NEXT(&item->member); \
			else \
				tmpelt2.next = tmpelt; \
			if (tmpelt != NULL) { \
				if (tmpelt2.prev) \
					_MT_LIST_UNLOCK_PREV(tmpelt, tmpelt2.prev); \
				tmpelt2.prev = tmpelt; \
			} \
			(tmpelt) = &item->member; \
		} \
	}), \
	     &item->member != (list_head);)
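
/* Iteration sketch (illustrative; "work", "t" and the "mt_list" member are
 * hypothetical): <elt1> and <elt2> are the two temporaries required by the
 * macro, and MT_LIST_DEL_SAFE(elt1) drops the element currently visited.
 *
 *   struct task_item *t;
 *   struct mt_list *elt1;
 *   struct mt_list elt2;
 *
 *   mt_list_for_each_entry_safe(t, &work, mt_list, elt1, elt2) {
 *       if (t->done)
 *           MT_LIST_DEL_SAFE(elt1);
 *   }
 */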

static __inline struct list *mt_list_to_list(struct mt_list *list)
{
	union {
		struct mt_list *mt_list;
		struct list *list;
	} mylist;

	mylist.mt_list = list;
	return mylist.list;
}

static __inline struct mt_list *list_to_mt_list(struct list *list)
{
	union {
		struct mt_list *mt_list;
		struct list *list;
	} mylist;

	mylist.list = list;
	return mylist.mt_list;
}
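
/* Conversion sketch (illustrative; "shared" and do_something() are
 * hypothetical): both structures have the same layout, so a head can be
 * viewed under either type. Mixing locked and unlocked macros on a list that
 * is concurrently accessed remains unsafe, as noted above.
 *
 *   struct mt_list shared = MT_LIST_HEAD_INIT(shared);
 *
 *   if (LIST_ISEMPTY(mt_list_to_list(&shared)))
 *       do_something();
 */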

#endif /* _COMMON_MINI_CLIST_H */