/*
 * general purpose event handlers management
 *
 * Copyright 2022 HAProxy Technologies
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>
#include <haproxy/event_hdl.h>
#include <haproxy/compiler.h>
#include <haproxy/task.h>
#include <haproxy/tools.h>
#include <haproxy/errors.h>
#include <haproxy/signal.h>
#include <haproxy/xxhash.h>

/* Changes to event types in the event_hdl-t.h file should be reflected in
 * the map below to allow string-to-type and type-to-string conversions.
 */
static struct event_hdl_sub_type_map event_hdl_sub_type_map[] = {
	{"NONE",         EVENT_HDL_SUB_NONE},
	{"SERVER",       EVENT_HDL_SUB_SERVER},
	{"SERVER_ADD",   EVENT_HDL_SUB_SERVER_ADD},
	{"SERVER_DEL",   EVENT_HDL_SUB_SERVER_DEL},
	{"SERVER_UP",    EVENT_HDL_SUB_SERVER_UP},
	{"SERVER_DOWN",  EVENT_HDL_SUB_SERVER_DOWN},
	{"SERVER_STATE", EVENT_HDL_SUB_SERVER_STATE},
	{"SERVER_ADMIN", EVENT_HDL_SUB_SERVER_ADMIN},
	{"SERVER_CHECK", EVENT_HDL_SUB_SERVER_CHECK},
};

/* internal types (only used in this file) */
struct event_hdl_async_task_default_ctx
{
	event_hdl_async_equeue e_queue; /* event queue list */
	event_hdl_cb_async func;        /* event handling func */
};

/* memory pools declarations */
DECLARE_STATIC_POOL(pool_head_sub, "ehdl_sub", sizeof(struct event_hdl_sub));
DECLARE_STATIC_POOL(pool_head_sub_event, "ehdl_sub_e", sizeof(struct event_hdl_async_event));
DECLARE_STATIC_POOL(pool_head_sub_event_data, "ehdl_sub_ed", sizeof(struct event_hdl_async_event_data));
DECLARE_STATIC_POOL(pool_head_sub_taskctx, "ehdl_sub_tctx", sizeof(struct event_hdl_async_task_default_ctx));

/* TODO: will become a config tunable
 * ie: tune.events.max-async-notif-at-once
 */
static int event_hdl_async_max_notif_at_once = 10;

/* global subscription list (implicitly used when NULL is passed as the sublist argument) */
static event_hdl_sub_list global_event_hdl_sub_list;

/* all known subscription lists are tracked in this list (including the global one) */
static struct mt_list known_event_hdl_sub_list = MT_LIST_HEAD_INIT(known_event_hdl_sub_list);

static void _event_hdl_sub_list_destroy(event_hdl_sub_list *sub_list);

static void event_hdl_deinit(struct sig_handler *sh)
{
	event_hdl_sub_list *cur_list;
	struct mt_list *elt1, elt2;

	/* destroy all known subscription lists */
	mt_list_for_each_entry_safe(cur_list, &known_event_hdl_sub_list, known, elt1, elt2) {
		/* remove cur elem from list */
		MT_LIST_DELETE_SAFE(elt1);
		/* then destroy it */
		_event_hdl_sub_list_destroy(cur_list);
	}
}

static void event_hdl_init(void)
{
	/* initialize global subscription list */
	event_hdl_sub_list_init(&global_event_hdl_sub_list);
	/* register the deinit function: it will be called on soft-stop */
	signal_register_fct(0, event_hdl_deinit, 0);
}

/* general purpose hashing function when you want to compute
 * an ID based on <scope> x <name>.
 * It is your responsibility to make sure <scope> is not used
 * elsewhere in the code (or that you are fine with sharing
 * the scope).
 */
inline uint64_t event_hdl_id(const char *scope, const char *name)
{
	XXH64_state_t state;

	XXH64_reset(&state, 0);
	XXH64_update(&state, scope, strlen(scope));
	XXH64_update(&state, name, strlen(name));
	return XXH64_digest(&state);
}
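
/* Usage sketch ("my_module" / "my_event" are illustrative names, not
 * taken from actual callers): modules typically derive stable
 * subscription IDs like this:
 *
 *	uint64_t id = event_hdl_id("my_module", "my_event");
 *
 * The same <scope> x <name> pair always hashes to the same 64-bit id,
 * which is what allows the event_hdl_lookup_*() functions below to
 * find the subscription back later.
 */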

/* takes a sub_type as input, returns the corresponding sub_type
 * printable string or "N/A" if not found.
 * If not found, an error is reported to stderr so the developers
 * know that a sub_type is missing its associated string in event_hdl-t.h
 */
const char *event_hdl_sub_type_to_string(struct event_hdl_sub_type sub_type)
{
	int it;

	for (it = 0; it < (int)(sizeof(event_hdl_sub_type_map) / sizeof(event_hdl_sub_type_map[0])); it++) {
		if (sub_type.family == event_hdl_sub_type_map[it].type.family &&
		    sub_type.subtype == event_hdl_sub_type_map[it].type.subtype)
			return event_hdl_sub_type_map[it].name;
	}
	ha_alert("event_hdl-t.h: missing sub_type string representation.\n"
	         "Please reflect any changes in event_hdl_sub_type_map.\n");
	return "N/A";
}

/* returns the internal sub_type corresponding
 * to the printable representation <name>
 * or EVENT_HDL_SUB_NONE if no such event exists
 * (see event_hdl-t.h for the complete list of supported types)
 */
struct event_hdl_sub_type event_hdl_string_to_sub_type(const char *name)
{
	int it;

	for (it = 0; it < (int)(sizeof(event_hdl_sub_type_map) / sizeof(event_hdl_sub_type_map[0])); it++) {
		if (!strcmp(name, event_hdl_sub_type_map[it].name))
			return event_hdl_sub_type_map[it].type;
	}
	return EVENT_HDL_SUB_NONE;
}

/* Takes the <subscriptions> sub list as input and returns a printable string
 * containing every sub_type found in <subscriptions>,
 * separated by the '|' character.
 * Returns NULL if no sub_type is found in <subscriptions>.
 * This function leverages memprintf(), so it is up to the
 * caller to free the returned value (if != NULL) once it is
 * no longer used.
 */
char *event_hdl_sub_type_print(struct event_hdl_sub_type subscriptions)
{
	char *out = NULL;
	int it;
	uint8_t first = 1;

	for (it = 0; it < (int)(sizeof(event_hdl_sub_type_map) / sizeof(event_hdl_sub_type_map[0])); it++) {
		if (subscriptions.family == event_hdl_sub_type_map[it].type.family &&
		    ((subscriptions.subtype & event_hdl_sub_type_map[it].type.subtype) ==
		     event_hdl_sub_type_map[it].type.subtype)) {
			if (first) {
				memprintf(&out, "%s", event_hdl_sub_type_map[it].name);
				first--;
			}
			else
				memprintf(&out, "%s%s%s", out, "|", event_hdl_sub_type_map[it].name);
		}
	}

	return out;
}
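
/* Usage sketch: the returned string is allocated through memprintf()
 * and must be released by the caller once printed, e.g. with ha_free()
 * from <haproxy/tools.h>:
 *
 *	char *str = event_hdl_sub_type_print(sub_type);
 *
 *	if (str)
 *		printf("subscribed to: %s\n", str);
 *	ha_free(&str);
 */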

/* event_hdl debug/reporting function */
typedef void (*event_hdl_report_hdl_state_func)(const char *fmt, ...);
static void event_hdl_report_hdl_state(event_hdl_report_hdl_state_func report_func,
                                       const struct event_hdl *hdl, const char *what, const char *state)
{
	report_func("[event_hdl]:%s (%s)'#%llu@%s': %s\n",
	            what,
	            (hdl->async) ? "ASYNC" : "SYNC",
	            (long long unsigned int)hdl->id,
	            hdl->dorigin,
	            state);
}

static inline void _event_hdl_async_data_drop(struct event_hdl_async_event_data *data)
{
	if (HA_ATOMIC_SUB_FETCH(&data->refcount, 1) == 0) {
		/* we were the last one holding a reference to event data - free required */
		if (data->mfree) {
			/* Some event data members are dynamically allocated and thus
			 * require specific cleanup using a user-provided function.
			 * We directly pass a pointer to the internal data storage but
			 * we only expect the cleanup function to cast it to the
			 * relevant data type to give enough context to the function to
			 * perform the cleanup on data members, not to actually free the
			 * data pointer itself since it is our internal buffer :)
			 */
			data->mfree(&data->data);
		}
		pool_free(pool_head_sub_event_data, data);
	}
}

void event_hdl_async_free_event(struct event_hdl_async_event *e)
{
	if (unlikely(event_hdl_sub_type_equal(e->type, EVENT_HDL_SUB_END))) {
		/* last event for hdl, special case */
		/* free the subscription entry as we're the last one still using it
		 * (it is already removed from the mt_list, no race can occur)
		 */
		event_hdl_drop(e->sub_mgmt.this);
		HA_ATOMIC_DEC(&jobs);
	}
	else if (e->_data)
		_event_hdl_async_data_drop(e->_data); /* data wrapper */
	pool_free(pool_head_sub_event, e);
}

/* wake up the task depending on its type:
 * normal async mode internally uses tasklets but advanced async mode
 * allows both tasks and tasklets.
 * While tasks and tasklets may be easily cast from one to the other, we need
 * to use the proper API to wake them up (the waiting queues are exclusive).
 */
static void event_hdl_task_wakeup(struct tasklet *task)
{
	if (TASK_IS_TASKLET(task))
		tasklet_wakeup(task);
	else
		task_wakeup((struct task *)task, TASK_WOKEN_OTHER); /* TODO: switch to TASK_WOKEN_EVENT? */
}

/* task handler used for normal async subscription mode;
 * if you use advanced async subscription mode, you can use this
 * as an example to implement your own task wrapper
 */
static struct task *event_hdl_async_task_default(struct task *task, void *ctx, unsigned int state)
{
	struct tasklet *tl = (struct tasklet *)task;
	struct event_hdl_async_task_default_ctx *task_ctx = ctx;
	struct event_hdl_async_event *event;
	int max_notif_at_once_it = 0;
	uint8_t done = 0;

	/* run through e_queue, and call func() for each event
	 * if we read the END event, it indicates we must stop:
	 * no more events to come (handler is unregistered)
	 * so we must free task_ctx and stop the task
	 */
	while (max_notif_at_once_it < event_hdl_async_max_notif_at_once &&
	       (event = event_hdl_async_equeue_pop(&task_ctx->e_queue)))
	{
		if (event_hdl_sub_type_equal(event->type, EVENT_HDL_SUB_END)) {
			done = 1;
			event_hdl_async_free_event(event);
			/* break is normally not even required, EVENT_HDL_SUB_END
			 * is guaranteed to be the last event of e_queue
			 * (because in normal mode one sub == one e_queue)
			 */
			break;
		}
		else {
			struct event_hdl_cb cb;

			cb.e_type = event->type;
			cb.e_data = event->data;
			cb.sub_mgmt = &event->sub_mgmt;
			cb._sync = 0;

			/* call user function */
			task_ctx->func(&cb, event->private);
			max_notif_at_once_it++;
		}
		event_hdl_async_free_event(event);
	}

	if (done) {
		/* our job is done, subscription is over: no more events to come */
		pool_free(pool_head_sub_taskctx, task_ctx);
		tasklet_free(tl);
		return NULL;
	}
	return task;
}
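
/* A minimal sketch of a custom task wrapper for advanced async mode
 * (my_equeue / my_handle are illustrative names): the custom task owns
 * its own event queue and must honor the END event just like the
 * default handler above does:
 *
 *	struct event_hdl_async_event *event;
 *
 *	while ((event = event_hdl_async_equeue_pop(my_equeue))) {
 *		if (event_hdl_sub_type_equal(event->type, EVENT_HDL_SUB_END)) {
 *			event_hdl_async_free_event(event);
 *			// no more events will come: safe to release task resources
 *			break;
 *		}
 *		my_handle(event);
 *		event_hdl_async_free_event(event);
 *	}
 */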

/* internal subscription mgmt functions */
static inline struct event_hdl_sub_type _event_hdl_getsub(struct event_hdl_sub *cur_sub)
{
	return cur_sub->sub;
}

static inline struct event_hdl_sub_type _event_hdl_getsub_async(struct event_hdl_sub *cur_sub)
{
	struct mt_list lock;
	struct event_hdl_sub_type type = EVENT_HDL_SUB_NONE;

	lock = MT_LIST_LOCK_ELT(&cur_sub->mt_list);
	if (lock.next != &cur_sub->mt_list)
		type = _event_hdl_getsub(cur_sub);
	// else already removed
	MT_LIST_UNLOCK_ELT(&cur_sub->mt_list, lock);
	return type;
}

static inline int _event_hdl_resub(struct event_hdl_sub *cur_sub, struct event_hdl_sub_type type)
{
	if (!event_hdl_sub_family_equal(cur_sub->sub, type))
		return 0; /* family types differ, do nothing */
	cur_sub->sub.subtype = type.subtype; /* new subtype assignment */
	return 1;
}

static inline int _event_hdl_resub_async(struct event_hdl_sub *cur_sub, struct event_hdl_sub_type type)
{
	int status = 0;
	struct mt_list lock;

	lock = MT_LIST_LOCK_ELT(&cur_sub->mt_list);
	if (lock.next != &cur_sub->mt_list)
		status = _event_hdl_resub(cur_sub, type);
	// else already removed
	MT_LIST_UNLOCK_ELT(&cur_sub->mt_list, lock);
	return status;
}

static inline void _event_hdl_unsubscribe(struct event_hdl_sub *del_sub)
{
	struct mt_list lock;

	if (del_sub->hdl.async) {
		/* ASYNC SUB MODE */
		/* push EVENT_HDL_SUB_END (to notify the task that the subscription is dead) */

		/* push the END event in busy state so we can safely wake up
		 * the task before releasing it.
		 * Not doing that would expose us to a race where the task could have
		 * already consumed the END event before the wakeup, and some tasks
		 * kill themselves (ie: normal async mode) when they receive such an event
		 */
		HA_ATOMIC_INC(&del_sub->hdl.async_equeue->size);
		lock = MT_LIST_APPEND_LOCKED(&del_sub->hdl.async_equeue->head, &del_sub->async_end->mt_list);

		/* wake up the task */
		event_hdl_task_wakeup(del_sub->hdl.async_task);

		/* unlock the END event (we're done, the task is now free to consume it) */
		MT_LIST_UNLOCK_ELT(&del_sub->async_end->mt_list, lock);

		/* we don't free the sub here:
		 * freeing will be performed by the async task so it can safely rely
		 * on the pointer until it notices the END event
		 */
	} else {
		/* SYNC SUB MODE */

		/* we can directly free the subscription:
		 * no other thread can access it since we successfully
		 * removed it from the list
		 */
		event_hdl_drop(del_sub);
	}
}

static inline void _event_hdl_unsubscribe_async(struct event_hdl_sub *del_sub)
{
	if (!MT_LIST_DELETE(&del_sub->mt_list))
		return; /* already removed (but may be pending in e_queues) */
	_event_hdl_unsubscribe(del_sub);
}

/* sub_mgmt function pointers (for handlers) */
static struct event_hdl_sub_type event_hdl_getsub_sync(const struct event_hdl_sub_mgmt *mgmt)
{
	if (!mgmt)
		return EVENT_HDL_SUB_NONE;

	if (!mgmt->this)
		return EVENT_HDL_SUB_NONE; /* already removed from sync ctx */
	return _event_hdl_getsub(mgmt->this);
}

static struct event_hdl_sub_type event_hdl_getsub_async(const struct event_hdl_sub_mgmt *mgmt)
{
	if (!mgmt)
		return EVENT_HDL_SUB_NONE;

	return _event_hdl_getsub_async(mgmt->this);
}

static int event_hdl_resub_sync(const struct event_hdl_sub_mgmt *mgmt, struct event_hdl_sub_type type)
{
	if (!mgmt)
		return 0;

	if (!mgmt->this)
		return 0; /* already removed from sync ctx */
	return _event_hdl_resub(mgmt->this, type);
}

static int event_hdl_resub_async(const struct event_hdl_sub_mgmt *mgmt, struct event_hdl_sub_type type)
{
	if (!mgmt)
		return 0;

	return _event_hdl_resub_async(mgmt->this, type);
}

static void event_hdl_unsubscribe_sync(const struct event_hdl_sub_mgmt *mgmt)
{
	if (!mgmt)
		return;

	if (!mgmt->this)
		return; /* already removed from sync ctx */

	/* assuming that publish sync code will notice that mgmt->this is NULL
	 * and will perform the list removal using MT_LIST_DELETE_SAFE and
	 * _event_hdl_unsubscribe()
	 * while still owning the lock
	 */
	((struct event_hdl_sub_mgmt *)mgmt)->this = NULL;
}

static void event_hdl_unsubscribe_async(const struct event_hdl_sub_mgmt *mgmt)
{
	if (!mgmt)
		return;

	_event_hdl_unsubscribe_async(mgmt->this);
}

#define EVENT_HDL_SUB_MGMT_ASYNC(_sub) (struct event_hdl_sub_mgmt){ .this = _sub,                     \
                                                                    .getsub = event_hdl_getsub_async, \
                                                                    .resub = event_hdl_resub_async,   \
                                                                    .unsub = event_hdl_unsubscribe_async}
#define EVENT_HDL_SUB_MGMT_SYNC(_sub) (struct event_hdl_sub_mgmt){ .this = _sub,                      \
                                                                   .getsub = event_hdl_getsub_sync,   \
                                                                   .resub = event_hdl_resub_sync,     \
                                                                   .unsub = event_hdl_unsubscribe_sync}

struct event_hdl_sub *event_hdl_subscribe_ptr(event_hdl_sub_list *sub_list,
                                              struct event_hdl_sub_type e_type, struct event_hdl hdl)
{
	struct event_hdl_sub *new_sub = NULL;
	struct mt_list *elt1, elt2;
	struct event_hdl_async_task_default_ctx *task_ctx = NULL;
	struct mt_list lock;

	if (!sub_list)
		sub_list = &global_event_hdl_sub_list; /* fall back to global list */

	/* hdl API consistency check */
	/* FIXME: do we need to ensure that if private is set, private_free should be set as well? */
	BUG_ON((!hdl.async && !hdl.sync_ptr) ||
	       (hdl.async == EVENT_HDL_ASYNC_MODE_NORMAL && !hdl.async_ptr) ||
	       (hdl.async == EVENT_HDL_ASYNC_MODE_ADVANCED &&
	        (!hdl.async_equeue || !hdl.async_task)));

	new_sub = pool_alloc(pool_head_sub);
	if (new_sub == NULL) {
		goto memory_error;
	}

	/* assignments */
	new_sub->sub.family = e_type.family;
	new_sub->sub.subtype = e_type.subtype;
	new_sub->flags = 0;
	new_sub->hdl = hdl;

	if (hdl.async) {
		/* async END event pre-allocation */
		new_sub->async_end = pool_alloc(pool_head_sub_event);
		if (!new_sub->async_end) {
			/* memory error */
			goto memory_error;
		}
		if (hdl.async == EVENT_HDL_ASYNC_MODE_NORMAL) {
			/* normal mode: no task provided, we must initialize it */

			/* initialize task context */
			task_ctx = pool_alloc(pool_head_sub_taskctx);

			if (!task_ctx) {
				/* memory error */
				goto memory_error;
			}
			event_hdl_async_equeue_init(&task_ctx->e_queue);
			task_ctx->func = new_sub->hdl.async_ptr;

			new_sub->hdl.async_equeue = &task_ctx->e_queue;
			new_sub->hdl.async_task = tasklet_new();

			if (!new_sub->hdl.async_task) {
				/* memory error */
				goto memory_error;
			}
			new_sub->hdl.async_task->context = task_ctx;
			new_sub->hdl.async_task->process = event_hdl_async_task_default;
		}
		/* initialize the END event (used to notify about subscription ending),
		 * used by both normal and advanced mode:
		 *   - to safely terminate the task in normal mode
		 *   - to safely free the subscription and
		 *     keep track of active subscriptions in advanced mode
		 */
		new_sub->async_end->type = EVENT_HDL_SUB_END;
		new_sub->async_end->sub_mgmt = EVENT_HDL_SUB_MGMT_ASYNC(new_sub);
		new_sub->async_end->private = new_sub->hdl.private;
		new_sub->async_end->_data = NULL;
		MT_LIST_INIT(&new_sub->async_end->mt_list);
	}
	/* set refcount to 2:
	 *   1 for the handler (because the handler can manage the subscription itself)
	 *   1 for the caller (dropped automatically if the caller uses the non-ptr version)
	 */
	new_sub->refcount = 2;

	/* ready for registration */
	MT_LIST_INIT(&new_sub->mt_list);

	lock = MT_LIST_LOCK_ELT(&sub_list->known);

	/* check that no hdl with the same id is already registered */
	if (hdl.id) {
		struct event_hdl_sub *cur_sub;
		uint8_t found = 0;

		mt_list_for_each_entry_safe(cur_sub, &sub_list->head, mt_list, elt1, elt2) {
			if (hdl.id == cur_sub->hdl.id) {
				/* we found a matching registered hdl */
				found = 1;
				break;
			}
		}
		if (found) {
			/* error: already registered */
			MT_LIST_UNLOCK_ELT(&sub_list->known, lock);
			event_hdl_report_hdl_state(ha_alert, &hdl, "SUB", "could not subscribe: subscription with this id already exists");
			goto cleanup;
		}
	}

	if (lock.next == &sub_list->known) {
		/* this is an expected corner case on the de-init path: a subscribe attempt
		 * was made but the subscription list is already destroyed; we pretend
		 * it is a memory/IO error since it should not be long before haproxy
		 * enters the deinit() function anyway
		 */
		MT_LIST_UNLOCK_ELT(&sub_list->known, lock);
		goto cleanup;
	}

	/* Append to the list (global or user specified list).
	 * For now, append in sync mode and insert in async mode
	 * so that async handlers are executed first
	 */
	if (hdl.async) {
		/* Prevent the task from being aborted on soft-stop: let's wait
		 * until the END event is acknowledged by the task.
		 * (decrease is performed in event_hdl_async_free_event())
		 *
		 * If we don't do this, the event_hdl API will leak and we won't give
		 * a chance to the event-handling task to perform its cleanup
		 */
		HA_ATOMIC_INC(&jobs);
		/* async mode, insert at the beginning of the list */
		MT_LIST_INSERT(&sub_list->head, &new_sub->mt_list);
	} else {
		/* sync mode, append at the end of the list */
		MT_LIST_APPEND(&sub_list->head, &new_sub->mt_list);
	}

	MT_LIST_UNLOCK_ELT(&sub_list->known, lock);

	return new_sub;

 cleanup:
	if (new_sub) {
		if (hdl.async == EVENT_HDL_ASYNC_MODE_NORMAL) {
			tasklet_free(new_sub->hdl.async_task);
			pool_free(pool_head_sub_taskctx, task_ctx);
		}
		if (hdl.async)
			pool_free(pool_head_sub_event, new_sub->async_end);
		pool_free(pool_head_sub, new_sub);
	}

	return NULL;

 memory_error:
	event_hdl_report_hdl_state(ha_warning, &hdl, "SUB", "could not register subscription due to memory error");
	goto cleanup;
}

void event_hdl_take(struct event_hdl_sub *sub)
{
	HA_ATOMIC_INC(&sub->refcount);
}

void event_hdl_drop(struct event_hdl_sub *sub)
{
	if (HA_ATOMIC_SUB_FETCH(&sub->refcount, 1) != 0)
		return;

	/* we were the last one holding a reference to the event sub - free required */
	if (sub->hdl.private_free) {
		/* free private data if specified upon registration */
		sub->hdl.private_free(sub->hdl.private);
	}
	pool_free(pool_head_sub, sub);
}

int event_hdl_resubscribe(struct event_hdl_sub *cur_sub, struct event_hdl_sub_type type)
{
	return _event_hdl_resub_async(cur_sub, type);
}

void _event_hdl_pause(struct event_hdl_sub *cur_sub)
{
	cur_sub->flags |= EHDL_SUB_F_PAUSED;
}

void event_hdl_pause(struct event_hdl_sub *cur_sub)
{
	struct mt_list lock;

	lock = MT_LIST_LOCK_ELT(&cur_sub->mt_list);
	if (lock.next != &cur_sub->mt_list)
		_event_hdl_pause(cur_sub);
	// else already removed
	MT_LIST_UNLOCK_ELT(&cur_sub->mt_list, lock);
}

void _event_hdl_resume(struct event_hdl_sub *cur_sub)
{
	cur_sub->flags &= ~EHDL_SUB_F_PAUSED;
}

void event_hdl_resume(struct event_hdl_sub *cur_sub)
{
	struct mt_list lock;

	lock = MT_LIST_LOCK_ELT(&cur_sub->mt_list);
	if (lock.next != &cur_sub->mt_list)
		_event_hdl_resume(cur_sub);
	// else already removed
	MT_LIST_UNLOCK_ELT(&cur_sub->mt_list, lock);
}
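
/* Usage sketch: pause/resume lets a subscriber temporarily mute
 * notifications without tearing the subscription down. Events published
 * while paused are simply skipped for this subscription, not queued:
 *
 *	event_hdl_pause(sub);
 *	// ... section where notifications are unwanted ...
 *	event_hdl_resume(sub);
 */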

void event_hdl_unsubscribe(struct event_hdl_sub *del_sub)
{
	_event_hdl_unsubscribe_async(del_sub);
	/* drop refcount, assuming the caller no longer uses the ptr */
	event_hdl_drop(del_sub);
}

int event_hdl_subscribe(event_hdl_sub_list *sub_list, struct event_hdl_sub_type e_type, struct event_hdl hdl)
{
	struct event_hdl_sub *sub;

	sub = event_hdl_subscribe_ptr(sub_list, e_type, hdl);
	if (sub) {
		/* drop refcount because the user is not willing to hold a reference */
		event_hdl_drop(sub);
		return 1;
	}
	return 0;
}
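
/* Registration sketch (my_sync_handler is an illustrative name),
 * assuming the EVENT_HDL_SYNC() initializer helper from
 * <haproxy/event_hdl.h>:
 *
 *	void my_sync_handler(const struct event_hdl_cb *cb, void *private);
 *
 *	if (!event_hdl_subscribe(NULL, EVENT_HDL_SUB_SERVER_ADD,
 *	                         EVENT_HDL_SYNC(my_sync_handler, NULL, NULL)))
 *		// subscription failed (memory error or duplicate id)
 *
 * Passing NULL as <sub_list> registers against the global subscription list.
 */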

/* Subscription external lookup functions
 */
int event_hdl_lookup_unsubscribe(event_hdl_sub_list *sub_list,
                                 uint64_t lookup_id)
{
	struct event_hdl_sub *del_sub = NULL;
	struct mt_list *elt1, elt2;
	int found = 0;

	if (!sub_list)
		sub_list = &global_event_hdl_sub_list; /* fall back to global list */

	mt_list_for_each_entry_safe(del_sub, &sub_list->head, mt_list, elt1, elt2) {
		if (lookup_id == del_sub->hdl.id) {
			/* we found a matching registered hdl */
			MT_LIST_DELETE_SAFE(elt1);
			_event_hdl_unsubscribe(del_sub);
			found = 1;
			break; /* id is unique, stop searching */
		}
	}
	return found;
}

int event_hdl_lookup_resubscribe(event_hdl_sub_list *sub_list,
                                 uint64_t lookup_id, struct event_hdl_sub_type type)
{
	struct event_hdl_sub *cur_sub = NULL;
	struct mt_list *elt1, elt2;
	int status = 0;

	if (!sub_list)
		sub_list = &global_event_hdl_sub_list; /* fall back to global list */

	mt_list_for_each_entry_safe(cur_sub, &sub_list->head, mt_list, elt1, elt2) {
		if (lookup_id == cur_sub->hdl.id) {
			/* we found a matching registered hdl */
			status = _event_hdl_resub(cur_sub, type);
			break; /* id is unique, stop searching */
		}
	}
	return status;
}

int event_hdl_lookup_pause(event_hdl_sub_list *sub_list,
                           uint64_t lookup_id)
{
	struct event_hdl_sub *cur_sub = NULL;
	struct mt_list *elt1, elt2;
	int found = 0;

	if (!sub_list)
		sub_list = &global_event_hdl_sub_list; /* fall back to global list */

	mt_list_for_each_entry_safe(cur_sub, &sub_list->head, mt_list, elt1, elt2) {
		if (lookup_id == cur_sub->hdl.id) {
			/* we found a matching registered hdl */
			_event_hdl_pause(cur_sub);
			found = 1;
			break; /* id is unique, stop searching */
		}
	}
	return found;
}

int event_hdl_lookup_resume(event_hdl_sub_list *sub_list,
                            uint64_t lookup_id)
{
	struct event_hdl_sub *cur_sub = NULL;
	struct mt_list *elt1, elt2;
	int found = 0;

	if (!sub_list)
		sub_list = &global_event_hdl_sub_list; /* fall back to global list */

	mt_list_for_each_entry_safe(cur_sub, &sub_list->head, mt_list, elt1, elt2) {
		if (lookup_id == cur_sub->hdl.id) {
			/* we found a matching registered hdl */
			_event_hdl_resume(cur_sub);
			found = 1;
			break; /* id is unique, stop searching */
		}
	}
	return found;
}

struct event_hdl_sub *event_hdl_lookup_take(event_hdl_sub_list *sub_list,
                                            uint64_t lookup_id)
{
	struct event_hdl_sub *cur_sub = NULL;
	struct mt_list *elt1, elt2;
	uint8_t found = 0;

	if (!sub_list)
		sub_list = &global_event_hdl_sub_list; /* fall back to global list */

	mt_list_for_each_entry_safe(cur_sub, &sub_list->head, mt_list, elt1, elt2) {
		if (lookup_id == cur_sub->hdl.id) {
			/* we found a matching registered hdl */
			event_hdl_take(cur_sub);
			found = 1;
			break; /* id is unique, stop searching */
		}
	}
	if (found)
		return cur_sub;
	return NULL;
}
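
/* Usage sketch: the subscription returned by event_hdl_lookup_take()
 * holds an extra reference that the caller must eventually release
 * with event_hdl_drop():
 *
 *	struct event_hdl_sub *sub;
 *
 *	sub = event_hdl_lookup_take(NULL, my_id);
 *	if (sub) {
 *		// ... inspect or manage the subscription ...
 *		event_hdl_drop(sub);
 *	}
 */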

/* event publishing functions
 */
static int _event_hdl_publish(event_hdl_sub_list *sub_list, struct event_hdl_sub_type e_type,
                              const struct event_hdl_cb_data *data)
{
	struct event_hdl_sub *cur_sub;
	struct mt_list *elt1, elt2;
	struct event_hdl_async_event_data *async_data = NULL; /* reuse async data for multiple async hdls */
	int error = 0;

	mt_list_for_each_entry_safe(cur_sub, &sub_list->head, mt_list, elt1, elt2) {
		/* notify each function that has subscribed to sub_family.type, unless paused */
		if ((cur_sub->sub.family == e_type.family) &&
		    ((cur_sub->sub.subtype & e_type.subtype) == e_type.subtype) &&
		    !(cur_sub->flags & EHDL_SUB_F_PAUSED)) {
			/* hdl should be notified */
			if (!cur_sub->hdl.async) {
				/* sync mode: simply call the cb pointer;
				 * it is up to the callee to schedule a task if needed or
				 * take specific precautions in order to return as fast as possible
				 * and not use locks that are already held by the caller
				 */
				struct event_hdl_cb cb;
				struct event_hdl_sub_mgmt sub_mgmt;

				sub_mgmt = EVENT_HDL_SUB_MGMT_SYNC(cur_sub);
				cb.e_type = e_type;
				if (data)
					cb.e_data = data->_ptr;
				else
					cb.e_data = NULL;
				cb.sub_mgmt = &sub_mgmt;
				cb._sync = 1;

				/* call user function */
				cur_sub->hdl.sync_ptr(&cb, cur_sub->hdl.private);

				if (!sub_mgmt.this) {
					/* user has performed hdl unsub,
					 * we must remove it from the list
					 */
					MT_LIST_DELETE_SAFE(elt1);
					/* then free it */
					_event_hdl_unsubscribe(cur_sub);
				}
			} else {
				/* async mode: here we need to prepare event data
				 * and push it to the event_queue of the task(s)
				 * responsible for consuming the events of the current
				 * subscription.
				 * Once the event is pushed, we wake up the associated task.
				 * This feature depends on <haproxy/task> which also
				 * depends on <haproxy/pool>:
				 * if STG_PREPARE+STG_POOL is not performed prior to publishing to
				 * an async handler, the program may crash.
				 * Hopefully, STG_PREPARE+STG_POOL should be done early in
				 * the HAProxy startup sequence.
				 */
				struct event_hdl_async_event *new_event;

				new_event = pool_alloc(pool_head_sub_event);
				if (!new_event) {
					error = 1;
					break; /* stop on error */
				}
				new_event->type = e_type;
				new_event->private = cur_sub->hdl.private;
				new_event->when = date;
				new_event->sub_mgmt = EVENT_HDL_SUB_MGMT_ASYNC(cur_sub);
				if (data) {
					/* if this fails, please adjust EVENT_HDL_ASYNC_EVENT_DATA in
					 * the event_hdl-t.h file or consider providing dynamic struct members
					 * to reduce the overall struct size
					 */
					BUG_ON(data->_size > sizeof(async_data->data));
					if (!async_data) {
						/* first async hdl reached - preparing the async_data cache */
						async_data = pool_alloc(pool_head_sub_event_data);
						if (!async_data) {
							error = 1;
							pool_free(pool_head_sub_event, new_event);
							break; /* stop on error */
						}

						/* async data assignment */
						memcpy(async_data->data, data->_ptr, data->_size);
						async_data->mfree = data->_mfree;
						/* Initialize the refcount: we start at 1 to prevent the async
						 * data from being freed by an async handler while we
						 * still use it. We will drop the reference when the
						 * publish is over.
						 *
						 * (first use, atomic operation not required)
						 */
						async_data->refcount = 1;
					}
					new_event->_data = async_data;
					new_event->data = async_data->data;
					/* increment the refcount because multiple hdls could
					 * use the same async_data
					 */
					HA_ATOMIC_INC(&async_data->refcount);
				} else
					new_event->data = NULL;

				/* append the new event to the event hdl queue */
				MT_LIST_INIT(&new_event->mt_list);
				HA_ATOMIC_INC(&cur_sub->hdl.async_equeue->size);
				MT_LIST_APPEND(&cur_sub->hdl.async_equeue->head, &new_event->mt_list);

				/* wake up the task */
				event_hdl_task_wakeup(cur_sub->hdl.async_task);
			} /* end async mode */
		} /* end hdl should be notified */
	} /* end mt_list */
	if (async_data) {
		/* we finished publishing, drop the reference on async data */
		_event_hdl_async_data_drop(async_data);
	} else {
		/* no async subscribers, we are responsible for calling the data
		 * member freeing function if it was provided
		 */
		if (data && data->_mfree)
			data->_mfree(data->_ptr);
	}
	if (error) {
		event_hdl_report_hdl_state(ha_warning, &cur_sub->hdl, "PUBLISH", "memory error");
		return 0;
	}
	return 1;
}

/* The publish function should not be used from high calling rate or time
 * sensitive places for now, because the list lookup based on e_type is not
 * optimized at all!
 * Returns 1 in case of SUCCESS:
 *	subscribed handlers were notified successfully
 * Returns 0 in case of FAILURE:
 *	FAILURE means a memory error occurred while handling the very first async
 *	handler from the subscription list.
 *	As async handlers are executed first within the list, when such a failure
 *	occurs you can safely assume that no events were published for the
 *	current call
 */
int event_hdl_publish(event_hdl_sub_list *sub_list,
                      struct event_hdl_sub_type e_type, const struct event_hdl_cb_data *data)
{
	if (!e_type.family) {
		/* do nothing, these types are reserved for internal use only
		 * (ie: unregistering) */
		return 0;
	}
	if (sub_list) {
		/* if a sublist is provided, publish the event to the list subscribers */
		return _event_hdl_publish(sub_list, e_type, data);
	} else {
		/* publish to the global list */
		return _event_hdl_publish(&global_event_hdl_sub_list, e_type, data);
	}
}
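
/* Publishing sketch (the event type and data struct are illustrative,
 * not taken from an actual call site): a publisher fills its data once
 * and lets the API fan it out to all sync and async subscribers,
 * assuming EVENT_HDL_CB_DATA() is the data-wrapping helper from
 * <haproxy/event_hdl.h>:
 *
 *	struct event_hdl_cb_data_server cb_data;
 *
 *	// ... fill cb_data members for the server being added ...
 *	event_hdl_publish(NULL, EVENT_HDL_SUB_SERVER_ADD,
 *	                  EVENT_HDL_CB_DATA(&cb_data));
 */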

void event_hdl_sub_list_init(event_hdl_sub_list *sub_list)
{
	BUG_ON(!sub_list); /* unexpected, the global sublist is managed internally */
	MT_LIST_INIT(&sub_list->head);
	MT_LIST_APPEND(&known_event_hdl_sub_list, &sub_list->known);
}

/* internal function, assumes that the sub_list ptr is always valid */
static void _event_hdl_sub_list_destroy(event_hdl_sub_list *sub_list)
{
	struct event_hdl_sub *cur_sub;
	struct mt_list *elt1, elt2;

	mt_list_for_each_entry_safe(cur_sub, &sub_list->head, mt_list, elt1, elt2) {
		/* remove cur elem from list */
		MT_LIST_DELETE_SAFE(elt1);
		/* then free it */
		_event_hdl_unsubscribe(cur_sub);
	}
}

/* when a subscription list is no longer used, call this
 * to do the cleanup and to make sure all related subscriptions are
 * safely ended according to their types
 */
void event_hdl_sub_list_destroy(event_hdl_sub_list *sub_list)
{
	BUG_ON(!sub_list); /* unexpected, the global sublist is managed internally */
	if (!MT_LIST_DELETE(&sub_list->known))
		return; /* already destroyed */
	_event_hdl_sub_list_destroy(sub_list);
}
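
/* Lifecycle sketch for a private subscription list (illustrative):
 *
 *	event_hdl_sub_list my_list;
 *
 *	event_hdl_sub_list_init(&my_list);
 *	// ... subscribe / publish against &my_list ...
 *	event_hdl_sub_list_destroy(&my_list);
 *
 * destroy may be called at any time: pending async subscriptions are
 * terminated through their END event as described above, and a second
 * destroy on the same list is a harmless no-op.
 */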

INITCALL0(STG_INIT, event_hdl_init);