/*
 * general purpose event handlers management
 *
 * Copyright 2022 HAProxy Technologies
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2.1 of the License, or (at your option) any later version.
 *
 */

#include <string.h>
#include <haproxy/event_hdl.h>
#include <haproxy/compiler.h>
#include <haproxy/task.h>
#include <haproxy/tools.h>
#include <haproxy/errors.h>
#include <haproxy/xxhash.h>

/* changes to event types in the event_hdl-t.h file should be reflected in the
 * map below to allow string-to-type and type-to-string conversions
 */
static struct event_hdl_sub_type_map event_hdl_sub_type_map[] = {
	{"NONE", EVENT_HDL_SUB_NONE},
	{"SERVER", EVENT_HDL_SUB_SERVER},
	{"SERVER_ADD", EVENT_HDL_SUB_SERVER_ADD},
	{"SERVER_DEL", EVENT_HDL_SUB_SERVER_DEL},
	{"SERVER_UP", EVENT_HDL_SUB_SERVER_UP},
	{"SERVER_DOWN", EVENT_HDL_SUB_SERVER_DOWN},
};

/* internal types (only used in this file) */
struct event_hdl_async_task_default_ctx
{
	event_hdl_async_equeue e_queue; /* event queue list */
	event_hdl_cb_async func; /* event handling func */
};

/* memory pools declarations */
DECLARE_STATIC_POOL(pool_head_sub, "ehdl_sub", sizeof(struct event_hdl_sub));
DECLARE_STATIC_POOL(pool_head_sub_event, "ehdl_sub_e", sizeof(struct event_hdl_async_event));
DECLARE_STATIC_POOL(pool_head_sub_event_data, "ehdl_sub_ed", sizeof(struct event_hdl_async_event_data));
DECLARE_STATIC_POOL(pool_head_sub_taskctx, "ehdl_sub_tctx", sizeof(struct event_hdl_async_task_default_ctx));

/* global subscription list (implicit where NULL is used as sublist argument) */
static struct mt_list global_event_hdl_sub_list = MT_LIST_HEAD_INIT(global_event_hdl_sub_list);

/* TODO: will become a config tunable
 * ie: tune.events.max-async-notif-at-once
 */
static int event_hdl_async_max_notif_at_once = 10;

/* general purpose hashing function when you want to compute
 * an ID based on <scope> x <name>
 * It is your responsibility to make sure <scope> is not used
 * elsewhere in the code (or that you are fine with sharing
 * the scope).
 */
inline uint64_t event_hdl_id(const char *scope, const char *name)
{
	XXH64_state_t state;

	XXH64_reset(&state, 0);
	XXH64_update(&state, scope, strlen(scope));
	XXH64_update(&state, name, strlen(name));
	return XXH64_digest(&state);
}
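
/* Illustrative sketch (not compiled here): a caller would typically derive a
 * stable ID from its own scope and a handler name, then reuse that ID for
 * later lookups. The "my_module"/"my_handler" names below are hypothetical.
 *
 *	uint64_t id = event_hdl_id("my_module", "my_handler");
 *	// the same (scope, name) pair always hashes to the same id, so the
 *	// subscription can later be found again with the lookup helpers below
 */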

/* takes a sub_type as input, returns corresponding sub_type
 * printable string or "N/A" if not found.
 * If not found, an error will be reported to stderr so the developers
 * know that a sub_type is missing its associated string in event_hdl-t.h
 */
const char *event_hdl_sub_type_to_string(struct event_hdl_sub_type sub_type)
{
	int it;

	for (it = 0; it < (int)(sizeof(event_hdl_sub_type_map) / sizeof(event_hdl_sub_type_map[0])); it++) {
		if (sub_type.family == event_hdl_sub_type_map[it].type.family &&
		    sub_type.subtype == event_hdl_sub_type_map[it].type.subtype)
			return event_hdl_sub_type_map[it].name;
	}
	ha_alert("event_hdl-t.h: missing sub_type string representation.\n"
		 "Please reflect any changes in event_hdl_sub_type_map.\n");
	return "N/A";
}

/* returns the internal sub_type corresponding
 * to the printable representation <name>
 * or EVENT_HDL_SUB_NONE if no such event exists
 * (see event_hdl-t.h for the complete list of supported types)
 */
struct event_hdl_sub_type event_hdl_string_to_sub_type(const char *name)
{
	int it;

	for (it = 0; it < (int)(sizeof(event_hdl_sub_type_map) / sizeof(event_hdl_sub_type_map[0])); it++) {
		if (!strcmp(name, event_hdl_sub_type_map[it].name))
			return event_hdl_sub_type_map[it].type;
	}
	return EVENT_HDL_SUB_NONE;
}

/* Takes the <subscriptions> sub list as input, returns a printable string
 * containing every sub_type contained in <subscriptions>,
 * separated by the '|' character.
 * Returns NULL if no sub_types are found in <subscriptions>.
 * This function leverages memprintf, thus it is up to the
 * caller to free the returned value (if != NULL) when it is
 * no longer used.
 */
char *event_hdl_sub_type_print(struct event_hdl_sub_type subscriptions)
{
	char *out = NULL;
	int it;
	uint8_t first = 1;

	for (it = 0; it < (int)(sizeof(event_hdl_sub_type_map) / sizeof(event_hdl_sub_type_map[0])); it++) {
		if (subscriptions.family == event_hdl_sub_type_map[it].type.family &&
		    ((subscriptions.subtype & event_hdl_sub_type_map[it].type.subtype) ==
		     event_hdl_sub_type_map[it].type.subtype)) {
			if (first) {
				memprintf(&out, "%s", event_hdl_sub_type_map[it].name);
				first--;
			}
			else
				memprintf(&out, "%s%s%s", out, "|", event_hdl_sub_type_map[it].name);
		}
	}

	return out;
}
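
/* Illustrative sketch (not compiled here): matching is done on the subtype
 * bitmask within a family, so printing a family-wide subscription is expected
 * to list every map entry whose subtype bits are all included, e.g.:
 *
 *	char *str = event_hdl_sub_type_print(EVENT_HDL_SUB_SERVER);
 *	// str would contain the matching entries joined by '|', something like
 *	// "SERVER|SERVER_ADD|SERVER_DEL|SERVER_UP|SERVER_DOWN"
 *	// (exact content depends on the map above)
 *	free(str); // memprintf() output must be freed by the caller
 */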

/* event_hdl debug/reporting function */
typedef void (*event_hdl_report_hdl_state_func)(const char *fmt, ...);
static void event_hdl_report_hdl_state(event_hdl_report_hdl_state_func report_func,
                                       const struct event_hdl *hdl, const char *what, const char *state)
{
	report_func("[event_hdl]:%s (%s)'#%llu@%s': %s\n",
		    what,
		    (hdl->async) ? "ASYNC" : "SYNC",
		    (long long unsigned int)hdl->id,
		    hdl->dorigin,
		    state);
}

void event_hdl_async_free_event(struct event_hdl_async_event *e)
{
	if (unlikely(event_hdl_sub_type_equal(e->type, EVENT_HDL_SUB_END))) {
		/* last event for hdl, special case */
		/* free subscription entry as we're the last one still using it
		 * (it is already removed from mt_list, no race can occur)
		 */
		event_hdl_drop(e->sub_mgmt.this);
	}
	else if (e->_data &&
	         HA_ATOMIC_SUB_FETCH(&e->_data->refcount, 1) == 0) {
		/* we are the last event holding reference to event data - free required */
		pool_free(pool_head_sub_event_data, e->_data); /* data wrapper */
	}
	pool_free(pool_head_sub_event, e);
}

/* task handler used for normal async subscription mode
 * if you use advanced async subscription mode, you can use this
 * as an example to implement your own task wrapper
 */
static struct task *event_hdl_async_task_default(struct task *task, void *ctx, unsigned int state)
{
	struct tasklet *tl = (struct tasklet *)task;
	struct event_hdl_async_task_default_ctx *task_ctx = ctx;
	struct event_hdl_async_event *event;
	int max_notif_at_once_it = 0;
	uint8_t done = 0;

	/* run through e_queue, and call func() for each event
	 * if we read END event, it indicates we must stop:
	 * no more events to come (handler is unregistered)
	 * so we must free task_ctx and stop task
	 */
	while (max_notif_at_once_it < event_hdl_async_max_notif_at_once &&
	       (event = event_hdl_async_equeue_pop(&task_ctx->e_queue)))
	{
		if (event_hdl_sub_type_equal(event->type, EVENT_HDL_SUB_END)) {
			done = 1;
			event_hdl_async_free_event(event);
			/* break is normally not even required, EVENT_HDL_SUB_END
			 * is guaranteed to be the last event of the e_queue
			 * (because in normal mode one sub == one e_queue)
			 */
			break;
		}
		else {
			struct event_hdl_cb cb;

			cb.e_type = event->type;
			cb.e_data = event->data;
			cb.sub_mgmt = &event->sub_mgmt;
			cb._sync = 0;

			/* call user function */
			task_ctx->func(&cb, event->private);
			max_notif_at_once_it++;
		}
		event_hdl_async_free_event(event);
	}

	if (done) {
		/* our job is done, subscription is over: no more events to come */
		pool_free(pool_head_sub_taskctx, task_ctx);
		tasklet_free(tl);
		return NULL;
	}
	return task;
}

/* internal subscription mgmt functions */
static inline struct event_hdl_sub_type _event_hdl_getsub(struct event_hdl_sub *cur_sub)
{
	return cur_sub->sub;
}

static inline struct event_hdl_sub_type _event_hdl_getsub_async(struct event_hdl_sub *cur_sub)
{
	struct mt_list lock;
	struct event_hdl_sub_type type = EVENT_HDL_SUB_NONE;

	lock = MT_LIST_LOCK_ELT(&cur_sub->mt_list);
	if (lock.next != &cur_sub->mt_list)
		type = _event_hdl_getsub(cur_sub);
	// else already removed
	MT_LIST_UNLOCK_ELT(&cur_sub->mt_list, lock);
	return type;
}

static inline int _event_hdl_resub(struct event_hdl_sub *cur_sub, struct event_hdl_sub_type type)
{
	if (!event_hdl_sub_family_equal(cur_sub->sub, type))
		return 0; /* family types differ, do nothing */
	cur_sub->sub.subtype = type.subtype; /* new subtype assignment */
	return 1;
}

static inline int _event_hdl_resub_async(struct event_hdl_sub *cur_sub, struct event_hdl_sub_type type)
{
	int status = 0;
	struct mt_list lock;

	lock = MT_LIST_LOCK_ELT(&cur_sub->mt_list);
	if (lock.next != &cur_sub->mt_list)
		status = _event_hdl_resub(cur_sub, type);
	// else already removed
	MT_LIST_UNLOCK_ELT(&cur_sub->mt_list, lock);
	return status;
}

static inline void _event_hdl_unsubscribe(struct event_hdl_sub *del_sub)
{
	struct mt_list lock;

	if (del_sub->hdl.async) {
		/* ASYNC SUB MODE */
		/* push EVENT_HDL_SUB_END (to notify the task that the subscription is dead) */

		/* push the END EVENT in busy state so we can safely wake up
		 * the task before releasing it.
		 * Not doing so would expose us to a race where the task could have
		 * already consumed the END event before the wakeup, and some tasks
		 * kill themselves (ie: normal async mode) when they receive such an event
		 */
		lock = MT_LIST_APPEND_LOCKED(del_sub->hdl.async_equeue, &del_sub->async_end->mt_list);

		/* wake up the task */
		tasklet_wakeup(del_sub->hdl.async_task);

		/* unlock END EVENT (we're done, the task is now free to consume it) */
		MT_LIST_UNLOCK_ELT(&del_sub->async_end->mt_list, lock);

		/* we don't free the sub here:
		 * freeing will be performed by the async task, so it can safely rely
		 * on the pointer until it notices the END event
		 */
	} else {
		/* SYNC SUB MODE */

		/* we can directly free the subscription:
		 * no other thread can access it since we successfully
		 * removed it from the list
		 */
		event_hdl_drop(del_sub);
	}
}

static inline void _event_hdl_unsubscribe_async(struct event_hdl_sub *del_sub)
{
	if (!MT_LIST_DELETE(&del_sub->mt_list))
		return; /* already removed (but may be pending in e_queues) */
	_event_hdl_unsubscribe(del_sub);
}

/* sub_mgmt function pointers (for handlers) */
static struct event_hdl_sub_type event_hdl_getsub_sync(const struct event_hdl_sub_mgmt *mgmt)
{
	if (!mgmt)
		return EVENT_HDL_SUB_NONE;

	if (!mgmt->this)
		return EVENT_HDL_SUB_NONE; /* already removed from sync ctx */
	return _event_hdl_getsub(mgmt->this);
}

static struct event_hdl_sub_type event_hdl_getsub_async(const struct event_hdl_sub_mgmt *mgmt)
{
	if (!mgmt)
		return EVENT_HDL_SUB_NONE;

	return _event_hdl_getsub_async(mgmt->this);
}

static int event_hdl_resub_sync(const struct event_hdl_sub_mgmt *mgmt, struct event_hdl_sub_type type)
{
	if (!mgmt)
		return 0;

	if (!mgmt->this)
		return 0; /* already removed from sync ctx */
	return _event_hdl_resub(mgmt->this, type);
}

static int event_hdl_resub_async(const struct event_hdl_sub_mgmt *mgmt, struct event_hdl_sub_type type)
{
	if (!mgmt)
		return 0;

	return _event_hdl_resub_async(mgmt->this, type);
}

static void event_hdl_unsubscribe_sync(const struct event_hdl_sub_mgmt *mgmt)
{
	if (!mgmt)
		return;

	if (!mgmt->this)
		return; /* already removed from sync ctx */

	/* assuming that publish sync code will notice that mgmt->this is NULL
	 * and will perform the list removal using MT_LIST_DELETE_SAFE and
	 * _event_hdl_unsubscribe()
	 * while still owning the lock
	 */
	((struct event_hdl_sub_mgmt *)mgmt)->this = NULL;
}

static void event_hdl_unsubscribe_async(const struct event_hdl_sub_mgmt *mgmt)
{
	if (!mgmt)
		return;

	_event_hdl_unsubscribe_async(mgmt->this);
}

#define EVENT_HDL_SUB_MGMT_ASYNC(_sub) (struct event_hdl_sub_mgmt){ .this = _sub,                     \
                                                                    .getsub = event_hdl_getsub_async, \
                                                                    .resub = event_hdl_resub_async,   \
                                                                    .unsub = event_hdl_unsubscribe_async}
#define EVENT_HDL_SUB_MGMT_SYNC(_sub) (struct event_hdl_sub_mgmt){ .this = _sub,                    \
                                                                   .getsub = event_hdl_getsub_sync, \
                                                                   .resub = event_hdl_resub_sync,   \
                                                                   .unsub = event_hdl_unsubscribe_sync}

struct event_hdl_sub *event_hdl_subscribe_ptr(event_hdl_sub_list *sub_list,
                                              struct event_hdl_sub_type e_type, struct event_hdl hdl)
{
	struct event_hdl_sub *new_sub;
	struct mt_list *elt1, elt2;
	uint8_t found = 0;
	struct event_hdl_async_task_default_ctx *task_ctx;

	if (!sub_list)
		sub_list = &global_event_hdl_sub_list; /* fall back to global list */

	/* hdl API consistency check */
	/* FIXME: do we need to ensure that if private is set, private_free should be set as well? */
	BUG_ON((!hdl.async && !hdl.sync_ptr) ||
	       (hdl.async == EVENT_HDL_ASYNC_MODE_NORMAL && !hdl.async_ptr) ||
	       (hdl.async == EVENT_HDL_ASYNC_MODE_ADVANCED &&
	        (!hdl.async_equeue || !hdl.async_task)));

	/* first check if such identified hdl is not already registered */
	if (hdl.id) {
		mt_list_for_each_entry_safe(new_sub, sub_list, mt_list, elt1, elt2) {
			if (hdl.id == new_sub->hdl.id) {
				/* we found matching registered hdl */
				found = 1;
				break;
			}
		}
	}

	if (found) {
		/* error already registered */
		event_hdl_report_hdl_state(ha_warning, &hdl, "SUB", "could not subscribe: subscription with this id already exists");
		return NULL;
	}

	new_sub = pool_alloc(pool_head_sub);
	if (new_sub == NULL) {
		goto new_sub_memory_error;
	}

	/* assignments */
	new_sub->sub.family = e_type.family;
	new_sub->sub.subtype = e_type.subtype;
	new_sub->hdl = hdl;

	if (hdl.async) {
		/* async END event pre-allocation */
		new_sub->async_end = pool_alloc(pool_head_sub_event);
		if (!new_sub->async_end) {
			/* memory error */
			goto new_sub_memory_error_event_end;
		}
		if (hdl.async == EVENT_HDL_ASYNC_MODE_NORMAL) {
			/* normal mode: no task provided, we must initialize it */

			/* initialize task context */
			task_ctx = pool_alloc(pool_head_sub_taskctx);

			if (!task_ctx) {
				/* memory error */
				goto new_sub_memory_error_task_ctx;
			}
			MT_LIST_INIT(&task_ctx->e_queue);
			task_ctx->func = new_sub->hdl.async_ptr;

			new_sub->hdl.async_equeue = &task_ctx->e_queue;
			new_sub->hdl.async_task = tasklet_new();

			if (!new_sub->hdl.async_task) {
				/* memory error */
				goto new_sub_memory_error_task;
			}
			new_sub->hdl.async_task->context = task_ctx;
			new_sub->hdl.async_task->process = event_hdl_async_task_default;
		}
		/* registration cannot fail anymore */

		/* initialize END event (used to notify about subscription ending)
		 * used by both normal and advanced mode:
		 * - to safely terminate the task in normal mode
		 * - to safely free subscription and
		 *   keep track of active subscriptions in advanced mode
		 */
		new_sub->async_end->type = EVENT_HDL_SUB_END;
		new_sub->async_end->sub_mgmt = EVENT_HDL_SUB_MGMT_ASYNC(new_sub);
		new_sub->async_end->private = new_sub->hdl.private;
		new_sub->async_end->_data = NULL;
		MT_LIST_INIT(&new_sub->async_end->mt_list);
	}
	/* set refcount to 2:
	 * 1 for the handler (because the handler can manage the subscription itself)
	 * 1 for the caller (will be dropped automatically if the caller uses the non-ptr version)
	 */
	new_sub->refcount = 2;

	/* Append to the list (global or user specified list).
	 * For now, append in sync mode and insert in async mode
	 * so that async handlers are executed first
	 */
	MT_LIST_INIT(&new_sub->mt_list);
	if (hdl.async) {
		/* async mode, insert at the beginning of the list */
		MT_LIST_INSERT(sub_list, &new_sub->mt_list);
	} else {
		/* sync mode, append at the end of the list */
		MT_LIST_APPEND(sub_list, &new_sub->mt_list);
	}

	return new_sub;

new_sub_memory_error_task:
	pool_free(pool_head_sub_taskctx, task_ctx);
new_sub_memory_error_task_ctx:
	pool_free(pool_head_sub_event, new_sub->async_end);
new_sub_memory_error_event_end:
	pool_free(pool_head_sub, new_sub);
new_sub_memory_error:

	event_hdl_report_hdl_state(ha_warning, &hdl, "SUB", "could not register subscription due to memory error");

	return NULL;
}

void event_hdl_take(struct event_hdl_sub *sub)
{
	HA_ATOMIC_INC(&sub->refcount);
}

void event_hdl_drop(struct event_hdl_sub *sub)
{
	if (HA_ATOMIC_SUB_FETCH(&sub->refcount, 1) != 0)
		return;

	/* we are the last one holding a reference to this subscription - free required */
	if (sub->hdl.private_free) {
		/* free private data if specified upon registration */
		sub->hdl.private_free(sub->hdl.private);
	}
	pool_free(pool_head_sub, sub);
}

int event_hdl_resubscribe(struct event_hdl_sub *cur_sub, struct event_hdl_sub_type type)
{
	return _event_hdl_resub_async(cur_sub, type);
}

void event_hdl_unsubscribe(struct event_hdl_sub *del_sub)
{
	_event_hdl_unsubscribe_async(del_sub);
	/* drop refcount, assuming the caller no longer uses the ptr */
	event_hdl_drop(del_sub);
}

int event_hdl_subscribe(event_hdl_sub_list *sub_list, struct event_hdl_sub_type e_type, struct event_hdl hdl)
{
	struct event_hdl_sub *sub;

	sub = event_hdl_subscribe_ptr(sub_list, e_type, hdl);
	if (sub) {
		/* drop refcount because the user is not willing to hold a reference */
		event_hdl_drop(sub);
		return 1;
	}
	return 0;
}
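
/* Illustrative sketch (not compiled here) of a sync registration on the global
 * list, using only identifiers visible in this file. The handler name, its
 * exact prototype and the "my_module" scope are assumptions for the example;
 * the authoritative callback typedefs and helpers live in event_hdl-t.h/event_hdl.h.
 *
 *	static void my_server_add_hdl(struct event_hdl_cb *cb, void *private)
 *	{
 *		// sync handlers run inline from the publisher, so they must
 *		// return quickly and avoid taking locks held by the caller
 *	}
 *
 *	struct event_hdl my_hdl = {
 *		.id = event_hdl_id("my_module", "srv_add"), // optional, enables lookups
 *		.dorigin = "my_module",                     // shown in debug reports
 *		.async = 0,                                 // sync mode
 *		.sync_ptr = my_server_add_hdl,
 *		.private = NULL,
 *		.private_free = NULL,
 *	};
 *	event_hdl_subscribe(NULL, EVENT_HDL_SUB_SERVER_ADD, my_hdl); // NULL = global list
 */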

/* Subscription external lookup functions
 */
int event_hdl_lookup_unsubscribe(event_hdl_sub_list *sub_list,
                                 uint64_t lookup_id)
{
	struct event_hdl_sub *del_sub = NULL;
	struct mt_list *elt1, elt2;
	int found = 0;

	if (!sub_list)
		sub_list = &global_event_hdl_sub_list; /* fall back to global list */

	mt_list_for_each_entry_safe(del_sub, sub_list, mt_list, elt1, elt2) {
		if (lookup_id == del_sub->hdl.id) {
			/* we found matching registered hdl */
			MT_LIST_DELETE_SAFE(elt1);
			_event_hdl_unsubscribe(del_sub);
			found = 1;
			break; /* id is unique, stop searching */
		}
	}
	return found;
}

int event_hdl_lookup_resubscribe(event_hdl_sub_list *sub_list,
                                 uint64_t lookup_id, struct event_hdl_sub_type type)
{
	struct event_hdl_sub *cur_sub = NULL;
	struct mt_list *elt1, elt2;
	int status = 0;

	if (!sub_list)
		sub_list = &global_event_hdl_sub_list; /* fall back to global list */

	mt_list_for_each_entry_safe(cur_sub, sub_list, mt_list, elt1, elt2) {
		if (lookup_id == cur_sub->hdl.id) {
			/* we found matching registered hdl */
			status = _event_hdl_resub(cur_sub, type);
			break; /* id is unique, stop searching */
		}
	}
	return status;
}

struct event_hdl_sub *event_hdl_lookup_take(event_hdl_sub_list *sub_list,
                                            uint64_t lookup_id)
{
	struct event_hdl_sub *cur_sub = NULL;
	struct mt_list *elt1, elt2;
	uint8_t found = 0;

	if (!sub_list)
		sub_list = &global_event_hdl_sub_list; /* fall back to global list */

	mt_list_for_each_entry_safe(cur_sub, sub_list, mt_list, elt1, elt2) {
		if (lookup_id == cur_sub->hdl.id) {
			/* we found matching registered hdl */
			event_hdl_take(cur_sub);
			found = 1;
			break; /* id is unique, stop searching */
		}
	}
	if (found)
		return cur_sub;
	return NULL;
}
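
/* Illustrative sketch (not compiled here): the lookup helpers above pair an id
 * computed with event_hdl_id() with refcount management. The "my_module"/"srv_add"
 * names are the same hypothetical ones used in the subscription example above.
 *
 *	struct event_hdl_sub *sub;
 *
 *	sub = event_hdl_lookup_take(NULL, event_hdl_id("my_module", "srv_add"));
 *	if (sub) {
 *		// the returned sub is refcounted for us (event_hdl_take was called),
 *		// so the pointer stays valid even if the handler unsubscribes itself;
 *		// release it with event_hdl_drop() once done, or with
 *		// event_hdl_unsubscribe() to terminate the subscription as well
 *		event_hdl_drop(sub);
 *	}
 */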

/* event publishing functions
 */
static int _event_hdl_publish(event_hdl_sub_list *sub_list, struct event_hdl_sub_type e_type,
                              const struct event_hdl_cb_data *data)
{
	struct event_hdl_sub *cur_sub;
	struct mt_list *elt1, elt2;
	struct event_hdl_async_event_data *async_data = NULL; /* reuse async data for multiple async hdls */
	int error = 0;

	mt_list_for_each_entry_safe(cur_sub, sub_list, mt_list, elt1, elt2) {
		/* notify each function that has subscribed to sub_family.type */
		if ((cur_sub->sub.family == e_type.family) &&
		    ((cur_sub->sub.subtype & e_type.subtype) == e_type.subtype)) {
			/* hdl should be notified */
			if (!cur_sub->hdl.async) {
				/* sync mode: simply call the cb pointer.
				 * It is up to the callee to schedule a task if needed or
				 * to take specific precautions in order to return as fast as possible
				 * and not to use locks that are already held by the caller
				 */
				struct event_hdl_cb cb;
				struct event_hdl_sub_mgmt sub_mgmt;

				sub_mgmt = EVENT_HDL_SUB_MGMT_SYNC(cur_sub);
				cb.e_type = e_type;
				if (data)
					cb.e_data = data->_ptr;
				else
					cb.e_data = NULL;
				cb.sub_mgmt = &sub_mgmt;
				cb._sync = 1;

				/* call user function */
				cur_sub->hdl.sync_ptr(&cb, cur_sub->hdl.private);

				if (!sub_mgmt.this) {
					/* user has performed hdl unsub
					 * we must remove it from the list
					 */
					MT_LIST_DELETE_SAFE(elt1);
					/* then free it */
					_event_hdl_unsubscribe(cur_sub);
				}
			} else {
				/* async mode: here we need to prepare event data
				 * and push it to the event_queue of the task(s)
				 * responsible for consuming the events of current
				 * subscription.
				 * Once the event is pushed, we wake up the associated task.
				 * This feature depends on <haproxy/task> that also
				 * depends on <haproxy/pool>:
				 * If STG_PREPARE+STG_POOL is not performed prior to publishing to
				 * async handler, program may crash.
				 * Hopefully, STG_PREPARE+STG_POOL should be done early in
				 * HAProxy startup sequence.
				 */
				struct event_hdl_async_event *new_event;

				new_event = pool_alloc(pool_head_sub_event);
				if (!new_event) {
					error = 1;
					break; /* stop on error */
				}
				new_event->type = e_type;
				new_event->private = cur_sub->hdl.private;
				new_event->sub_mgmt = EVENT_HDL_SUB_MGMT_ASYNC(cur_sub);
				if (data) {
					/* if this fails, please adjust EVENT_HDL_ASYNC_EVENT_DATA in
					 * event_hdl-t.h file
					 */
					BUG_ON(data->_size > sizeof(async_data->data));
					if (!async_data) {
						/* first async hdl reached - preparing async_data cache */
						async_data = pool_alloc(pool_head_sub_event_data);
						if (!async_data) {
							error = 1;
							pool_free(pool_head_sub_event, new_event);
							break; /* stop on error */
						}

						/* async data assignment */
						memcpy(async_data->data, data->_ptr, data->_size);
						async_data->refcount = 0; /* initialize async->refcount (first use, atomic operation not required) */
					}
					new_event->_data = async_data;
					new_event->data = async_data->data;
					/* increment refcount because multiple hdls could
					 * use the same async_data
					 */
					HA_ATOMIC_INC(&async_data->refcount);
				} else
					new_event->data = NULL;

				/* appending new event to event hdl queue */
				MT_LIST_INIT(&new_event->mt_list);
				MT_LIST_APPEND(cur_sub->hdl.async_equeue, &new_event->mt_list);

				/* wake up the task */
				tasklet_wakeup(cur_sub->hdl.async_task);
			} /* end async mode */
		} /* end hdl should be notified */
	} /* end mt_list */
	if (error) {
		event_hdl_report_hdl_state(ha_warning, &cur_sub->hdl, "PUBLISH", "memory error");
		return 0;
	}
	return 1;
}

/* The publish function should not be used from high-calling-rate or time-sensitive
 * places for now, because the list lookup based on e_type is not optimized at
 * all!
 * Returns 1 in case of SUCCESS:
 *	Subscribed handlers were notified successfully.
 * Returns 0 in case of FAILURE:
 *	FAILURE means a memory error occurred while handling the very first async handler from
 *	the subscription list.
 *	As async handlers are executed first within the list, when such a failure occurs
 *	you can safely assume that no events were published for the current call.
 */
int event_hdl_publish(event_hdl_sub_list *sub_list,
                      struct event_hdl_sub_type e_type, const struct event_hdl_cb_data *data)
{
	if (!e_type.family) {
		/* do nothing, these types are reserved for internal use only
		 * (ie: unregistering) */
		return 0;
	}
	if (sub_list) {
		/* if sublist is provided, first publish event to list subscribers */
		return _event_hdl_publish(sub_list, e_type, data);
	} else {
		/* publish to global list */
		return _event_hdl_publish(&global_event_hdl_sub_list, e_type, data);
	}
}
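
/* Illustrative sketch (not compiled here): publishing an event with a payload.
 * The payload struct and its content are hypothetical; only the _ptr/_size
 * fields used by _event_hdl_publish() above are relied upon here.
 *
 *	struct my_payload { int foo; } payload = { .foo = 42 };
 *	struct event_hdl_cb_data cb_data = {
 *		._ptr = &payload,
 *		._size = sizeof(payload), // must fit the async data area, see the BUG_ON above
 *	};
 *	// NULL sub_list means the global subscription list; the payload is
 *	// copied for async subscribers and passed by pointer to sync ones,
 *	// so a stack variable is fine here
 *	event_hdl_publish(NULL, EVENT_HDL_SUB_SERVER_ADD, &cb_data);
 */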

/* when a subscription list is no longer used, call this
 * to do the cleanup and make sure all related subscriptions are
 * safely ended according to their types
 */
void event_hdl_sub_list_destroy(event_hdl_sub_list *sub_list)
{
	struct event_hdl_sub *cur_sub;
	struct mt_list *elt1, elt2;

	if (!sub_list)
		sub_list = &global_event_hdl_sub_list; /* fall back to global list */
	mt_list_for_each_entry_safe(cur_sub, sub_list, mt_list, elt1, elt2) {
		/* remove cur elem from list */
		MT_LIST_DELETE_SAFE(elt1);
		/* then free it */
		_event_hdl_unsubscribe(cur_sub);
	}
}