/*
 * Asynchronous signal delivery functions.
 *
 * Copyright 2000-2010 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <signal.h>
#include <string.h>

#include <common/hathreads.h>

#include <proto/signal.h>
#include <proto/log.h>
#include <proto/task.h>

/* Principle: we keep an in-order list of the first occurrence of all received
 * signals. All occurrences of a same signal are grouped though. The signal
 * queue does not need to be deeper than the number of signals we can handle.
 * The handlers will be called asynchronously with the signal number. They can
 * check the number of calls themselves by checking the descriptor of this
 * signal.
 */

int signal_queue_len; /* length of signal queue, <= MAX_SIGNAL (1 entry per signal max) */
int signal_queue[MAX_SIGNAL]; /* in-order queue of received signals */
struct signal_descriptor signal_state[MAX_SIGNAL];
struct pool_head *pool_head_sig_handlers = NULL;
sigset_t blocked_sig;
int signal_pending = 0; /* non-zero if at least one signal remains unprocessed */

/* Common signal handler, used by all signals. Received signals are queued.
 * Signal number zero has a specific status, as it cannot be delivered by the
 * system; any function may call it to perform asynchronous signal delivery.
 */
void signal_handler(int sig)
{
        if (sig < 0 || sig >= MAX_SIGNAL) {
                /* unhandled signal */
                signal(sig, SIG_IGN);
                qfprintf(stderr, "Received unhandled signal %d. Signal has been disabled.\n", sig);
                return;
        }

        if (!signal_state[sig].count) {
                /* signal was not queued yet */
                if (signal_queue_len < MAX_SIGNAL)
                        signal_queue[signal_queue_len++] = sig;
                else
                        qfprintf(stderr, "Signal %d : signal queue is unexpectedly full.\n", sig);
        }

        signal_state[sig].count++;
        if (sig)
                signal(sig, signal_handler); /* re-arm signal */
}
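
/* Usage note (a sketch, not part of the original file): because signal zero
 * can never be delivered by the system, internal code may synthesize it by
 * calling the common handler directly; handlers registered on signal 0 are
 * then invoked at the next signal_process_queue() pass:
 *
 *        signal_handler(0);    <- queue the pseudo-signal for async dispatch
 */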

/* Call handlers of all pending signals and clear counts and queue length. The
 * handlers may unregister themselves by calling signal_register() while they
 * are called, just like it is done with normal signal handlers.
 * Note that it is more efficient to call the inline version which checks the
 * queue length before getting here.
 */
void __signal_process_queue()
{
        int sig, cur_pos = 0;
        struct signal_descriptor *desc;
        sigset_t old_sig;

        /* block signal delivery during processing */
        ha_sigmask(SIG_SETMASK, &blocked_sig, &old_sig);

        /* It is important that we scan the queue forwards so that we can
         * catch any signal that would have been queued by another signal
         * handler. That allows real signal handlers to redistribute signals
         * to tasks subscribed to signal zero.
         */
        for (cur_pos = 0; cur_pos < signal_queue_len; cur_pos++) {
                sig  = signal_queue[cur_pos];
                desc = &signal_state[sig];
                if (desc->count) {
                        struct sig_handler *sh, *shb;
                        list_for_each_entry_safe(sh, shb, &desc->handlers, list) {
                                if ((sh->flags & SIG_F_TYPE_FCT) && sh->handler)
                                        ((void (*)(struct sig_handler *))sh->handler)(sh);
                                else if ((sh->flags & SIG_F_TYPE_TASK) && sh->handler)
                                        task_wakeup(sh->handler, sh->arg | TASK_WOKEN_SIGNAL);
                        }
                        desc->count = 0;
                }
        }
        signal_queue_len = 0;

        /* restore signal delivery */
        ha_sigmask(SIG_SETMASK, &old_sig, NULL);
}
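
/* For reference, a minimal sketch of the inline version mentioned above,
 * assumed to be declared in proto/signal.h (shown as a comment here to avoid
 * clashing with the real declaration); it skips the function call entirely
 * when nothing is pending:
 *
 *        static inline void signal_process_queue()
 *        {
 *                if (unlikely(signal_queue_len > 0))
 *                        __signal_process_queue();
 *        }
 */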

/* perform minimal initializations; report 0 in case of error, 1 if OK */
int signal_init()
{
        int sig;

        signal_queue_len = 0;
        memset(signal_queue, 0, sizeof(signal_queue));
        memset(signal_state, 0, sizeof(signal_state));

        /* Ensure signals are not blocked. Some shells or service managers may
         * accidentally block all of our signals, unfortunately causing lots of
         * zombie processes to remain in the background during reloads.
         */
        sigemptyset(&blocked_sig);
        /* Ensure that SIGUSR2 is blocked until the end of configuration
         * parsing. We don't want the process to be killed by an unregistered
         * USR2 signal when the master-worker is reloading.
         */
        sigaddset(&blocked_sig, SIGUSR2);
        ha_sigmask(SIG_SETMASK, &blocked_sig, NULL);

        sigfillset(&blocked_sig);
        sigdelset(&blocked_sig, SIGPROF);
        /* man sigprocmask: if SIGBUS, SIGFPE, SIGILL, or SIGSEGV are generated
         * while they are blocked, the result is undefined, unless the signal
         * was generated by kill(2), sigqueue(3), or raise(3).
         */
        sigdelset(&blocked_sig, SIGBUS);
        sigdelset(&blocked_sig, SIGFPE);
        sigdelset(&blocked_sig, SIGILL);
        sigdelset(&blocked_sig, SIGSEGV);
        for (sig = 0; sig < MAX_SIGNAL; sig++)
                LIST_INIT(&signal_state[sig].handlers);

        pool_head_sig_handlers = create_pool("sig_handlers", sizeof(struct sig_handler), MEM_F_SHARED);
        return pool_head_sig_handlers != NULL;
}

/* releases all registered signal handlers */
void deinit_signals()
{
        int sig;
        struct sig_handler *sh, *shb;

        for (sig = 0; sig < MAX_SIGNAL; sig++) {
                if (sig != SIGPROF)
                        signal(sig, SIG_DFL);
                list_for_each_entry_safe(sh, shb, &signal_state[sig].handlers, list) {
                        LIST_DEL(&sh->list);
                        pool_free(pool_head_sig_handlers, sh);
                }
        }
}

/* Register a function and an integer argument on a signal. A pointer to the
 * newly allocated sig_handler is returned, or NULL in case of any error. The
 * caller is responsible for unregistering the function when not used anymore.
 * Note that passing a NULL as the function pointer enables interception of the
 * signal without processing, which is identical to SIG_IGN. If the signal is
 * zero (which the system cannot deliver), only internal functions will be able
 * to notify the registered functions.
 */
struct sig_handler *signal_register_fct(int sig, void (*fct)(struct sig_handler *), int arg)
{
        struct sig_handler *sh;

        if (sig < 0 || sig >= MAX_SIGNAL)
                return NULL;

        if (sig)
                signal(sig, fct ? signal_handler : SIG_IGN);

        if (!fct)
                return NULL;

        sh = pool_alloc(pool_head_sig_handlers);
        if (!sh)
                return NULL;

        sh->handler = fct;
        sh->arg = arg;
        sh->flags = SIG_F_TYPE_FCT;
        LIST_ADDQ(&signal_state[sig].handlers, &sh->list);
        return sh;
}
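
/* Illustrative example with hypothetical names: a component wanting a callback
 * on SIGHUP, with an integer tag later retrievable from sh->arg, would
 * typically register it as below and keep the returned pointer for later
 * unregistration:
 *
 *        struct sig_handler *sh = signal_register_fct(SIGHUP, my_hup_handler, 42);
 *        if (!sh)
 *                handle_failure();    <- hypothetical error path
 */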

/* Register a task and a wake-up reason on a signal. A pointer to the newly
 * allocated sig_handler is returned, or NULL in case of any error. The caller
 * is responsible for unregistering the task when not used anymore. Note that
 * passing a NULL as the task pointer enables interception of the signal
 * without processing, which is identical to SIG_IGN. If the signal is zero
 * (which the system cannot deliver), only internal functions will be able to
 * notify the registered functions.
 */
struct sig_handler *signal_register_task(int sig, struct task *task, int reason)
{
        struct sig_handler *sh;

        if (sig < 0 || sig >= MAX_SIGNAL)
                return NULL;

        if (sig)
                signal(sig, signal_handler);

        if (!task)
                return NULL;

        sh = pool_alloc(pool_head_sig_handlers);
        if (!sh)
                return NULL;

        sh->handler = task;
        sh->arg = reason & ~TASK_WOKEN_ANY;
        sh->flags = SIG_F_TYPE_TASK;
        LIST_ADDQ(&signal_state[sig].handlers, &sh->list);
        return sh;
}
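
/* Illustrative example with a hypothetical task: waking a task whenever
 * SIGTERM is received. The reason passed here is OR'ed with TASK_WOKEN_SIGNAL
 * at wake-up time, so 0 is a natural value when no extra flag is needed:
 *
 *        if (!signal_register_task(SIGTERM, my_task, 0))
 *                handle_failure();    <- hypothetical error path
 */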

/* Immediately unregister a handler so that no further signals may be delivered
 * to it. The struct is released so the caller may not reference it anymore.
 */
void signal_unregister_handler(struct sig_handler *handler)
{
        LIST_DEL(&handler->list);
        pool_free(pool_head_sig_handlers, handler);
}

/* Immediately unregister a handler so that no further signals may be delivered
 * to it. The handler struct does not need to be known, only the function or
 * task pointer. This method is expensive because it scans the whole list, so
 * it should only be used for rare cases (eg: exit). The struct is released so
 * the caller may not reference it anymore.
 */
void signal_unregister_target(int sig, void *target)
{
        struct sig_handler *sh, *shb;

        if (sig < 0 || sig >= MAX_SIGNAL)
                return;

        if (!target)
                return;

        list_for_each_entry_safe(sh, shb, &signal_state[sig].handlers, list) {
                if (sh->handler == target) {
                        LIST_DEL(&sh->list);
                        pool_free(pool_head_sig_handlers, sh);
                        break;
                }
        }
}
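
/* Illustrative example with hypothetical pointers: at exit time, a handler may
 * be dropped either via the pointer returned at registration time or, when
 * that pointer was not kept, by its target:
 *
 *        signal_unregister_handler(sh);                <- if "sh" was saved
 *        signal_unregister_target(SIGTERM, my_task);   <- otherwise, by target
 */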