Line   Hit count   Source code
1 : /*
2 : * linux/kernel/signal.c
3 : *
4 : * Copyright (C) 1991, 1992 Linus Torvalds
5 : *
6 : * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 : *
8 : * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 : * Changes to use preallocated sigqueue structures
10 : * to allow signals to be sent reliably.
11 : */
12 :
13 : #include <linux/slab.h>
14 : #include <linux/export.h>
15 : #include <linux/init.h>
16 : #include <linux/sched.h>
17 : #include <linux/fs.h>
18 : #include <linux/tty.h>
19 : #include <linux/binfmts.h>
20 : #include <linux/coredump.h>
21 : #include <linux/security.h>
22 : #include <linux/syscalls.h>
23 : #include <linux/ptrace.h>
24 : #include <linux/signal.h>
25 : #include <linux/signalfd.h>
26 : #include <linux/ratelimit.h>
27 : #include <linux/tracehook.h>
28 : #include <linux/capability.h>
29 : #include <linux/freezer.h>
30 : #include <linux/pid_namespace.h>
31 : #include <linux/nsproxy.h>
32 : #include <linux/user_namespace.h>
33 : #include <linux/uprobes.h>
34 : #include <linux/compat.h>
35 : #include <linux/cn_proc.h>
36 : #include <linux/compiler.h>
37 :
38 : #define CREATE_TRACE_POINTS
39 : #include <trace/events/signal.h>
40 :
41 : #include <asm/param.h>
42 : #include <asm/uaccess.h>
43 : #include <asm/unistd.h>
44 : #include <asm/siginfo.h>
45 : #include <asm/cacheflush.h>
46 : #include "audit.h" /* audit_signal_info() */
47 :
48 : /*
49 : * SLAB caches for signal bits.
50 : */
51 :
52 : static struct kmem_cache *sigqueue_cachep;
53 :
54 : int print_fatal_signals __read_mostly;
55 :
56 : static void __user *sig_handler(struct task_struct *t, int sig)
57 : {
58 18662 : return t->sighand->action[sig - 1].sa.sa_handler;
59 : }
60 :
61 : static int sig_handler_ignored(void __user *handler, int sig)
62 : {
63 : /* Is it explicitly or implicitly ignored? */
64 34756 : return handler == SIG_IGN ||
65 22305 : (handler == SIG_DFL && sig_kernel_ignore(sig));
66 : }
67 :
68 4826 : static int sig_task_ignored(struct task_struct *t, int sig, bool force)
69 : {
70 : void __user *handler;
71 :
72 : handler = sig_handler(t, sig);
73 :
74 4826 : if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
75 26 : handler == SIG_DFL && !force)
76 : return 1;
77 :
78 4826 : return sig_handler_ignored(handler, sig);
79 : }
80 :
81 5427 : static int sig_ignored(struct task_struct *t, int sig, bool force)
82 : {
83 : /*
84 : * Blocked signals are never ignored, since the
85 : * signal handler may change by the time it is
86 : * unblocked.
87 : */
88 10253 : if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
89 : return 0;
90 :
91 4826 : if (!sig_task_ignored(t, sig, force))
92 : return 0;
93 :
94 : /*
95 : * Tracers may want to know about even ignored signals.
96 : */
97 94 : return !t->ptrace;
98 : }
99 :
100 : /*
101 : * Re-calculate pending state from the set of locally pending
102 : * signals, globally pending signals, and blocked signals.
103 : */
104 : static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
105 : {
106 : unsigned long ready;
107 : long i;
108 :
109 : switch (_NSIG_WORDS) {
110 : default:
111 : for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
112 : ready |= signal->sig[i] &~ blocked->sig[i];
113 : break;
114 :
115 : case 4: ready = signal->sig[3] &~ blocked->sig[3];
116 : ready |= signal->sig[2] &~ blocked->sig[2];
117 : ready |= signal->sig[1] &~ blocked->sig[1];
118 : ready |= signal->sig[0] &~ blocked->sig[0];
119 : break;
120 :
121 228583 : case 2: ready = signal->sig[1] &~ blocked->sig[1];
122 228583 : ready |= signal->sig[0] &~ blocked->sig[0];
123 : break;
124 :
125 : case 1: ready = signal->sig[0] &~ blocked->sig[0];
126 : }
127 : return ready != 0;
128 : }
129 :
130 : #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
131 :
132 114293 : static int recalc_sigpending_tsk(struct task_struct *t)
133 : {
134 228586 : if ((t->jobctl & JOBCTL_PENDING_MASK) ||
135 114290 : PENDING(&t->pending, &t->blocked) ||
136 114290 : PENDING(&t->signal->shared_pending, &t->blocked)) {
137 : set_tsk_thread_flag(t, TIF_SIGPENDING);
138 439 : return 1;
139 : }
140 : /*
141 : * We must never clear the flag in another thread, or in current
142 : * when it's possible the current syscall is returning -ERESTART*.
143 : * So we don't clear it here, and only callers who know they should do.
144 : */
145 : return 0;
146 : }
147 :
148 : /*
149 : * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
150 : * This is superfluous when called on current, the wakeup is a harmless no-op.
151 : */
152 0 : void recalc_sigpending_and_wake(struct task_struct *t)
153 : {
154 0 : if (recalc_sigpending_tsk(t))
155 : signal_wake_up(t, 0);
156 0 : }
157 :
158 114293 : void recalc_sigpending(void)
159 : {
160 342001 : if (!recalc_sigpending_tsk(current) && !freezing(current))
161 : clear_thread_flag(TIF_SIGPENDING);
162 :
163 114293 : }
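
The recalculation above boils down to a word-wise "pending & ~blocked" OR-reduction. A minimal userspace sketch of the _NSIG_WORDS == 2 case (the sketch_* names are illustrative, not kernel API):

#include <stdio.h>

#define WORDS 2	/* mirrors the _NSIG_WORDS == 2 case */

static int sketch_has_pending(const unsigned long *sig,
                              const unsigned long *blocked)
{
	unsigned long ready = 0;
	int i;

	for (i = 0; i < WORDS; i++)
		ready |= sig[i] & ~blocked[i];	/* keep only deliverable bits */
	return ready != 0;
}

int main(void)
{
	unsigned long pending[WORDS] = { 1UL << 9, 0 };	/* signal 10 pending */
	unsigned long blocked[WORDS] = { 1UL << 9, 0 };	/* ...and blocked */

	printf("%d\n", sketch_has_pending(pending, blocked));	/* prints 0 */
	blocked[0] = 0;
	printf("%d\n", sketch_has_pending(pending, blocked));	/* prints 1 */
	return 0;
}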
164 :
165 : /* Given the mask, find the first available signal that should be serviced. */
166 :
167 : #define SYNCHRONOUS_MASK \
168 : (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
169 : sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
170 :
171 11084 : int next_signal(struct sigpending *pending, sigset_t *mask)
172 : {
173 : unsigned long i, *s, *m, x;
174 : int sig = 0;
175 :
176 : s = pending->signal.sig;
177 : m = mask->sig;
178 :
179 : /*
180 : * Handle the first word specially: it contains the
181 : * synchronous signals that need to be dequeued first.
182 : */
183 11084 : x = *s &~ *m;
184 11084 : if (x) {
185 5368 : if (x & SYNCHRONOUS_MASK)
186 : x &= SYNCHRONOUS_MASK;
187 5368 : sig = ffz(~x) + 1;
188 5368 : return sig;
189 : }
190 :
191 : switch (_NSIG_WORDS) {
192 : default:
193 : for (i = 1; i < _NSIG_WORDS; ++i) {
194 : x = *++s &~ *++m;
195 : if (!x)
196 : continue;
197 : sig = ffz(~x) + i*_NSIG_BPW + 1;
198 : break;
199 : }
200 : break;
201 :
202 : case 2:
203 5716 : x = s[1] &~ m[1];
204 5716 : if (!x)
205 : break;
206 0 : sig = ffz(~x) + _NSIG_BPW + 1;
207 0 : break;
208 :
209 : case 1:
210 : /* Nothing to do */
211 : break;
212 : }
213 :
214 5716 : return sig;
215 : }
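
The first-word special case above gives fault signals priority over everything else in word 0. A userspace sketch of that trick, where the kernel's ffz(~x) is the index of the lowest set bit (__builtin_ctzl(x) here); SKETCH_SYNC_MASK mirrors SYNCHRONOUS_MASK and is not kernel API:

#include <signal.h>
#include <stdio.h>

/* sigmask(sig) in the kernel is 1UL << (sig - 1). */
#define SKETCH_SYNC_MASK ((1UL << (SIGSEGV - 1)) | (1UL << (SIGBUS - 1)) | \
                          (1UL << (SIGILL - 1))  | (1UL << (SIGTRAP - 1)) | \
                          (1UL << (SIGFPE - 1))  | (1UL << (SIGSYS - 1)))

static int sketch_next_signal(unsigned long pending, unsigned long blocked)
{
	unsigned long x = pending & ~blocked;

	if (!x)
		return 0;
	if (x & SKETCH_SYNC_MASK)		/* fault signals come first */
		x &= SKETCH_SYNC_MASK;
	return __builtin_ctzl(x) + 1;		/* lowest set bit -> signal number */
}

int main(void)
{
	unsigned long pending = (1UL << (SIGHUP - 1)) | (1UL << (SIGSEGV - 1));

	/* SIGSEGV wins over the numerically lower SIGHUP. */
	printf("next = %d, SIGSEGV = %d\n",
	       sketch_next_signal(pending, 0), SIGSEGV);
	return 0;
}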
216 :
217 : static inline void print_dropped_signal(int sig)
218 : {
219 : static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
220 :
221 0 : if (!print_fatal_signals)
222 : return;
223 :
224 0 : if (!__ratelimit(&ratelimit_state))
225 : return;
226 :
227 0 : printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
228 0 : current->comm, current->pid, sig);
229 : }
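
The limit this warning reports on is observable from userspace: for realtime signals sent with sigqueue(3), hitting RLIMIT_SIGPENDING is reported as EAGAIN rather than a silent drop. A small demonstration (illustrative; assumes a freshly started process with no other signals pending):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl = { 1, 1 };		/* allow one queued signal */
	union sigval v = { .sival_int = 0 };
	sigset_t set;

	setrlimit(RLIMIT_SIGPENDING, &rl);

	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* keep them pending */

	sigqueue(getpid(), SIGRTMIN, v);	/* fits under the limit */
	if (sigqueue(getpid(), SIGRTMIN, v) < 0 && errno == EAGAIN)
		printf("second sigqueue(): EAGAIN (limit reached)\n");
	return 0;
}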
230 :
231 : /**
232 : * task_set_jobctl_pending - set jobctl pending bits
233 : * @task: target task
234 : * @mask: pending bits to set
235 : *
236 : * Clear @mask from @task->jobctl. @mask must be subset of
237 : * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
238 : * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
239 : * cleared. If @task is already being killed or exiting, this function
240 : * becomes noop.
241 : *
242 : * CONTEXT:
243 : * Must be called with @task->sighand->siglock held.
244 : *
245 : * RETURNS:
246 : * %true if @mask is set, %false if it became a no-op because @task was dying.
247 : */
248 0 : bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
249 : {
250 : BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
251 : JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
252 : BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
253 :
254 0 : if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
255 : return false;
256 :
257 0 : if (mask & JOBCTL_STOP_SIGMASK)
258 0 : task->jobctl &= ~JOBCTL_STOP_SIGMASK;
259 :
260 0 : task->jobctl |= mask;
261 0 : return true;
262 : }
263 :
264 : /**
265 : * task_clear_jobctl_trapping - clear jobctl trapping bit
266 : * @task: target task
267 : *
268 : * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
269 : * Clear it and wake up the ptracer. Note that we don't need any further
270 : * locking. @task->siglock guarantees that @task->parent points to the
271 : * ptracer.
272 : *
273 : * CONTEXT:
274 : * Must be called with @task->sighand->siglock held.
275 : */
276 14 : void task_clear_jobctl_trapping(struct task_struct *task)
277 : {
278 14 : if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
279 0 : task->jobctl &= ~JOBCTL_TRAPPING;
280 0 : smp_mb(); /* advised by wake_up_bit() */
281 0 : wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
282 : }
283 14 : }
284 :
285 : /**
286 : * task_clear_jobctl_pending - clear jobctl pending bits
287 : * @task: target task
288 : * @mask: pending bits to clear
289 : *
290 : * Clear @mask from @task->jobctl. @mask must be subset of
291 : * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
292 : * STOP bits are cleared together.
293 : *
294 : * If clearing of @mask leaves no stop or trap pending, this function calls
295 : * task_clear_jobctl_trapping().
296 : *
297 : * CONTEXT:
298 : * Must be called with @task->sighand->siglock held.
299 : */
300 14 : void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
301 : {
302 : BUG_ON(mask & ~JOBCTL_PENDING_MASK);
303 :
304 14 : if (mask & JOBCTL_STOP_PENDING)
305 14 : mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
306 :
307 14 : task->jobctl &= ~mask;
308 :
309 14 : if (!(task->jobctl & JOBCTL_PENDING_MASK))
310 14 : task_clear_jobctl_trapping(task);
311 14 : }
312 :
313 : /**
314 : * task_participate_group_stop - participate in a group stop
315 : * @task: task participating in a group stop
316 : *
317 : * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
318 : * Group stop states are cleared and the group stop count is consumed if
319 : * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
320 : * stop, the appropriate %SIGNAL_* flags are set.
321 : *
322 : * CONTEXT:
323 : * Must be called with @task->sighand->siglock held.
324 : *
325 : * RETURNS:
326 : * %true if group stop completion should be notified to the parent, %false
327 : * otherwise.
328 : */
329 0 : static bool task_participate_group_stop(struct task_struct *task)
330 : {
331 0 : struct signal_struct *sig = task->signal;
332 0 : bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
333 :
334 : WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
335 :
336 0 : task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
337 :
338 0 : if (!consume)
339 : return false;
340 :
341 0 : if (!WARN_ON_ONCE(sig->group_stop_count == 0))
342 0 : sig->group_stop_count--;
343 :
344 : /*
345 : * Tell the caller to notify completion iff we are entering into a
346 : * fresh group stop. Read comment in do_signal_stop() for details.
347 : */
348 0 : if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
349 0 : sig->flags = SIGNAL_STOP_STOPPED;
350 0 : return true;
351 : }
352 : return false;
353 : }
354 :
355 : /*
356 : * allocate a new signal queue record
357 : * - this may be called without locks if and only if t == current, otherwise an
358 : * appropriate lock must be held to stop the target task from exiting
359 : */
360 : static struct sigqueue *
361 2805 : __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
362 : {
363 : struct sigqueue *q = NULL;
364 : struct user_struct *user;
365 :
366 : /*
367 : * Protect access to @t credentials. This can go away when all
368 : * callers hold rcu read lock.
369 : */
370 : rcu_read_lock();
371 2805 : user = get_uid(__task_cred(t)->user);
372 2805 : atomic_inc(&user->sigpending);
373 : rcu_read_unlock();
374 :
375 2807 : if (override_rlimit ||
376 2 : atomic_read(&user->sigpending) <=
377 : task_rlimit(t, RLIMIT_SIGPENDING)) {
378 2805 : q = kmem_cache_alloc(sigqueue_cachep, flags);
379 : } else {
380 : print_dropped_signal(sig);
381 : }
382 :
383 2805 : if (unlikely(q == NULL)) {
384 : atomic_dec(&user->sigpending);
385 0 : free_uid(user);
386 : } else {
387 2805 : INIT_LIST_HEAD(&q->list);
388 2805 : q->flags = 0;
389 2805 : q->user = user;
390 : }
391 :
392 2805 : return q;
393 : }
394 :
395 5263 : static void __sigqueue_free(struct sigqueue *q)
396 : {
397 5263 : if (q->flags & SIGQUEUE_PREALLOC)
398 5263 : return;
399 2804 : atomic_dec(&q->user->sigpending);
400 2804 : free_uid(q->user);
401 2804 : kmem_cache_free(sigqueue_cachep, q);
402 : }
403 :
404 5830 : void flush_sigqueue(struct sigpending *queue)
405 : {
406 : struct sigqueue *q;
407 :
408 : sigemptyset(&queue->signal);
409 11712 : while (!list_empty(&queue->list)) {
410 : q = list_entry(queue->list.next, struct sigqueue , list);
411 26 : list_del_init(&q->list);
412 26 : __sigqueue_free(q);
413 : }
414 5830 : }
415 :
416 : /*
417 : * Flush all pending signals for a task.
418 : */
419 1 : void __flush_signals(struct task_struct *t)
420 : {
421 : clear_tsk_thread_flag(t, TIF_SIGPENDING);
422 1 : flush_sigqueue(&t->pending);
423 1 : flush_sigqueue(&t->signal->shared_pending);
424 1 : }
425 :
426 1 : void flush_signals(struct task_struct *t)
427 : {
428 : unsigned long flags;
429 :
430 1 : spin_lock_irqsave(&t->sighand->siglock, flags);
431 1 : __flush_signals(t);
432 : spin_unlock_irqrestore(&t->sighand->siglock, flags);
433 1 : }
434 :
435 4298 : static void __flush_itimer_signals(struct sigpending *pending)
436 : {
437 : sigset_t signal, retain;
438 : struct sigqueue *q, *n;
439 :
440 4298 : signal = pending->signal;
441 : sigemptyset(&retain);
442 :
443 4298 : list_for_each_entry_safe(q, n, &pending->list, list) {
444 0 : int sig = q->info.si_signo;
445 :
446 0 : if (likely(q->info.si_code != SI_TIMER)) {
447 : sigaddset(&retain, sig);
448 : } else {
449 : sigdelset(&signal, sig);
450 0 : list_del_init(&q->list);
451 0 : __sigqueue_free(q);
452 : }
453 : }
454 :
455 : sigorsets(&pending->signal, &signal, &retain);
456 4298 : }
457 :
458 2149 : void flush_itimer_signals(void)
459 : {
460 2149 : struct task_struct *tsk = current;
461 : unsigned long flags;
462 :
463 2149 : spin_lock_irqsave(&tsk->sighand->siglock, flags);
464 2149 : __flush_itimer_signals(&tsk->pending);
465 2149 : __flush_itimer_signals(&tsk->signal->shared_pending);
466 : spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
467 2149 : }
468 :
469 1 : void ignore_signals(struct task_struct *t)
470 : {
471 : int i;
472 :
473 65 : for (i = 0; i < _NSIG; ++i)
474 64 : t->sighand->action[i].sa.sa_handler = SIG_IGN;
475 :
476 1 : flush_signals(t);
477 1 : }
478 :
479 : /*
480 : * Flush all handlers for a task.
481 : */
482 :
483 : void
484 2171 : flush_signal_handlers(struct task_struct *t, int force_default)
485 : {
486 : int i;
487 2171 : struct k_sigaction *ka = &t->sighand->action[0];
488 141115 : for (i = _NSIG ; i != 0 ; i--) {
489 138944 : if (force_default || ka->sa.sa_handler != SIG_IGN)
490 137065 : ka->sa.sa_handler = SIG_DFL;
491 138944 : ka->sa.sa_flags = 0;
492 : #ifdef __ARCH_HAS_SA_RESTORER
493 138944 : ka->sa.sa_restorer = NULL;
494 : #endif
495 : sigemptyset(&ka->sa.sa_mask);
496 138944 : ka++;
497 : }
498 2171 : }
499 :
500 0 : int unhandled_signal(struct task_struct *tsk, int sig)
501 : {
502 0 : void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
503 0 : if (is_global_init(tsk))
504 : return 1;
505 0 : if (handler != SIG_IGN && handler != SIG_DFL)
506 : return 0;
507 : /* if ptraced, let the tracer determine */
508 0 : return !tsk->ptrace;
509 : }
510 :
511 : /*
512 : * Notify the system that a driver wants to block all signals for this
513 : * process, and wants to be notified if any signals at all were to be
514 : * sent/acted upon. If the notifier routine returns non-zero, then the
515 : * signal will be acted upon after all. If the notifier routine returns 0,
516 : * then the signal will be blocked. Only one block per process is
517 : * allowed. priv is a pointer to private data that the notifier routine
518 : * can use to determine if the signal should be blocked or not.
519 : */
520 : void
521 0 : block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
522 : {
523 : unsigned long flags;
524 :
525 0 : spin_lock_irqsave(&current->sighand->siglock, flags);
526 0 : current->notifier_mask = mask;
527 0 : current->notifier_data = priv;
528 0 : current->notifier = notifier;
529 : spin_unlock_irqrestore(&current->sighand->siglock, flags);
530 0 : }
531 :
532 : /* Notify the system that blocking has ended. */
533 :
534 : void
535 0 : unblock_all_signals(void)
536 : {
537 : unsigned long flags;
538 :
539 0 : spin_lock_irqsave(&current->sighand->siglock, flags);
540 0 : current->notifier = NULL;
541 0 : current->notifier_data = NULL;
542 0 : recalc_sigpending();
543 : spin_unlock_irqrestore(&current->sighand->siglock, flags);
544 0 : }
545 :
546 5246 : static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
547 : {
548 : struct sigqueue *q, *first = NULL;
549 :
550 : /*
551 : * Collect the siginfo appropriate to this signal. Check if
552 : * there is another siginfo for the same signal.
553 : */
554 10485 : list_for_each_entry(q, &list->list, list) {
555 5239 : if (q->info.si_signo == sig) {
556 5236 : if (first)
557 : goto still_pending;
558 : first = q;
559 : }
560 : }
561 :
562 : sigdelset(&list->signal, sig);
563 :
564 5246 : if (first) {
565 : still_pending:
566 5236 : list_del_init(&first->list);
567 5236 : copy_siginfo(info, &first->info);
568 5236 : __sigqueue_free(first);
569 : } else {
570 : /*
571 : * Ok, it wasn't in the queue. This must be
572 : * a fast-pathed signal or we must have been
573 : * out of queue space. So zero out the info.
574 : */
575 10 : info->si_signo = sig;
576 10 : info->si_errno = 0;
577 10 : info->si_code = SI_USER;
578 10 : info->si_pid = 0;
579 10 : info->si_uid = 0;
580 : }
581 5246 : }
582 :
583 10464 : static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
584 : siginfo_t *info)
585 : {
586 10464 : int sig = next_signal(pending, mask);
587 :
588 10464 : if (sig) {
589 5246 : if (current->notifier) {
590 0 : if (sigismember(current->notifier_mask, sig)) {
591 0 : if (!(current->notifier)(current->notifier_data)) {
592 : clear_thread_flag(TIF_SIGPENDING);
593 0 : return 0;
594 : }
595 : }
596 : }
597 :
598 5246 : collect_signal(sig, pending, info);
599 : }
600 :
601 10464 : return sig;
602 : }
603 :
604 : /*
605 : * Dequeue a signal and return the element to the caller, which is
606 : * expected to free it.
607 : *
608 : * All callers have to hold the siglock.
609 : */
610 5250 : int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
611 : {
612 : int signr;
613 :
614 : /* We only dequeue private signals from ourselves, we don't let
615 : * signalfd steal them
616 : */
617 5250 : signr = __dequeue_signal(&tsk->pending, mask, info);
618 5250 : if (!signr) {
619 5214 : signr = __dequeue_signal(&tsk->signal->shared_pending,
620 : mask, info);
621 : /*
622 : * itimer signal ?
623 : *
624 : * itimers are process shared and we restart periodic
625 : * itimers in the signal delivery path to prevent DoS
626 : * attacks in the high resolution timer case. This is
627 : * compliant with the old way of self-restarting
628 : * itimers, as the SIGALRM is a legacy signal and only
629 : * queued once. Changing the restart behaviour to
630 : * restart the timer in the signal dequeue path is
631 : * reducing the timer noise on heavy loaded !highres
632 : * systems too.
633 : */
634 5214 : if (unlikely(signr == SIGALRM)) {
635 2459 : struct hrtimer *tmr = &tsk->signal->real_timer;
636 :
637 4918 : if (!hrtimer_is_queued(tmr) &&
638 2459 : tsk->signal->it_real_incr.tv64 != 0) {
639 0 : hrtimer_forward(tmr, tmr->base->get_time(),
640 0 : tsk->signal->it_real_incr);
641 : hrtimer_restart(tmr);
642 : }
643 : }
644 : }
645 :
646 5250 : recalc_sigpending();
647 5250 : if (!signr)
648 : return 0;
649 :
650 5246 : if (unlikely(sig_kernel_stop(signr))) {
651 : /*
652 : * Set a marker that we have dequeued a stop signal. Our
653 : * caller might release the siglock and then the pending
654 : * stop signal it is about to process is no longer in the
655 : * pending bitmasks, but must still be cleared by a SIGCONT
656 : * (and overruled by a SIGKILL). So those cases clear this
657 : * shared flag after we've set it. Note that this flag may
658 : * remain set after the signal we return is ignored or
659 : * handled. That doesn't matter because its only purpose
660 : * is to alert stop-signal processing code when another
661 : * processor has come along and cleared the flag.
662 : */
663 0 : current->jobctl |= JOBCTL_STOP_DEQUEUED;
664 : }
665 5246 : if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
666 : /*
667 : * Release the siglock to ensure proper locking order
668 : * of timer locks outside of siglocks. Note, we leave
669 : * irqs disabled here, since the posix-timers code is
670 : * about to disable them again anyway.
671 : */
672 : spin_unlock(&tsk->sighand->siglock);
673 2458 : do_schedule_next_timer(info);
674 : spin_lock(&tsk->sighand->siglock);
675 : }
676 5246 : return signr;
677 : }
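
dequeue_signal() is what ultimately services sigwaitinfo(2)/sigtimedwait(2). A userspace demonstration of the dequeue path: block a signal so it stays pending, raise it, then pull it off the queue synchronously:

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* keep it pending */

	raise(SIGUSR1);				/* now sits on t->pending */

	if (sigwaitinfo(&set, &info) == SIGUSR1)
		printf("dequeued SIGUSR1, si_code=%d\n", info.si_code);
	return 0;
}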
678 :
679 : /*
680 : * Tell a process that it has a new active signal..
681 : *
682 : * NOTE! we rely on the previous spin_lock to
683 : * lock interrupts for us! We can only be called with
684 : * "siglock" held, and the local interrupt must
685 : * have been disabled when that got acquired!
686 : *
687 : * No need to set need_resched since signal event passing
688 : * goes through ->blocked
689 : */
690 4697 : void signal_wake_up_state(struct task_struct *t, unsigned int state)
691 : {
692 : set_tsk_thread_flag(t, TIF_SIGPENDING);
693 : /*
694 : * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
695 : * case. We don't check t->state here because there is a race with it
696 : * executing on another processor and just now entering stopped state.
697 : * By using wake_up_state, we ensure the process will wake up and
698 : * handle its death signal.
699 : */
700 4697 : if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
701 : kick_process(t);
702 4697 : }
703 :
704 : /*
705 : * Remove signals in mask from the pending set and queue.
706 : * Returns 1 if any signals were found.
707 : *
708 : * All callers must be holding the siglock.
709 : */
710 6488 : static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
711 : {
712 : struct sigqueue *q, *n;
713 : sigset_t m;
714 :
715 : sigandsets(&m, mask, &s->signal);
716 6488 : if (sigisemptyset(&m))
717 : return 0;
718 :
719 : sigandnsets(&s->signal, &s->signal, mask);
720 0 : list_for_each_entry_safe(q, n, &s->list, list) {
721 0 : if (sigismember(mask, q->info.si_signo)) {
722 0 : list_del_init(&q->list);
723 0 : __sigqueue_free(q);
724 : }
725 : }
726 : return 1;
727 : }
728 :
729 : static inline int is_si_special(const struct siginfo *info)
730 : {
731 : return info <= SEND_SIG_FORCED;
732 : }
733 :
734 : static inline bool si_fromuser(const struct siginfo *info)
735 : {
736 6028 : return info == SEND_SIG_NOINFO ||
737 2991 : (!is_si_special(info) && SI_FROMUSER(info));
738 : }
739 :
740 : /*
741 : * called with RCU read lock from check_kill_permission()
742 : */
743 35 : static int kill_ok_by_cred(struct task_struct *t)
744 : {
745 35 : const struct cred *cred = current_cred();
746 35 : const struct cred *tcred = __task_cred(t);
747 :
748 35 : if (uid_eq(cred->euid, tcred->suid) ||
749 1 : uid_eq(cred->euid, tcred->uid) ||
750 1 : uid_eq(cred->uid, tcred->suid) ||
751 : uid_eq(cred->uid, tcred->uid))
752 : return 1;
753 :
754 1 : if (ns_capable(tcred->user_ns, CAP_KILL))
755 : return 1;
756 :
757 0 : return 0;
758 : }
759 :
760 : /*
761 : * Bad permissions for sending the signal
762 : * - the caller must hold the RCU read lock
763 : */
764 46 : static int check_kill_permission(int sig, struct siginfo *info,
765 : struct task_struct *t)
766 : {
767 : struct pid *sid;
768 : int error;
769 :
770 46 : if (!valid_signal(sig))
771 : return -EINVAL;
772 :
773 46 : if (!si_fromuser(info))
774 : return 0;
775 :
776 : error = audit_signal_info(sig, t); /* Let audit system see the signal */
777 : if (error)
778 : return error;
779 :
780 109 : if (!same_thread_group(current, t) &&
781 35 : !kill_ok_by_cred(t)) {
782 0 : switch (sig) {
783 : case SIGCONT:
784 : sid = task_session(t);
785 : /*
786 : * We don't return the error if sid == NULL. The
787 : * task was unhashed, the caller must notice this.
788 : */
789 0 : if (!sid || sid == task_session(current))
790 : break;
791 : default:
792 : return -EPERM;
793 : }
794 : }
795 :
796 : return security_task_kill(t, info, sig, 0);
797 : }
798 :
799 : /**
800 : * ptrace_trap_notify - schedule trap to notify ptracer
801 : * @t: tracee wanting to notify tracer
802 : *
803 : * This function schedules a sticky ptrace trap which is cleared on the next
804 : * TRAP_STOP to notify ptracer of an event. @t must have been seized by
805 : * ptracer.
806 : *
807 : * If @t is running, STOP trap will be taken. If trapped for STOP and
808 : * ptracer is listening for events, tracee is woken up so that it can
809 : * re-trap for the new event. If trapped otherwise, STOP trap will be
810 : * eventually taken without returning to userland after the existing traps
811 : * are finished by PTRACE_CONT.
812 : *
813 : * CONTEXT:
814 : * Must be called with @task->sighand->siglock held.
815 : */
816 0 : static void ptrace_trap_notify(struct task_struct *t)
817 : {
818 : WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
819 : assert_spin_locked(&t->sighand->siglock);
820 :
821 0 : task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
822 0 : ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
823 0 : }
824 :
825 : /*
826 : * Handle magic process-wide effects of stop/continue signals. Unlike
827 : * the signal actions, these happen immediately at signal-generation
828 : * time regardless of blocking, ignoring, or handling. This does the
829 : * actual continuing for SIGCONT, but not the actual stopping for stop
830 : * signals. The process stop is done as a signal action for SIG_DFL.
831 : *
832 : * Returns true if the signal should be actually delivered, otherwise
833 : * it should be dropped.
834 : */
835 5427 : static bool prepare_signal(int sig, struct task_struct *p, bool force)
836 : {
837 5427 : struct signal_struct *signal = p->signal;
838 : struct task_struct *t;
839 : sigset_t flush;
840 :
841 5427 : if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
842 1 : if (signal->flags & SIGNAL_GROUP_COREDUMP)
843 0 : return sig == SIGKILL;
844 : /*
845 : * The process is in the middle of dying, nothing to do.
846 : */
847 5426 : } else if (sig_kernel_stop(sig)) {
848 : /*
849 : * This is a stop signal. Remove SIGCONT from all queues.
850 : */
851 : siginitset(&flush, sigmask(SIGCONT));
852 0 : flush_sigqueue_mask(&flush, &signal->shared_pending);
853 0 : for_each_thread(p, t)
854 0 : flush_sigqueue_mask(&flush, &t->pending);
855 5426 : } else if (sig == SIGCONT) {
856 : unsigned int why;
857 : /*
858 : * Remove all stop signals from all queues, wake all threads.
859 : */
860 : siginitset(&flush, SIG_KERNEL_STOP_MASK);
861 4 : flush_sigqueue_mask(&flush, &signal->shared_pending);
862 8 : for_each_thread(p, t) {
863 4 : flush_sigqueue_mask(&flush, &t->pending);
864 4 : task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
865 4 : if (likely(!(t->ptrace & PT_SEIZED)))
866 4 : wake_up_state(t, __TASK_STOPPED);
867 : else
868 0 : ptrace_trap_notify(t);
869 : }
870 :
871 : /*
872 : * Notify the parent with CLD_CONTINUED if we were stopped.
873 : *
874 : * If we were in the middle of a group stop, we pretend it
875 : * was already finished, and then continued. Since SIGCHLD
876 : * doesn't queue we report only CLD_STOPPED, as if the next
877 : * CLD_CONTINUED was dropped.
878 : */
879 : why = 0;
880 4 : if (signal->flags & SIGNAL_STOP_STOPPED)
881 : why |= SIGNAL_CLD_CONTINUED;
882 4 : else if (signal->group_stop_count)
883 : why |= SIGNAL_CLD_STOPPED;
884 :
885 4 : if (why) {
886 : /*
887 : * The first thread which returns from do_signal_stop()
888 : * will take ->siglock, notice SIGNAL_CLD_MASK, and
889 : * notify its parent. See get_signal_to_deliver().
890 : */
891 0 : signal->flags = why | SIGNAL_STOP_CONTINUED;
892 0 : signal->group_stop_count = 0;
893 0 : signal->group_exit_code = 0;
894 : }
895 : }
896 :
897 5427 : return !sig_ignored(p, sig, force);
898 : }
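
The SIGCONT branch above is observable from userspace: generating SIGCONT discards pending stop signals even while they are blocked. A small demonstration:

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pend;

	sigemptyset(&set);
	sigaddset(&set, SIGTSTP);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGTSTP);		/* pending, but blocked */
	raise(SIGCONT);		/* flush_sigqueue_mask() removes SIGTSTP */

	sigpending(&pend);
	printf("SIGTSTP still pending? %d\n",
	       sigismember(&pend, SIGTSTP));	/* prints 0 */
	return 0;
}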
899 :
900 : /*
901 : * Test if P wants to take SIG. After we've checked all threads with this,
902 : * it's equivalent to finding no threads not blocking SIG. Any threads not
903 : * blocking SIG were ruled out because they are not running and already
904 : * have pending signals. Such threads will dequeue from the shared queue
905 : * as soon as they're available, so putting the signal on the shared queue
906 : * will be equivalent to sending it to one such thread.
907 : */
908 : static inline int wants_signal(int sig, struct task_struct *p)
909 : {
910 5262 : if (sigismember(&p->blocked, sig))
911 : return 0;
912 4701 : if (p->flags & PF_EXITING)
913 : return 0;
914 4697 : if (sig == SIGKILL)
915 : return 1;
916 4697 : if (task_is_stopped_or_traced(p))
917 : return 0;
918 9364 : return task_curr(p) || !signal_pending(p);
919 : }
920 :
921 5262 : static void complete_signal(int sig, struct task_struct *p, int group)
922 : {
923 5262 : struct signal_struct *signal = p->signal;
924 : struct task_struct *t;
925 :
926 : /*
927 : * Now find a thread we can wake up to take the signal off the queue.
928 : *
929 : * If the main thread wants the signal, it gets first crack.
930 : * Probably the least surprising to the average bear.
931 : */
932 5262 : if (wants_signal(sig, p))
933 : t = p;
934 1130 : else if (!group || thread_group_empty(p))
935 : /*
936 : * There is just one thread and it does not need to be woken.
937 : * It will dequeue unblocked signals before it runs again.
938 : */
939 : return;
940 : else {
941 : /*
942 : * Otherwise try to find a suitable thread.
943 : */
944 0 : t = signal->curr_target;
945 0 : while (!wants_signal(sig, t)) {
946 0 : t = next_thread(t);
947 0 : if (t == signal->curr_target)
948 : /*
949 : * No thread needs to be woken.
950 : * Any eligible threads will see
951 : * the signal in the queue soon.
952 : */
953 : return;
954 : }
955 0 : signal->curr_target = t;
956 : }
957 :
958 : /*
959 : * Found a killable thread. If the signal will be fatal,
960 : * then start taking the whole group down immediately.
961 : */
962 4707 : if (sig_fatal(p, sig) &&
963 20 : !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
964 10 : !sigismember(&t->real_blocked, sig) &&
965 10 : (sig == SIGKILL || !t->ptrace)) {
966 : /*
967 : * This signal will be fatal to the whole group.
968 : */
969 10 : if (!sig_kernel_coredump(sig)) {
970 : /*
971 : * Start a group exit and wake everybody up.
972 : * This way we don't have other threads
973 : * running and doing things after a slower
974 : * thread has the fatal signal pending.
975 : */
976 10 : signal->flags = SIGNAL_GROUP_EXIT;
977 10 : signal->group_exit_code = sig;
978 10 : signal->group_stop_count = 0;
979 : t = p;
980 : do {
981 10 : task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
982 : sigaddset(&t->pending.signal, SIGKILL);
983 : signal_wake_up(t, 1);
984 10 : } while_each_thread(p, t);
985 : return;
986 : }
987 : }
988 :
989 : /*
990 : * The signal is already in the shared-pending queue.
991 : * Tell the chosen thread to wake up and dequeue it.
992 : */
993 : signal_wake_up(t, sig == SIGKILL);
994 : return;
995 : }
996 :
997 : static inline int legacy_queue(struct sigpending *signals, int sig)
998 : {
999 5748 : return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1000 : }
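
legacy_queue() is why a blocked non-realtime signal coalesces to a single instance while realtime signals accumulate. A userspace demonstration:

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t hits;

static void handler(int sig) { (void)sig; hits++; }

int main(void)
{
	sigset_t set;
	int i;

	signal(SIGUSR1, handler);
	signal(SIGRTMIN, handler);

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	for (i = 0; i < 3; i++) {
		raise(SIGUSR1);		/* 2nd and 3rd coalesce: legacy_queue() */
		raise(SIGRTMIN);	/* each instance is queued */
	}

	sigprocmask(SIG_UNBLOCK, &set, NULL);	/* deliver everything */
	printf("handler ran %d times (expect 4 = 1 + 3)\n", (int)hits);
	return 0;
}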
1001 :
1002 : #ifdef CONFIG_USER_NS
1003 : static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1004 : {
1005 : if (current_user_ns() == task_cred_xxx(t, user_ns))
1006 : return;
1007 :
1008 : if (SI_FROMKERNEL(info))
1009 : return;
1010 :
1011 : rcu_read_lock();
1012 : info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1013 : make_kuid(current_user_ns(), info->si_uid));
1014 : rcu_read_unlock();
1015 : }
1016 : #else
1017 : static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1018 : {
1019 : return;
1020 : }
1021 : #endif
1022 :
1023 2968 : static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1024 : int group, int from_ancestor_ns)
1025 : {
1026 : struct sigpending *pending;
1027 : struct sigqueue *q;
1028 : int override_rlimit;
1029 : int ret = 0, result;
1030 :
1031 : assert_spin_locked(&t->sighand->siglock);
1032 :
1033 : result = TRACE_SIGNAL_IGNORED;
1034 2968 : if (!prepare_signal(sig, t,
1035 2968 : from_ancestor_ns || (info == SEND_SIG_FORCED)))
1036 : goto ret;
1037 :
1038 2874 : pending = group ? &t->signal->shared_pending : &t->pending;
1039 : /*
1040 : * Short-circuit ignored signals and support queuing
1041 : * exactly one non-rt signal, so that we can get more
1042 : * detailed information about the cause of the signal.
1043 : */
1044 : result = TRACE_SIGNAL_ALREADY_PENDING;
1045 2874 : if (legacy_queue(pending, sig))
1046 : goto ret;
1047 :
1048 : result = TRACE_SIGNAL_DELIVERED;
1049 : /*
1050 : * fast-pathed signals for kernel-internal things like SIGSTOP
1051 : * or SIGKILL.
1052 : */
1053 2803 : if (info == SEND_SIG_FORCED)
1054 : goto out_set;
1055 :
1056 : /*
1057 : * Real-time signals must be queued if sent by sigqueue, or
1058 : * some other real-time mechanism. It is implementation
1059 : * defined whether kill() does so. We attempt to do so, on
1060 : * the principle of least surprise, but since kill is not
1061 : * allowed to fail with EAGAIN when low on memory, we just
1062 : * make sure at least one signal gets delivered and don't
1063 : * pass on the info struct.
1064 : */
1065 2803 : if (sig < SIGRTMIN)
1066 2803 : override_rlimit = (is_si_special(info) || info->si_code >= 0);
1067 : else
1068 : override_rlimit = 0;
1069 :
1070 2803 : q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
1071 : override_rlimit);
1072 2803 : if (q) {
1073 2803 : list_add_tail(&q->list, &pending->list);
1074 2803 : switch ((unsigned long) info) {
1075 : case (unsigned long) SEND_SIG_NOINFO:
1076 3 : q->info.si_signo = sig;
1077 3 : q->info.si_errno = 0;
1078 3 : q->info.si_code = SI_USER;
1079 3 : q->info.si_pid = task_tgid_nr_ns(current,
1080 : task_active_pid_ns(t));
1081 6 : q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1082 3 : break;
1083 : case (unsigned long) SEND_SIG_PRIV:
1084 9 : q->info.si_signo = sig;
1085 9 : q->info.si_errno = 0;
1086 9 : q->info.si_code = SI_KERNEL;
1087 9 : q->info.si_pid = 0;
1088 9 : q->info.si_uid = 0;
1089 9 : break;
1090 : default:
1091 2791 : copy_siginfo(&q->info, info);
1092 2791 : if (from_ancestor_ns)
1093 0 : q->info.si_pid = 0;
1094 : break;
1095 : }
1096 :
1097 : userns_fixup_signal_uid(&q->info, t);
1098 :
1099 0 : } else if (!is_si_special(info)) {
1100 0 : if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1101 : /*
1102 : * Queue overflow, abort. We may abort if the
1103 : * signal was rt and sent by user using something
1104 : * other than kill().
1105 : */
1106 : result = TRACE_SIGNAL_OVERFLOW_FAIL;
1107 : ret = -EAGAIN;
1108 : goto ret;
1109 : } else {
1110 : /*
1111 : * This is a silent loss of information. We still
1112 : * send the signal, but the *info bits are lost.
1113 : */
1114 : result = TRACE_SIGNAL_LOSE_INFO;
1115 : }
1116 : }
1117 :
1118 : out_set:
1119 : signalfd_notify(t, sig);
1120 : sigaddset(&pending->signal, sig);
1121 2803 : complete_signal(sig, t, group);
1122 : ret:
1123 : trace_signal_generate(sig, info, t, group, result);
1124 2968 : return ret;
1125 : }
1126 :
1127 2968 : static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1128 : int group)
1129 : {
1130 : int from_ancestor_ns = 0;
1131 :
1132 : #ifdef CONFIG_PID_NS
1133 3007 : from_ancestor_ns = si_fromuser(info) &&
1134 39 : !task_pid_nr_ns(current, task_active_pid_ns(t));
1135 : #endif
1136 :
1137 2968 : return __send_signal(sig, info, t, group, from_ancestor_ns);
1138 : }
1139 :
1140 0 : static void print_fatal_signal(int signr)
1141 : {
1142 0 : struct pt_regs *regs = signal_pt_regs();
1143 0 : printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);
1144 :
1145 : #if defined(__i386__) && !defined(__arch_um__)
1146 : printk(KERN_INFO "code at %08lx: ", regs->ip);
1147 : {
1148 : int i;
1149 : for (i = 0; i < 16; i++) {
1150 : unsigned char insn;
1151 :
1152 : if (get_user(insn, (unsigned char *)(regs->ip + i)))
1153 : break;
1154 : printk(KERN_CONT "%02x ", insn);
1155 : }
1156 : }
1157 : printk(KERN_CONT "\n");
1158 : #endif
1159 0 : preempt_disable();
1160 0 : show_regs(regs);
1161 0 : preempt_enable();
1162 0 : }
1163 :
1164 0 : static int __init setup_print_fatal_signals(char *str)
1165 : {
1166 0 : get_option (&str, &print_fatal_signals);
1167 :
1168 0 : return 1;
1169 : }
1170 :
1171 : __setup("print-fatal-signals=", setup_print_fatal_signals);
1172 :
1173 : int
1174 2 : __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1175 : {
1176 2894 : return send_signal(sig, info, p, 1);
1177 : }
1178 :
1179 : static int
1180 : specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1181 : {
1182 26 : return send_signal(sig, info, t, 0);
1183 : }
1184 :
1185 48 : int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1186 : bool group)
1187 : {
1188 : unsigned long flags;
1189 : int ret = -ESRCH;
1190 :
1191 48 : if (lock_task_sighand(p, &flags)) {
1192 48 : ret = send_signal(sig, info, p, group);
1193 48 : unlock_task_sighand(p, &flags);
1194 : }
1195 :
1196 48 : return ret;
1197 : }
1198 :
1199 : /*
1200 : * Force a signal that the process can't ignore: if necessary
1201 : * we unblock the signal and change any SIG_IGN to SIG_DFL.
1202 : *
1203 : * Note: If we unblock the signal, we always reset it to SIG_DFL,
1204 : * since we do not want to have a signal handler that was blocked
1205 : * be invoked when user space had explicitly blocked it.
1206 : *
1207 : * We don't want to have recursive SIGSEGV's etc, for example,
1208 : * that is why we also clear SIGNAL_UNKILLABLE.
1209 : */
1210 : int
1211 26 : force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1212 : {
1213 : unsigned long int flags;
1214 : int ret, blocked, ignored;
1215 : struct k_sigaction *action;
1216 :
1217 26 : spin_lock_irqsave(&t->sighand->siglock, flags);
1218 26 : action = &t->sighand->action[sig-1];
1219 26 : ignored = action->sa.sa_handler == SIG_IGN;
1220 : blocked = sigismember(&t->blocked, sig);
1221 26 : if (blocked || ignored) {
1222 0 : action->sa.sa_handler = SIG_DFL;
1223 0 : if (blocked) {
1224 : sigdelset(&t->blocked, sig);
1225 0 : recalc_sigpending_and_wake(t);
1226 : }
1227 : }
1228 26 : if (action->sa.sa_handler == SIG_DFL)
1229 0 : t->signal->flags &= ~SIGNAL_UNKILLABLE;
1230 : ret = specific_send_sig_info(sig, info, t);
1231 : spin_unlock_irqrestore(&t->sighand->siglock, flags);
1232 :
1233 26 : return ret;
1234 : }
1235 :
1236 : /*
1237 : * Nuke all other threads in the group.
1238 : */
1239 0 : int zap_other_threads(struct task_struct *p)
1240 : {
1241 : struct task_struct *t = p;
1242 : int count = 0;
1243 :
1244 0 : p->signal->group_stop_count = 0;
1245 :
1246 0 : while_each_thread(p, t) {
1247 0 : task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1248 0 : count++;
1249 :
1250 : /* Don't bother with already dead threads */
1251 0 : if (t->exit_state)
1252 0 : continue;
1253 : sigaddset(&t->pending.signal, SIGKILL);
1254 : signal_wake_up(t, 1);
1255 : }
1256 :
1257 0 : return count;
1258 : }
1259 :
1260 6470 : struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1261 : unsigned long *flags)
1262 : {
1263 : struct sighand_struct *sighand;
1264 :
1265 : for (;;) {
1266 : /*
1267 : * Disable interrupts early to avoid deadlocks.
1268 : * See rcu_read_unlock() comment header for details.
1269 : */
1270 6470 : local_irq_save(*flags);
1271 : rcu_read_lock();
1272 6470 : sighand = rcu_dereference(tsk->sighand);
1273 6470 : if (unlikely(sighand == NULL)) {
1274 : rcu_read_unlock();
1275 0 : local_irq_restore(*flags);
1276 : break;
1277 : }
1278 : /*
1279 : * This sighand can be already freed and even reused, but
1280 : * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
1281 : * initializes ->siglock: this slab can't go away, it has
1282 : * the same object type, ->siglock can't be reinitialized.
1283 : *
1284 : * We need to ensure that tsk->sighand is still the same
1285 : * after we take the lock, we can race with de_thread() or
1286 : * __exit_signal(). In the latter case the next iteration
1287 : * must see ->sighand == NULL.
1288 : */
1289 : spin_lock(&sighand->siglock);
1290 6470 : if (likely(sighand == tsk->sighand)) {
1291 : rcu_read_unlock();
1292 : break;
1293 : }
1294 : spin_unlock(&sighand->siglock);
1295 : rcu_read_unlock();
1296 0 : local_irq_restore(*flags);
1297 : }
1298 :
1299 6470 : return sighand;
1300 : }
1301 :
1302 : /*
1303 : * send signal info to all the members of a group
1304 : */
1305 46 : int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1306 : {
1307 : int ret;
1308 :
1309 : rcu_read_lock();
1310 46 : ret = check_kill_permission(sig, info, p);
1311 : rcu_read_unlock();
1312 :
1313 46 : if (!ret && sig)
1314 45 : ret = do_send_sig_info(sig, info, p, true);
1315 :
1316 46 : return ret;
1317 : }
1318 :
1319 : /*
1320 : * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1321 : * control characters do (^C, ^Z etc)
1322 : * - the caller must hold at least a readlock on tasklist_lock
1323 : */
1324 10 : int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1325 : {
1326 : struct task_struct *p = NULL;
1327 : int retval, success;
1328 :
1329 : success = 0;
1330 : retval = -ESRCH;
1331 23 : do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1332 13 : int err = group_send_sig_info(sig, info, p);
1333 13 : success |= !err;
1334 : retval = err;
1335 : } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1336 10 : return success ? 0 : retval;
1337 : }
1338 :
1339 33 : int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1340 : {
1341 : int error = -ESRCH;
1342 : struct task_struct *p;
1343 :
1344 : for (;;) {
1345 : rcu_read_lock();
1346 33 : p = pid_task(pid, PIDTYPE_PID);
1347 33 : if (p)
1348 33 : error = group_send_sig_info(sig, info, p);
1349 : rcu_read_unlock();
1350 33 : if (likely(!p || error != -ESRCH))
1351 33 : return error;
1352 :
1353 : /*
1354 : * The task was unhashed in between, try again. If it
1355 : * is dead, pid_task() will return NULL; if we race with
1356 : * de_thread() it will find the new leader.
1357 : */
1358 : }
1359 : }
1360 :
1361 0 : int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1362 : {
1363 : int error;
1364 : rcu_read_lock();
1365 0 : error = kill_pid_info(sig, info, find_vpid(pid));
1366 : rcu_read_unlock();
1367 0 : return error;
1368 : }
1369 :
1370 : static int kill_as_cred_perm(const struct cred *cred,
1371 : struct task_struct *target)
1372 : {
1373 0 : const struct cred *pcred = __task_cred(target);
1374 0 : if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1375 0 : !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
1376 : return 0;
1377 : return 1;
1378 : }
1379 :
1380 : /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1381 0 : int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1382 : const struct cred *cred, u32 secid)
1383 : {
1384 : int ret = -EINVAL;
1385 : struct task_struct *p;
1386 : unsigned long flags;
1387 :
1388 0 : if (!valid_signal(sig))
1389 : return ret;
1390 :
1391 : rcu_read_lock();
1392 0 : p = pid_task(pid, PIDTYPE_PID);
1393 0 : if (!p) {
1394 : ret = -ESRCH;
1395 : goto out_unlock;
1396 : }
1397 0 : if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1398 : ret = -EPERM;
1399 : goto out_unlock;
1400 : }
1401 : ret = security_task_kill(p, info, sig, secid);
1402 : if (ret)
1403 : goto out_unlock;
1404 :
1405 0 : if (sig) {
1406 0 : if (lock_task_sighand(p, &flags)) {
1407 0 : ret = __send_signal(sig, info, p, 1, 0);
1408 0 : unlock_task_sighand(p, &flags);
1409 : } else
1410 : ret = -ESRCH;
1411 : }
1412 : out_unlock:
1413 : rcu_read_unlock();
1414 0 : return ret;
1415 : }
1416 : EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1417 :
1418 : /*
1419 : * kill_something_info() interprets pid in interesting ways just like kill(2).
1420 : *
1421 : * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1422 : * is probably wrong. Should make it like BSD or SYSV.
1423 : */
1424 :
1425 35 : static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1426 : {
1427 : int ret;
1428 :
1429 35 : if (pid > 0) {
1430 : rcu_read_lock();
1431 33 : ret = kill_pid_info(sig, info, find_vpid(pid));
1432 : rcu_read_unlock();
1433 33 : return ret;
1434 : }
1435 :
1436 2 : read_lock(&tasklist_lock);
1437 2 : if (pid != -1) {
1438 4 : ret = __kill_pgrp_info(sig, info,
1439 2 : pid ? find_vpid(-pid) : task_pgrp(current));
1440 : } else {
1441 : int retval = 0, count = 0;
1442 : struct task_struct * p;
1443 :
1444 0 : for_each_process(p) {
1445 0 : if (task_pid_vnr(p) > 1 &&
1446 0 : !same_thread_group(p, current)) {
1447 0 : int err = group_send_sig_info(sig, info, p);
1448 0 : ++count;
1449 0 : if (err != -EPERM)
1450 : retval = err;
1451 : }
1452 : }
1453 0 : ret = count ? retval : -ESRCH;
1454 : }
1455 4 : read_unlock(&tasklist_lock);
1456 :
1457 2 : return ret;
1458 : }
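
The pid interpretation implemented here is the kill(2) contract. A userspace sketch using signal 0, which performs only the lookup and permission checks:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	printf("pid > 0:  %d\n", kill(getpid(), 0));	/* one process */
	printf("pid == 0: %d\n", kill(0, 0));		/* our process group */
	printf("pid < -1: %d\n", kill(-getpgrp(), 0));	/* that process group */
	/* kill(-1, sig) would target everything we may signal; see above. */
	return 0;
}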
1459 :
1460 : /*
1461 : * These are for backward compatibility with the rest of the kernel source.
1462 : */
1463 :
1464 0 : int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1465 : {
1466 : /*
1467 : * Make sure legacy kernel users don't send in bad values
1468 : * (normal paths check this in check_kill_permission).
1469 : */
1470 3 : if (!valid_signal(sig))
1471 : return -EINVAL;
1472 :
1473 3 : return do_send_sig_info(sig, info, p, false);
1474 : }
1475 :
1476 : #define __si_special(priv) \
1477 : ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1478 :
1479 : int
1480 3 : send_sig(int sig, struct task_struct *p, int priv)
1481 : {
1482 6 : return send_sig_info(sig, __si_special(priv), p);
1483 : }
1484 :
1485 : void
1486 0 : force_sig(int sig, struct task_struct *p)
1487 : {
1488 0 : force_sig_info(sig, SEND_SIG_PRIV, p);
1489 0 : }
1490 :
1491 : /*
1492 : * When things go south during signal handling, we
1493 : * will force a SIGSEGV. And if the signal that caused
1494 : * the problem was already a SIGSEGV, we'll want to
1495 : * make sure we don't even try to deliver the signal.
1496 : */
1497 : int
1498 0 : force_sigsegv(int sig, struct task_struct *p)
1499 : {
1500 0 : if (sig == SIGSEGV) {
1501 : unsigned long flags;
1502 0 : spin_lock_irqsave(&p->sighand->siglock, flags);
1503 0 : p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1504 : spin_unlock_irqrestore(&p->sighand->siglock, flags);
1505 : }
1506 : force_sig(SIGSEGV, p);
1507 0 : return 0;
1508 : }
1509 :
1510 8 : int kill_pgrp(struct pid *pid, int sig, int priv)
1511 : {
1512 : int ret;
1513 :
1514 8 : read_lock(&tasklist_lock);
1515 8 : ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1516 16 : read_unlock(&tasklist_lock);
1517 :
1518 8 : return ret;
1519 : }
1520 : EXPORT_SYMBOL(kill_pgrp);
1521 :
1522 0 : int kill_pid(struct pid *pid, int sig, int priv)
1523 : {
1524 0 : return kill_pid_info(sig, __si_special(priv), pid);
1525 : }
1526 : EXPORT_SYMBOL(kill_pid);
1527 :
1528 : /*
1529 : * These functions support sending signals using preallocated sigqueue
1530 : * structures. This is needed "because realtime applications cannot
1531 : * afford to lose notifications of asynchronous events, like timer
1532 : * expirations or I/O completions". In the case of POSIX Timers
1533 : * we allocate the sigqueue structure from the timer_create. If this
1534 : * allocation fails we are able to report the failure to the application
1535 : * with an EAGAIN error.
1536 : */
1537 2 : struct sigqueue *sigqueue_alloc(void)
1538 : {
1539 2 : struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1540 :
1541 2 : if (q)
1542 2 : q->flags |= SIGQUEUE_PREALLOC;
1543 :
1544 2 : return q;
1545 : }
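
The posix-timers code is the canonical user of this API. A simplified sketch of the pattern (kernel context, not compilable standalone; target_task and shared_group_queue are placeholders):

/* At timer_create(): preallocate, so a later expiry cannot fail. */
struct sigqueue *sigq = sigqueue_alloc();
if (!sigq)
	return -EAGAIN;			/* report the failure up front */

/* At timer expiry: send the preallocated entry; no allocation needed. */
send_sigqueue(sigq, target_task, shared_group_queue);

/* At timer_delete(): release it (a queued entry is freed at dequeue). */
sigqueue_free(sigq);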
1546 :
1547 1 : void sigqueue_free(struct sigqueue *q)
1548 : {
1549 : unsigned long flags;
1550 : spinlock_t *lock = &current->sighand->siglock;
1551 :
1552 : BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1553 : /*
1554 : * We must hold ->siglock while testing q->list
1555 : * to serialize with collect_signal() or with
1556 : * __exit_signal()->flush_sigqueue().
1557 : */
1558 1 : spin_lock_irqsave(lock, flags);
1559 1 : q->flags &= ~SIGQUEUE_PREALLOC;
1560 : /*
1561 : * If it is queued it will be freed when dequeued,
1562 : * like the "regular" sigqueue.
1563 : */
1564 2 : if (!list_empty(&q->list))
1565 : q = NULL;
1566 : spin_unlock_irqrestore(lock, flags);
1567 :
1568 1 : if (q)
1569 1 : __sigqueue_free(q);
1570 1 : }
1571 :
1572 2459 : int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1573 : {
1574 2459 : int sig = q->info.si_signo;
1575 : struct sigpending *pending;
1576 : unsigned long flags;
1577 : int ret, result;
1578 :
1579 : BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1580 :
1581 : ret = -1;
1582 2459 : if (!likely(lock_task_sighand(t, &flags)))
1583 : goto ret;
1584 :
1585 : ret = 1; /* the signal is ignored */
1586 : result = TRACE_SIGNAL_IGNORED;
1587 2459 : if (!prepare_signal(sig, t, false))
1588 : goto out;
1589 :
1590 : ret = 0;
1591 4918 : if (unlikely(!list_empty(&q->list))) {
1592 : /*
1593 : * If an SI_TIMER entry is already queued, just increment
1594 : * the overrun count.
1595 : */
1596 : BUG_ON(q->info.si_code != SI_TIMER);
1597 0 : q->info.si_overrun++;
1598 : result = TRACE_SIGNAL_ALREADY_PENDING;
1599 0 : goto out;
1600 : }
1601 2459 : q->info.si_overrun = 0;
1602 :
1603 : signalfd_notify(t, sig);
1604 2459 : pending = group ? &t->signal->shared_pending : &t->pending;
1605 2459 : list_add_tail(&q->list, &pending->list);
1606 : sigaddset(&pending->signal, sig);
1607 2459 : complete_signal(sig, t, group);
1608 : result = TRACE_SIGNAL_DELIVERED;
1609 : out:
1610 : trace_signal_generate(sig, &q->info, t, group, result);
1611 2459 : unlock_task_sighand(t, &flags);
1612 : ret:
1613 2459 : return ret;
1614 : }
1615 :
1616 : /*
1617 : * Let a parent know about the death of a child.
1618 : * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1619 : *
1620 : * Returns true if our parent ignored us and so we've switched to
1621 : * self-reaping.
1622 : */
1623 2925 : bool do_notify_parent(struct task_struct *tsk, int sig)
1624 : {
1625 : struct siginfo info;
1626 : unsigned long flags;
1627 : struct sighand_struct *psig;
1628 : bool autoreap = false;
1629 : cputime_t utime, stime;
1630 :
1631 : BUG_ON(sig == -1);
1632 :
1633 : /* do_notify_parent_cldstop should have been called instead. */
1634 2925 : BUG_ON(task_is_stopped_or_traced(tsk));
1635 :
1636 : BUG_ON(!tsk->ptrace &&
1637 : (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1638 :
1639 2925 : if (sig != SIGCHLD) {
1640 : /*
1641 : * This is only possible if parent == real_parent.
1642 : * Check if it has changed security domain.
1643 : */
1644 0 : if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1645 : sig = SIGCHLD;
1646 : }
1647 :
1648 2925 : info.si_signo = sig;
1649 2925 : info.si_errno = 0;
1650 : /*
1651 : * We are under tasklist_lock here so our parent is tied to
1652 : * us and cannot change.
1653 : *
1654 : * task_active_pid_ns will always return the same pid namespace
1655 : * until a task passes through release_task.
1656 : *
1657 : * write_lock() currently calls preempt_disable() which is the
1658 : * same as rcu_read_lock(), but according to Oleg it is not
1659 : * correct to rely on this.
1660 : */
1661 : rcu_read_lock();
1662 5850 : info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1663 5850 : info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1664 2925 : task_uid(tsk));
1665 : rcu_read_unlock();
1666 :
1667 : task_cputime(tsk, &utime, &stime);
1668 2925 : info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
1669 2925 : info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
1670 :
1671 2925 : info.si_status = tsk->exit_code & 0x7f;
1672 2925 : if (tsk->exit_code & 0x80)
1673 0 : info.si_code = CLD_DUMPED;
1674 2925 : else if (tsk->exit_code & 0x7f)
1675 11 : info.si_code = CLD_KILLED;
1676 : else {
1677 2914 : info.si_code = CLD_EXITED;
1678 2914 : info.si_status = tsk->exit_code >> 8;
1679 : }
1680 :
1681 2925 : psig = tsk->parent->sighand;
1682 2925 : spin_lock_irqsave(&psig->siglock, flags);
1683 5850 : if (!tsk->ptrace && sig == SIGCHLD &&
1684 5817 : (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1685 2892 : (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1686 : /*
1687 : * We are exiting and our parent doesn't care. POSIX.1
1688 : * defines special semantics for setting SIGCHLD to SIG_IGN
1689 : * or setting the SA_NOCLDWAIT flag: we should be reaped
1690 : * automatically and not left for our parent's wait4 call.
1691 : * Rather than having the parent do it as a magic kind of
1692 : * signal handler, we just set this to tell do_exit that we
1693 : * can be cleaned up without becoming a zombie. Note that
1694 : * we still call __wake_up_parent in this case, because a
1695 : * blocked sys_wait4 might now return -ECHILD.
1696 : *
1697 : * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1698 : * is implementation-defined: we do (if you don't want
1699 : * it, just use SIG_IGN instead).
1700 : */
1701 : autoreap = true;
1702 33 : if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1703 : sig = 0;
1704 : }
1705 2925 : if (valid_signal(sig) && sig)
1706 2892 : __group_send_sig_info(sig, &info, tsk->parent);
1707 2925 : __wake_up_parent(tsk, tsk->parent);
1708 : spin_unlock_irqrestore(&psig->siglock, flags);
1709 :
1710 2925 : return autoreap;
1711 : }
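
The autoreap path above implements the POSIX SIG_IGN-on-SIGCHLD semantics. A userspace demonstration: with SIGCHLD ignored, the child never becomes a zombie and wait(2) eventually fails with ECHILD:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* ask the kernel to autoreap */

	if (fork() == 0)
		_exit(0);		/* child exits; no zombie remains */

	if (wait(NULL) < 0 && errno == ECHILD)
		printf("child was autoreaped; wait() -> ECHILD\n");
	return 0;
}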
1712 :
1713 : /**
1714 : * do_notify_parent_cldstop - notify parent of stopped/continued state change
1715 : * @tsk: task reporting the state change
1716 : * @for_ptracer: the notification is for ptracer
1717 : * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1718 : *
1719 : * Notify @tsk's parent that the stopped/continued state has changed. If
1720 : * @for_ptracer is %false, @tsk's group leader notifies its real parent.
1721 : * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1722 : *
1723 : * CONTEXT:
1724 : * Must be called with tasklist_lock at least read locked.
1725 : */
1726 0 : static void do_notify_parent_cldstop(struct task_struct *tsk,
1727 : bool for_ptracer, int why)
1728 : {
1729 : struct siginfo info;
1730 : unsigned long flags;
1731 : struct task_struct *parent;
1732 : struct sighand_struct *sighand;
1733 : cputime_t utime, stime;
1734 :
1735 0 : if (for_ptracer) {
1736 0 : parent = tsk->parent;
1737 : } else {
1738 0 : tsk = tsk->group_leader;
1739 0 : parent = tsk->real_parent;
1740 : }
1741 :
1742 0 : info.si_signo = SIGCHLD;
1743 0 : info.si_errno = 0;
1744 : /*
1745 : * see comment in do_notify_parent() about the following 4 lines
1746 : */
1747 : rcu_read_lock();
1748 0 : info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1749 0 : info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1750 : rcu_read_unlock();
1751 :
1752 : task_cputime(tsk, &utime, &stime);
1753 0 : info.si_utime = cputime_to_clock_t(utime);
1754 0 : info.si_stime = cputime_to_clock_t(stime);
1755 :
1756 0 : info.si_code = why;
1757 0 : switch (why) {
1758 : case CLD_CONTINUED:
1759 0 : info.si_status = SIGCONT;
1760 0 : break;
1761 : case CLD_STOPPED:
1762 0 : info.si_status = tsk->signal->group_exit_code & 0x7f;
1763 0 : break;
1764 : case CLD_TRAPPED:
1765 0 : info.si_status = tsk->exit_code & 0x7f;
1766 0 : break;
1767 : default:
1768 : BUG();
1769 : }
1770 :
1771 0 : sighand = parent->sighand;
1772 0 : spin_lock_irqsave(&sighand->siglock, flags);
1773 0 : if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1774 0 : !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1775 : __group_send_sig_info(SIGCHLD, &info, parent);
1776 : /*
1777 : * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1778 : */
1779 0 : __wake_up_parent(tsk, parent);
1780 : spin_unlock_irqrestore(&sighand->siglock, flags);
1781 0 : }
1782 :
1783 : static inline int may_ptrace_stop(void)
1784 : {
1785 0 : if (!likely(current->ptrace))
1786 : return 0;
1787 : /*
1788 : * Are we in the middle of do_coredump?
1789 : * If so and our tracer is also part of the coredump stopping
1790 : * is a deadlock situation, and pointless because our tracer
1791 : * is dead so don't allow us to stop.
1792 : * If SIGKILL was already sent before the caller unlocked
1793 : * ->siglock we must see ->core_state != NULL. Otherwise it
1794 : * is safe to enter schedule().
1795 : *
1796 : * This is almost outdated, a task with the pending SIGKILL can't
1797 : * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1798 : * after SIGKILL was already dequeued.
1799 : */
1800 0 : if (unlikely(current->mm->core_state) &&
1801 0 : unlikely(current->mm == current->parent->mm))
1802 : return 0;
1803 :
1804 : return 1;
1805 : }
1806 :
1807 : /*
1808 : * Return non-zero if there is a SIGKILL that should be waking us up.
1809 : * Called with the siglock held.
1810 : */
1811 : static int sigkill_pending(struct task_struct *tsk)
1812 : {
1813 : return sigismember(&tsk->pending.signal, SIGKILL) ||
1814 : sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1815 : }
1816 :
1817 : /*
1818 : * This must be called with current->sighand->siglock held.
1819 : *
1820 : * This should be the path for all ptrace stops.
1821 : * We always set current->last_siginfo while stopped here.
1822 : * That makes it a way to test a stopped process for
1823 : * being ptrace-stopped vs being job-control-stopped.
1824 : *
1825 : * If we actually decide not to stop at all because the tracer
1826 : * is gone, we keep current->exit_code unless clear_code.
1827 : */
1828 0 : static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1829 : __releases(&current->sighand->siglock)
1830 : __acquires(&current->sighand->siglock)
1831 : {
1832 : bool gstop_done = false;
1833 :
1834 : if (arch_ptrace_stop_needed(exit_code, info)) {
1835 : /*
1836 : * The arch code has something special to do before a
1837 : * ptrace stop. This is allowed to block, e.g. for faults
1838 : * on user stack pages. We can't keep the siglock while
1839 : * calling arch_ptrace_stop, so we must release it now.
1840 : * To preserve proper semantics, we must do this before
1841 : * any signal bookkeeping like checking group_stop_count.
1842 : * Meanwhile, a SIGKILL could come in before we retake the
1843 : * siglock. That must prevent us from sleeping in TASK_TRACED.
1844 : * So after regaining the lock, we must check for SIGKILL.
1845 : */
1846 : spin_unlock_irq(&current->sighand->siglock);
1847 : arch_ptrace_stop(exit_code, info);
1848 : spin_lock_irq(&current->sighand->siglock);
1849 : if (sigkill_pending(current))
1850 0 : return;
1851 : }
1852 :
1853 : /*
1854 : * We're committing to trapping. TRACED should be visible before
1855 : * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1856 : * Also, transition to TRACED and updates to ->jobctl should be
1857 : * atomic with respect to siglock and should be done after the arch
1858 : * hook as siglock is released and regrabbed across it.
1859 : */
1860 0 : set_current_state(TASK_TRACED);
1861 :
1862 0 : current->last_siginfo = info;
1863 0 : current->exit_code = exit_code;
1864 :
1865 : /*
1866 : * If @why is CLD_STOPPED, we're trapping to participate in a group
1867 : * stop. Do the bookkeeping. Note that if SIGCONT was delivered
1868 : * across siglock relocks since INTERRUPT was scheduled, PENDING
1869 : * could be clear now. We act as if SIGCONT is received after
1870 : * TASK_TRACED is entered - ignore it.
1871 : */
1872 0 : if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1873 0 : gstop_done = task_participate_group_stop(current);
1874 :
1875 : /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1876 0 : task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1877 0 : if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1878 0 : task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1879 :
1880 : /* entering a trap, clear TRAPPING */
1881 0 : task_clear_jobctl_trapping(current);
1882 :
1883 : spin_unlock_irq(&current->sighand->siglock);
1884 0 : read_lock(&tasklist_lock);
1885 0 : if (may_ptrace_stop()) {
1886 : /*
1887 : * Notify parents of the stop.
1888 : *
1889 : * While ptraced, there are two parents - the ptracer and
1890 : * the real_parent of the group_leader. The ptracer should
1891 : * know about every stop while the real parent is only
1892 : * interested in the completion of group stop. The states
1893 : * for the two don't interact with each other. Notify
1894 : * separately unless they're gonna be duplicates.
1895 : */
1896 0 : do_notify_parent_cldstop(current, true, why);
1897 0 : if (gstop_done && ptrace_reparented(current))
1898 0 : do_notify_parent_cldstop(current, false, why);
1899 :
1900 : /*
1901 : * Don't want to allow preemption here, because
1902 : * sys_ptrace() needs this task to be inactive.
1903 : *
1904 : * XXX: implement read_unlock_no_resched().
1905 : */
1906 0 : preempt_disable();
1907 0 : read_unlock(&tasklist_lock);
1908 0 : preempt_enable_no_resched();
1909 : freezable_schedule();
1910 : } else {
1911 : /*
1912 : * By the time we got the lock, our tracer went away.
1913 : * Don't drop the lock yet, another tracer may come.
1914 : *
1915 : * If @gstop_done, the ptracer went away between group stop
1916 : * completion and here. During detach, it would have set
1917 : * JOBCTL_STOP_PENDING on us and we'll re-enter
1918 : * TASK_STOPPED in do_signal_stop() on return, so notifying
1919 : * the real parent of the group stop completion is enough.
1920 : */
1921 0 : if (gstop_done)
1922 0 : do_notify_parent_cldstop(current, false, why);
1923 :
1924 : /* tasklist protects us from ptrace_freeze_traced() */
1925 0 : __set_current_state(TASK_RUNNING);
1926 0 : if (clear_code)
1927 0 : current->exit_code = 0;
1928 0 : read_unlock(&tasklist_lock);
1929 : }
1930 :
1931 : /*
1932 : * We are back. Now reacquire the siglock before touching
1933 : * last_siginfo, so that we are sure to have synchronized with
1934 : * any signal-sending on another CPU that wants to examine it.
1935 : */
1936 : spin_lock_irq(&current->sighand->siglock);
1937 0 : current->last_siginfo = NULL;
1938 :
1939 : /* LISTENING can be set only during STOP traps, clear it */
1940 0 : current->jobctl &= ~JOBCTL_LISTENING;
1941 :
1942 : /*
1943 : * Queued signals ignored us while we were stopped for tracing.
1944 : * So check for any that we should take before resuming user mode.
1945 : * This sets TIF_SIGPENDING, but never clears it.
1946 : */
1947 0 : recalc_sigpending_tsk(current);
1948 : }
1949 :
1950 0 : static void ptrace_do_notify(int signr, int exit_code, int why)
1951 : {
1952 : siginfo_t info;
1953 :
1954 0 : memset(&info, 0, sizeof info);
1955 0 : info.si_signo = signr;
1956 0 : info.si_code = exit_code;
1957 0 : info.si_pid = task_pid_vnr(current);
1958 0 : info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1959 :
1960 : /* Let the debugger run. */
1961 0 : ptrace_stop(exit_code, why, 1, &info);
1962 0 : }
1963 :
1964 0 : void ptrace_notify(int exit_code)
1965 : {
1966 : BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1967 0 : if (unlikely(current->task_works))
1968 0 : task_work_run();
1969 :
1970 : spin_lock_irq(&current->sighand->siglock);
1971 0 : ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1972 : spin_unlock_irq(&current->sighand->siglock);
1973 0 : }
1974 :
1975 : /**
1976 : * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1977 : * @signr: signr causing group stop if initiating
1978 : *
1979 : * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1980 : * and participate in it. If already set, participate in the existing
1981 : * group stop. If participated in a group stop (and thus slept), %true is
1982 : * returned with siglock released.
1983 : *
1984 : * If ptraced, this function doesn't handle stop itself. Instead,
1985 : * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
1986 : * untouched. The caller must ensure that INTERRUPT trap handling takes
1987 : * place afterwards.
1988 : *
1989 : * CONTEXT:
1990 : * Must be called with @current->sighand->siglock held, which is released
1991 : * on %true return.
1992 : *
1993 : * RETURNS:
1994 : * %false if group stop is already cancelled or ptrace trap is scheduled.
1995 : * %true if participated in group stop.
1996 : */
1997 0 : static bool do_signal_stop(int signr)
1998 : __releases(&current->sighand->siglock)
1999 : {
2000 0 : struct signal_struct *sig = current->signal;
2001 :
2002 0 : if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2003 : unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2004 : struct task_struct *t;
2005 :
2006 : /* signr will be recorded in task->jobctl for retries */
2007 : WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2008 :
2009 0 : if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2010 0 : unlikely(signal_group_exit(sig)))
2011 : return false;
2012 : /*
2013 : * There is no group stop already in progress. We must
2014 : * initiate one now.
2015 : *
2016 : * While ptraced, a task may be resumed while group stop is
2017 : * still in effect and then receive a stop signal and
2018 : * initiate another group stop. This deviates from the
2019 : * usual behavior as two consecutive stop signals can't
2020 : * cause two group stops when !ptraced. That is why we
2021 : * also check !task_is_stopped(t) below.
2022 : *
2023 : * The condition can be distinguished by testing whether
2024 : * SIGNAL_STOP_STOPPED is already set. Don't generate
2025 : * group_exit_code in such case.
2026 : *
2027 : * This is not necessary for SIGNAL_STOP_CONTINUED because
2028 : * an intervening stop signal is required to cause two
2029 : * continued events regardless of ptrace.
2030 : */
2031 0 : if (!(sig->flags & SIGNAL_STOP_STOPPED))
2032 0 : sig->group_exit_code = signr;
2033 :
2034 0 : sig->group_stop_count = 0;
2035 :
2036 0 : if (task_set_jobctl_pending(current, signr | gstop))
2037 0 : sig->group_stop_count++;
2038 :
2039 0 : t = current;
2040 0 : while_each_thread(current, t) {
2041 : /*
2042 : * Setting state to TASK_STOPPED for a group
2043 : * stop is always done with the siglock held,
2044 : * so this check has no races.
2045 : */
2046 0 : if (!task_is_stopped(t) &&
2047 0 : task_set_jobctl_pending(t, signr | gstop)) {
2048 0 : sig->group_stop_count++;
2049 0 : if (likely(!(t->ptrace & PT_SEIZED)))
2050 : signal_wake_up(t, 0);
2051 : else
2052 0 : ptrace_trap_notify(t);
2053 : }
2054 : }
2055 : }
2056 :
2057 0 : if (likely(!current->ptrace)) {
2058 : int notify = 0;
2059 :
2060 : /*
2061 : * If there are no other threads in the group, or if there
2062 : * is a group stop in progress and we are the last to stop,
2063 : * report to the parent.
2064 : */
2065 0 : if (task_participate_group_stop(current))
2066 : notify = CLD_STOPPED;
2067 :
2068 0 : __set_current_state(TASK_STOPPED);
2069 : spin_unlock_irq(&current->sighand->siglock);
2070 :
2071 : /*
2072 : * Notify the parent of the group stop completion. Because
2073 : * we're not holding either the siglock or tasklist_lock
2074 : * here, the ptracer may attach in between; however, this is for
2075 : * group stop and should always be delivered to the real
2076 : * parent of the group leader. The new ptracer will get
2077 : * its notification when this task transitions into
2078 : * TASK_TRACED.
2079 : */
2080 0 : if (notify) {
2081 0 : read_lock(&tasklist_lock);
2082 0 : do_notify_parent_cldstop(current, false, notify);
2083 0 : read_unlock(&tasklist_lock);
2084 : }
2085 :
2086 : /* Now we don't run again until woken by SIGCONT or SIGKILL */
2087 : freezable_schedule();
2088 : return true;
2089 : } else {
2090 : /*
2091 : * While ptraced, group stop is handled by STOP trap.
2092 : * Schedule it and let the caller deal with it.
2093 : */
2094 0 : task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2095 0 : return false;
2096 : }
2097 : }
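/*
 * A hedged userspace sketch, not part of signal.c: the CLD_STOPPED
 * notification sent on group stop completion is what lets a parent's
 * waitpid(2) with WUNTRACED observe the stop.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    int status;
    pid_t pid = fork();

    if (pid == 0) {
        pause();                        /* child idles */
        _exit(0);
    }
    kill(pid, SIGSTOP);                 /* initiate the group stop */
    waitpid(pid, &status, WUNTRACED);   /* wakes on CLD_STOPPED */
    if (WIFSTOPPED(status))
        printf("child stopped by signal %d\n", WSTOPSIG(status));
    kill(pid, SIGKILL);
    waitpid(pid, &status, 0);
    return 0;
}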
2098 :
2099 : /**
2100 : * do_jobctl_trap - take care of ptrace jobctl traps
2101 : *
2102 : * When PT_SEIZED, it's used for both group stop and explicit
2103 : * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2104 : * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2105 : * the stop signal; otherwise, %SIGTRAP.
2106 : *
2107 : * When !PT_SEIZED, it's used only for group stop trap with stop signal
2108 : * number as exit_code and no siginfo.
2109 : *
2110 : * CONTEXT:
2111 : * Must be called with @current->sighand->siglock held, which may be
2112 : * released and re-acquired before returning with intervening sleep.
2113 : */
2114 0 : static void do_jobctl_trap(void)
2115 : {
2116 0 : struct signal_struct *signal = current->signal;
2117 0 : int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2118 :
2119 0 : if (current->ptrace & PT_SEIZED) {
2120 0 : if (!signal->group_stop_count &&
2121 0 : !(signal->flags & SIGNAL_STOP_STOPPED))
2122 : signr = SIGTRAP;
2123 : WARN_ON_ONCE(!signr);
2124 0 : ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2125 : CLD_STOPPED);
2126 : } else {
2127 : WARN_ON_ONCE(!signr);
2128 0 : ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2129 0 : current->exit_code = 0;
2130 : }
2131 0 : }
2132 :
2133 0 : static int ptrace_signal(int signr, siginfo_t *info)
2134 : {
2135 : ptrace_signal_deliver();
2136 : /*
2137 : * We do not check sig_kernel_stop(signr) but set this marker
2138 : * unconditionally because we do not know whether the debugger will
2139 : * change signr. This flag has no meaning unless we are going
2140 : * to stop after return from ptrace_stop(). In this case it will
2141 : * be checked in do_signal_stop(), we should only stop if it was
2142 : * not cleared by SIGCONT while we were sleeping. See also the
2143 : * comment in dequeue_signal().
2144 : */
2145 0 : current->jobctl |= JOBCTL_STOP_DEQUEUED;
2146 0 : ptrace_stop(signr, CLD_TRAPPED, 0, info);
2147 :
2148 : /* We're back. Did the debugger cancel the sig? */
2149 0 : signr = current->exit_code;
2150 0 : if (signr == 0)
2151 : return signr;
2152 :
2153 0 : current->exit_code = 0;
2154 :
2155 : /*
2156 : * Update the siginfo structure if the signal has
2157 : * changed. If the debugger wanted something
2158 : * specific in the siginfo structure then it should
2159 : * have updated *info via PTRACE_SETSIGINFO.
2160 : */
2161 0 : if (signr != info->si_signo) {
2162 0 : info->si_signo = signr;
2163 0 : info->si_errno = 0;
2164 0 : info->si_code = SI_USER;
2165 : rcu_read_lock();
2166 0 : info->si_pid = task_pid_vnr(current->parent);
2167 0 : info->si_uid = from_kuid_munged(current_user_ns(),
2168 0 : task_uid(current->parent));
2169 : rcu_read_unlock();
2170 : }
2171 :
2172 : /* If the (new) signal is now blocked, requeue it. */
2173 0 : if (sigismember(&current->blocked, signr)) {
2174 : specific_send_sig_info(signr, info, current);
2175 : signr = 0;
2176 : }
2177 :
2178 0 : return signr;
2179 : }
2180 :
2181 5124 : int get_signal(struct ksignal *ksig)
2182 : {
2183 5124 : struct sighand_struct *sighand = current->sighand;
2184 5124 : struct signal_struct *signal = current->signal;
2185 : int signr;
2186 :
2187 5124 : if (unlikely(current->task_works))
2188 0 : task_work_run();
2189 :
2190 : if (unlikely(uprobe_deny_signal()))
2191 : return 0;
2192 :
2193 : /*
2194 : * Do this once, we can't return to user-mode if freezing() == T.
2195 : * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2196 : * thus do not need another check after return.
2197 : */
2198 : try_to_freeze();
2199 :
2200 : relock:
2201 : spin_lock_irq(&sighand->siglock);
2202 : /*
2203 : * Every stopped thread goes here after wakeup. Check to see if
2204 : * we should notify the parent; prepare_signal(SIGCONT) encodes
2205 : * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2206 : */
2207 5124 : if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2208 : int why;
2209 :
2210 0 : if (signal->flags & SIGNAL_CLD_CONTINUED)
2211 : why = CLD_CONTINUED;
2212 : else
2213 : why = CLD_STOPPED;
2214 :
2215 0 : signal->flags &= ~SIGNAL_CLD_MASK;
2216 :
2217 : spin_unlock_irq(&sighand->siglock);
2218 :
2219 : /*
2220 : * Notify the parent that we're continuing. This event is
2221 : * always per-process and doesn't make a whole lot of sense
2222 : * for ptracers, who shouldn't consume the state via
2223 : * wait(2) either, but, for backward compatibility, notify
2224 : * the ptracer of the group leader too unless it's gonna be
2225 : * a duplicate.
2226 : */
2227 0 : read_lock(&tasklist_lock);
2228 0 : do_notify_parent_cldstop(current, false, why);
2229 :
2230 0 : if (ptrace_reparented(current->group_leader))
2231 0 : do_notify_parent_cldstop(current->group_leader,
2232 : true, why);
2233 0 : read_unlock(&tasklist_lock);
2234 :
2235 : goto relock;
2236 : }
2237 :
2238 : for (;;) {
2239 : struct k_sigaction *ka;
2240 :
2241 5128 : if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2242 0 : do_signal_stop(0))
2243 : goto relock;
2244 :
2245 5128 : if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2246 0 : do_jobctl_trap();
2247 : spin_unlock_irq(&sighand->siglock);
2248 : goto relock;
2249 : }
2250 :
2251 5128 : signr = dequeue_signal(current, &current->blocked, &ksig->info);
2252 :
2253 5128 : if (!signr)
2254 : break; /* will return 0 */
2255 :
2256 5124 : if (unlikely(current->ptrace) && signr != SIGKILL) {
2257 0 : signr = ptrace_signal(signr, &ksig->info);
2258 0 : if (!signr)
2259 0 : continue;
2260 : }
2261 :
2262 5124 : ka = &sighand->action[signr-1];
2263 :
2264 : /* Trace actually delivered signals. */
2265 : trace_signal_deliver(signr, &ksig->info, ka);
2266 :
2267 5124 : if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2268 0 : continue;
2269 5124 : if (ka->sa.sa_handler != SIG_DFL) {
2270 : /* Run the handler. */
2271 5110 : ksig->ka = *ka;
2272 :
2273 5110 : if (ka->sa.sa_flags & SA_ONESHOT)
2274 0 : ka->sa.sa_handler = SIG_DFL;
2275 :
2276 : break; /* will return non-zero "signr" value */
2277 : }
2278 :
2279 : /*
2280 : * Now we are doing the default action for this signal.
2281 : */
2282 14 : if (sig_kernel_ignore(signr)) /* Default is nothing. */
2283 4 : continue;
2284 :
2285 : /*
2286 : * Global init gets no signals it doesn't want.
2287 : * Container-init gets no signals it doesn't want from same
2288 : * container.
2289 : *
2290 : * Note that if global/container-init sees a sig_kernel_only()
2291 : * signal here, the signal must have been generated internally
2292 : * or must have come from an ancestor namespace. In either
2293 : * case, the signal cannot be dropped.
2294 : */
2295 10 : if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2296 0 : !sig_kernel_only(signr))
2297 0 : continue;
2298 :
2299 10 : if (sig_kernel_stop(signr)) {
2300 : /*
2301 : * The default action is to stop all threads in
2302 : * the thread group. The job control signals
2303 : * do nothing in an orphaned pgrp, but SIGSTOP
2304 : * always works. Note that siglock needs to be
2305 : * dropped during the call to is_orphaned_pgrp()
2306 : * because of lock ordering with tasklist_lock.
2307 : * This allows an intervening SIGCONT to be posted.
2308 : * We need to check for that and bail out if necessary.
2309 : */
2310 0 : if (signr != SIGSTOP) {
2311 : spin_unlock_irq(&sighand->siglock);
2312 :
2313 : /* signals can be posted during this window */
2314 :
2315 0 : if (is_current_pgrp_orphaned())
2316 : goto relock;
2317 :
2318 : spin_lock_irq(&sighand->siglock);
2319 : }
2320 :
2321 0 : if (likely(do_signal_stop(ksig->info.si_signo))) {
2322 : /* It released the siglock. */
2323 : goto relock;
2324 : }
2325 :
2326 : /*
2327 : * We didn't actually stop, due to a race
2328 : * with SIGCONT or something like that.
2329 : */
2330 0 : continue;
2331 : }
2332 :
2333 : spin_unlock_irq(&sighand->siglock);
2334 :
2335 : /*
2336 : * Anything else is fatal, maybe with a core dump.
2337 : */
2338 10 : current->flags |= PF_SIGNALED;
2339 :
2340 10 : if (sig_kernel_coredump(signr)) {
2341 0 : if (print_fatal_signals)
2342 0 : print_fatal_signal(ksig->info.si_signo);
2343 : proc_coredump_connector(current);
2344 : /*
2345 : * If it was able to dump core, this kills all
2346 : * other threads in the group and synchronizes with
2347 : * their demise. If we lost the race with another
2348 : * thread getting here, it set group_exit_code
2349 : * first and our do_group_exit call below will use
2350 : * that value and ignore the one we pass it.
2351 : */
2352 : do_coredump(&ksig->info);
2353 : }
2354 :
2355 : /*
2356 : * Death signals, no core dump.
2357 : */
2358 10 : do_group_exit(ksig->info.si_signo);
2359 : /* NOTREACHED */
2360 : }
2361 : spin_unlock_irq(&sighand->siglock);
2362 :
2363 5114 : ksig->sig = signr;
2364 5114 : return ksig->sig > 0;
2365 : }
2366 :
2367 : /**
2368 : * signal_delivered - update state after a signal was delivered
2369 : * @ksig: kernel signal struct
2370 : * @stepping: nonzero if debugger single-step or block-step in use
2371 : *
2372 : * This function should be called when a signal has successfully been
2373 : * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2374 : * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2375 : * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2376 : */
2377 5110 : static void signal_delivered(struct ksignal *ksig, int stepping)
2378 : {
2379 : sigset_t blocked;
2380 :
2381 : /* A signal was successfully delivered, and the
2382 : saved sigmask was stored on the signal frame,
2383 : and will be restored by sigreturn. So we can
2384 : simply clear the restore sigmask flag. */
2385 : clear_restore_sigmask();
2386 :
2387 5110 : sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2388 5110 : if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2389 5110 : sigaddset(&blocked, ksig->sig);
2390 : set_current_blocked(&blocked);
2391 : tracehook_signal_handler(stepping);
2392 5110 : }
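/*
 * A hedged userspace sketch, not part of signal.c: the sigaddset()
 * above is why, without SA_NODEFER, a handler runs with its own signal
 * blocked. The handler below verifies that with sigprocmask(2), which
 * is async-signal-safe.
 */
#include <assert.h>
#include <signal.h>

static void handler(int sig)
{
    sigset_t cur;

    sigprocmask(SIG_BLOCK, NULL, &cur); /* query the current mask */
    assert(sigismember(&cur, sig));     /* blocked during delivery */
}

int main(void)
{
    struct sigaction sa = { 0 };

    sa.sa_handler = handler;            /* note: no SA_NODEFER */
    sigemptyset(&sa.sa_mask);
    sigaction(SIGUSR1, &sa, NULL);
    raise(SIGUSR1);
    return 0;
}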
2393 :
2394 5110 : void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2395 : {
2396 5110 : if (failed)
2397 0 : force_sigsegv(ksig->sig, current);
2398 : else
2399 5110 : signal_delivered(ksig, stepping);
2400 5110 : }
2401 :
2402 : /*
2403 : * It could be that complete_signal() picked us to notify about the
2404 : * group-wide signal. Other threads should be notified now to take
2405 : * the shared signals in @which since we will not.
2406 : */
2407 0 : static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2408 : {
2409 : sigset_t retarget;
2410 : struct task_struct *t;
2411 :
2412 0 : sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2413 0 : if (sigisemptyset(&retarget))
2414 0 : return;
2415 :
2416 : t = tsk;
2417 0 : while_each_thread(tsk, t) {
2418 0 : if (t->flags & PF_EXITING)
2419 0 : continue;
2420 :
2421 0 : if (!has_pending_signals(&retarget, &t->blocked))
2422 0 : continue;
2423 : /* Remove the signals this thread can handle. */
2424 : sigandsets(&retarget, &retarget, &t->blocked);
2425 :
2426 0 : if (!signal_pending(t))
2427 : signal_wake_up(t, 0);
2428 :
2429 0 : if (sigisemptyset(&retarget))
2430 : break;
2431 : }
2432 : }
2433 :
2434 2914 : void exit_signals(struct task_struct *tsk)
2435 : {
2436 : int group_stop = 0;
2437 : sigset_t unblocked;
2438 :
2439 : /*
2440 : * @tsk is about to have PF_EXITING set - lock out users which
2441 : * expect stable threadgroup.
2442 : */
2443 : threadgroup_change_begin(tsk);
2444 :
2445 2914 : if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2446 2914 : tsk->flags |= PF_EXITING;
2447 : threadgroup_change_end(tsk);
2448 5828 : return;
2449 : }
2450 :
2451 : spin_lock_irq(&tsk->sighand->siglock);
2452 : /*
2453 : * From now this task is not visible for group-wide signals,
2454 : * see wants_signal(), do_signal_stop().
2455 : */
2456 0 : tsk->flags |= PF_EXITING;
2457 :
2458 : threadgroup_change_end(tsk);
2459 :
2460 0 : if (!signal_pending(tsk))
2461 : goto out;
2462 :
2463 0 : unblocked = tsk->blocked;
2464 : signotset(&unblocked);
2465 0 : retarget_shared_pending(tsk, &unblocked);
2466 :
2467 0 : if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2468 0 : task_participate_group_stop(tsk))
2469 : group_stop = CLD_STOPPED;
2470 : out:
2471 : spin_unlock_irq(&tsk->sighand->siglock);
2472 :
2473 : /*
2474 : * If group stop has completed, deliver the notification. This
2475 : * should always go to the real parent of the group leader.
2476 : */
2477 0 : if (unlikely(group_stop)) {
2478 0 : read_lock(&tasklist_lock);
2479 0 : do_notify_parent_cldstop(tsk, false, group_stop);
2480 0 : read_unlock(&tasklist_lock);
2481 : }
2482 : }
2483 :
2484 : EXPORT_SYMBOL(recalc_sigpending);
2485 : EXPORT_SYMBOL_GPL(dequeue_signal);
2486 : EXPORT_SYMBOL(flush_signals);
2487 : EXPORT_SYMBOL(force_sig);
2488 : EXPORT_SYMBOL(send_sig);
2489 : EXPORT_SYMBOL(send_sig_info);
2490 : EXPORT_SYMBOL(sigprocmask);
2491 : EXPORT_SYMBOL(block_all_signals);
2492 : EXPORT_SYMBOL(unblock_all_signals);
2493 :
2494 :
2495 : /*
2496 : * System call entry points.
2497 : */
2498 :
2499 : /**
2500 : * sys_restart_syscall - restart a system call
2501 : */
2502 0 : SYSCALL_DEFINE0(restart_syscall)
2503 : {
2504 0 : struct restart_block *restart = &current_thread_info()->restart_block;
2505 0 : return restart->fn(restart);
2506 : }
2507 :
2508 0 : long do_no_restart_syscall(struct restart_block *param)
2509 : {
2510 0 : return -EINTR;
2511 : }
2512 :
2513 106050 : static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2514 : {
2515 106051 : if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2516 : sigset_t newblocked;
2517 : /* A set of now blocked but previously unblocked signals. */
2518 0 : sigandnsets(&newblocked, newset, &current->blocked);
2519 0 : retarget_shared_pending(tsk, &newblocked);
2520 : }
2521 106050 : tsk->blocked = *newset;
2522 106050 : recalc_sigpending();
2523 106050 : }
2524 :
2525 : /**
2526 : * set_current_blocked - change current->blocked mask
2527 : * @newset: new mask
2528 : *
2529 : * It is wrong to change ->blocked directly; this helper should be used
2530 : * to ensure the process can't miss a shared signal we are going to block.
2531 : */
2532 5083 : void set_current_blocked(sigset_t *newset)
2533 : {
2534 : sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2535 10193 : __set_current_blocked(newset);
2536 5083 : }
2537 :
2538 106050 : void __set_current_blocked(const sigset_t *newset)
2539 : {
2540 106050 : struct task_struct *tsk = current;
2541 :
2542 : spin_lock_irq(&tsk->sighand->siglock);
2543 106050 : __set_task_blocked(tsk, newset);
2544 : spin_unlock_irq(&tsk->sighand->siglock);
2545 106050 : }
2546 :
2547 : /*
2548 : * This is also useful for kernel threads that want to temporarily
2549 : * (or permanently) block certain signals.
2550 : *
2551 : * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2552 : * interface happily blocks "unblockable" signals like SIGKILL
2553 : * and friends.
2554 : */
2555 95857 : int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2556 : {
2557 95857 : struct task_struct *tsk = current;
2558 : sigset_t newset;
2559 :
2560 : /* Lockless, only current can change ->blocked, never from irq */
2561 95857 : if (oldset)
2562 560 : *oldset = tsk->blocked;
2563 :
2564 95857 : switch (how) {
2565 : case SIG_BLOCK:
2566 : sigorsets(&newset, &tsk->blocked, set);
2567 : break;
2568 : case SIG_UNBLOCK:
2569 : sigandnsets(&newset, &tsk->blocked, set);
2570 : break;
2571 : case SIG_SETMASK:
2572 48978 : newset = *set;
2573 48978 : break;
2574 : default:
2575 : return -EINVAL;
2576 : }
2577 :
2578 95857 : __set_current_blocked(&newset);
2579 95857 : return 0;
2580 : }
2581 :
2582 : /**
2583 : * sys_rt_sigprocmask - change the list of currently blocked signals
2584 : * @how: whether to add, remove, or set signals
2585 : * @nset: new signal set (if non-null)
2586 : * @oset: previous value of signal mask if non-null
2587 : * @sigsetsize: size of sigset_t type
2588 : */
2589 247358 : SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2590 : sigset_t __user *, oset, size_t, sigsetsize)
2591 : {
2592 : sigset_t old_set, new_set;
2593 : int error;
2594 :
2595 : /* XXX: Don't preclude handling different sized sigset_t's. */
2596 123679 : if (sigsetsize != sizeof(sigset_t))
2597 : return -EINVAL;
2598 :
2599 123679 : old_set = current->blocked;
2600 :
2601 123679 : if (nset) {
2602 94773 : if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2603 : return -EFAULT;
2604 : sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2605 :
2606 94773 : error = sigprocmask(how, &new_set, NULL);
2607 94773 : if (error)
2608 : return error;
2609 : }
2610 :
2611 123679 : if (oset) {
2612 75343 : if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2613 : return -EFAULT;
2614 : }
2615 :
2616 : return 0;
2617 : }
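/*
 * A hedged userspace sketch, not part of signal.c: the SIG_BLOCK and
 * SIG_SETMASK cases above via the sigprocmask(2) wrapper, in the usual
 * block/restore pattern around a critical section. The helper name is
 * illustrative.
 */
#include <signal.h>

int with_sigint_blocked(void (*critical)(void))
{
    sigset_t block, old;

    sigemptyset(&block);
    sigaddset(&block, SIGINT);
    if (sigprocmask(SIG_BLOCK, &block, &old) < 0)
        return -1;
    critical();                         /* SIGINT stays pending here */
    return sigprocmask(SIG_SETMASK, &old, NULL); /* restore old mask */
}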
2618 :
2619 : #ifdef CONFIG_COMPAT
2620 : COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2621 : compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2622 : {
2623 : #ifdef __BIG_ENDIAN
2624 : sigset_t old_set = current->blocked;
2625 :
2626 : /* XXX: Don't preclude handling different sized sigset_t's. */
2627 : if (sigsetsize != sizeof(sigset_t))
2628 : return -EINVAL;
2629 :
2630 : if (nset) {
2631 : compat_sigset_t new32;
2632 : sigset_t new_set;
2633 : int error;
2634 : if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2635 : return -EFAULT;
2636 :
2637 : sigset_from_compat(&new_set, &new32);
2638 : sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2639 :
2640 : error = sigprocmask(how, &new_set, NULL);
2641 : if (error)
2642 : return error;
2643 : }
2644 : if (oset) {
2645 : compat_sigset_t old32;
2646 : sigset_to_compat(&old32, &old_set);
2647 : if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2648 : return -EFAULT;
2649 : }
2650 : return 0;
2651 : #else
2652 : return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2653 : (sigset_t __user *)oset, sigsetsize);
2654 : #endif
2655 : }
2656 : #endif
2657 :
2658 0 : static int do_sigpending(void *set, unsigned long sigsetsize)
2659 : {
2660 0 : if (sigsetsize > sizeof(sigset_t))
2661 : return -EINVAL;
2662 :
2663 : spin_lock_irq(&current->sighand->siglock);
2664 0 : sigorsets(set, &current->pending.signal,
2665 0 : &current->signal->shared_pending.signal);
2666 : spin_unlock_irq(&current->sighand->siglock);
2667 :
2668 : /* Outside the lock because only this thread touches it. */
2669 0 : sigandsets(set, &current->blocked, set);
2670 0 : return 0;
2671 : }
2672 :
2673 : /**
2674 : * sys_rt_sigpending - examine a pending signal that has been raised
2675 : * while blocked
2676 : * @uset: stores pending signals
2677 : * @sigsetsize: size of sigset_t type or smaller
2678 : */
2679 0 : SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2680 : {
2681 : sigset_t set;
2682 0 : int err = do_sigpending(&set, sigsetsize);
2683 0 : if (!err && copy_to_user(uset, &set, sigsetsize))
2684 : err = -EFAULT;
2685 : return err;
2686 : }
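/*
 * A hedged userspace sketch, not part of signal.c: a signal raised
 * while blocked shows up in sigpending(2), matching the sigandsets()
 * with ->blocked in do_sigpending() above.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
    sigset_t blk, pend;

    sigemptyset(&blk);
    sigaddset(&blk, SIGUSR1);
    sigprocmask(SIG_BLOCK, &blk, NULL);
    raise(SIGUSR1);                     /* queued, cannot be delivered */
    sigpending(&pend);
    printf("SIGUSR1 pending: %d\n", sigismember(&pend, SIGUSR1)); /* 1 */
    return 0;
}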
2687 :
2688 : #ifdef CONFIG_COMPAT
2689 : COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2690 : compat_size_t, sigsetsize)
2691 : {
2692 : #ifdef __BIG_ENDIAN
2693 : sigset_t set;
2694 : int err = do_sigpending(&set, sigsetsize);
2695 : if (!err) {
2696 : compat_sigset_t set32;
2697 : sigset_to_compat(&set32, &set);
2698 : /* we can get here only if sigsetsize <= sizeof(set) */
2699 : if (copy_to_user(uset, &set32, sigsetsize))
2700 : err = -EFAULT;
2701 : }
2702 : return err;
2703 : #else
2704 : return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2705 : #endif
2706 : }
2707 : #endif
2708 :
2709 : #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2710 :
2711 0 : int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2712 : {
2713 : int err;
2714 :
2715 0 : if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2716 : return -EFAULT;
2717 0 : if (from->si_code < 0)
2718 0 : return __copy_to_user(to, from, sizeof(siginfo_t))
2719 0 : ? -EFAULT : 0;
2720 : /*
2721 : * If you change siginfo_t structure, please be sure
2722 : * this code is fixed accordingly.
2723 : * Please remember to update the signalfd_copyinfo() function
2724 : * inside fs/signalfd.c too, in case siginfo_t changes.
2725 : * It should never copy any pad contained in the structure
2726 : * to avoid security leaks, but must copy the generic
2727 : * 3 ints plus the relevant union member.
2728 : */
2729 0 : err = __put_user(from->si_signo, &to->si_signo);
2730 0 : err |= __put_user(from->si_errno, &to->si_errno);
2731 0 : err |= __put_user((short)from->si_code, &to->si_code);
2732 0 : switch (from->si_code & __SI_MASK) {
2733 : case __SI_KILL:
2734 0 : err |= __put_user(from->si_pid, &to->si_pid);
2735 0 : err |= __put_user(from->si_uid, &to->si_uid);
2736 0 : break;
2737 : case __SI_TIMER:
2738 0 : err |= __put_user(from->si_tid, &to->si_tid);
2739 0 : err |= __put_user(from->si_overrun, &to->si_overrun);
2740 0 : err |= __put_user(from->si_ptr, &to->si_ptr);
2741 0 : break;
2742 : case __SI_POLL:
2743 0 : err |= __put_user(from->si_band, &to->si_band);
2744 0 : err |= __put_user(from->si_fd, &to->si_fd);
2745 0 : break;
2746 : case __SI_FAULT:
2747 0 : err |= __put_user(from->si_addr, &to->si_addr);
2748 : #ifdef __ARCH_SI_TRAPNO
2749 : err |= __put_user(from->si_trapno, &to->si_trapno);
2750 : #endif
2751 : #ifdef BUS_MCEERR_AO
2752 : /*
2753 : * Other callers might not initialize the si_lsb field,
2754 : * so check explicitly for the right codes here.
2755 : */
2756 0 : if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2757 0 : err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2758 : #endif
2759 : #ifdef SEGV_BNDERR
2760 0 : err |= __put_user(from->si_lower, &to->si_lower);
2761 0 : err |= __put_user(from->si_upper, &to->si_upper);
2762 : #endif
2763 0 : break;
2764 : case __SI_CHLD:
2765 0 : err |= __put_user(from->si_pid, &to->si_pid);
2766 0 : err |= __put_user(from->si_uid, &to->si_uid);
2767 0 : err |= __put_user(from->si_status, &to->si_status);
2768 0 : err |= __put_user(from->si_utime, &to->si_utime);
2769 0 : err |= __put_user(from->si_stime, &to->si_stime);
2770 0 : break;
2771 : case __SI_RT: /* This is not generated by the kernel as of now. */
2772 : case __SI_MESGQ: /* But this is */
2773 0 : err |= __put_user(from->si_pid, &to->si_pid);
2774 0 : err |= __put_user(from->si_uid, &to->si_uid);
2775 0 : err |= __put_user(from->si_ptr, &to->si_ptr);
2776 0 : break;
2777 : #ifdef __ARCH_SIGSYS
2778 : case __SI_SYS:
2779 0 : err |= __put_user(from->si_call_addr, &to->si_call_addr);
2780 0 : err |= __put_user(from->si_syscall, &to->si_syscall);
2781 0 : err |= __put_user(from->si_arch, &to->si_arch);
2782 0 : break;
2783 : #endif
2784 : default: /* this is just in case for now ... */
2785 0 : err |= __put_user(from->si_pid, &to->si_pid);
2786 0 : err |= __put_user(from->si_uid, &to->si_uid);
2787 0 : break;
2788 : }
2789 0 : return err;
2790 : }
2791 :
2792 : #endif
2793 :
2794 : /**
2795 : * do_sigtimedwait - wait for queued signals specified in @which
2796 : * @which: queued signals to wait for
2797 : * @info: if non-null, the signal's siginfo is returned here
2798 : * @ts: upper bound on process time suspension
2799 : */
2800 0 : int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2801 : const struct timespec *ts)
2802 : {
2803 0 : struct task_struct *tsk = current;
2804 : long timeout = MAX_SCHEDULE_TIMEOUT;
2805 0 : sigset_t mask = *which;
2806 : int sig;
2807 :
2808 0 : if (ts) {
2809 0 : if (!timespec_valid(ts))
2810 : return -EINVAL;
2811 0 : timeout = timespec_to_jiffies(ts);
2812 : /*
2813 : * We can be close to the next tick, add another one
2814 : * to ensure we will wait at least the time asked for.
2815 : */
2816 0 : if (ts->tv_sec || ts->tv_nsec)
2817 0 : timeout++;
2818 : }
2819 :
2820 : /*
2821 : * Invert the set of allowed signals to get those we want to block.
2822 : */
2823 : sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2824 : signotset(&mask);
2825 :
2826 : spin_lock_irq(&tsk->sighand->siglock);
2827 0 : sig = dequeue_signal(tsk, &mask, info);
2828 0 : if (!sig && timeout) {
2829 : /*
2830 : * None ready, temporarily unblock those we're interested in
2831 : * while we are sleeping so that we'll be awakened when
2832 : * they arrive. Unblocking is always fine, we can avoid
2833 : * set_current_blocked().
2834 : */
2835 0 : tsk->real_blocked = tsk->blocked;
2836 : sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2837 0 : recalc_sigpending();
2838 : spin_unlock_irq(&tsk->sighand->siglock);
2839 :
2840 : timeout = freezable_schedule_timeout_interruptible(timeout);
2841 :
2842 : spin_lock_irq(&tsk->sighand->siglock);
2843 0 : __set_task_blocked(tsk, &tsk->real_blocked);
2844 : sigemptyset(&tsk->real_blocked);
2845 0 : sig = dequeue_signal(tsk, &mask, info);
2846 : }
2847 : spin_unlock_irq(&tsk->sighand->siglock);
2848 :
2849 0 : if (sig)
2850 : return sig;
2851 0 : return timeout ? -EINTR : -EAGAIN;
2852 : }
2853 :
2854 : /**
2855 : * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2856 : * in @uthese
2857 : * @uthese: queued signals to wait for
2858 : * @uinfo: if non-null, the signal's siginfo is returned here
2859 : * @uts: upper bound on process time suspension
2860 : * @sigsetsize: size of sigset_t type
2861 : */
2862 0 : SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2863 : siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2864 : size_t, sigsetsize)
2865 : {
2866 : sigset_t these;
2867 : struct timespec ts;
2868 : siginfo_t info;
2869 : int ret;
2870 :
2871 : /* XXX: Don't preclude handling different sized sigset_t's. */
2872 0 : if (sigsetsize != sizeof(sigset_t))
2873 : return -EINVAL;
2874 :
2875 0 : if (copy_from_user(&these, uthese, sizeof(these)))
2876 : return -EFAULT;
2877 :
2878 0 : if (uts) {
2879 0 : if (copy_from_user(&ts, uts, sizeof(ts)))
2880 : return -EFAULT;
2881 : }
2882 :
2883 0 : ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2884 :
2885 0 : if (ret > 0 && uinfo) {
2886 0 : if (copy_siginfo_to_user(uinfo, &info))
2887 : ret = -EFAULT;
2888 : }
2889 :
2890 : return ret;
2891 : }
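/*
 * A hedged userspace sketch, not part of signal.c: waiting for a
 * blocked signal with sigtimedwait(2) and telling a timeout (EAGAIN,
 * mirroring the -EAGAIN return above) apart from a dequeued signal.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
    sigset_t set;
    siginfo_t si;
    struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
    int sig;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    sigprocmask(SIG_BLOCK, &set, NULL); /* must be blocked first */

    sig = sigtimedwait(&set, &si, &ts);
    if (sig < 0 && errno == EAGAIN)
        puts("timed out");
    else if (sig > 0)
        printf("signal %d from pid %d\n", sig, (int)si.si_pid);
    return 0;
}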
2892 :
2893 : /**
2894 : * sys_kill - send a signal to a process
2895 : * @pid: the PID of the process
2896 : * @sig: signal to be sent
2897 : */
2898 70 : SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2899 : {
2900 : struct siginfo info;
2901 :
2902 35 : info.si_signo = sig;
2903 35 : info.si_errno = 0;
2904 35 : info.si_code = SI_USER;
2905 70 : info.si_pid = task_tgid_vnr(current);
2906 70 : info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2907 :
2908 35 : return kill_something_info(sig, &info, pid);
2909 : }
2910 :
2911 : static int
2912 0 : do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2913 : {
2914 : struct task_struct *p;
2915 : int error = -ESRCH;
2916 :
2917 : rcu_read_lock();
2918 0 : p = find_task_by_vpid(pid);
2919 0 : if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2920 0 : error = check_kill_permission(sig, info, p);
2921 : /*
2922 : * The null signal is a permissions and process existence
2923 : * probe. No signal is actually delivered.
2924 : */
2925 0 : if (!error && sig) {
2926 0 : error = do_send_sig_info(sig, info, p, false);
2927 : /*
2928 : * If lock_task_sighand() failed we pretend the task
2929 : * dies after receiving the signal. The window is tiny,
2930 : * and the signal is private anyway.
2931 : */
2932 0 : if (unlikely(error == -ESRCH))
2933 : error = 0;
2934 : }
2935 : }
2936 : rcu_read_unlock();
2937 :
2938 0 : return error;
2939 : }
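/*
 * A hedged userspace sketch, not part of signal.c: the "null signal"
 * probe described above. kill(pid, 0) runs only the permission and
 * existence checks; nothing is delivered. The helper name is
 * illustrative.
 */
#include <errno.h>
#include <signal.h>

/* 1 if pid exists and is signalable, 0 if gone, -1 on EPERM etc. */
int pid_alive(pid_t pid)
{
    if (kill(pid, 0) == 0)
        return 1;
    return errno == ESRCH ? 0 : -1;
}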
2940 :
2941 0 : static int do_tkill(pid_t tgid, pid_t pid, int sig)
2942 : {
2943 0 : struct siginfo info = {};
2944 :
2945 0 : info.si_signo = sig;
2946 : info.si_errno = 0;
2947 0 : info.si_code = SI_TKILL;
2948 0 : info.si_pid = task_tgid_vnr(current);
2949 0 : info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2950 :
2951 0 : return do_send_specific(tgid, pid, sig, &info);
2952 : }
2953 :
2954 : /**
2955 : * sys_tgkill - send signal to one specific thread
2956 : * @tgid: the thread group ID of the thread
2957 : * @pid: the PID of the thread
2958 : * @sig: signal to be sent
2959 : *
2960 : * This syscall also checks the @tgid and returns -ESRCH even if the PID
2961 : * exists but no longer belongs to the target process. This
2962 : * method solves the problem of threads exiting and PIDs getting reused.
2963 : */
2964 0 : SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2965 : {
2966 : /* This is only valid for single tasks */
2967 0 : if (pid <= 0 || tgid <= 0)
2968 : return -EINVAL;
2969 :
2970 0 : return do_tkill(tgid, pid, sig);
2971 : }
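/*
 * A hedged userspace sketch, not part of signal.c: older C libraries
 * ship no tgkill() wrapper, so a raw syscall(2) is common. The @tgid
 * check above is what defeats PID reuse: the call fails with ESRCH if
 * the thread no longer belongs to the expected process. The helper
 * name is illustrative.
 */
#include <sys/syscall.h>
#include <unistd.h>

static int my_tgkill(pid_t tgid, pid_t tid, int sig)
{
    return syscall(SYS_tgkill, tgid, tid, sig);
}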
2972 :
2973 : /**
2974 : * sys_tkill - send signal to one specific task
2975 : * @pid: the PID of the task
2976 : * @sig: signal to be sent
2977 : *
2978 : * Send a signal to only one task, even if it's a CLONE_THREAD task.
2979 : */
2980 0 : SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2981 : {
2982 : /* This is only valid for single tasks */
2983 0 : if (pid <= 0)
2984 : return -EINVAL;
2985 :
2986 0 : return do_tkill(0, pid, sig);
2987 : }
2988 :
2989 0 : static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
2990 : {
2991 : /* Not even root can pretend to send signals from the kernel.
2992 : * Nor can they impersonate a kill()/tgkill(), which adds source info.
2993 : */
2994 0 : if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
2995 0 : (task_pid_vnr(current) != pid)) {
2996 : /* We used to allow any < 0 si_code */
2997 : WARN_ON_ONCE(info->si_code < 0);
2998 : return -EPERM;
2999 : }
3000 0 : info->si_signo = sig;
3001 :
3002 : /* POSIX.1b doesn't mention process groups. */
3003 0 : return kill_proc_info(sig, info, pid);
3004 : }
3005 :
3006 : /**
3007 : * sys_rt_sigqueueinfo - send signal information to a process
3008 : * @pid: the PID of the process
3009 : * @sig: signal to be sent
3010 : * @uinfo: signal info to be sent
3011 : */
3012 0 : SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3013 : siginfo_t __user *, uinfo)
3014 : {
3015 : siginfo_t info;
3016 0 : if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3017 : return -EFAULT;
3018 0 : return do_rt_sigqueueinfo(pid, sig, &info);
3019 : }
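/*
 * A hedged userspace sketch, not part of signal.c: userspace normally
 * reaches rt_sigqueueinfo through sigqueue(3), which builds the
 * siginfo (si_code = SI_QUEUE) and carries a payload the receiver can
 * read from si_value in an SA_SIGINFO handler. The helper name is
 * illustrative.
 */
#include <signal.h>

int send_token(pid_t pid, int token)
{
    union sigval v;

    v.sival_int = token;        /* payload travels in the siginfo */
    return sigqueue(pid, SIGUSR1, v);
}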
3020 :
3021 : #ifdef CONFIG_COMPAT
3022 : COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3023 : compat_pid_t, pid,
3024 : int, sig,
3025 : struct compat_siginfo __user *, uinfo)
3026 : {
3027 : siginfo_t info;
3028 : int ret = copy_siginfo_from_user32(&info, uinfo);
3029 : if (unlikely(ret))
3030 : return ret;
3031 : return do_rt_sigqueueinfo(pid, sig, &info);
3032 : }
3033 : #endif
3034 :
3035 0 : static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3036 : {
3037 : /* This is only valid for single tasks */
3038 0 : if (pid <= 0 || tgid <= 0)
3039 : return -EINVAL;
3040 :
3041 : /* Not even root can pretend to send signals from the kernel.
3042 : * Nor can they impersonate a kill()/tgkill(), which adds source info.
3043 : */
3044 0 : if (((info->si_code >= 0 || info->si_code == SI_TKILL)) &&
3045 0 : (task_pid_vnr(current) != pid)) {
3046 : /* We used to allow any < 0 si_code */
3047 : WARN_ON_ONCE(info->si_code < 0);
3048 : return -EPERM;
3049 : }
3050 0 : info->si_signo = sig;
3051 :
3052 0 : return do_send_specific(tgid, pid, sig, info);
3053 : }
3054 :
3055 0 : SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3056 : siginfo_t __user *, uinfo)
3057 : {
3058 : siginfo_t info;
3059 :
3060 0 : if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3061 : return -EFAULT;
3062 :
3063 0 : return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3064 : }
3065 :
3066 : #ifdef CONFIG_COMPAT
3067 : COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3068 : compat_pid_t, tgid,
3069 : compat_pid_t, pid,
3070 : int, sig,
3071 : struct compat_siginfo __user *, uinfo)
3072 : {
3073 : siginfo_t info;
3074 :
3075 : if (copy_siginfo_from_user32(&info, uinfo))
3076 : return -EFAULT;
3077 : return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3078 : }
3079 : #endif
3080 :
3081 : /*
3082 : * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3083 : */
3084 22 : void kernel_sigaction(int sig, __sighandler_t action)
3085 : {
3086 : spin_lock_irq(&current->sighand->siglock);
3087 22 : current->sighand->action[sig - 1].sa.sa_handler = action;
3088 22 : if (action == SIG_IGN) {
3089 : sigset_t mask;
3090 :
3091 : sigemptyset(&mask);
3092 : sigaddset(&mask, sig);
3093 :
3094 0 : flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3095 0 : flush_sigqueue_mask(&mask, &current->pending);
3096 0 : recalc_sigpending();
3097 : }
3098 : spin_unlock_irq(&current->sighand->siglock);
3099 22 : }
3100 : EXPORT_SYMBOL(kernel_sigaction);
3101 :
3102 15368 : int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3103 : {
3104 15368 : struct task_struct *p = current, *t;
3105 : struct k_sigaction *k;
3106 : sigset_t mask;
3107 :
3108 15368 : if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3109 : return -EINVAL;
3110 :
3111 15338 : k = &p->sighand->action[sig-1];
3112 :
3113 : spin_lock_irq(&p->sighand->siglock);
3114 15338 : if (oact)
3115 10062 : *oact = *k;
3116 :
3117 15338 : if (act) {
3118 : sigdelsetmask(&act->sa.sa_mask,
3119 : sigmask(SIGKILL) | sigmask(SIGSTOP));
3120 13836 : *k = *act;
3121 : /*
3122 : * POSIX 3.3.1.3:
3123 : * "Setting a signal action to SIG_IGN for a signal that is
3124 : * pending shall cause the pending signal to be discarded,
3125 : * whether or not it is blocked."
3126 : *
3127 : * "Setting a signal action to SIG_DFL for a signal that is
3128 : * pending and whose default action is to ignore the signal
3129 : * (for example, SIGCHLD), shall cause the pending signal to
3130 : * be discarded, whether or not it is blocked"
3131 : */
3132 13836 : if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3133 : sigemptyset(&mask);
3134 : sigaddset(&mask, sig);
3135 3240 : flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3136 6480 : for_each_thread(p, t)
3137 3240 : flush_sigqueue_mask(&mask, &t->pending);
3138 : }
3139 : }
3140 :
3141 : spin_unlock_irq(&p->sighand->siglock);
3142 : return 0;
3143 : }
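/*
 * A hedged userspace sketch, not part of signal.c, of the POSIX
 * 3.3.1.3 rule quoted above: a pending, blocked signal is discarded
 * the moment its action becomes SIG_IGN.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
    sigset_t blk, pend;

    sigemptyset(&blk);
    sigaddset(&blk, SIGUSR1);
    sigprocmask(SIG_BLOCK, &blk, NULL);
    raise(SIGUSR1);                     /* now pending and blocked */

    signal(SIGUSR1, SIG_IGN);           /* discards the pending signal */

    sigpending(&pend);
    printf("still pending: %d\n", sigismember(&pend, SIGUSR1)); /* 0 */
    return 0;
}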
3144 :
3145 : static int
3146 0 : do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3147 : {
3148 : stack_t oss;
3149 : int error;
3150 :
3151 0 : oss.ss_sp = (void __user *) current->sas_ss_sp;
3152 0 : oss.ss_size = current->sas_ss_size;
3153 : oss.ss_flags = sas_ss_flags(sp);
3154 :
3155 0 : if (uss) {
3156 : void __user *ss_sp;
3157 : size_t ss_size;
3158 : int ss_flags;
3159 :
3160 : error = -EFAULT;
3161 0 : if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3162 : goto out;
3163 0 : error = __get_user(ss_sp, &uss->ss_sp) |
3164 0 : __get_user(ss_flags, &uss->ss_flags) |
3165 0 : __get_user(ss_size, &uss->ss_size);
3166 0 : if (error)
3167 : goto out;
3168 :
3169 : error = -EPERM;
3170 0 : if (on_sig_stack(sp))
3171 : goto out;
3172 :
3173 : error = -EINVAL;
3174 : /*
3175 : * Note - this code used to test ss_flags incorrectly:
3176 : * old code may have been written using ss_flags==0
3177 : * to mean ss_flags==SS_ONSTACK (as this was the only
3178 : * way that worked) - this fix preserves that older
3179 : * mechanism.
3180 : */
3181 0 : if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
3182 : goto out;
3183 :
3184 0 : if (ss_flags == SS_DISABLE) {
3185 : ss_size = 0;
3186 : ss_sp = NULL;
3187 : } else {
3188 : error = -ENOMEM;
3189 0 : if (ss_size < MINSIGSTKSZ)
3190 : goto out;
3191 : }
3192 :
3193 0 : current->sas_ss_sp = (unsigned long) ss_sp;
3194 0 : current->sas_ss_size = ss_size;
3195 : }
3196 :
3197 : error = 0;
3198 0 : if (uoss) {
3199 : error = -EFAULT;
3200 0 : if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3201 : goto out;
3202 0 : error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3203 0 : __put_user(oss.ss_size, &uoss->ss_size) |
3204 0 : __put_user(oss.ss_flags, &uoss->ss_flags);
3205 : }
3206 :
3207 : out:
3208 0 : return error;
3209 : }
3210 0 : SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3211 : {
3212 0 : return do_sigaltstack(uss, uoss, current_user_stack_pointer());
3213 : }
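/*
 * A hedged userspace sketch, not part of signal.c: the usual pairing
 * for this syscall is sigaltstack(2) plus SA_ONSTACK, so a handler can
 * still run when the normal stack has overflowed. The helper name and
 * buffer size are illustrative (the buffer must be at least SIGSTKSZ
 * bytes).
 */
#include <signal.h>
#include <stdlib.h>

static char altstack[64 * 1024];

static void on_segv(int sig) { (void)sig; _Exit(1); }

int install_overflow_handler(void)
{
    stack_t ss = { .ss_sp = altstack, .ss_size = sizeof(altstack),
                   .ss_flags = 0 };
    struct sigaction sa = { 0 };

    if (sigaltstack(&ss, NULL) < 0)
        return -1;
    sa.sa_handler = on_segv;
    sa.sa_flags = SA_ONSTACK;   /* run this handler on the alt stack */
    sigemptyset(&sa.sa_mask);
    return sigaction(SIGSEGV, &sa, NULL);
}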
3214 :
3215 0 : int restore_altstack(const stack_t __user *uss)
3216 : {
3217 0 : int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3218 : /* squash all but EFAULT for now */
3219 0 : return err == -EFAULT ? err : 0;
3220 : }
3221 :
3222 0 : int __save_altstack(stack_t __user *uss, unsigned long sp)
3223 : {
3224 0 : struct task_struct *t = current;
3225 0 : return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3226 0 : __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3227 0 : __put_user(t->sas_ss_size, &uss->ss_size);
3228 : }
3229 :
3230 : #ifdef CONFIG_COMPAT
3231 : COMPAT_SYSCALL_DEFINE2(sigaltstack,
3232 : const compat_stack_t __user *, uss_ptr,
3233 : compat_stack_t __user *, uoss_ptr)
3234 : {
3235 : stack_t uss, uoss;
3236 : int ret;
3237 : mm_segment_t seg;
3238 :
3239 : if (uss_ptr) {
3240 : compat_stack_t uss32;
3241 :
3242 : memset(&uss, 0, sizeof(stack_t));
3243 : if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3244 : return -EFAULT;
3245 : uss.ss_sp = compat_ptr(uss32.ss_sp);
3246 : uss.ss_flags = uss32.ss_flags;
3247 : uss.ss_size = uss32.ss_size;
3248 : }
3249 : seg = get_fs();
3250 : set_fs(KERNEL_DS);
3251 : ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3252 : (stack_t __force __user *) &uoss,
3253 : compat_user_stack_pointer());
3254 : set_fs(seg);
3255 : if (ret >= 0 && uoss_ptr) {
3256 : if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3257 : __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3258 : __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3259 : __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3260 : ret = -EFAULT;
3261 : }
3262 : return ret;
3263 : }
3264 :
3265 : int compat_restore_altstack(const compat_stack_t __user *uss)
3266 : {
3267 : int err = compat_sys_sigaltstack(uss, NULL);
3268 : /* squash all but -EFAULT for now */
3269 : return err == -EFAULT ? err : 0;
3270 : }
3271 :
3272 : int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3273 : {
3274 : struct task_struct *t = current;
3275 : return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
3276 : __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3277 : __put_user(t->sas_ss_size, &uss->ss_size);
3278 : }
3279 : #endif
3280 :
3281 : #ifdef __ARCH_WANT_SYS_SIGPENDING
3282 :
3283 : /**
3284 : * sys_sigpending - examine pending signals
3285 : * @set: where mask of pending signal is returned
3286 : */
3287 0 : SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3288 : {
3289 0 : return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3290 : }
3291 :
3292 : #endif
3293 :
3294 : #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3295 : /**
3296 : * sys_sigprocmask - examine and change blocked signals
3297 : * @how: whether to add, remove, or set signals
3298 : * @nset: signals to add or remove (if non-null)
3299 : * @oset: previous value of signal mask if non-null
3300 : *
3301 : * Some platforms have their own version with special arguments;
3302 : * others support only sys_rt_sigprocmask.
3303 : */
3304 :
3305 0 : SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3306 : old_sigset_t __user *, oset)
3307 : {
3308 : old_sigset_t old_set, new_set;
3309 : sigset_t new_blocked;
3310 :
3311 0 : old_set = current->blocked.sig[0];
3312 :
3313 0 : if (nset) {
3314 0 : if (copy_from_user(&new_set, nset, sizeof(*nset)))
3315 : return -EFAULT;
3316 :
3317 0 : new_blocked = current->blocked;
3318 :
3319 0 : switch (how) {
3320 : case SIG_BLOCK:
3321 0 : sigaddsetmask(&new_blocked, new_set);
3322 : break;
3323 : case SIG_UNBLOCK:
3324 0 : sigdelsetmask(&new_blocked, new_set);
3325 : break;
3326 : case SIG_SETMASK:
3327 0 : new_blocked.sig[0] = new_set;
3328 : break;
3329 : default:
3330 : return -EINVAL;
3331 : }
3332 :
3333 0 : set_current_blocked(&new_blocked);
3334 : }
3335 :
3336 0 : if (oset) {
3337 0 : if (copy_to_user(oset, &old_set, sizeof(*oset)))
3338 : return -EFAULT;
3339 : }
3340 :
3341 : return 0;
3342 : }
3343 : #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3344 :
3345 : #ifndef CONFIG_ODD_RT_SIGACTION
3346 : /**
3347 : * sys_rt_sigaction - alter an action taken by a process
3348 : * @sig: signal to be sent
3349 : * @act: new sigaction
3350 : * @oact: used to save the previous sigaction
3351 : * @sigsetsize: size of sigset_t type
3352 : */
3353 30736 : SYSCALL_DEFINE4(rt_sigaction, int, sig,
3354 : const struct sigaction __user *, act,
3355 : struct sigaction __user *, oact,
3356 : size_t, sigsetsize)
3357 : {
3358 : struct k_sigaction new_sa, old_sa;
3359 : int ret = -EINVAL;
3360 :
3361 : /* XXX: Don't preclude handling different sized sigset_t's. */
3362 15368 : if (sigsetsize != sizeof(sigset_t))
3363 : goto out;
3364 :
3365 15368 : if (act) {
3366 13860 : if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3367 : return -EFAULT;
3368 : }
3369 :
3370 15368 : ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3371 :
3372 15368 : if (!ret && oact) {
3373 10062 : if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3374 : return -EFAULT;
3375 : }
3376 : out:
3377 : return ret;
3378 : }
3379 : #ifdef CONFIG_COMPAT
3380 : COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3381 : const struct compat_sigaction __user *, act,
3382 : struct compat_sigaction __user *, oact,
3383 : compat_size_t, sigsetsize)
3384 : {
3385 : struct k_sigaction new_ka, old_ka;
3386 : compat_sigset_t mask;
3387 : #ifdef __ARCH_HAS_SA_RESTORER
3388 : compat_uptr_t restorer;
3389 : #endif
3390 : int ret;
3391 :
3392 : /* XXX: Don't preclude handling different sized sigset_t's. */
3393 : if (sigsetsize != sizeof(compat_sigset_t))
3394 : return -EINVAL;
3395 :
3396 : if (act) {
3397 : compat_uptr_t handler;
3398 : ret = get_user(handler, &act->sa_handler);
3399 : new_ka.sa.sa_handler = compat_ptr(handler);
3400 : #ifdef __ARCH_HAS_SA_RESTORER
3401 : ret |= get_user(restorer, &act->sa_restorer);
3402 : new_ka.sa.sa_restorer = compat_ptr(restorer);
3403 : #endif
3404 : ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3405 : ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3406 : if (ret)
3407 : return -EFAULT;
3408 : sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3409 : }
3410 :
3411 : ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3412 : if (!ret && oact) {
3413 : sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3414 : ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3415 : &oact->sa_handler);
3416 : ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3417 : ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3418 : #ifdef __ARCH_HAS_SA_RESTORER
3419 : ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3420 : &oact->sa_restorer);
3421 : #endif
3422 : }
3423 : return ret;
3424 : }
3425 : #endif
3426 : #endif /* !CONFIG_ODD_RT_SIGACTION */
3427 :
3428 : #ifdef CONFIG_OLD_SIGACTION
3429 0 : SYSCALL_DEFINE3(sigaction, int, sig,
3430 : const struct old_sigaction __user *, act,
3431 : struct old_sigaction __user *, oact)
3432 : {
3433 : struct k_sigaction new_ka, old_ka;
3434 : int ret;
3435 :
3436 0 : if (act) {
3437 : old_sigset_t mask;
3438 0 : if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3439 0 : __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3440 0 : __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3441 0 : __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3442 0 : __get_user(mask, &act->sa_mask))
3443 : return -EFAULT;
3444 : #ifdef __ARCH_HAS_KA_RESTORER
3445 : new_ka.ka_restorer = NULL;
3446 : #endif
3447 : siginitset(&new_ka.sa.sa_mask, mask);
3448 : }
3449 :
3450 0 : ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3451 :
3452 0 : if (!ret && oact) {
3453 0 : if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3454 0 : __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3455 0 : __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3456 0 : __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3457 0 : __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3458 : return -EFAULT;
3459 : }
3460 :
3461 : return ret;
3462 : }
3463 : #endif
3464 : #ifdef CONFIG_COMPAT_OLD_SIGACTION
3465 : COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3466 : const struct compat_old_sigaction __user *, act,
3467 : struct compat_old_sigaction __user *, oact)
3468 : {
3469 : struct k_sigaction new_ka, old_ka;
3470 : int ret;
3471 : compat_old_sigset_t mask;
3472 : compat_uptr_t handler, restorer;
3473 :
3474 : if (act) {
3475 : if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3476 : __get_user(handler, &act->sa_handler) ||
3477 : __get_user(restorer, &act->sa_restorer) ||
3478 : __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3479 : __get_user(mask, &act->sa_mask))
3480 : return -EFAULT;
3481 :
3482 : #ifdef __ARCH_HAS_KA_RESTORER
3483 : new_ka.ka_restorer = NULL;
3484 : #endif
3485 : new_ka.sa.sa_handler = compat_ptr(handler);
3486 : new_ka.sa.sa_restorer = compat_ptr(restorer);
3487 : siginitset(&new_ka.sa.sa_mask, mask);
3488 : }
3489 :
3490 : ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3491 :
3492 : if (!ret && oact) {
3493 : if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3494 : __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3495 : &oact->sa_handler) ||
3496 : __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3497 : &oact->sa_restorer) ||
3498 : __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3499 : __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3500 : return -EFAULT;
3501 : }
3502 : return ret;
3503 : }
3504 : #endif
3505 :
3506 : #ifdef CONFIG_SGETMASK_SYSCALL
3507 :
3508 : /*
3509 : * For backwards compatibility. Functionality superseded by sigprocmask.
3510 : */
3511 : SYSCALL_DEFINE0(sgetmask)
3512 : {
3513 : /* SMP safe */
3514 : return current->blocked.sig[0];
3515 : }
3516 :
3517 : SYSCALL_DEFINE1(ssetmask, int, newmask)
3518 : {
3519 : int old = current->blocked.sig[0];
3520 : sigset_t newset;
3521 :
3522 : siginitset(&newset, newmask);
3523 : set_current_blocked(&newset);
3524 :
3525 : return old;
3526 : }
3527 : #endif /* CONFIG_SGETMASK_SYSCALL */
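/*
 * Note that ssetmask() above replaces the entire blocked set even
 * though its int argument can only express the low word: siginitset()
 * zeroes the remaining words, so any blocked real-time signals are
 * silently unblocked. The superseding interface operates on a full
 * sigset_t; a minimal userspace sketch of the modern equivalent:
 *
 *	sigset_t newset, oldset;
 *
 *	sigemptyset(&newset);
 *	sigaddset(&newset, SIGINT);
 *	sigprocmask(SIG_SETMASK, &newset, &oldset);
 */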
3528 :
3529 : #ifdef __ARCH_WANT_SYS_SIGNAL
3530 : /*
3531 : * For backwards compatibility. Functionality superseded by sigaction.
3532 : */
3533 : SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3534 : {
3535 : struct k_sigaction new_sa, old_sa;
3536 : int ret;
3537 :
3538 : new_sa.sa.sa_handler = handler;
3539 : new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3540 : sigemptyset(&new_sa.sa.sa_mask);
3541 :
3542 : ret = do_sigaction(sig, &new_sa, &old_sa);
3543 :
3544 : return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3545 : }
3546 : #endif /* __ARCH_WANT_SYS_SIGNAL */
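/*
 * SA_ONESHOT | SA_NOMASK gives this legacy signal(2) entry point its
 * historical System V semantics: the disposition is reset to SIG_DFL
 * on delivery and the signal is not blocked while the handler runs.
 * A userspace sketch of what callers therefore had to do to keep a
 * handler installed:
 *
 *	static void handler(int sig)
 *	{
 *		signal(sig, handler);	re-arm: the one-shot disposition
 *					was reset before we were called
 *	}
 */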
3547 :
3548 : #ifdef __ARCH_WANT_SYS_PAUSE
3549 :
3550 0 : SYSCALL_DEFINE0(pause)
3551 : {
3552 0 : while (!signal_pending(current)) {
3553 0 : current->state = TASK_INTERRUPTIBLE;
3554 0 : schedule();
3555 : }
3556 0 : return -ERESTARTNOHAND;
3557 : }
3558 :
3559 : #endif
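/*
 * pause() alone cannot safely wait for a flag set by a signal handler:
 * a signal that arrives after userspace tests the flag but before it
 * enters pause() is lost, and pause() then sleeps indefinitely. A
 * deliberately racy userspace sketch of the problem:
 *
 *	volatile sig_atomic_t done;
 *
 *	while (!done)
 *		pause();	window: the signal may land between the
 *				test above and the syscall entry
 *
 * sigsuspend() below closes this window by making "swap the mask and
 * sleep" a single atomic step.
 */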
3560 :
3561 0 : int sigsuspend(sigset_t *set)
3562 : {
3563 0 : current->saved_sigmask = current->blocked;
3564 : set_current_blocked(set);
3565 :
3566 0 : current->state = TASK_INTERRUPTIBLE;
3567 0 : schedule();
3568 : set_restore_sigmask();
3569 0 : return -ERESTARTNOHAND;
3570 : }
3571 :
3572 : /**
3573 : * sys_rt_sigsuspend - replace the signal mask with the @unewset value
3574 : * until a signal is received
3575 : * @unewset: new signal mask value
3576 : * @sigsetsize: size of sigset_t type
3577 : */
3578 0 : SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3579 : {
3580 : sigset_t newset;
3581 :
3582 : /* XXX: Don't preclude handling different sized sigset_t's. */
3583 0 : if (sigsetsize != sizeof(sigset_t))
3584 : return -EINVAL;
3585 :
3586 0 : if (copy_from_user(&newset, unewset, sizeof(newset)))
3587 : return -EFAULT;
3588 0 : return sigsuspend(&newset);
3589 : }
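/*
 * This is the race-free counterpart to the pause() pattern above, and
 * the syscall that libc's sigsuspend(3) wrapper ends up in: block the
 * signal first, test the condition, then let rt_sigsuspend atomically
 * install the old (unblocking) mask and sleep. A minimal userspace
 * sketch:
 *
 *	sigset_t block, prev;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &prev);
 *	while (!done)
 *		sigsuspend(&prev);	returns -1/EINTR once the
 *					handler has run
 *	sigprocmask(SIG_SETMASK, &prev, NULL);
 */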
3590 :
3591 : #ifdef CONFIG_COMPAT
3592 : COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3593 : {
3594 : #ifdef __BIG_ENDIAN
3595 : sigset_t newset;
3596 : compat_sigset_t newset32;
3597 :
3598 : /* XXX: Don't preclude handling different sized sigset_t's. */
3599 : if (sigsetsize != sizeof(sigset_t))
3600 : return -EINVAL;
3601 :
3602 : if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3603 : return -EFAULT;
3604 : sigset_from_compat(&newset, &newset32);
3605 : return sigsuspend(&newset);
3606 : #else
3607 : /* on little-endian the bitmap layout already matches, no conversion needed */
3608 : return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3609 : #endif
3610 : }
3611 : #endif
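/*
 * Why big-endian needs the conversion above: a compat_sigset_t is an
 * array of 32-bit words, and on little-endian machines that byte
 * layout already matches the native 64-bit sigset_t words, so the
 * user pointer can simply be reinterpreted. On big-endian the 32-bit
 * halves sit in the wrong order, so sigset_from_compat() must merge
 * them explicitly; conceptually (a sketch of the word merging on a
 * 64-bit kernel, not a copy of the helper):
 *
 *	for (i = 0; i < _NSIG_WORDS; i++)
 *		set->sig[i] = compat->sig[2 * i] |
 *			      ((unsigned long)compat->sig[2 * i + 1] << 32);
 */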
3612 :
3613 : #ifdef CONFIG_OLD_SIGSUSPEND
3614 : SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3615 : {
3616 : sigset_t blocked;
3617 : siginitset(&blocked, mask);
3618 : return sigsuspend(&blocked);
3619 : }
3620 : #endif
3621 : #ifdef CONFIG_OLD_SIGSUSPEND3
3622 0 : SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3623 : {
3624 : sigset_t blocked;
3625 : siginitset(&blocked, mask);
3626 0 : return sigsuspend(&blocked);
3627 : }
3628 : #endif
3629 :
3630 0 : __weak const char *arch_vma_name(struct vm_area_struct *vma)
3631 : {
3632 0 : return NULL;
3633 : }
3634 :
3635 1 : void __init signals_init(void)
3636 : {
3637 1 : sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3638 1 : }
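/*
 * The cache created here backs every queued signal: the allocation and
 * release elsewhere in this file pair up roughly as
 *
 *	struct sigqueue *q = kmem_cache_alloc(sigqueue_cachep, flags);
 *	...
 *	kmem_cache_free(sigqueue_cachep, q);
 *
 * SLAB_PANIC makes boot fail immediately if the cache cannot be
 * created, since reliable signal delivery depends on it.
 */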
3639 :
3640 : #ifdef CONFIG_KGDB_KDB
3641 : #include <linux/kdb.h>
3642 : /*
3643 : * kdb_send_sig_info - Allows kdb to send signals without exposing
3644 : * signal internals. This function checks if the required locks are
3645 : * available before calling the main signal code, to avoid kdb
3646 : * deadlocks.
3647 : */
3648 : void
3649 : kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3650 : {
3651 : static struct task_struct *kdb_prev_t;
3652 : int sig, new_t;
3653 : if (!spin_trylock(&t->sighand->siglock)) {
3654 : kdb_printf("Can't do kill command now.\n"
3655 : "The sigmask lock is held somewhere else in "
3656 : "kernel, try again later\n");
3657 : return;
3658 : }
3659 : spin_unlock(&t->sighand->siglock);
3660 : new_t = kdb_prev_t != t;
3661 : kdb_prev_t = t;
3662 : if (t->state != TASK_RUNNING && new_t) {
3663 : kdb_printf("Process is not RUNNING, sending a signal from "
3664 : "kdb risks deadlock\n"
3665 : "on the run queue locks. "
3666 : "The signal has _not_ been sent.\n"
3667 : "Reissue the kill command if you want to risk "
3668 : "the deadlock.\n");
3669 : return;
3670 : }
3671 : sig = info->si_signo;
3672 : if (send_sig_info(sig, info, t))
3673 : kdb_printf("Fail to deliver Signal %d to process %d.\n",
3674 : sig, t->pid);
3675 : else
3676 : kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3677 : }
3678 : #endif /* CONFIG_KGDB_KDB */