patch-2.4.0-test8 linux/kernel/signal.c
Next file: linux/kernel/timer.c
Previous file: linux/kernel/sched.c
Back to the patch index
Back to the overall index
- Lines: 896
- Date:
Tue Sep 5 09:53:59 2000
- Orig file:
v2.4.0-test7/linux/kernel/signal.c
- Orig date:
Wed Aug 23 18:36:39 2000
diff -u --recursive --new-file v2.4.0-test7/linux/kernel/signal.c linux/kernel/signal.c
@@ -28,32 +28,32 @@
#define SIG_SLAB_DEBUG 0
#endif
-static kmem_cache_t *signal_queue_cachep;
+static kmem_cache_t *sigqueue_cachep;
atomic_t nr_queued_signals;
int max_queued_signals = 1024;
void __init signals_init(void)
{
- signal_queue_cachep =
- kmem_cache_create("signal_queue",
- sizeof(struct signal_queue),
- __alignof__(struct signal_queue),
+ sigqueue_cachep =
+ kmem_cache_create("sigqueue",
+ sizeof(struct sigqueue),
+ __alignof__(struct sigqueue),
SIG_SLAB_DEBUG, NULL, NULL);
- if (!signal_queue_cachep)
- panic("signals_init(): cannot create signal_queue SLAB cache");
+ if (!sigqueue_cachep)
+ panic("signals_init(): cannot create sigueue SLAB cache");
}
/* Given the mask, find the first available signal that should be serviced. */
static int
-next_signal(sigset_t *signal, sigset_t *mask)
+next_signal(struct task_struct *tsk, sigset_t *mask)
{
unsigned long i, *s, *m, x;
int sig = 0;
- s = signal->sig;
+ s = tsk->pending.signal.sig;
m = mask->sig;
switch (_NSIG_WORDS) {
default:
@@ -81,6 +81,23 @@
return sig;
}
+static void flush_sigqueue(struct sigpending *queue)
+{
+ struct sigqueue *q, *n;
+
+ sigemptyset(&queue->signal);
+ q = queue->head;
+ queue->head = NULL;
+ queue->tail = &queue->head;
+
+ while (q) {
+ n = q->next;
+ kmem_cache_free(sigqueue_cachep, q);
+ atomic_dec(&nr_queued_signals);
+ q = n;
+ }
+}
+
/*
* Flush all pending signals for a task.
*/
@@ -88,20 +105,23 @@
void
flush_signals(struct task_struct *t)
{
- struct signal_queue *q, *n;
-
t->sigpending = 0;
- sigemptyset(&t->signal);
- q = t->sigqueue;
- t->sigqueue = NULL;
- t->sigqueue_tail = &t->sigqueue;
+ flush_sigqueue(&t->pending);
+}
- while (q) {
- n = q->next;
- kmem_cache_free(signal_queue_cachep, q);
- atomic_dec(&nr_queued_signals);
- q = n;
- }
+void exit_sighand(struct task_struct *tsk)
+{
+ struct signal_struct * sig = tsk->sig;
+
+ spin_lock_irq(&tsk->sigmask_lock);
+ if (sig) {
+ tsk->sig = NULL;
+ if (atomic_dec_and_test(&sig->count))
+ kmem_cache_free(sigact_cachep, sig);
+ }
+ tsk->sigpending = 0;
+ flush_sigqueue(&tsk->pending);
+ spin_unlock_irq(&tsk->sigmask_lock);
}
/*
@@ -133,9 +153,13 @@
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&current->sigmask_lock, flags);
current->notifier_mask = mask;
current->notifier_data = priv;
current->notifier = notifier;
+ spin_unlock_irqrestore(&current->sigmask_lock, flags);
}
/* Notify the system that blocking has ended. */
@@ -143,9 +167,60 @@
void
unblock_all_signals(void)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&current->sigmask_lock, flags);
current->notifier = NULL;
current->notifier_data = NULL;
recalc_sigpending(current);
+ spin_unlock_irqrestore(&current->sigmask_lock, flags);
+}
+
+static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+{
+ if (sigismember(&list->signal, sig)) {
+ /* Collect the siginfo appropriate to this signal. */
+ struct sigqueue *q, **pp;
+ pp = &list->head;
+ while ((q = *pp) != NULL) {
+ if (q->info.si_signo == sig)
+ goto found_it;
+ pp = &q->next;
+ }
+
+ /* Ok, it wasn't in the queue. We must have
+ been out of queue space. So zero out the
+ info. */
+ info->si_signo = sig;
+ info->si_errno = 0;
+ info->si_code = 0;
+ info->si_pid = 0;
+ info->si_uid = 0;
+ return 1;
+
+found_it:
+ if ((*pp = q->next) == NULL)
+ list->tail = pp;
+
+ /* Copy the sigqueue information and free the queue entry */
+ copy_siginfo(info, &q->info);
+ kmem_cache_free(sigqueue_cachep,q);
+ atomic_dec(&nr_queued_signals);
+
+ /* Non-RT signals can exist multiple times.. */
+ if (sig >= SIGRTMIN) {
+ while ((q = *pp) != NULL) {
+ if (q->info.si_signo == sig)
+ goto found_another;
+ pp = &q->next;
+ }
+ }
+
+ sigdelset(&list->signal, sig);
+found_another:
+ return 1;
+ }
+ return 0;
}
/*
@@ -165,82 +240,24 @@
signal_pending(current));
#endif
- sig = next_signal(&current->signal, mask);
+ sig = next_signal(current, mask);
if (current->notifier) {
- sigset_t merged;
- int i;
- int altsig;
-
- for (i = 0; i < _NSIG_WORDS; i++)
- merged.sig[i] = mask->sig[i]
- | current->notifier_mask->sig[i];
- altsig = next_signal(&current->signal, &merged);
- if (sig != altsig) {
+ if (sigismember(current->notifier_mask, sig)) {
if (!(current->notifier)(current->notifier_data)) {
current->sigpending = 0;
return 0;
- }
+ }
}
}
if (sig) {
- int reset = 1;
-
- /* Collect the siginfo appropriate to this signal. */
- struct signal_queue *q, **pp;
- pp = &current->sigqueue;
- q = current->sigqueue;
-
- /* Find the one we're interested in ... */
- for ( ; q ; pp = &q->next, q = q->next)
- if (q->info.si_signo == sig)
- break;
- if (q) {
- if ((*pp = q->next) == NULL)
- current->sigqueue_tail = pp;
- copy_siginfo(info, &q->info);
- kmem_cache_free(signal_queue_cachep,q);
- atomic_dec(&nr_queued_signals);
-
- /* Then see if this signal is still pending.
- (Non rt signals may not be queued twice.)
- */
- if (sig >= SIGRTMIN)
- for (q = *pp; q; q = q->next)
- if (q->info.si_signo == sig) {
- reset = 0;
- break;
- }
-
- } else {
- /* Ok, it wasn't in the queue. We must have
- been out of queue space. So zero out the
- info. */
- info->si_signo = sig;
- info->si_errno = 0;
- info->si_code = 0;
- info->si_pid = 0;
- info->si_uid = 0;
- }
-
- if (reset) {
- sigdelset(¤t->signal, sig);
- recalc_sigpending(current);
- }
-
+ if (!collect_signal(sig, &current->pending, info))
+ sig = 0;
+
/* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
we need to xchg out the timer overrun values. */
- } else {
- /* XXX: Once CLONE_PID is in to join those "threads" that are
- part of the same "process", look for signals sent to the
- "process" as well. */
-
- /* Sanity check... */
- if (mask == &current->blocked && signal_pending(current)) {
- printk(KERN_CRIT "SIG: sigpending lied\n");
- current->sigpending = 0;
- }
}
+ recalc_sigpending(current);
#if DEBUG_SIG
printk(" %d -> %d\n", signal_pending(current), sig);
@@ -249,42 +266,92 @@
return sig;
}
+static int rm_from_queue(int sig, struct sigpending *s)
+{
+ struct sigqueue *q, **pp;
+
+ if (!sigismember(&s->signal, sig))
+ return 0;
+
+ sigdelset(&s->signal, sig);
+
+ pp = &s->head;
+
+ while ((q = *pp) != NULL) {
+ if (q->info.si_signo == sig) {
+ if ((*pp = q->next) == NULL)
+ s->tail = pp;
+ kmem_cache_free(sigqueue_cachep,q);
+ atomic_dec(&nr_queued_signals);
+ continue;
+ }
+ pp = &q->next;
+ }
+ return 1;
+}
+
/*
- * Remove signal sig from queue and from t->signal.
- * Returns 1 if sig was found in t->signal.
+ * Remove signal sig from t->pending.
+ * Returns 1 if sig was found.
*
* All callers must be holding t->sigmask_lock.
*/
static int rm_sig_from_queue(int sig, struct task_struct *t)
{
- struct signal_queue *q, **pp;
+ return rm_from_queue(sig, &t->pending);
+}
- if (sig >= SIGRTMIN) {
- printk(KERN_CRIT "SIG: rm_sig_from_queue() doesn't support rt signals\n");
- return 0;
- }
+/*
+ * Bad permissions for sending the signal
+ */
+int bad_signal(int sig, struct siginfo *info, struct task_struct *t)
+{
+ return (!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
+ && ((sig != SIGCONT) || (current->session != t->session))
+ && (current->euid ^ t->suid) && (current->euid ^ t->uid)
+ && (current->uid ^ t->suid) && (current->uid ^ t->uid)
+ && !capable(CAP_KILL);
+}
+
+/*
+ * Signal type:
+ * < 0 : global action (kill - spread to all non-blocked threads)
+ * = 0 : ignored
+ * > 0 : wake up.
+ */
+static int signal_type(int sig, struct signal_struct *signals)
+{
+ unsigned long handler;
- if (!sigismember(&t->signal, sig))
+ if (!signals)
return 0;
+
+ handler = (unsigned long) signals->action[sig-1].sa.sa_handler;
+ if (handler > 1)
+ return 1;
- sigdelset(&t->signal, sig);
+ /* "Ignore" handler.. Illogical, but that has an implicit handler for SIGCHLD */
+ if (handler == 1)
+ return sig == SIGCHLD;
- pp = &t->sigqueue;
- q = t->sigqueue;
+ /* Default handler. Normally lethal, but.. */
+ switch (sig) {
- /* Find the one we're interested in ...
- It may appear only once. */
- for ( ; q ; pp = &q->next, q = q->next)
- if (q->info.si_signo == sig)
- break;
- if (q) {
- if ((*pp = q->next) == NULL)
- t->sigqueue_tail = pp;
- kmem_cache_free(signal_queue_cachep,q);
- atomic_dec(&nr_queued_signals);
+ /* Ignored */
+ case SIGCONT: case SIGWINCH:
+ case SIGCHLD: case SIGURG:
+ return 0;
+
+ /* Implicit behaviour */
+ case SIGTSTP: case SIGTTIN: case SIGTTOU:
+ return 1;
+
+ /* Implicit actions (kill or do special stuff) */
+ default:
+ return -1;
}
- return 1;
}
+
/*
* Determine whether a signal should be posted or not.
@@ -296,99 +363,44 @@
*/
static int ignored_signal(int sig, struct task_struct *t)
{
- struct signal_struct *signals;
- struct k_sigaction *ka;
-
/* Don't ignore traced or blocked signals */
if ((t->ptrace & PT_PTRACED) || sigismember(&t->blocked, sig))
return 0;
-
- signals = t->sig;
- if (!signals)
- return 1;
-
- ka = &signals->action[sig-1];
- switch ((unsigned long) ka->sa.sa_handler) {
- case (unsigned long) SIG_DFL:
- if (sig == SIGCONT ||
- sig == SIGWINCH ||
- sig == SIGCHLD ||
- sig == SIGURG)
- break;
- return 0;
- case (unsigned long) SIG_IGN:
- if (sig != SIGCHLD)
- break;
- /* fallthrough */
- default:
- return 0;
- }
- return 1;
+ return signal_type(sig, t->sig) == 0;
}
-int
-send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+/*
+ * Handle TASK_STOPPED cases etc implicit behaviour
+ * of certain magical signals.
+ *
+ * SIGKILL gets spread out to every thread.
+ */
+static void handle_stop_signal(int sig, struct task_struct *t)
{
- unsigned long flags;
- int ret;
- struct signal_queue *q = 0;
-
-
-#if DEBUG_SIG
-printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
-#endif
-
- ret = -EINVAL;
- if (sig < 0 || sig > _NSIG)
- goto out_nolock;
- /* The somewhat baroque permissions check... */
- ret = -EPERM;
- if ((!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
- && ((sig != SIGCONT) || (current->session != t->session))
- && (current->euid ^ t->suid) && (current->euid ^ t->uid)
- && (current->uid ^ t->suid) && (current->uid ^ t->uid)
- && !capable(CAP_KILL))
- goto out_nolock;
-
- /* The null signal is a permissions and process existance probe.
- No signal is actually delivered. Same goes for zombies. */
- ret = 0;
- if (!sig || !t->sig)
- goto out_nolock;
-
- spin_lock_irqsave(&t->sigmask_lock, flags);
switch (sig) {
case SIGKILL: case SIGCONT:
/* Wake up the process if stopped. */
if (t->state == TASK_STOPPED)
wake_up_process(t);
t->exit_code = 0;
- if (rm_sig_from_queue(SIGSTOP, t) || rm_sig_from_queue(SIGTSTP, t) ||
- rm_sig_from_queue(SIGTTOU, t) || rm_sig_from_queue(SIGTTIN, t))
- recalc_sigpending(t);
+ rm_sig_from_queue(SIGSTOP, t);
+ rm_sig_from_queue(SIGTSTP, t);
+ rm_sig_from_queue(SIGTTOU, t);
+ rm_sig_from_queue(SIGTTIN, t);
break;
case SIGSTOP: case SIGTSTP:
case SIGTTIN: case SIGTTOU:
/* If we're stopping again, cancel SIGCONT */
- if (rm_sig_from_queue(SIGCONT, t))
- recalc_sigpending(t);
+ rm_sig_from_queue(SIGCONT, t);
break;
}
+}
- /* Optimize away the signal, if it's a signal that can be
- handled immediately (ie non-blocked and untraced) and
- that is ignored (either explicitly or by default). */
-
- if (ignored_signal(sig, t))
- goto out;
-
- /* Support queueing exactly one non-rt signal, so that we
- can get more detailed information about the cause of
- the signal. */
- if (sig < SIGRTMIN && sigismember(&t->signal, sig))
- goto out;
+static int send_signal(int sig, struct siginfo *info, struct sigpending *signals)
+{
+ struct sigqueue * q = NULL;
/* Real-time signals must be queued if sent by sigqueue, or
some other real-time mechanism. It is implementation
@@ -399,15 +411,14 @@
pass on the info struct. */
if (atomic_read(&nr_queued_signals) < max_queued_signals) {
- q = (struct signal_queue *)
- kmem_cache_alloc(signal_queue_cachep, GFP_ATOMIC);
+ q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
}
if (q) {
atomic_inc(&nr_queued_signals);
q->next = NULL;
- *t->sigqueue_tail = q;
- t->sigqueue_tail = &q->next;
+ *signals->tail = q;
+ signals->tail = &q->next;
switch ((unsigned long) info) {
case 0:
q->info.si_signo = sig;
@@ -433,38 +444,107 @@
* Queue overflow, abort. We may abort if the signal was rt
* and sent by user using something other than kill().
*/
- ret = -EAGAIN;
- goto out;
+ return -EAGAIN;
+ }
+
+ sigaddset(&signals->signal, sig);
+ return 0;
+}
+
+/*
+ * Tell a process that it has a new active signal..
+ *
+ * NOTE! we rely on the previous spin_lock to
+ * lock interrupts for us! We can only be called with
+ * "sigmask_lock" held, and the local interrupt must
+ * have been disabled when that got aquired!
+ *
+ * No need to set need_resched since signal event passing
+ * goes through ->blocked
+ */
+static inline void signal_wake_up(struct task_struct *t)
+{
+ t->sigpending = 1;
+
+ if (t->state & TASK_INTERRUPTIBLE) {
+ wake_up_process(t);
+ return;
}
- sigaddset(&t->signal, sig);
- if (!sigismember(&t->blocked, sig)) {
- t->sigpending = 1;
#ifdef CONFIG_SMP
- /*
- * If the task is running on a different CPU
- * force a reschedule on the other CPU - note that
- * the code below is a tad loose and might occasionally
- * kick the wrong CPU if we catch the process in the
- * process of changing - but no harm is done by that
- * other than doing an extra (lightweight) IPI interrupt.
- *
- * note that we rely on the previous spin_lock to
- * lock interrupts for us! No need to set need_resched
- * since signal event passing goes through ->blocked.
- */
- spin_lock(&runqueue_lock);
- if (t->has_cpu && t->processor != smp_processor_id())
- smp_send_reschedule(t->processor);
- spin_unlock(&runqueue_lock);
+ /*
+ * If the task is running on a different CPU
+ * force a reschedule on the other CPU to make
+ * it notice the new signal quickly.
+ *
+ * The code below is a tad loose and might occasionally
+ * kick the wrong CPU if we catch the process in the
+ * process of changing - but no harm is done by that
+ * other than doing an extra (lightweight) IPI interrupt.
+ */
+ spin_lock(&runqueue_lock);
+ if (t->has_cpu && t->processor != smp_processor_id())
+ smp_send_reschedule(t->processor);
+ spin_unlock(&runqueue_lock);
#endif /* CONFIG_SMP */
- }
+}
+
+static int deliver_signal(int sig, struct siginfo *info, struct task_struct *t)
+{
+ int retval = send_signal(sig, info, &t->pending);
+
+ if (!retval && !sigismember(&t->blocked, sig))
+ signal_wake_up(t);
+
+ return retval;
+}
+
+int
+send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+{
+ unsigned long flags;
+ int ret;
+
+
+#if DEBUG_SIG
+printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
+#endif
+
+ ret = -EINVAL;
+ if (sig < 0 || sig > _NSIG)
+ goto out_nolock;
+ /* The somewhat baroque permissions check... */
+ ret = -EPERM;
+ if (bad_signal(sig, info, t))
+ goto out_nolock;
+
+ /* The null signal is a permissions and process existance probe.
+ No signal is actually delivered. Same goes for zombies. */
+ ret = 0;
+ if (!sig || !t->sig)
+ goto out_nolock;
+ spin_lock_irqsave(&t->sigmask_lock, flags);
+ handle_stop_signal(sig, t);
+
+ /* Optimize away the signal, if it's a signal that can be
+ handled immediately (ie non-blocked and untraced) and
+ that is ignored (either explicitly or by default). */
+
+ if (ignored_signal(sig, t))
+ goto out;
+
+ /* Support queueing exactly one non-rt signal, so that we
+ can get more detailed information about the cause of
+ the signal. */
+ if (sig < SIGRTMIN && sigismember(&t->pending.signal, sig))
+ goto out;
+
+ ret = deliver_signal(sig, info, t);
out:
spin_unlock_irqrestore(&t->sigmask_lock, flags);
- if ((t->state & TASK_INTERRUPTIBLE) && signal_pending(t))
- wake_up_process(t);
-
+ if ((t->state & TASK_INTERRUPTIBLE) && signal_pending(t))
+ wake_up_process(t);
out_nolock:
#if DEBUG_SIG
printk(" %d -> %d\n", signal_pending(t), ret);
@@ -509,22 +589,17 @@
int retval = -EINVAL;
if (pgrp > 0) {
struct task_struct *p;
- int found = 0;
retval = -ESRCH;
read_lock(&tasklist_lock);
for_each_task(p) {
if (p->pgrp == pgrp) {
int err = send_sig_info(sig, info, p);
- if (err != 0)
+ if (retval)
retval = err;
- else
- found++;
}
}
read_unlock(&tasklist_lock);
- if (found)
- retval = 0;
}
return retval;
}
@@ -541,22 +616,17 @@
int retval = -EINVAL;
if (sess > 0) {
struct task_struct *p;
- int found = 0;
retval = -ESRCH;
read_lock(&tasklist_lock);
for_each_task(p) {
if (p->leader && p->session == sess) {
int err = send_sig_info(sig, info, p);
- if (err)
+ if (retval)
retval = err;
- else
- found++;
}
}
read_unlock(&tasklist_lock);
- if (found)
- retval = 0;
}
return retval;
}
@@ -576,6 +646,7 @@
return error;
}
+
/*
* kill_something_info() interprets pid in interesting ways just like kill(2).
*
@@ -583,8 +654,7 @@
* is probably wrong. Should make it like BSD or SYSV.
*/
-int
-kill_something_info(int sig, struct siginfo *info, int pid)
+static int kill_something_info(int sig, struct siginfo *info, int pid)
{
if (!pid) {
return kill_pg_info(sig, info, current->pgrp);
@@ -645,11 +715,24 @@
}
/*
+ * Joy. Or not. Pthread wants us to wake up every thread
+ * in our parent group.
+ */
+static void wake_up_parent(struct task_struct *parent)
+{
+ struct task_struct *tsk = parent;
+
+ do {
+ wake_up_interruptible(&tsk->wait_chldexit);
+ tsk = next_thread(tsk);
+ } while (tsk != parent);
+}
+
+/*
* Let a parent know about a status change of a child.
*/
-void
-notify_parent(struct task_struct *tsk, int sig)
+void do_notify_parent(struct task_struct *tsk, int sig)
{
struct siginfo info;
int why, status;
@@ -693,7 +776,23 @@
info.si_status = status;
send_sig_info(sig, &info, tsk->p_pptr);
- wake_up_interruptible(&tsk->p_pptr->wait_chldexit);
+ wake_up_parent(tsk->p_pptr);
+}
+
+
+/*
+ * We need the tasklist lock because it's the only
+ * thing that protects out "parent" pointer.
+ *
+ * exit.c calls "do_notify_parent()" directly, because
+ * it already has the tasklist lock.
+ */
+void
+notify_parent(struct task_struct *tsk, int sig)
+{
+ read_lock(&tasklist_lock);
+ do_notify_parent(tsk, sig);
+ read_unlock(&tasklist_lock);
}
EXPORT_SYMBOL(dequeue_signal);
@@ -780,25 +879,29 @@
return error;
}
-asmlinkage long
-sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
+long do_sigpending(void *set, unsigned long sigsetsize)
{
- int error = -EINVAL;
+ long error = -EINVAL;
sigset_t pending;
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
+ if (sigsetsize > sizeof(sigset_t))
goto out;
spin_lock_irq(&current->sigmask_lock);
- sigandsets(&pending, &current->blocked, &current->signal);
+ sigandsets(&pending, &current->blocked, &current->pending.signal);
spin_unlock_irq(&current->sigmask_lock);
error = -EFAULT;
- if (!copy_to_user(set, &pending, sizeof(*set)))
+ if (!copy_to_user(set, &pending, sigsetsize))
error = 0;
out:
return error;
+}
+
+asmlinkage long
+sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
+{
+ return do_sigpending(set, sigsetsize);
}
asmlinkage long
@@ -914,10 +1017,12 @@
(act && (sig == SIGKILL || sig == SIGSTOP)))
return -EINVAL;
- spin_lock_irq(&current->sigmask_lock);
k = &current->sig->action[sig-1];
- if (oact) *oact = *k;
+ spin_lock(&current->sig->siglock);
+
+ if (oact)
+ *oact = *k;
if (act) {
*k = *act;
@@ -945,33 +1050,14 @@
&& (sig == SIGCONT ||
sig == SIGCHLD ||
sig == SIGWINCH))) {
- /* So dequeue any that might be pending.
- XXX: process-wide signals? */
- if (sig >= SIGRTMIN &&
- sigismember(&current->signal, sig)) {
- struct signal_queue *q, **pp;
- pp = &current->sigqueue;
- q = current->sigqueue;
- while (q) {
- if (q->info.si_signo != sig)
- pp = &q->next;
- else {
- if ((*pp = q->next) == NULL)
- current->sigqueue_tail = pp;
- kmem_cache_free(signal_queue_cachep, q);
- atomic_dec(&nr_queued_signals);
- }
- q = *pp;
- }
-
- }
- sigdelset(&current->signal, sig);
- recalc_sigpending(current);
+ spin_lock_irq(&current->sigmask_lock);
+ if (rm_sig_from_queue(sig, current))
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
}
}
- spin_unlock_irq(&current->sigmask_lock);
-
+ spin_unlock(&current->sig->siglock);
return 0;
}
@@ -1039,6 +1125,12 @@
return error;
}
+asmlinkage long
+sys_sigpending(old_sigset_t *set)
+{
+ return do_sigpending(set, sizeof(*set));
+}
+
#if !defined(__alpha__)
/* Alpha has its own versions with special arguments. */
@@ -1088,22 +1180,6 @@
}
error = 0;
out:
- return error;
-}
-
-asmlinkage long
-sys_sigpending(old_sigset_t *set)
-{
- int error;
- old_sigset_t pending;
-
- spin_lock_irq(&current->sigmask_lock);
- pending = current->blocked.sig[0] & current->signal.sig[0];
- spin_unlock_irq(&current->sigmask_lock);
-
- error = -EFAULT;
- if (!copy_to_user(set, &pending, sizeof(*set)))
- error = 0;
return error;
}
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)