patch-2.4.0-test8 linux/kernel/sched.c
- Lines: 124
- Date: Fri Sep 1 14:05:25 2000
- Orig file: v2.4.0-test7/linux/kernel/sched.c
- Orig date: Wed Aug 23 18:36:39 2000
diff -u --recursive --new-file v2.4.0-test7/linux/kernel/sched.c linux/kernel/sched.c
@@ -213,7 +213,9 @@
* This function must be inline as anything that saves and restores
* flags has to do so within the same register window on sparc (Anton)
*/
-static inline void reschedule_idle(struct task_struct * p, unsigned long flags)
+static FASTCALL(void reschedule_idle(struct task_struct * p));
+
+static void reschedule_idle(struct task_struct * p)
{
#ifdef CONFIG_SMP
int this_cpu = smp_processor_id();
@@ -284,7 +286,6 @@
goto preempt_now;
}
- spin_unlock_irqrestore(&runqueue_lock, flags);
return;
send_now_idle:
@@ -296,12 +297,10 @@
if ((tsk->processor != current->processor) && !tsk->need_resched)
smp_send_reschedule(tsk->processor);
tsk->need_resched = 1;
- spin_unlock_irqrestore(&runqueue_lock, flags);
return;
preempt_now:
tsk->need_resched = 1;
- spin_unlock_irqrestore(&runqueue_lock, flags);
/*
* the APIC stuff can go outside of the lock because
* it uses no task information, only CPU#.
@@ -316,7 +315,6 @@
tsk = cpu_curr(this_cpu);
if (preemption_goodness(tsk, p, this_cpu) > 1)
tsk->need_resched = 1;
- spin_unlock_irqrestore(&runqueue_lock, flags);
#endif
}
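
Note (sketch, not part of the patch): the hunks above drop the flags argument and the spin_unlock_irqrestore() on every exit path, so reschedule_idle() now runs entirely under a runqueue_lock held by its caller. The new FASTCALL() forward declaration asks for register argument passing on i386. Roughly what the macro expands to, from memory of the 2.4 headers rather than quoted from this tree:

	#ifdef __i386__
	#define FASTCALL(x)	x __attribute__((regparm(3)))	/* args in registers */
	#else
	#define FASTCALL(x)	x				/* no-op elsewhere */
	#endif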
@@ -365,9 +363,7 @@
if (task_on_runqueue(p))
goto out;
add_to_runqueue(p);
- reschedule_idle(p, flags); // spin_unlocks runqueue
-
- return;
+ reschedule_idle(p);
out:
spin_unlock_irqrestore(&runqueue_lock, flags);
}
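
Note (reconstruction, not part of the patch): since reschedule_idle() no longer drops the lock, wake_up_process() can fall through to the single unlock at out:. Assembled from the context lines above plus the unchanged lines this hunk does not show (treat those as an assumption), the function after the patch reads roughly:

	inline void wake_up_process(struct task_struct * p)
	{
		unsigned long flags;

		/* take the lock that reschedule_idle() now expects to be held */
		spin_lock_irqsave(&runqueue_lock, flags);
		p->state = TASK_RUNNING;
		if (task_on_runqueue(p))
			goto out;
		add_to_runqueue(p);
		reschedule_idle(p);
	out:
		spin_unlock_irqrestore(&runqueue_lock, flags);
	}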
@@ -455,17 +451,34 @@
static inline void __schedule_tail(struct task_struct *prev)
{
#ifdef CONFIG_SMP
- if ((prev->state == TASK_RUNNING) &&
- (prev != idle_task(smp_processor_id()))) {
- unsigned long flags;
-
- spin_lock_irqsave(&runqueue_lock, flags);
- prev->has_cpu = 0;
- reschedule_idle(prev, flags); // spin_unlocks runqueue
- } else {
- wmb();
- prev->has_cpu = 0;
- }
+ unsigned long flags;
+
+ /*
+ * fast path falls through. We have to take the runqueue lock
+ * unconditionally to make sure that the test of prev->state
+ * and setting has_cpu is atomic wrt. interrupts. It's not
+ * a big problem in the common case because we recently took
+ * the runqueue lock so it's likely to be in this processor's
+ * cache.
+ */
+ spin_lock_irqsave(&runqueue_lock, flags);
+ prev->has_cpu = 0;
+ if (prev->state == TASK_RUNNING)
+ goto running_again;
+out_unlock:
+ spin_unlock_irqrestore(&runqueue_lock, flags);
+ return;
+
+ /*
+ * Slow path - we 'push' the previous process and
+ * reschedule_idle() will attempt to find a new
+ * processor for it. (but it might preempt the
+ * current process as well.)
+ */
+running_again:
+ if (prev != idle_task(smp_processor_id()))
+ reschedule_idle(prev);
+ goto out_unlock;
#endif /* CONFIG_SMP */
}
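
Note (illustration, not part of the patch): the rewritten __schedule_tail() takes runqueue_lock unconditionally so that testing prev->state and clearing prev->has_cpu are atomic with respect to interrupts, which also lets the old unlocked wmb() path go away. Ignoring the fast-path/slow-path goto layout, the logic is equivalent to this straight-line sketch:

	static inline void __schedule_tail(struct task_struct *prev)
	{
	#ifdef CONFIG_SMP
		unsigned long flags;

		spin_lock_irqsave(&runqueue_lock, flags);
		prev->has_cpu = 0;		/* prev can now be taken by another CPU */
		if (prev->state == TASK_RUNNING &&
		    prev != idle_task(smp_processor_id()))
			reschedule_idle(prev);	/* push prev to an idle or preemptable CPU */
		spin_unlock_irqrestore(&runqueue_lock, flags);
	#endif /* CONFIG_SMP */
	}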
@@ -638,6 +651,9 @@
same_process:
reacquire_kernel_lock(current);
+ if (current->need_resched)
+ goto tq_scheduler_back;
+
return;
recalculate:
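
Note (sketch, not part of the patch): reacquire_kernel_lock() can spin with interrupts enabled, and an interrupt may set current->need_resched in the meantime; the added check jumps back into schedule() instead of returning with a pending reschedule. From memory, tq_scheduler_back sits near the top of schedule(), so the control flow is roughly:

	tq_scheduler_back:
		/* ... pick the next task and switch to it ... */
	same_process:
		reacquire_kernel_lock(current);	/* may spin with irqs enabled */
		if (current->need_resched)	/* set by an interrupt meanwhile? */
			goto tq_scheduler_back;	/* reschedule rather than return */
		return;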
@@ -1124,13 +1140,13 @@
printk("\n");
{
- struct signal_queue *q;
+ struct sigqueue *q;
char s[sizeof(sigset_t)*2+1], b[sizeof(sigset_t)*2+1];
- render_sigset_t(&p->signal, s);
+ render_sigset_t(&p->pending.signal, s);
render_sigset_t(&p->blocked, b);
printk(" sig: %d %s %s :", signal_pending(p), s, b);
- for (q = p->sigqueue; q ; q = q->next)
+ for (q = p->pending.head; q ; q = q->next)
printk(" %d", q->info.si_signo);
printk(" X\n");
}
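
Note (reconstruction, not part of the patch): this hunk follows the signal rework elsewhere in 2.4.0-test8: struct signal_queue becomes struct sigqueue, and the per-task signal set plus queue head move into p->pending. Judging from the fields used here (the real definitions live in the signal headers, not in this patch), the layout is roughly:

	struct sigqueue {
		struct sigqueue *next;	/* singly linked list walked above */
		siginfo_t info;		/* carries the si_signo printed above */
	};

	struct sigpending {
		struct sigqueue *head;	/* p->pending.head, start of the queue */
		struct sigqueue **tail;	/* for O(1) append */
		sigset_t signal;	/* p->pending.signal, rendered above */
	};

	/* task_struct then carries:  struct sigpending pending;  */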