patch-2.4.0-test9 linux/kernel/sched.c
- Lines: 151
- Date: Mon Oct 2 11:45:01 2000
- Orig file: v2.4.0-test8/linux/kernel/sched.c
- Orig date: Fri Sep 1 14:05:25 2000
diff -u --recursive --new-file v2.4.0-test8/linux/kernel/sched.c linux/kernel/sched.c
@@ -141,61 +141,54 @@
 	int weight;
 
 	/*
-	 * Realtime process, select the first one on the
-	 * runqueue (taking priorities within processes
-	 * into account).
+	 * select the current process after every other
+	 * runnable process, but before the idle thread.
+	 * Also, dont trigger a counter recalculation.
 	 */
-	if (p->policy != SCHED_OTHER) {
-		weight = 1000 + p->rt_priority;
+	weight = -1;
+	if (p->policy & SCHED_YIELD)
 		goto out;
-	}
 
 	/*
-	 * Give the process a first-approximation goodness value
-	 * according to the number of clock-ticks it has left.
-	 *
-	 * Don't do any other calculations if the time slice is
-	 * over..
+	 * Non-RT process - normal case first.
 	 */
-	weight = p->counter;
-	if (!weight)
-		goto out;
+	if (p->policy == SCHED_OTHER) {
+		/*
+		 * Give the process a first-approximation goodness value
+		 * according to the number of clock-ticks it has left.
+		 *
+		 * Don't do any other calculations if the time slice is
+		 * over..
+		 */
+		weight = p->counter;
+		if (!weight)
+			goto out;
 
 #ifdef CONFIG_SMP
-	/* Give a largish advantage to the same processor... */
-	/* (this is equivalent to penalizing other processors) */
-	if (p->processor == this_cpu)
-		weight += PROC_CHANGE_PENALTY;
+		/* Give a largish advantage to the same processor... */
+		/* (this is equivalent to penalizing other processors) */
+		if (p->processor == this_cpu)
+			weight += PROC_CHANGE_PENALTY;
 #endif
 
-	/* .. and a slight advantage to the current MM */
-	if (p->mm == this_mm || !p->mm)
-		weight += 1;
-	weight += 20 - p->nice;
+		/* .. and a slight advantage to the current MM */
+		if (p->mm == this_mm || !p->mm)
+			weight += 1;
+		weight += 20 - p->nice;
+		goto out;
+	}
 
+	/*
+	 * Realtime process, select the first one on the
+	 * runqueue (taking priorities within processes
+	 * into account).
+	 */
+	weight = 1000 + p->rt_priority;
 out:
 	return weight;
 }
 
 /*
- * subtle. We want to discard a yielded process only if it's being
- * considered for a reschedule. Wakeup-time 'queries' of the scheduling
- * state do not count. Another optimization we do: sched_yield()-ed
- * processes are runnable (and thus will be considered for scheduling)
- * right when they are calling schedule(). So the only place we need
- * to care about SCHED_YIELD is when we calculate the previous process'
- * goodness ...
- */
-static inline int prev_goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
-{
-	if (p->policy & SCHED_YIELD) {
-		p->policy &= ~SCHED_YIELD;
-		return 0;
-	}
-	return goodness(p, this_cpu, this_mm);
-}
-
-/*
  * the 'goodness value' of replacing a process on a given CPU.
  * positive value means 'replace', zero or negative means 'dont'.
  */
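Taken on its own, the reworked goodness() above orders candidates as: yielded task (-1, behind every other runnable task but ahead of the idle thread), SCHED_OTHER with an exhausted slice (0), SCHED_OTHER with ticks left (counter plus the CPU/MM bonuses plus 20 - nice), and realtime (1000 + rt_priority). The ordering can be checked with a small user-space model. This is a hypothetical sketch, not kernel code: the SCHED_* values mirror the 2.4 <linux/sched.h> definitions, struct fake_task and model_goodness() are made-up names, and the SMP and same-MM bonuses are left out.

#include <stdio.h>

/* Hypothetical stand-alone model of the reworked goodness() above;
 * the SMP and same-MM bonuses are ignored. */
#define SCHED_OTHER	0
#define SCHED_FIFO	1
#define SCHED_YIELD	0x10

struct fake_task {
	int policy, counter, nice, rt_priority;
};

static int model_goodness(const struct fake_task *p)
{
	if (p->policy & SCHED_YIELD)
		return -1;			/* behind every runnable task, ahead of idle */
	if (p->policy == SCHED_OTHER) {
		if (!p->counter)
			return 0;		/* time slice used up */
		return p->counter + 20 - p->nice;
	}
	return 1000 + p->rt_priority;		/* realtime always wins */
}

int main(void)
{
	struct fake_task yielded = { SCHED_OTHER | SCHED_YIELD, 5, 0, 0 };
	struct fake_task expired = { SCHED_OTHER, 0, 0, 0 };
	struct fake_task normal  = { SCHED_OTHER, 5, 0, 0 };
	struct fake_task rt      = { SCHED_FIFO, 0, 0, 10 };

	/* prints: yielded=-1 expired=0 normal=25 rt=1010 */
	printf("yielded=%d expired=%d normal=%d rt=%d\n",
	       model_goodness(&yielded), model_goodness(&expired),
	       model_goodness(&normal), model_goodness(&rt));
	return 0;
}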
@@ -451,6 +444,7 @@
 static inline void __schedule_tail(struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
+	int yield;
 	unsigned long flags;
 
 	/*
@@ -462,6 +456,8 @@
 	 * cache.
 	 */
 	spin_lock_irqsave(&runqueue_lock, flags);
+	yield = prev->policy & SCHED_YIELD;
+	prev->policy &= ~SCHED_YIELD;
 	prev->has_cpu = 0;
 	if (prev->state == TASK_RUNNING)
 		goto running_again;
@@ -476,9 +472,11 @@
 	 * current process as well.)
 	 */
 running_again:
-	if (prev != idle_task(smp_processor_id()))
+	if ((prev != idle_task(smp_processor_id())) && !yield)
 		reschedule_idle(prev);
 	goto out_unlock;
+#else
+	prev->policy &= ~SCHED_YIELD;
 #endif /* CONFIG_SMP */
 }
 
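Read together, the three __schedule_tail() hunks above move the clearing of SCHED_YIELD out of prev_goodness() (deleted further up) and into the tail of the context switch. Roughly, the function now reads as below; this is assembled from the context and '+' lines of the hunks, with the unchanged body in between elided as "...":

static inline void __schedule_tail(struct task_struct *prev)
{
#ifdef CONFIG_SMP
	int yield;
	unsigned long flags;
	...
	spin_lock_irqsave(&runqueue_lock, flags);
	yield = prev->policy & SCHED_YIELD;	/* remember whether prev yielded ...    */
	prev->policy &= ~SCHED_YIELD;		/* ... and drop the flag under the lock */
	prev->has_cpu = 0;
	if (prev->state == TASK_RUNNING)
		goto running_again;
	...
running_again:
	if ((prev != idle_task(smp_processor_id())) && !yield)
		reschedule_idle(prev);		/* a yielded task is not pushed at an idle CPU */
	goto out_unlock;
#else
	prev->policy &= ~SCHED_YIELD;
#endif /* CONFIG_SMP */
}

With the flag cleared here rather than in prev_goodness(), the schedule() hunk below can rate the previous process with plain goodness().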
@@ -669,7 +667,7 @@
 	goto repeat_schedule;
 
 still_running:
-	c = prev_goodness(prev, this_cpu, prev->active_mm);
+	c = goodness(prev, this_cpu, prev->active_mm);
 	next = prev;
 	goto still_running_back;
 
@@ -1030,12 +1028,13 @@
 
 asmlinkage long sys_sched_yield(void)
 {
-	spin_lock_irq(&runqueue_lock);
+	/*
+	 * This process can only be rescheduled by us,
+	 * so this is safe without any locking.
+	 */
 	if (current->policy == SCHED_OTHER)
 		current->policy |= SCHED_YIELD;
 	current->need_resched = 1;
-	move_last_runqueue(current);
-	spin_unlock_irq(&runqueue_lock);
 	return 0;
 }
 
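For reference, sys_sched_yield() as it reads with this hunk applied (assembled from the context and '+' lines above):

asmlinkage long sys_sched_yield(void)
{
	/*
	 * This process can only be rescheduled by us,
	 * so this is safe without any locking.
	 */
	if (current->policy == SCHED_OTHER)
		current->policy |= SCHED_YIELD;
	current->need_resched = 1;
	return 0;
}

The runqueue lock and the move_last_runqueue() call are gone: demoting the yielding task is now done by goodness() returning -1 for it, and the flag is cleared again in __schedule_tail() once the task has actually been switched away from.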