Re: Scheduler Bug (set_cpus_allowed)

Robert Love (rml@tech9.net)
10 Jun 2002 16:28:06 -0700


On Mon, 2002-06-10 at 16:24, Ingo Molnar wrote:

> sure, agreed. I've added it to my tree.

What do you think of this?

No more explicit preempt disables: interrupts now go off before the runqueue
lookup, so we cannot be preempted there anyway, and the explicit
preempt_disable()/preempt_enable() pairs become redundant.
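
In sketch form, the pattern change (simplified, not the full sched.c
context):

	/* old: disable preemption explicitly around the lookup */
	preempt_disable();
	rq = task_rq(p);
	spin_lock_irqsave(&rq->lock, *flags);
	...
	spin_unlock_irqrestore(&rq->lock, *flags);
	preempt_enable();

	/* new: irqs off first; irqs off already implies no preemption */
	local_irq_save(*flags);
	rq = task_rq(p);
	spin_lock(&rq->lock);
	...
	spin_unlock(&rq->lock);
	local_irq_restore(*flags);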

Robert Love

diff -urN linux-2.5.21/kernel/sched.c linux/kernel/sched.c
--- linux-2.5.21/kernel/sched.c	Sat Jun  8 22:28:13 2002
+++ linux/kernel/sched.c	Sun Jun  9 13:01:32 2002
@@ -153,17 +153,22 @@
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define rt_task(p)		((p)->prio < MAX_RT_PRIO)
 
+/*
+ * task_rq_lock - lock the runqueue a given task resides on and disable
+ * interrupts. Note the ordering: we can safely lookup the task_rq without
+ * explicitly disabling preemption.
+ */
 static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
 {
 	struct runqueue *rq;
 
 repeat_lock_task:
-	preempt_disable();
+	local_irq_save(*flags);
 	rq = task_rq(p);
-	spin_lock_irqsave(&rq->lock, *flags);
+	spin_lock(&rq->lock);
 	if (unlikely(rq != task_rq(p))) {
-		spin_unlock_irqrestore(&rq->lock, *flags);
-		preempt_enable();
+		spin_unlock(&rq->lock);
+		local_irq_restore(*flags);
 		goto repeat_lock_task;
 	}
 	return rq;
@@ -171,8 +176,25 @@
 
 static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
 {
-	spin_unlock_irqrestore(&rq->lock, *flags);
-	preempt_enable();
+	spin_unlock(&rq->lock);
+	local_irq_restore(*flags);
+}
+
+/*
+ * rq_lock - lock a given runqueue and disable interrupts.
+ */
+static inline runqueue_t *rq_lock(runqueue_t *rq)
+{
+	local_irq_disable();
+	rq = this_rq();
+	spin_lock(&rq->lock);
+	return rq;
+}
+
+static inline void rq_unlock(runqueue_t *rq)
+{
+	spin_unlock(&rq->lock);
+	local_irq_enable();
 }
 
 /*
@@ -353,9 +375,7 @@
 {
 	runqueue_t *rq;
 
-	preempt_disable();
-	rq = this_rq();
-	spin_lock_irq(&rq->lock);
+	rq = rq_lock(rq);
 
 	p->state = TASK_RUNNING;
 	if (!rt_task(p)) {
@@ -371,8 +391,7 @@
 	p->thread_info->cpu = smp_processor_id();
 	activate_task(p, rq);
 
-	spin_unlock_irq(&rq->lock);
-	preempt_enable();
+	rq_unlock(rq);
 }
 
 /*
@@ -1342,8 +1361,7 @@
 	runqueue_t *rq;
 	prio_array_t *array;
 
-	preempt_disable();
-	rq = this_rq();
+	rq = rq_lock(rq);
 
 	/*
 	 * Decrease the yielding task's priority by one, to avoid
@@ -1353,7 +1371,6 @@
 	 * If priority is already MAX_PRIO-1 then we still
 	 * roundrobin the task within the runlist.
 	 */
-	spin_lock_irq(&rq->lock);
 	array = current->array;
 	/*
 	 * If the task has reached maximum priority (or is a RT task)
@@ -1371,7 +1388,6 @@
 		__set_bit(current->prio, array->bitmap);
 	}
 	spin_unlock(&rq->lock);
-	preempt_enable_no_resched();
 
 	schedule();
 
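
For reference, the retry in task_rq_lock() is the usual lock-then-revalidate
idiom: the task can migrate to another runqueue at any point before we hold
that runqueue's lock, so we lock whichever queue we observed and re-check.
A minimal userspace sketch of the same idea (pthread mutexes and made-up
types, purely illustrative, not the kernel code):

	#include <pthread.h>
	#include <stdatomic.h>

	/* hypothetical stand-ins, not the kernel's types */
	struct runqueue {
		pthread_mutex_t lock;
	};

	struct task {
		/* may change until the owning queue's lock is held */
		_Atomic(struct runqueue *) rq;
	};

	static struct runqueue *task_rq_lock_sketch(struct task *p)
	{
		struct runqueue *rq;

		for (;;) {
			rq = atomic_load(&p->rq);	/* racy lookup */
			pthread_mutex_lock(&rq->lock);
			if (rq == atomic_load(&p->rq))
				return rq;	/* stable while locked */
			/* task migrated between lookup and lock: retry */
			pthread_mutex_unlock(&rq->lock);
		}
	}

This only works because of the invariant the kernel relies on too: a task's
runqueue pointer changes only while the old queue's lock is held, so once we
hold rq->lock and the pointer still matches, it cannot change under us.
rq_lock() needs no such retry: with local interrupts disabled we cannot be
preempted or migrated, so this_rq() stays correct until rq_unlock().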