path: root/kernel/sched/core.c
author	Paul E. McKenney	2015-10-07 18:10:48 +0200
committer	Paul E. McKenney	2015-12-04 21:27:31 +0100
commit	46a5d164db53ba6066b11889abb7fa6bddbe5cf7 (patch)
tree	37e726508f20e8dd951f04dd3266be912abc5b44	/kernel/sched/core.c
parent	rcu: Avoid tick_nohz_active checks on NOCBs CPUs (diff)
rcu: Stop disabling interrupts in scheduler fastpaths
We need the scheduler's fastpaths to be, well, fast, and unnecessarily disabling and re-enabling interrupts is not necessarily consistent with this goal. Especially given that there are regions of the scheduler that already have interrupts disabled. This commit therefore moves the call to rcu_note_context_switch() to one of the interrupts-disabled regions of the scheduler, and removes the now-redundant disabling and re-enabling of interrupts from rcu_note_context_switch() and the functions it calls.

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
[ paulmck: Shift rcu_note_context_switch() to avoid deadlock, as suggested by Peter Zijlstra. ]
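To make the shape of the change easier to see, below is a minimal userspace model of the pattern, not kernel code: the function names mirror the kernel's APIs, but every body here is a stub, "interrupts" are a thread-local flag, and the runqueue lock is a pthread spinlock. Build with `cc -pthread sketch.c`.

/* Userspace sketch of consolidating irq toggles (NOT the kernel code). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool irqs_enabled = true;
static pthread_spinlock_t rq_lock;

static void local_irq_disable(void) { irqs_enabled = false; }
static void local_irq_enable(void)  { irqs_enabled = true;  }

/* Old shape: the RCU hook toggled interrupts on its own ... */
static void rcu_note_context_switch_old(void)
{
	local_irq_disable();	/* extra toggle on every context switch */
	/* ... record that this CPU passed through a context switch ... */
	local_irq_enable();
}

/* ... and the scheduler toggled them again to take the runqueue lock. */
static void schedule_old(void)
{
	rcu_note_context_switch_old();
	local_irq_disable();	/* raw_spin_lock_irq() == disable irqs + lock */
	pthread_spin_lock(&rq_lock);
	/* ... pick the next task ... */
	pthread_spin_unlock(&rq_lock);
	local_irq_enable();
}

/* New shape: one irq-off region covers both the RCU hook and the lock. */
static void rcu_note_context_switch_new(void)
{
	/* caller guarantees irqs are already off; no toggling needed here */
}

static void schedule_new(void)
{
	local_irq_disable();
	rcu_note_context_switch_new();
	pthread_spin_lock(&rq_lock);	/* plain raw_spin_lock() suffices */
	/* ... pick the next task ... */
	pthread_spin_unlock(&rq_lock);
	local_irq_enable();
}

int main(void)
{
	pthread_spin_init(&rq_lock, PTHREAD_PROCESS_PRIVATE);
	schedule_old();		/* two irq toggles per context switch */
	schedule_new();		/* one irq toggle per context switch */
	printf("irqs enabled again: %d\n", irqs_enabled);
	return 0;
}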
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4d568ac9319e..ec72de234feb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3085,7 +3085,6 @@ static void __sched notrace __schedule(bool preempt)
 
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
-	rcu_note_context_switch();
 	prev = rq->curr;
 
 	/*
@@ -3104,13 +3103,16 @@ static void __sched notrace __schedule(bool preempt)
 	if (sched_feat(HRTICK))
 		hrtick_clear(rq);
 
+	local_irq_disable();
+	rcu_note_context_switch();
+
 	/*
 	 * Make sure that signal_pending_state()->signal_pending() below
 	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
 	 * done by the caller to avoid the race with signal_wake_up().
 	 */
 	smp_mb__before_spinlock();
-	raw_spin_lock_irq(&rq->lock);
+	raw_spin_lock(&rq->lock);
 	lockdep_pin_lock(&rq->lock);
 	rq->clock_skip_update <<= 1;	/* promote REQ to ACT */
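The other half of the commit, removing "the now-redundant disabling and re-enabling of interrupts from rcu_note_context_switch() and the functions it calls", is outside this file. As an illustration only (again a userspace sketch under the same stand-in conventions, not the actual tree-RCU code), the RCU-side simplification amounts to dropping defensive save/restore once every caller runs with interrupts off:

/* Sketch of dropping redundant irq save/restore (NOT the kernel code). */
#include <assert.h>
#include <stdbool.h>

static bool irqs_disabled_flag = false;	/* stand-in for irqs_disabled() */

/* Before: defensive save/restore on entry, needed when callers might
 * arrive with interrupts still enabled. */
static void note_quiescent_state_old(void)
{
	bool was_disabled = irqs_disabled_flag;
	irqs_disabled_flag = true;	/* local_irq_save() analogue */
	/* ... record that this CPU passed through a quiescent state ... */
	irqs_disabled_flag = was_disabled;	/* local_irq_restore() analogue */
}

/* After: rely on the caller's irq-off section; in the kernel this would
 * be a WARN_ON(!irqs_disabled())-style check rather than assert(). */
static void note_quiescent_state_new(void)
{
	assert(irqs_disabled_flag);
	/* ... record the quiescent state, no irq bookkeeping ... */
}

int main(void)
{
	irqs_disabled_flag = true;	/* caller disabled "interrupts" */
	note_quiescent_state_old();
	note_quiescent_state_new();
	return 0;
}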