author     Peter Zijlstra          2017-02-21 14:23:38 +0100
committer  Ingo Molnar             2017-03-16 09:46:23 +0100
commit     7134b3e941613dcb959b4b178cc4a35e45cbbc0d (patch)
tree       e7f3c680ae086a6631c003afd41599ee11a1a2ff /kernel/sched/core.c
parent     sched/core: Add {EN,DE}QUEUE_NOCLOCK flags (diff)
sched/core: Add ENQUEUE_NOCLOCK to ENQUEUE_RESTORE
In all cases, ENQUEUE_RESTORE should also have ENQUEUE_NOCLOCK because
DEQUEUE_SAVE will have done an update_rq_clock().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
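For readers unfamiliar with the flag pairing the message refers to, the sketch below models it as plain userspace C. It is an illustration only, not the kernel's code: the flag values, the struct rq layout, and the function bodies are simplified assumptions, while the real definitions live in kernel/sched/core.c and kernel/sched/sched.h.

#include <stdio.h>

/*
 * Illustrative stand-ins for the scheduler's enqueue/dequeue flags.
 * Values are assumptions for this sketch, not the kernel's.
 */
#define DEQUEUE_SAVE    0x02
#define DEQUEUE_NOCLOCK 0x08
#define ENQUEUE_RESTORE 0x02
#define ENQUEUE_NOCLOCK 0x08

struct rq { unsigned long clock; };

static void update_rq_clock(struct rq *rq)
{
        rq->clock++;    /* stand-in for reading the real clock source */
        printf("rq clock updated -> %lu\n", rq->clock);
}

static void dequeue_task(struct rq *rq, int flags)
{
        if (!(flags & DEQUEUE_NOCLOCK))
                update_rq_clock(rq);
        /* ... take the task off the runqueue ... */
}

static void enqueue_task(struct rq *rq, int flags)
{
        if (!(flags & ENQUEUE_NOCLOCK))
                update_rq_clock(rq);
        /* ... put the task back on the runqueue ... */
}

int main(void)
{
        struct rq rq = { 0 };

        /* The save side refreshes the clock once ... */
        dequeue_task(&rq, DEQUEUE_SAVE);
        /* ... the task's priority / nice / NUMA node / group changes here ... */
        /* ... so the restore side can skip the redundant second update. */
        enqueue_task(&rq, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
        return 0;
}

Running it prints a single clock update per save/restore pair, which is the point of the patch: the dequeue side has already refreshed the runqueue clock, so every ENQUEUE_RESTORE below also passes ENQUEUE_NOCLOCK.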
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ce363bdc7e6b..247d0a0c319e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1070,7 +1070,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 	p->sched_class->set_cpus_allowed(p, new_mask);
 
 	if (queued)
-		enqueue_task(rq, p, ENQUEUE_RESTORE);
+		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 	if (running)
 		set_curr_task(rq, p);
 }
@@ -3815,7 +3815,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	delta = p->prio - old_prio;
 
 	if (queued) {
-		enqueue_task(rq, p, ENQUEUE_RESTORE);
+		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 		/*
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU:
@@ -5517,7 +5517,7 @@ void sched_setnuma(struct task_struct *p, int nid)
 	p->numa_preferred_nid = nid;
 
 	if (queued)
-		enqueue_task(rq, p, ENQUEUE_RESTORE);
+		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 	if (running)
 		set_curr_task(rq, p);
 	task_rq_unlock(rq, p, &rf);
@@ -6431,7 +6431,7 @@ void sched_move_task(struct task_struct *tsk)
 	sched_change_group(tsk, TASK_MOVE_GROUP);
 
 	if (queued)
-		enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);
+		enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE | ENQUEUE_NOCLOCK);
 	if (running)
 		set_curr_task(rq, tsk);