author     Peter Zijlstra  2017-12-21 10:01:24 +0100
committer  Ingo Molnar     2018-03-09 07:59:16 +0100
commit     a22e47a4e3f5a9e50a827c5d94705ace3b1eac0b (patch)
tree       907cfb61b5233db6935d234e9de615796d137acf  /kernel/sched
parent     cpufreq/schedutil: Rewrite CPUFREQ_RT support (diff)
sched/core: Convert nohz_flags to atomic_t
Using atomic_t allows us to use the more flexible bitops provided there. Also, it's smaller.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
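For illustration only (not part of the changelog), a minimal sketch of the API difference the changelog alludes to: the old bitops address one bit at a time, by bit number, in an unsigned long bitmap, while the atomic_t ops take a mask, so several flags can be set, cleared, or tested in a single operation. The FLAG_* names below are hypothetical:

	/*
	 * Illustrative kernel-style sketch, not from the patch;
	 * FLAG_A/FLAG_B are made-up example flags.
	 */
	#include <linux/atomic.h>
	#include <linux/bitops.h>

	#define FLAG_A_BIT	0
	#define FLAG_B_BIT	1
	#define FLAG_A		BIT(FLAG_A_BIT)
	#define FLAG_B		BIT(FLAG_B_BIT)

	static unsigned long old_flags;	/* bit-number API */
	static atomic_t new_flags;	/* mask API; also 4 bytes vs 8 on 64-bit */

	static void compare(void)
	{
		/* Old style: one bit per call, addressed by bit number. */
		if (!test_and_set_bit(FLAG_A_BIT, &old_flags))
			clear_bit(FLAG_B_BIT, &old_flags);

		/* New style: masks compose; two flags cleared in one atomic op. */
		if (!(atomic_fetch_or(FLAG_A, &new_flags) & FLAG_A))
			atomic_andnot(FLAG_A | FLAG_B, &new_flags);
	}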
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c   |  6
-rw-r--r--  kernel/sched/fair.c   | 23
-rw-r--r--  kernel/sched/sched.h  | 11
3 files changed, 24 insertions(+), 16 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4f5eeb63ab5b..96ad1c003d74 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -583,7 +583,7 @@ static inline bool got_nohz_idle_kick(void)
 {
 	int cpu = smp_processor_id();
 
-	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+	if (!(atomic_read(nohz_flags(cpu)) & NOHZ_BALANCE_KICK))
 		return false;
 
 	if (idle_cpu(cpu) && !need_resched())
@@ -593,7 +593,7 @@ static inline bool got_nohz_idle_kick(void)
 	 * We can't run Idle Load Balance on this CPU for this time so we
 	 * cancel it and clear NOHZ_BALANCE_KICK
 	 */
-	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+	atomic_andnot(NOHZ_BALANCE_KICK, nohz_flags(cpu));
 	return false;
 }
 
@@ -6074,7 +6074,7 @@ void __init sched_init(void)
 		rq_attach_root(rq, &def_root_domain);
 #ifdef CONFIG_NO_HZ_COMMON
 		rq->last_load_update_tick = jiffies;
-		rq->nohz_flags = 0;
+		atomic_set(&rq->nohz_flags, 0);
 #endif
 #endif /* CONFIG_SMP */
 		hrtick_rq_init(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 097db34d5ba2..5d150478dd58 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9072,6 +9072,7 @@ static inline int find_new_ilb(void)
  */
 static void nohz_balancer_kick(void)
 {
+	unsigned int flags;
 	int ilb_cpu;
 
 	nohz.next_balance++;
@@ -9081,7 +9082,8 @@ static void nohz_balancer_kick(void)
 	if (ilb_cpu >= nr_cpu_ids)
 		return;
 
-	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
+	flags = atomic_fetch_or(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu));
+	if (flags & NOHZ_BALANCE_KICK)
 		return;
 	/*
 	 * Use smp_send_reschedule() instead of resched_cpu().
@@ -9095,7 +9097,9 @@ static void nohz_balancer_kick(void)
 
 void nohz_balance_exit_idle(unsigned int cpu)
 {
-	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+	unsigned int flags = atomic_read(nohz_flags(cpu));
+
+	if (unlikely(flags & NOHZ_TICK_STOPPED)) {
 		/*
 		 * Completely isolated CPUs don't ever set, so we must test.
 		 */
@@ -9103,7 +9107,8 @@ void nohz_balance_exit_idle(unsigned int cpu)
 			cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
 			atomic_dec(&nohz.nr_cpus);
 		}
-		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+
+		atomic_andnot(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 	}
 }
@@ -9155,7 +9160,7 @@ void nohz_balance_enter_idle(int cpu)
 	if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
 		return;
 
-	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
+	if (atomic_read(nohz_flags(cpu)) & NOHZ_TICK_STOPPED)
 		return;
 
 	/* If we're a completely isolated CPU, we don't play: */
@@ -9164,7 +9169,7 @@ void nohz_balance_enter_idle(int cpu)
 
 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
 	atomic_inc(&nohz.nr_cpus);
-	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+	atomic_or(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 }
 #endif
@@ -9302,8 +9307,10 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
 
-	if (idle != CPU_IDLE ||
-	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
+	if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_BALANCE_KICK))
+		return;
+
+	if (idle != CPU_IDLE)
 		goto end;
 
 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
@@ -9349,7 +9356,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 	if (likely(update_next_balance))
 		nohz.next_balance = next_balance;
 end:
-	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
+	atomic_andnot(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
 }
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 23ba4dd76ac4..d98e761b962f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -763,7 +763,7 @@ struct rq {
 #ifdef CONFIG_SMP
 	unsigned long last_load_update_tick;
 #endif /* CONFIG_SMP */
-	unsigned long nohz_flags;
+	atomic_t nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */
 
 	/* capture load from *all* tasks on this CPU: */
@@ -2034,10 +2034,11 @@ extern void cfs_bandwidth_usage_inc(void);
 extern void cfs_bandwidth_usage_dec(void);
 
 #ifdef CONFIG_NO_HZ_COMMON
-enum rq_nohz_flag_bits {
-	NOHZ_TICK_STOPPED,
-	NOHZ_BALANCE_KICK,
-};
+#define NOHZ_TICK_STOPPED_BIT	0
+#define NOHZ_BALANCE_KICK_BIT	1
+
+#define NOHZ_TICK_STOPPED	BIT(NOHZ_TICK_STOPPED_BIT)
+#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
 
 #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
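A hedged aside, not from the patch: because NOHZ_TICK_STOPPED and NOHZ_BALANCE_KICK are now masks rather than enum bit numbers, a caller could act on both flags in a single atomic operation, e.g.:

	/* Hypothetical use of the new mask-style flags. */
	if (atomic_read(nohz_flags(cpu)) & (NOHZ_TICK_STOPPED | NOHZ_BALANCE_KICK))
		atomic_andnot(NOHZ_TICK_STOPPED | NOHZ_BALANCE_KICK, nohz_flags(cpu));

The old bit-number API would have needed one test_bit()/clear_bit() pair per flag; this composability is the "more flexible bitops" the changelog refers to.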