Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5530ed211f72..ba053d88c8c6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -223,6 +223,7 @@ struct rq {
	unsigned long raw_weighted_load;
#ifdef CONFIG_SMP
	unsigned long cpu_load[3];
+	unsigned char idle_at_tick;
#endif
	unsigned long long nr_switches;
@@ -2943,12 +2944,7 @@ static void run_rebalance_domains(struct softirq_action *h)
	struct rq *this_rq = cpu_rq(this_cpu);
	unsigned long interval;
	struct sched_domain *sd;
-	/*
-	 * We are idle if there are no processes running. This
-	 * is valid even if we are the idle process (SMT).
-	 */
-	enum idle_type idle = !this_rq->nr_running ?
-				SCHED_IDLE : NOT_IDLE;
+	enum idle_type idle = this_rq->idle_at_tick ? SCHED_IDLE : NOT_IDLE;
	/* Earliest time when we have to call run_rebalance_domains again */
	unsigned long next_balance = jiffies + 60*HZ;
@@ -3218,14 +3214,16 @@ void scheduler_tick(void)
	unsigned long long now = sched_clock();
	struct task_struct *p = current;
	int cpu = smp_processor_id();
+	int idle_at_tick = idle_cpu(cpu);
	struct rq *rq = cpu_rq(cpu);
	update_cpu_clock(p, rq, now);
-	if (p != rq->idle)
+	if (!idle_at_tick)
		task_running_tick(rq, p);
#ifdef CONFIG_SMP
	update_load(rq);
+	rq->idle_at_tick = idle_at_tick;
	if (time_after_eq(jiffies, rq->next_balance))
		raise_softirq(SCHED_SOFTIRQ);
#endif
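
The pattern in this diff is to sample the CPU's idle state once, at the moment scheduler_tick() runs, and stash it in the runqueue (rq->idle_at_tick) so that run_rebalance_domains(), which executes later in softirq context, can make its SCHED_IDLE/NOT_IDLE decision from that snapshot rather than re-reading nr_running when the deferred work happens to run (by then, for example, a task may already have woken up). Below is a minimal userspace C sketch of the same snapshot-at-tick idea; struct fake_rq, tick() and rebalance_softirq() are hypothetical stand-ins for illustration, not kernel APIs.

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-in for struct rq: remembers the idle state
	 * observed the last time the periodic tick ran. */
	struct fake_rq {
		unsigned int nr_running;
		bool idle_at_tick;
	};

	/* Runs at every timer tick: snapshot whether the CPU is idle now,
	 * mirroring "rq->idle_at_tick = idle_cpu(cpu)" in scheduler_tick(). */
	static void tick(struct fake_rq *rq)
	{
		rq->idle_at_tick = (rq->nr_running == 0);
	}

	/* Runs later (softirq context in the kernel): pick the balancing
	 * mode from the tick-time snapshot, not from the state at the
	 * moment this deferred work executes. */
	static void rebalance_softirq(const struct fake_rq *rq)
	{
		const char *mode = rq->idle_at_tick ? "SCHED_IDLE" : "NOT_IDLE";
		printf("balancing as %s\n", mode);
	}

	int main(void)
	{
		struct fake_rq rq = { .nr_running = 0 };

		tick(&rq);              /* CPU was idle when the tick fired */
		rq.nr_running = 1;      /* a task woke up before the softirq ran */
		rebalance_softirq(&rq); /* still balances as SCHED_IDLE */
		return 0;
	}

As in the diff, the deferred handler trusts the tick-time observation instead of re-deriving idleness from the runqueue length at softirq time, which keeps the two paths from disagreeing about what the tick saw.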