path: root/kernel/sched/core.c
author     Thomas Gleixner    2016-03-10 12:54:13 +0100
committer  Thomas Gleixner    2016-05-06 14:58:24 +0200
commit     40190a78f85fec29f0fdd21f6b4415712085711e (patch)
tree       361bbb6d81e35ac5bd4783926aabf7394de85dd4 /kernel/sched/core.c
parent     sched: Move sched_domains_numa_masks_clear() to DOWN_PREPARE (diff)
sched/hotplug: Convert cpu_[in]active notifiers to state machine
Now that we reduced everything into single notifiers, it's simple to move them into the hotplug state machine space.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: rt@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
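Note on the new callback contract (context for the diff below): sched_cpu_activate() and sched_cpu_deactivate() are plain hotplug-state callbacks of the form int (*cb)(unsigned int cpu). Returning 0 lets the CPU transition proceed, while a negative errno from the deactivate path aborts and rolls back the CPU-down operation; this replaces the NOTIFY_OK / NOTIFY_DONE / notifier_from_errno() conventions of the removed notifiers. The callbacks are hooked into the hotplug core's state table elsewhere in this patch (outside this diffstat, which is limited to kernel/sched/core.c). As an illustration of the callback shape only, not of this patch's actual wiring, a sketch using a dynamically allocated state via cpuhp_setup_state() might look like this in kernels that provide that interface (the example_* names are invented for the sketch):

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/printk.h>

/*
 * Illustrative callbacks with the same shape as sched_cpu_activate() /
 * sched_cpu_deactivate(): the hotplug core passes the CPU number and
 * expects 0 on success or a negative errno to abort the transition.
 */
static int example_cpu_online(unsigned int cpu)
{
        pr_info("example: CPU%u coming online\n", cpu);
        return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
        pr_info("example: CPU%u going offline\n", cpu);
        return 0;       /* a negative errno here would veto the CPU-down */
}

static int __init example_hp_init(void)
{
        int ret;

        /*
         * CPUHP_AP_ONLINE_DYN requests a dynamically allocated online
         * state; on success the allocated state number (>= 0) is returned,
         * so only negative values are errors here.
         */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
                                example_cpu_online, example_cpu_offline);
        return ret < 0 ? ret : 0;
}
late_initcall(example_hp_init);

Unlike a bare notifier registration, cpuhp_setup_state() (as opposed to the *_nocalls variant) also invokes the startup callback on CPUs that are already online at registration time.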
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c | 67
1 file changed, 21 insertions(+), 46 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 541f9ab8ce4f..73bcd937d436 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6634,9 +6634,6 @@ static void sched_domains_numa_masks_set(unsigned int cpu)
         int node = cpu_to_node(cpu);
         int i, j;
 
-        if (!sched_smp_initialized)
-                return;
-
         for (i = 0; i < sched_domains_numa_levels; i++) {
                 for (j = 0; j < nr_node_ids; j++) {
                         if (node_distance(j, node) <= sched_domains_numa_distance[i])
@@ -6649,9 +6646,6 @@ static void sched_domains_numa_masks_clear(unsigned int cpu)
 {
         int i, j;
 
-        if (!sched_smp_initialized)
-                return;
-
         for (i = 0; i < sched_domains_numa_levels; i++) {
                 for (j = 0; j < nr_node_ids; j++)
                         cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
@@ -7051,12 +7045,9 @@ static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
  * If we come here as part of a suspend/resume, don't touch cpusets because we
  * want to restore it back to its original state upon resume anyway.
  */
-static void cpuset_cpu_active(bool frozen)
+static void cpuset_cpu_active(void)
 {
-        if (!sched_smp_initialized)
-                return;
-
-        if (frozen) {
+        if (cpuhp_tasks_frozen) {
                 /*
                  * num_cpus_frozen tracks how many CPUs are involved in suspend
                  * resume sequence. As long as this is not the last online
@@ -7077,17 +7068,14 @@ static void cpuset_cpu_active(bool frozen)
         cpuset_update_active_cpus(true);
 }
 
-static int cpuset_cpu_inactive(unsigned int cpu, bool frozen)
+static int cpuset_cpu_inactive(unsigned int cpu)
 {
         unsigned long flags;
         struct dl_bw *dl_b;
         bool overflow;
         int cpus;
 
-        if (!sched_smp_initialized)
-                return 0;
-
-        if (!frozen) {
+        if (!cpuhp_tasks_frozen) {
                 rcu_read_lock_sched();
                 dl_b = dl_bw_of(cpu);
 
@@ -7108,42 +7096,33 @@ static int cpuset_cpu_inactive(unsigned int cpu, bool frozen)
         return 0;
 }
 
-static int sched_cpu_active(struct notifier_block *nfb, unsigned long action,
-                            void *hcpu)
+int sched_cpu_activate(unsigned int cpu)
 {
-        unsigned int cpu = (unsigned long)hcpu;
+        set_cpu_active(cpu, true);
 
-        switch (action & ~CPU_TASKS_FROZEN) {
-        case CPU_DOWN_FAILED:
-        case CPU_ONLINE:
-                set_cpu_active(cpu, true);
+        if (sched_smp_initialized) {
                 sched_domains_numa_masks_set(cpu);
-                cpuset_cpu_active(action & CPU_TASKS_FROZEN);
-                return NOTIFY_OK;
-        default:
-                return NOTIFY_DONE;
+                cpuset_cpu_active();
         }
+        return 0;
 }
 
-static int sched_cpu_inactive(struct notifier_block *nfb,
-                              unsigned long action, void *hcpu)
+int sched_cpu_deactivate(unsigned int cpu)
 {
-        unsigned int cpu = (unsigned long)hcpu;
         int ret;
 
-        switch (action & ~CPU_TASKS_FROZEN) {
-        case CPU_DOWN_PREPARE:
-                set_cpu_active(cpu, false);
-                ret = cpuset_cpu_inactive(cpu, action & CPU_TASKS_FROZEN);
-                if (ret) {
-                        set_cpu_active(cpu, true);
-                        return notifier_from_errno(ret);
-                }
-                sched_domains_numa_masks_clear(cpu);
-                return NOTIFY_OK;
-        default:
-                return NOTIFY_DONE;
+        set_cpu_active(cpu, false);
+
+        if (!sched_smp_initialized)
+                return 0;
+
+        ret = cpuset_cpu_inactive(cpu);
+        if (ret) {
+                set_cpu_active(cpu, true);
+                return ret;
         }
+        sched_domains_numa_masks_clear(cpu);
+        return 0;
 }
 
 int sched_cpu_starting(unsigned int cpu)
@@ -7197,10 +7176,6 @@ static int __init migration_init(void)
         migration_call(&migration_notifier, CPU_ONLINE, cpu);
         register_cpu_notifier(&migration_notifier);
 
-        /* Register cpu active notifiers */
-        cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
-        cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
-
         return 0;
 }
 early_initcall(migration_init);