author | Paolo Bonzini | 2019-07-11 15:14:16 +0200 |
---|---|---|
committer | Paolo Bonzini | 2019-07-11 15:14:16 +0200 |
commit | a45ff5994c9cde41af627c46abb9f32beae68943 (patch) | |
tree | b726cc506ed1b01484f183ab2679cdd618e1e9b1 /kernel/cgroup/cpuset.c | |
parent | Documentation: virtual: Add toctree hooks (diff) | |
parent | KVM: arm/arm64: Initialise host's MPIDRs by reading the actual register (diff) | |
Merge tag 'kvm-arm-for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm updates for 5.3
- Add support for chained PMU counters in guests
- Improve SError handling
- Handle Neoverse N1 erratum #1349291
- Allow side-channel mitigation status to be migrated
- Standardise most AArch64 system register accesses to msr_s/mrs_s
- Fix host MPIDR corruption on 32bit
Diffstat (limited to 'kernel/cgroup/cpuset.c')
-rw-r--r-- | kernel/cgroup/cpuset.c | 15 |
1 file changed, 14 insertions, 1 deletion
```diff
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 6a1942ed781c..515525ff1cfd 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -3254,10 +3254,23 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 	spin_unlock_irqrestore(&callback_lock, flags);
 }
 
+/**
+ * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
+ * @tsk: pointer to task_struct with which the scheduler is struggling
+ *
+ * Description: In the case that the scheduler cannot find an allowed cpu in
+ * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
+ * mode however, this value is the same as task_cs(tsk)->effective_cpus,
+ * which will not contain a sane cpumask during cases such as cpu hotplugging.
+ * This is the absolute last resort for the scheduler and it is only used if
+ * _every_ other avenue has been traveled.
+ **/
+
 void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 {
 	rcu_read_lock();
-	do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus);
+	do_set_cpus_allowed(tsk, is_in_v2_mode() ?
+		task_cs(tsk)->cpus_allowed : cpu_possible_mask);
 	rcu_read_unlock();
 
 	/*
```
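To make the behavioural change in the hunk above easier to follow outside the kernel tree, here is a minimal user-space C sketch of the same fallback choice. All types and helpers below (cpumask_t, struct cpuset, struct task, is_in_v2_mode(), cpus_allowed_fallback()) are simplified stand-ins for the kernel's cpumask/task_struct machinery, not the real implementations.

```c
/*
 * Minimal user-space sketch of the fallback choice made by the patch above.
 * Kernel types and helpers are replaced with simplified stand-ins purely
 * for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long cpumask_t;                     /* stand-in for struct cpumask */

static const cpumask_t cpu_possible_mask = 0xffUL;   /* pretend 8 possible CPUs */

struct cpuset {
	cpumask_t cpus_allowed;    /* user-configured mask */
	cpumask_t effective_cpus;  /* mask after hotplug/hierarchy updates */
};

struct task {
	struct cpuset *cs;         /* stand-in for task_cs(tsk) */
	cpumask_t cpus_allowed;
};

static bool is_in_v2_mode(void)
{
	return true;               /* assume the unified (cgroup v2) hierarchy */
}

/*
 * Mirror of the patched fallback: in v2 mode trust the cpuset's configured
 * mask; in legacy mode fall back to every possible CPU, since the cpuset's
 * effective mask may be empty (e.g. during CPU hotplug).
 */
static void cpus_allowed_fallback(struct task *tsk)
{
	tsk->cpus_allowed = is_in_v2_mode() ?
		tsk->cs->cpus_allowed : cpu_possible_mask;
}

int main(void)
{
	/* Effective mask emptied (as after hotplug), configured mask intact. */
	struct cpuset cs = { .cpus_allowed = 0x0f, .effective_cpus = 0x00 };
	struct task tsk = { .cs = &cs, .cpus_allowed = 0x00 };

	cpus_allowed_fallback(&tsk);
	printf("fallback mask: 0x%lx\n", tsk.cpus_allowed);
	return 0;
}
```

The point of the sketch is the ternary: the old code always used the cpuset's effective mask, which can be insane during hotplug, whereas the patched code only trusts the cpuset in v2 mode and otherwise widens to all possible CPUs.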