author     Dominik Dingel          2014-12-01 17:24:42 +0100
committer  Christian Borntraeger   2015-12-15 17:08:21 +0100
commit     a3a92c31bf0b57ad0ca7f092a6f3a57168ba9ae2 (patch)
tree       1e8fd0810ebeb5331a8cb3ec700b023a17162aaa /arch/s390/kvm/kvm-s390.c
parent     KVM: s390: obey kptr_restrict in traces (diff)
KVM: s390: fix mismatch between user and in-kernel guest limit
While the userspace interface requests the maximum size, the gmap code expects to get a maximum address. This mismatch resulted in bigger page tables than necessary for some guest sizes; a 2 GB guest, for example, used 3 levels of page tables instead of 2.

At the same time we introduce KVM_S390_NO_MEM_LIMIT, which will, in a bright future, allow a guest to span the complete 64-bit address space.

We also switch to TASK_MAX_SIZE for the initial memory size. This is a cosmetic change, as the previous size also resulted in the creation of a 4-level page table.

Reported-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
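For context, userspace drives this limit through the generic device-attribute ioctl on the VM file descriptor. The following is only a minimal sketch, assuming a vm_fd obtained via KVM_CREATE_VM and the uapi names KVM_S390_VM_MEM_CTRL / KVM_S390_VM_MEM_LIMIT_SIZE; error handling is omitted:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: ask KVM to cap guest memory at 'size' bytes. */
static int set_guest_mem_limit(int vm_fd, uint64_t size)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
		.addr  = (uint64_t)(uintptr_t)&size,
	};

	/* e.g. set_guest_mem_limit(vm_fd, 2UL << 30) for a 2 GB guest */
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}

Note that userspace passes a size; with this patch the kernel converts it to the last usable address before handing it to gmap_alloc().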
Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 25 ++++++++++++++++++++-----
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c14845c3a6f8..8aa5e55a8ecd 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -378,8 +378,8 @@ static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *att
case KVM_S390_VM_MEM_LIMIT_SIZE:
ret = 0;
VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
- kvm->arch.gmap->asce_end);
- if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
+ kvm->arch.mem_limit);
+ if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
ret = -EFAULT;
break;
default:
@@ -431,9 +431,17 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
if (get_user(new_limit, (u64 __user *)attr->addr))
return -EFAULT;
- if (new_limit > kvm->arch.gmap->asce_end)
+ if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
+ new_limit > kvm->arch.mem_limit)
return -E2BIG;
+ if (!new_limit)
+ return -EINVAL;
+
+ /* gmap_alloc takes last usable address */
+ if (new_limit != KVM_S390_NO_MEM_LIMIT)
+ new_limit -= 1;
+
ret = -EBUSY;
mutex_lock(&kvm->lock);
if (atomic_read(&kvm->online_vcpus) == 0) {
@@ -450,7 +458,9 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
}
}
mutex_unlock(&kvm->lock);
- VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
+ VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
+ VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
+ (void *) kvm->arch.gmap->asce);
break;
}
default:
@@ -1172,8 +1182,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (type & KVM_VM_S390_UCONTROL) {
kvm->arch.gmap = NULL;
+ kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
} else {
- kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
+ kvm->arch.mem_limit = TASK_MAX_SIZE;
+ kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
if (!kvm->arch.gmap)
goto out_err;
kvm->arch.gmap->private = kvm;
@@ -2829,6 +2841,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (mem->memory_size & 0xffffful)
return -EINVAL;
+ if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
+ return -EINVAL;
+
return 0;
}
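The off-by-one matters because gmap_alloc() picks the number of DAT table levels from the last usable address it is given. Below is a rough sketch of that selection, under the assumption that the thresholds sit at 2^31, 2^42 and 2^53 bytes; it is not the actual kernel code:

/*
 * Rough sketch, not the real gmap_alloc() implementation: derive the
 * number of page-table levels from the last usable guest address.
 */
static int levels_for_last_address(unsigned long last)
{
	if (last < (1UL << 31))
		return 2;	/* segment table, guests up to 2 GB */
	if (last < (1UL << 42))
		return 3;	/* region-third table, up to 4 TB */
	if (last < (1UL << 53))
		return 4;	/* region-second table, up to 8 PB */
	return 5;		/* region-first table, full 64-bit space */
}

With the old code a 2 GB guest handed the size 0x80000000 straight to gmap_alloc() and ended up with 3 levels; subtracting one first yields the last address 0x7fffffff, which stays within the 2-level segment-table case described in the commit message.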