path: root/kernel/perf_counter.c
author	Peter Zijlstra	2009-04-06 11:45:05 +0200
committer	Ingo Molnar	2009-04-07 10:48:57 +0200
commit	ebb3c4c4cb81d64cc041356915ec015e2c57092a (patch)
tree	7f01155a3f90e1b42890f2fb59a003ef5e499aca /kernel/perf_counter.c
parent	perf_counter: theres more to overflow than writing events (diff)
perf_counter: fix the mlock accounting
Reading through the code I saw I forgot to finish the mlock accounting. Do so now.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094517.899767331@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
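For reference, the accounting this patch completes follows a plain charge/uncharge pattern: the mmap path checks the would-be total of locked pages against RLIMIT_MEMLOCK and records the charge only once the buffer is actually allocated, and the close path drops the same charge when the last mapping goes away. Below is a minimal user-space sketch of that pattern, not the kernel code itself; the names mock_mm, buffer_charge and buffer_uncharge are hypothetical.

	#include <stdio.h>
	#include <stdbool.h>

	struct mock_mm {
		unsigned long locked_vm;	/* pages currently charged */
		unsigned long lock_limit;	/* RLIMIT_MEMLOCK, in pages */
	};

	/* Charge nr_pages data pages plus one control page, as perf_mmap() does. */
	static bool buffer_charge(struct mock_mm *mm, unsigned long nr_pages,
				  bool cap_ipc_lock)
	{
		unsigned long locked = mm->locked_vm + nr_pages + 1;

		if (locked > mm->lock_limit && !cap_ipc_lock)
			return false;	/* would exceed the limit: -EPERM */

		mm->locked_vm = locked;
		return true;
	}

	/* Mirror of perf_mmap_close(): undo the charge when the last mapping dies. */
	static void buffer_uncharge(struct mock_mm *mm, unsigned long nr_pages)
	{
		mm->locked_vm -= nr_pages + 1;
	}

	int main(void)
	{
		struct mock_mm mm = { .locked_vm = 0, .lock_limit = 16 };

		if (buffer_charge(&mm, 8, false))
			printf("charged: locked_vm = %lu pages\n", mm.locked_vm);

		if (!buffer_charge(&mm, 8, false))
			printf("second charge rejected: limit is %lu pages\n",
			       mm.lock_limit);

		buffer_uncharge(&mm, 8);
		printf("after close: locked_vm = %lu pages\n", mm.locked_vm);
		return 0;
	}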
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	| 33
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 195e976eb07d..c841563de043 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1461,13 +1461,14 @@ static void perf_mmap_close(struct vm_area_struct *vma)
if (atomic_dec_and_mutex_lock(&counter->mmap_count,
&counter->mmap_mutex)) {
+ vma->vm_mm->locked_vm -= counter->data->nr_pages + 1;
perf_mmap_data_free(counter);
mutex_unlock(&counter->mmap_mutex);
}
}
static struct vm_operations_struct perf_mmap_vmops = {
- .open = perf_mmap_open,
+ .open = perf_mmap_open,
.close = perf_mmap_close,
.fault = perf_mmap_fault,
};
@@ -1499,24 +1500,32 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
if (vma->vm_pgoff != 0)
return -EINVAL;
- locked = vma_size >> PAGE_SHIFT;
- locked += vma->vm_mm->locked_vm;
+ mutex_lock(&counter->mmap_mutex);
+ if (atomic_inc_not_zero(&counter->mmap_count)) {
+ if (nr_pages != counter->data->nr_pages)
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ locked = vma->vm_mm->locked_vm;
+ locked += nr_pages + 1;
lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
lock_limit >>= PAGE_SHIFT;
- if ((locked > lock_limit) && !capable(CAP_IPC_LOCK))
- return -EPERM;
-
- mutex_lock(&counter->mmap_mutex);
- if (atomic_inc_not_zero(&counter->mmap_count))
- goto out;
+ if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+ ret = -EPERM;
+ goto unlock;
+ }
WARN_ON(counter->data);
ret = perf_mmap_data_alloc(counter, nr_pages);
- if (!ret)
- atomic_set(&counter->mmap_count, 1);
-out:
+ if (ret)
+ goto unlock;
+
+ atomic_set(&counter->mmap_count, 1);
+ vma->vm_mm->locked_vm += nr_pages + 1;
+unlock:
mutex_unlock(&counter->mmap_mutex);
vma->vm_flags &= ~VM_MAYWRITE;
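Note on the charge itself: before this patch perf_mmap() only computed the would-be locked total for the RLIMIT_MEMLOCK check but never recorded it, and nothing was released on unmap, which is what the changelog means by the accounting being unfinished. With the patch, the charge of nr_pages + 1 (the data pages plus, as I read it, the one control page mapped at offset 0) is added to vma->vm_mm->locked_vm once perf_mmap_data_alloc() succeeds, and perf_mmap_close() subtracts the same amount when the last mmap reference is dropped, so repeated map/unmap cycles no longer distort the locked-memory bookkeeping.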