-rw-r--r--  mm/internal.h  5
-rw-r--r--  mm/madvise.c   4
-rw-r--r--  mm/oom_kill.c  9
3 files changed, 9 insertions, 9 deletions
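
The patch below adds a can_madv_dontneed_vma() helper to mm/internal.h and replaces the two open-coded VM_LOCKED|VM_HUGETLB|VM_PFNMAP checks with calls to it: one in madvise_dontneed() (mm/madvise.c), one in the OOM reaper's VMA walk (mm/oom_kill.c). For madvise this is a pure cleanup; for the OOM reaper it also means VM_PFNMAP VMAs are now skipped, where previously only hugetlb and mlocked VMAs were.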
diff --git a/mm/internal.h b/mm/internal.h
index 9ad04fc6eefe..8ab72f4374e0 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -43,6 +43,11 @@ int do_swap_page(struct vm_fault *vmf);
 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 		unsigned long floor, unsigned long ceiling);
 
+static inline bool can_madv_dontneed_vma(struct vm_area_struct *vma)
+{
+	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
+}
+
 void unmap_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long end,
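
The helper is true only when none of VM_LOCKED, VM_HUGETLB and VM_PFNMAP are set, i.e. when the VMA's pages may be zapped with MADV_DONTNEED semantics. A minimal sketch of the intended call pattern, mirroring the oom_kill.c hunk below (illustrative kernel-context code, not compilable on its own):

	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!can_madv_dontneed_vma(vma))
			continue;	/* locked, hugetlb or PFN-mapped: skip */
		/* ... safe to zap this VMA's pages ... */
	}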
diff --git a/mm/madvise.c b/mm/madvise.c
index 7f1490f0d3a6..b530a4986035 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -25,6 +25,8 @@
 
 #include <asm/tlb.h>
 
+#include "internal.h"
+
 /*
  * Any behaviour which results in changes to the vma->vm_flags needs to
  * take mmap_sem for writing. Others, which simply traverse vmas, need
@@ -474,7 +476,7 @@ static long madvise_dontneed(struct vm_area_struct *vma,
 			     unsigned long start, unsigned long end)
 {
 	*prev = vma;
-	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
+	if (!can_madv_dontneed_vma(vma))
 		return -EINVAL;
 
 	madvise_userfault_dontneed(vma, prev, start, end);
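
From userspace the rejected case surfaces as madvise(2) failing with EINVAL. A minimal standalone demo (not part of the patch; assumes RLIMIT_MEMLOCK permits locking one page):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 4096;
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED || mlock(p, len))
			return 1;

		/* VM_LOCKED is set, so madvise_dontneed() returns -EINVAL */
		if (madvise(p, len, MADV_DONTNEED))
			printf("madvise: %s\n", strerror(errno)); /* EINVAL */

		munlock(p, len);
		munmap(p, len);
		return 0;
	}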
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index a1977247c7ea..8256788ac119 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -508,14 +508,7 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 	tlb_gather_mmu(&tlb, mm, 0, -1);
 	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
-		if (is_vm_hugetlb_page(vma))
-			continue;
-
-		/*
-		 * mlocked VMAs require explicit munlocking before unmap.
-		 * Let's keep it simple here and skip such VMAs.
-		 */
-		if (vma->vm_flags & VM_LOCKED)
+		if (!can_madv_dontneed_vma(vma))
 			continue;
 
 		/*