author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2017-07-07 00:38:56 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>      2017-07-07 01:24:33 +0200
commit    4dc71451a2078efcad2f66bd6ef130d2296827b1 (patch)
tree      42f0896ed2bc9e7e59a4943b1b50fde56fb73a6a
parent    mm/hugetlb: move default definition of hugepd_t earlier in the header (diff)
mm/follow_page_mask: add support for hugepage directory entry
Architectures like ppc64 support hugepage sizes that are not mapped to
any of the page table levels. Instead they add an alternate page table
entry format called hugepage directory (hugepd). A hugepd indicates
that the page table entry maps to a set of hugetlb pages. Add support
for this in the generic follow_page_mask code. We already support this
format in the generic gup code. The default implementation prints a
warning and returns NULL. We will add ppc64 support in later patches.

Link: http://lkml.kernel.org/r/1494926612-23928-7-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Mike Kravetz <kravetz@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
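For readers skimming the diff below, here is a minimal userspace sketch
of the dispatch pattern this patch adds to each page-table level. The
real hugepd_t, __hugepd() and is_hugepd() come from the architecture
headers; the bit encoding and helper names below are invented purely
for illustration, not kernel code.

/*
 * Toy model of the hugepd check in follow_page_mask().  The encoding
 * (bit 63 marks a hugepage-directory entry) is invented for the demo.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct { uint64_t pd; } hugepd_t;        /* opaque wrapper, as in the kernel */
#define __hugepd(x) ((hugepd_t){ .pd = (x) })

/* Invented encoding: pretend bit 63 marks a hugepage-directory entry. */
static int is_hugepd(hugepd_t hpd)
{
	return (hpd.pd >> 63) & 1;
}

struct page { unsigned long pfn; };              /* stand-in for struct page */

static struct page *no_page_table(void) { return NULL; }

/* Default implementation, as in the patch: warn and return NULL. */
static struct page *follow_huge_pd(hugepd_t hpd, int flags, int pdshift)
{
	(void)hpd; (void)flags;
	fprintf(stderr, "no support for hugepage directory format (pdshift %d)\n",
		pdshift);
	return NULL;
}

/* One level of the walk: check for a hugepd entry before descending. */
static struct page *follow_level(uint64_t entry, int flags, int shift)
{
	struct page *page;

	if (is_hugepd(__hugepd(entry))) {
		page = follow_huge_pd(__hugepd(entry), flags, shift);
		if (page)
			return page;
		return no_page_table();
	}
	/* ...otherwise continue the normal page-table walk... */
	return no_page_table();
}

int main(void)
{
	/* An entry with the (invented) hugepd bit set takes the new path. */
	follow_level(1ULL << 63, 0, 21);
	return 0;
}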
-rw-r--r--  include/linux/hugetlb.h |  4
-rw-r--r--  mm/gup.c                | 33
-rw-r--r--  mm/hugetlb.c            |  8
3 files changed, 45 insertions(+), 0 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index f01427c79947..c92a1f0c7240 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -141,6 +141,9 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write);
+struct page *follow_huge_pd(struct vm_area_struct *vma,
+ unsigned long address, hugepd_t hpd,
+ int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
@@ -175,6 +178,7 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
static inline void hugetlb_show_meminfo(void)
{
}
+#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
#define follow_huge_pmd(mm, addr, pmd, flags) NULL
#define follow_huge_pud(mm, addr, pud, flags) NULL
#define follow_huge_pgd(mm, addr, pgd, flags) NULL
diff --git a/mm/gup.c b/mm/gup.c
index fe95a37a4172..9e472cb835b5 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -226,6 +226,14 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
return page;
return no_page_table(vma, flags);
}
+ if (is_hugepd(__hugepd(pmd_val(*pmd)))) {
+ page = follow_huge_pd(vma, address,
+ __hugepd(pmd_val(*pmd)), flags,
+ PMD_SHIFT);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
if (pmd_devmap(*pmd)) {
ptl = pmd_lock(mm, pmd);
page = follow_devmap_pmd(vma, address, pmd, flags);
@@ -292,6 +300,14 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
return page;
return no_page_table(vma, flags);
}
+ if (is_hugepd(__hugepd(pud_val(*pud)))) {
+ page = follow_huge_pd(vma, address,
+ __hugepd(pud_val(*pud)), flags,
+ PUD_SHIFT);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
if (pud_devmap(*pud)) {
ptl = pud_lock(mm, pud);
page = follow_devmap_pud(vma, address, pud, flags);
@@ -311,6 +327,7 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
unsigned int flags, unsigned int *page_mask)
{
p4d_t *p4d;
+ struct page *page;
p4d = p4d_offset(pgdp, address);
if (p4d_none(*p4d))
@@ -319,6 +336,14 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
if (unlikely(p4d_bad(*p4d)))
return no_page_table(vma, flags);
+ if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
+ page = follow_huge_pd(vma, address,
+ __hugepd(p4d_val(*p4d)), flags,
+ P4D_SHIFT);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
return follow_pud_mask(vma, address, p4d, flags, page_mask);
}
@@ -363,6 +388,14 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
return page;
return no_page_table(vma, flags);
}
+ if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
+ page = follow_huge_pd(vma, address,
+ __hugepd(pgd_val(*pgd)), flags,
+ PGDIR_SHIFT);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
return follow_p4d_mask(vma, address, pgd, flags, page_mask);
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a446869aa7f1..332637342555 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4669,6 +4669,14 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address,
}
struct page * __weak
+follow_huge_pd(struct vm_area_struct *vma,
+ unsigned long address, hugepd_t hpd, int flags, int pdshift)
+{
+ WARN(1, "hugepd follow called with no support for hugepage directory format\n");
+ return NULL;
+}
+
+struct page * __weak
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int flags)
{
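The follow_huge_pd() stub above is declared __weak, so an architecture
can later link in a strong definition that replaces the generic warning
stub; that is how the promised ppc64 support will hook in. A small
standalone illustration of the mechanism, assuming GCC/Clang and two
invented file names:

/* weak_demo.c — generic fallback; the kernel's __weak expands to this
 * attribute.  Build alone:           cc weak_demo.c
 * Build with the override linked:    cc weak_demo.c arch_override.c
 */
#include <stdio.h>

__attribute__((weak)) void follow_pd_demo(void)
{
	printf("generic: no hugepd support\n");
}

int main(void)
{
	follow_pd_demo();
	return 0;
}

/* arch_override.c — a strong definition wins over the weak one at link
 * time (keep it in its own file; shown here as a comment for display):
 *
 *	#include <stdio.h>
 *
 *	void follow_pd_demo(void)
 *	{
 *		printf("arch: walking the hugepage directory\n");
 *	}
 */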