author		Hugh Dickins	2016-01-10 01:54:59 +0100
committer	Michael Ellerman	2016-01-12 01:03:53 +0100
commit		2f10f1a7884e97a68e52c4b6f7866e29cf3fe7e6 (patch)
tree		cd886883448dc68d534d3127b3c5074efd472254 /arch/powerpc
parent		powerpc/mm: Fix _PAGE_PTE breaking swapoff (diff)
powerpc/mm: fix _PAGE_SWP_SOFT_DIRTY breaking swapoff
Swapoff after swapping hangs on the G5, when CONFIG_CHECKPOINT_RESTORE=y
but CONFIG_MEM_SOFT_DIRTY is not set.  That's because the non-zero
_PAGE_SWP_SOFT_DIRTY bit, added by CONFIG_HAVE_ARCH_SOFT_DIRTY=y, is not
discounted when CONFIG_MEM_SOFT_DIRTY is not set: so swap ptes cannot be
recognized.

(I suspect that the peculiar dependence of HAVE_ARCH_SOFT_DIRTY on
CHECKPOINT_RESTORE in arch/powerpc/Kconfig comes from an incomplete
attempt to solve this problem.)

It's true that the relationship between CONFIG_HAVE_ARCH_SOFT_DIRTY and
CONFIG_MEM_SOFT_DIRTY is too confusing, and it's true that swapoff should
be made more robust; but nevertheless, fix up the powerpc ifdefs as
x86_64 and s390 (which met the same problem) have them, defining the bits
as 0 if CONFIG_MEM_SOFT_DIRTY is not set.

Fixes: 7207f43665b8 ("powerpc/mm: Add page soft dirty tracking")
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Cyrill Gorcunov <gorcunov@openvz.org>
Acked-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
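To make the failure mode concrete, here is a minimal userspace model (an
illustrative sketch, not kernel code: the pte is just an unsigned long, the
names recognized()/SWP_SOFT_DIRTY_* are invented for the example, and the
exact generic-mm call path is not reproduced).  It shows why a swap pte
carrying a non-zero soft-dirty bit that nothing discounts can never compare
equal to the pte swapoff rebuilds from the swap entry, and why defining the
bit as 0 restores the match:

/*
 * Minimal userspace model of the bug: NOT kernel code.  A "pte" is just an
 * unsigned long; the bit position mirrors _PAGE_SWP_SOFT_DIRTY from the
 * pgtable.h hunk below.
 */
#include <stdio.h>

#define SWP_TYPE_BITS		5
#define _PAGE_BIT_SWAP_TYPE	2

/* Before the fix: the bit is non-zero whenever HAVE_ARCH_SOFT_DIRTY=y. */
#define SWP_SOFT_DIRTY_BEFORE	(1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
/* After the fix, with CONFIG_MEM_SOFT_DIRTY unset: the bit is 0. */
#define SWP_SOFT_DIRTY_AFTER	0UL

/*
 * How swapoff effectively recognizes its swap ptes: the pte found in the
 * page table must compare equal to the pte rebuilt from the swap entry.
 */
static int recognized(unsigned long swp_soft_dirty_bit)
{
	unsigned long swap_entry_bits = 0x1234UL;	/* arbitrary example entry */

	/* pte installed at swap-out time, tagged with the soft-dirty bit */
	unsigned long installed = swap_entry_bits | swp_soft_dirty_bit;

	/* pte rebuilt from the swap entry alone; with CONFIG_MEM_SOFT_DIRTY
	 * unset nothing discounts the extra bit before comparing */
	unsigned long rebuilt = swap_entry_bits;

	return installed == rebuilt;
}

int main(void)
{
	printf("before fix: %s\n", recognized(SWP_SOFT_DIRTY_BEFORE)
	       ? "recognized" : "never matches -> swapoff hangs");
	printf("after fix:  %s\n", recognized(SWP_SOFT_DIRTY_AFTER)
	       ? "recognized" : "never matches -> swapoff hangs");
	return 0;
}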
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/book3s/64/hash.h	| 5
-rw-r--r--	arch/powerpc/include/asm/book3s/64/pgtable.h	| 9
2 files changed, 11 insertions, 3 deletions
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 9e861b4378bd..2ff8b3df553d 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -33,7 +33,12 @@
 #define _PAGE_F_GIX_SHIFT	12
 #define _PAGE_F_SECOND		0x08000 /* Whether to use secondary hash or not */
 #define _PAGE_SPECIAL		0x10000 /* software: special page */
+
+#ifdef CONFIG_MEM_SOFT_DIRTY
 #define _PAGE_SOFT_DIRTY	0x20000 /* software: software dirty tracking */
+#else
+#define _PAGE_SOFT_DIRTY	0x00000
+#endif
 
 /*
  * THP pages can't be special. So use the _PAGE_SPECIAL
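For readability, here is how the hash.h definitions read once this hunk is
applied (reconstructed directly from the diff above):

#define _PAGE_F_GIX_SHIFT	12
#define _PAGE_F_SECOND		0x08000 /* Whether to use secondary hash or not */
#define _PAGE_SPECIAL		0x10000 /* software: special page */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY	0x20000 /* software: software dirty tracking */
#else
#define _PAGE_SOFT_DIRTY	0x00000
#endif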
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 8e040c42e931..b3a5badab69f 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -167,8 +167,13 @@ static inline void pgd_set(pgd_t *pgdp, unsigned long val)
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
 #define __swp_entry_to_pte(x)		__pte((x).val | _PAGE_PTE)
 
-#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#ifdef CONFIG_MEM_SOFT_DIRTY
 #define _PAGE_SWP_SOFT_DIRTY	(1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
+#else
+#define _PAGE_SWP_SOFT_DIRTY	0UL
+#endif /* CONFIG_MEM_SOFT_DIRTY */
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
 {
 	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
@@ -181,8 +186,6 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 {
 	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
 }
-#else
-#define _PAGE_SWP_SOFT_DIRTY	0
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
 void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
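For readability, here is how the soft-dirty block in pgtable.h reads once
both hunks are applied (reconstructed from the diff above; lines between
the two hunks are unchanged by this patch and are omitted):

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	(1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
#else
#define _PAGE_SWP_SOFT_DIRTY	0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}

/* ... unchanged lines between the two hunks omitted ... */

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

The design choice: the _PAGE_SWP_SOFT_DIRTY definition is pulled out of the
CONFIG_HAVE_ARCH_SOFT_DIRTY block and keyed on CONFIG_MEM_SOFT_DIRTY instead,
so with soft-dirty tracking disabled the bit is 0UL, pte_swp_mksoft_dirty()
and pte_swp_clear_soft_dirty() become no-ops, and swap ptes carry no extra
bit that generic code would have to discount, matching x86_64 and s390.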