Diffstat (limited to 'mm/migrate.c')

 mm/migrate.c | 25 +++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 181f5d2718a9..663a5449367a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -100,7 +100,7 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
 	/*
 	 * Check PageMovable before holding a PG_lock because page's owner
 	 * assumes anybody doesn't touch PG_lock of newly allocated page
-	 * so unconditionally grapping the lock ruins page's owner side.
+	 * so unconditionally grabbing the lock ruins page's owner side.
 	 */
 	if (unlikely(!__PageMovable(page)))
 		goto out_putpage;
@@ -248,10 +248,8 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 				pte = swp_entry_to_pte(entry);
 			} else if (is_device_public_page(new)) {
 				pte = pte_mkdevmap(pte);
-				flush_dcache_page(new);
 			}
-		} else
-			flush_dcache_page(new);
+		}
 
 #ifdef CONFIG_HUGETLB_PAGE
 		if (PageHuge(new)) {
@@ -374,7 +372,7 @@ unlock:
 }
 #endif
 
-static int expected_page_refs(struct page *page)
+static int expected_page_refs(struct address_space *mapping, struct page *page)
 {
 	int expected_count = 1;
 
@@ -384,7 +382,7 @@ static int expected_page_refs(struct page *page)
 	 */
 	expected_count += is_device_private_page(page);
 	expected_count += is_device_public_page(page);
-	if (page_mapping(page))
+	if (mapping)
 		expected_count += hpage_nr_pages(page) + page_has_private(page);
 
 	return expected_count;
@@ -405,7 +403,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	XA_STATE(xas, &mapping->i_pages, page_index(page));
 	struct zone *oldzone, *newzone;
 	int dirty;
-	int expected_count = expected_page_refs(page) + extra_count;
+	int expected_count = expected_page_refs(mapping, page) + extra_count;
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
@@ -750,7 +748,7 @@ static int __buffer_migrate_page(struct address_space *mapping,
 		return migrate_page(mapping, newpage, page, mode);
 
 	/* Check whether page does not have extra refs before we do more work */
-	expected_count = expected_page_refs(page);
+	expected_count = expected_page_refs(mapping, page);
 	if (page_count(page) != expected_count)
 		return -EAGAIN;
 
@@ -911,7 +909,7 @@ static int fallback_migrate_page(struct address_space *mapping,
 	 */
 	if (page_has_private(page) &&
 	    !try_to_release_page(page, GFP_KERNEL))
-		return -EAGAIN;
+		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
 
 	return migrate_page(mapping, newpage, page, mode);
 }
@@ -995,6 +993,13 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 		 */
 		if (!PageMappingFlags(page))
 			page->mapping = NULL;
+
+		if (unlikely(is_zone_device_page(newpage))) {
+			if (is_device_public_page(newpage))
+				flush_dcache_page(newpage);
+		} else
+			flush_dcache_page(newpage);
+
 	}
 out:
 	return rc;
@@ -1287,7 +1292,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	struct anon_vma *anon_vma = NULL;
 
 	/*
-	 * Movability of hugepages depends on architectures and hugepage size.
+	 * Migratability of hugepages depends on architectures and their size.
 	 * This check is necessary because some callers of hugepage migration
 	 * like soft offline and memory hotremove don't walk through page
 	 * tables or check whether the hugepage is pmd-based or not before
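
Read together, the two expected_page_refs() hunks change the helper to take the address_space the caller already holds rather than re-deriving it with page_mapping(). Assembled from those hunks, the post-patch function reads roughly as below; this is a sketch, and the block comment is paraphrased since the diff elides its body:

static int expected_page_refs(struct address_space *mapping, struct page *page)
{
	int expected_count = 1;

	/* ZONE_DEVICE private/public pages each hold one extra reference */
	expected_count += is_device_private_page(page);
	expected_count += is_device_public_page(page);
	if (mapping)
		expected_count += hpage_nr_pages(page) + page_has_private(page);

	return expected_count;
}

Passing the mapping explicitly lets migrate_page_move_mapping() and __buffer_migrate_page() compute the expected refcount against the same mapping they go on to use, instead of looking it up a second time.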