path: root/mm/memory_hotplug.c
author    Linus Torvalds  2016-07-29 01:36:48 +0200
committer Linus Torvalds  2016-07-29 01:36:48 +0200
commit    1c88e19b0f6a8471ee50d5062721ba30b8fd4ba9 (patch)
tree      6d227487ca2cf391589c73af1c40ec7b7126feec /mm/memory_hotplug.c
parent    Merge tag 'dmaengine-4.8-rc1' of git://git.infradead.org/users/vkoul/slave-dma (diff)
parent    mm, compaction: simplify contended compaction handling (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 "The rest of MM"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (101 commits)
  mm, compaction: simplify contended compaction handling
  mm, compaction: introduce direct compaction priority
  mm, thp: remove __GFP_NORETRY from khugepaged and madvised allocations
  mm, page_alloc: make THP-specific decisions more generic
  mm, page_alloc: restructure direct compaction handling in slowpath
  mm, page_alloc: don't retry initial attempt in slowpath
  mm, page_alloc: set alloc_flags only once in slowpath
  lib/stackdepot.c: use __GFP_NOWARN for stack allocations
  mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
  mm, kasan: account for object redzone in SLUB's nearest_obj()
  mm: fix use-after-free if memory allocation failed in vma_adjust()
  zsmalloc: Delete an unnecessary check before the function call "iput"
  mm/memblock.c: fix index adjustment error in __next_mem_range_rev()
  mem-hotplug: alloc new page from a nearest neighbor node when mem-offline
  mm: optimize copy_page_to/from_iter_iovec
  mm: add cond_resched() to generic_swapfile_activate()
  Revert "mm, mempool: only set __GFP_NOMEMALLOC if there are free elements"
  mm, compaction: don't isolate PageWriteback pages in MIGRATE_SYNC_LIGHT mode
  mm: hwpoison: remove incorrect comments
  make __section_nr() more efficient
  ...
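Of the commits above, the one that touches mm/memory_hotplug.c is "mem-hotplug: alloc new page from a nearest neighbor node when mem-offline". As a reading aid, here is a minimal userspace sketch of the destination-selection order that patch introduces (see new_node_page() in the diff below): prefer any online node other than the one being offlined, then fall back to an unrestricted allocation. The plain-bitmask node mask and the can_alloc_on() helper are illustrative assumptions, not kernel API.

/*
 * Sketch of new_node_page()'s fallback order: try every online
 * node except the source first, then retry with no restriction.
 * Node masks are modeled as plain bitmasks; "allocating" on a
 * node is modeled as a bit test against has_free_mem.
 */
#include <stdio.h>

#define MAX_NODES 8

/* Hypothetical stand-in for attempting an allocation on one node. */
static int can_alloc_on(int node, unsigned long has_free_mem)
{
	return (has_free_mem >> node) & 1UL;
}

static int pick_dest_node(int src, unsigned long online,
			  unsigned long has_free_mem)
{
	unsigned long others = online & ~(1UL << src);
	int n;

	/* First pass: any online node except the one being offlined. */
	for (n = 0; n < MAX_NODES; n++)
		if (((others >> n) & 1UL) && can_alloc_on(n, has_free_mem))
			return n;
	/* Second pass: no restriction, mirroring the plain __alloc_pages() retry. */
	for (n = 0; n < MAX_NODES; n++)
		if (((online >> n) & 1UL) && can_alloc_on(n, has_free_mem))
			return n;
	return -1;
}

int main(void)
{
	unsigned long online = 0x7;	/* nodes 0-2 online */

	/* Offlining node 1: node 0 is preferred over the source. */
	printf("dest: node %d\n", pick_dest_node(1, online, 0x5)); /* 0 */
	/* Only the source node has free memory left: fall back to it. */
	printf("dest: node %d\n", pick_dest_node(1, online, 0x2)); /* 1 */
	return 0;
}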
Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r--  mm/memory_hotplug.c  |  45
1 file changed, 37 insertions(+), 8 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 82d0b98d27f8..3894b65b1555 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1209,9 +1209,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
arch_refresh_nodedata(nid, pgdat);
} else {
- /* Reset the nr_zones and classzone_idx to 0 before reuse */
+ /* Reset the nr_zones, order and classzone_idx before reuse */
pgdat->nr_zones = 0;
- pgdat->classzone_idx = 0;
+ pgdat->kswapd_order = 0;
+ pgdat->kswapd_classzone_idx = 0;
}
/* we can use NODE_DATA(nid) from here */
@@ -1547,6 +1548,37 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
return 0;
}
+static struct page *new_node_page(struct page *page, unsigned long private,
+ int **result)
+{
+ gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+ int nid = page_to_nid(page);
+ nodemask_t nmask = node_online_map;
+ struct page *new_page;
+
+ /*
+ * TODO: allocate a destination hugepage from a nearest neighbor node,
+ * in accordance with the memory policy of the user process if possible.
+ * For now, as a simple work-around, we use the next node for destination.
+ */
+ if (PageHuge(page))
+ return alloc_huge_page_node(page_hstate(compound_head(page)),
+ next_node_in(nid, nmask));
+
+ node_clear(nid, nmask);
+ if (PageHighMem(page)
+ || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
+ gfp_mask |= __GFP_HIGHMEM;
+
+ new_page = __alloc_pages_nodemask(gfp_mask, 0,
+ node_zonelist(nid, gfp_mask), &nmask);
+ if (!new_page)
+ new_page = __alloc_pages(gfp_mask, 0,
+ node_zonelist(nid, gfp_mask));
+
+ return new_page;
+}
+
#define NR_OFFLINE_AT_ONCE_PAGES (256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
@@ -1586,7 +1618,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
put_page(page);
list_add_tail(&page->lru, &source);
move_pages--;
- inc_zone_page_state(page, NR_ISOLATED_ANON +
+ inc_node_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
} else {
@@ -1610,11 +1642,8 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
goto out;
}
- /*
- * alloc_migrate_target should be improooooved!!
- * migrate_pages returns # of failed pages.
- */
- ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
+ /* Allocate a new page from the nearest neighbor node */
+ ret = migrate_pages(&source, new_node_page, NULL, 0,
MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
if (ret)
putback_movable_pages(&source);
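
For hugepages, new_node_page() instead relies on next_node_in() to pick the next online node after the source, wrapping around the node mask (and landing back on the source node if it is the only one online). A minimal userspace sketch of that wrap-around, assuming a plain bitmask in place of the kernel's nodemask_t:

/*
 * Sketch of next_node_in()'s wrap-around: starting just after
 * `nid`, return the first set bit in `mask`, wrapping past the
 * highest node back to node 0.
 */
#include <stdio.h>

#define MAX_NODES 8

static int next_node_in_sketch(int nid, unsigned long mask)
{
	int step, candidate;

	for (step = 1; step <= MAX_NODES; step++) {
		candidate = (nid + step) % MAX_NODES;
		if ((mask >> candidate) & 1UL)
			return candidate;
	}
	return -1;	/* empty mask */
}

int main(void)
{
	unsigned long online = 0xb;	/* nodes 0, 1 and 3 online */

	printf("after node 1 -> node %d\n", next_node_in_sketch(1, online)); /* 3 */
	printf("after node 3 -> node %d\n", next_node_in_sketch(3, online)); /* 0, wraps */
	return 0;
}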