 include/linux/migrate.h | 16 ++++++++++++++++
 mm/memory_hotplug.c     | 11 +----------
 mm/page_isolation.c     | 18 ++----------------
 3 files changed, 19 insertions(+), 26 deletions(-)
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 48e24844b3c5..d9675b665cc4 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -4,6 +4,7 @@
 #include <linux/mm.h>
 #include <linux/mempolicy.h>
 #include <linux/migrate_mode.h>
+#include <linux/hugetlb.h>
 
 typedef struct page *new_page_t(struct page *page, unsigned long private,
 				int **reason);
@@ -30,6 +31,21 @@ enum migrate_reason {
 /* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
 extern char *migrate_reason_names[MR_TYPES];
 
+static inline struct page *new_page_nodemask(struct page *page,
+				int preferred_nid, nodemask_t *nodemask)
+{
+	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+
+	if (PageHuge(page))
+		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
+				nodemask);
+
+	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
+		gfp_mask |= __GFP_HIGHMEM;
+
+	return __alloc_pages_nodemask(gfp_mask, 0, preferred_nid, nodemask);
+}
+
 #ifdef CONFIG_MIGRATION
 
 extern void putback_movable_pages(struct list_head *l);
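
The new helper centralizes the allocation policy but is not itself a new_page_t, so migration callbacks wrap it. A minimal sketch of such a wrapper (the function name and the choice of nodemask are illustrative, not part of this patch):

/*
 * Illustrative only: a new_page_t-style callback that defers the
 * allocation policy to new_page_nodemask(), preferring the node the
 * old page sits on and allowing any node that currently has memory.
 */
static struct page *example_new_page(struct page *page, unsigned long private,
				     int **result)
{
	return new_page_nodemask(page, page_to_nid(page),
				 &node_states[N_MEMORY]);
}
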
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1cf3404bd065..203c46306a74 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1433,7 +1433,6 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 static struct page *new_node_page(struct page *page, unsigned long private,
 		int **result)
 {
-	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
 	int nid = page_to_nid(page);
 	nodemask_t nmask = node_states[N_MEMORY];
 
@@ -1446,15 +1445,7 @@ static struct page *new_node_page(struct page *page, unsigned long private,
 	if (nodes_empty(nmask))
 		node_set(nid, nmask);
 
-	if (PageHuge(page))
-		return alloc_huge_page_nodemask(
-				page_hstate(compound_head(page)), &nmask);
-
-	if (PageHighMem(page)
-	    || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
-		gfp_mask |= __GFP_HIGHMEM;
-
-	return __alloc_pages_nodemask(gfp_mask, 0, nid, &nmask);
+	return new_page_nodemask(page, nid, &nmask);
 }
 
 #define NR_OFFLINE_AT_ONCE_PAGES	(256)
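
Only the fallback half of the nodemask setup is visible in the hunk context above; the step that removes the node being offlined falls between the two hunks. A sketch of the overall policy (the node_clear() step is assumed from the surrounding logic, not shown in this diff):

/*
 * Sketch of the target-selection policy when offlining memory: start
 * from every node that has memory, drop the source node so allocation
 * prefers somewhere else, and fall back to the source node only when
 * it is the last one with memory. The node_clear() step is assumed;
 * the diff context above does not show it.
 */
static struct page *sketch_offline_target(struct page *page, int nid)
{
	nodemask_t nmask = node_states[N_MEMORY];

	node_clear(nid, nmask);
	if (nodes_empty(nmask))
		node_set(nid, nmask);

	return new_page_nodemask(page, nid, &nmask);
}
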
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 3606104893e0..757410d9f758 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -8,6 +8,7 @@
 #include <linux/memory.h>
 #include <linux/hugetlb.h>
 #include <linux/page_owner.h>
+#include <linux/migrate.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -294,20 +295,5 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 struct page *alloc_migrate_target(struct page *page, unsigned long private,
 				   int **resultp)
 {
-	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
-
-	/*
-	 * TODO: allocate a destination hugepage from a nearest neighbor node,
-	 * accordance with memory policy of the user process if possible. For
-	 * now as a simple work-around, we use the next node for destination.
-	 */
-	if (PageHuge(page))
-		return alloc_huge_page_node(page_hstate(compound_head(page)),
-					    next_node_in(page_to_nid(page),
-							 node_online_map));
-
-	if (PageHighMem(page))
-		gfp_mask |= __GFP_HIGHMEM;
-
-	return alloc_page(gfp_mask);
+	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
 }
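
alloc_migrate_target() keeps its new_page_t shape, so existing migrate_pages() users need no change; the behavioral difference is that hugepages now go through alloc_huge_page_nodemask() and base pages prefer the calling CPU's node rather than a bare alloc_page(). A rough sketch of how such a callback is driven (assumes the six-argument migrate_pages() of this kernel generation; the driver function and its page list are hypothetical):

/*
 * Illustrative driver: migrate an already-isolated list of pages and
 * let alloc_migrate_target() pick each destination. migrate_pages()
 * leaves pages it could not move on the list, so the caller puts them
 * back. MR_CMA is used here only as an example reason code.
 */
static int sketch_migrate_isolated(struct list_head *pagelist)
{
	int ret;

	if (list_empty(pagelist))
		return 0;

	ret = migrate_pages(pagelist, alloc_migrate_target, NULL, 0,
			    MIGRATE_SYNC, MR_CMA);
	if (ret)
		putback_movable_pages(pagelist);

	return ret;
}
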