Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 209
1 file changed, 94 insertions(+), 115 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f8f3bfc435ee..452513bf02ce 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -63,6 +63,7 @@
#include <linux/sched/rt.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
+#include <linux/memcontrol.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@@ -286,7 +287,9 @@ static inline void reset_deferred_meminit(pg_data_t *pgdat)
/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
- if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
+ int nid = early_pfn_to_nid(pfn);
+
+ if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
return true;
return false;
@@ -656,6 +659,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
return;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
+
__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
INIT_LIST_HEAD(&page->lru);
@@ -673,6 +679,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
return;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
+
__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
set_page_private(page, 0);
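
The two hunks above harden the debug-guard helpers against lookup_page_ext() returning NULL, which can happen when the page_ext storage could not be allocated. A minimal userspace sketch of the same guard pattern follows; lookup_ext(), set_guard_flag() and the struct names are stand-ins invented for illustration, not kernel API:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for struct page_ext: just a flags word. */
struct ext { unsigned long flags; };

/* Stand-in for lookup_page_ext(): may legitimately return NULL. */
static struct ext *lookup_ext(struct ext *table, size_t nr, size_t idx)
{
	if (table == NULL || idx >= nr)
		return NULL;
	return &table[idx];
}

/* Mirrors the fixed set_page_guard()/clear_page_guard(): bail out
 * early instead of dereferencing a NULL extension pointer. */
static void set_guard_flag(struct ext *table, size_t nr, size_t idx)
{
	struct ext *e = lookup_ext(table, nr, idx);

	if (e == NULL)		/* the new "if (unlikely(!page_ext))" check */
		return;
	e->flags |= 1UL;
}

int main(void)
{
	struct ext none = { 0 };

	set_guard_flag(NULL, 0, 3);	/* no table: silently ignored */
	set_guard_flag(&none, 1, 0);	/* valid lookup: flag is set */
	printf("flags = %lu\n", none.flags);
	return 0;
}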
@@ -998,6 +1007,8 @@ static __always_inline bool free_pages_prepare(struct page *page,
VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
+ if (compound)
+ ClearPageDoubleMap(page);
for (i = 1; i < (1 << order); i++) {
if (compound)
bad += free_tail_pages_check(page, page + i);
@@ -1008,8 +1019,12 @@ static __always_inline bool free_pages_prepare(struct page *page,
(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
}
}
- if (PageAnonHead(page))
+ if (PageMappingFlags(page))
page->mapping = NULL;
+ if (memcg_kmem_enabled() && PageKmemcg(page)) {
+ memcg_kmem_uncharge(page, order);
+ __ClearPageKmemcg(page);
+ }
if (check_free)
bad += free_pages_check(page);
if (bad)
@@ -1267,7 +1282,7 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
spin_lock(&early_pfn_lock);
nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
if (nid < 0)
- nid = 0;
+ nid = first_online_node;
spin_unlock(&early_pfn_lock);
return nid;
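
Together with the node_online() check added to early_page_uninitialised() further up, this hunk stops early_pfn_to_nid() from mapping unresolvable PFNs to a hard-coded node 0, which may not be online on all configurations; it now falls back to the first online node. A rough userspace model of that fallback, with an invented raw_pfn_to_nid() and a fixed online-node map used purely for illustration:

#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 4

/* Hypothetical online-node map: node 0 is offline, node 1 is the
 * first online node (mirrors first_online_node in the kernel). */
static const bool node_online[MAX_NODES] = { false, true, true, false };

static int first_online_node(void)
{
	for (int nid = 0; nid < MAX_NODES; nid++)
		if (node_online[nid])
			return nid;
	return -1;
}

/* Stand-in for __early_pfn_to_nid(): pretend the lookup fails. */
static int raw_pfn_to_nid(unsigned long pfn)
{
	(void)pfn;
	return -1;
}

/* Mirrors the fixed early_pfn_to_nid(): fall back to the first
 * online node rather than assuming node 0 exists. */
static int pfn_to_nid(unsigned long pfn)
{
	int nid = raw_pfn_to_nid(pfn);

	if (nid < 0)
		nid = first_online_node();
	return nid;
}

int main(void)
{
	printf("pfn 0x1000 -> nid %d\n", pfn_to_nid(0x1000));
	return 0;
}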
@@ -1716,6 +1731,19 @@ static bool check_new_pages(struct page *page, unsigned int order)
return false;
}
+inline void post_alloc_hook(struct page *page, unsigned int order,
+ gfp_t gfp_flags)
+{
+ set_page_private(page, 0);
+ set_page_refcounted(page);
+
+ arch_alloc_page(page, order);
+ kernel_map_pages(page, 1 << order, 1);
+ kernel_poison_pages(page, 1 << order, 1);
+ kasan_alloc_pages(page, order);
+ set_page_owner(page, order, gfp_flags);
+}
+
static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
unsigned int alloc_flags)
{
@@ -1728,13 +1756,7 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
poisoned &= page_is_poisoned(p);
}
- set_page_private(page, 0);
- set_page_refcounted(page);
-
- arch_alloc_page(page, order);
- kernel_map_pages(page, 1 << order, 1);
- kernel_poison_pages(page, 1 << order, 1);
- kasan_alloc_pages(page, order);
+ post_alloc_hook(page, order, gfp_flags);
if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
for (i = 0; i < (1 << order); i++)
@@ -1743,8 +1765,6 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
- set_page_owner(page, order, gfp_flags);
-
/*
* page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
* allocate the page. The expectation is that the caller is taking
@@ -2453,7 +2473,6 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
void split_page(struct page *page, unsigned int order)
{
int i;
- gfp_t gfp_mask;
VM_BUG_ON_PAGE(PageCompound(page), page);
VM_BUG_ON_PAGE(!page_count(page), page);
@@ -2467,12 +2486,9 @@ void split_page(struct page *page, unsigned int order)
split_page(virt_to_page(page[0].shadow), order);
#endif
- gfp_mask = get_page_owner_gfp(page);
- set_page_owner(page, 0, gfp_mask);
- for (i = 1; i < (1 << order); i++) {
+ for (i = 1; i < (1 << order); i++)
set_page_refcounted(page + i);
- set_page_owner(page + i, 0, gfp_mask);
- }
+ split_page_owner(page, order);
}
EXPORT_SYMBOL_GPL(split_page);
@@ -2501,8 +2517,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
zone->free_area[order].nr_free--;
rmv_page_order(page);
- set_page_owner(page, order, __GFP_MOVABLE);
-
/* Set the pageblock if the isolated page is at least a pageblock */
if (order >= pageblock_order - 1) {
struct page *endpage = page + (1 << order) - 1;
@@ -2519,33 +2533,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
}
/*
- * Similar to split_page except the page is already free. As this is only
- * being used for migration, the migratetype of the block also changes.
- * As this is called with interrupts disabled, the caller is responsible
- * for calling arch_alloc_page() and kernel_map_page() after interrupts
- * are enabled.
- *
- * Note: this is probably too low level an operation for use in drivers.
- * Please consult with lkml before using this in your driver.
- */
-int split_free_page(struct page *page)
-{
- unsigned int order;
- int nr_pages;
-
- order = page_order(page);
-
- nr_pages = __isolate_free_page(page, order);
- if (!nr_pages)
- return 0;
-
- /* Split into individual pages */
- set_page_refcounted(page);
- split_page(page, order);
- return nr_pages;
-}
-
-/*
* Update NUMA hit/miss statistics
*
* Must be called with interrupts disabled.
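
split_free_page() is removed here; the likely replacement for its callers is to combine __isolate_free_page() with the post_alloc_hook() introduced above, followed by split_page() for orders above zero. The toy model below only illustrates that control flow with invented stand-in names and a plain array of "pages"; it is not kernel code and makes no claim about how any particular caller was actually converted:

#include <stdio.h>

/* Toy model of a high-order block: 1 << order base pages, each with
 * a refcount.  Purely illustrative stand-ins, not kernel structures. */
#define MAX_ORDER_PAGES 8
struct toy_page { int refcount; int initialised; };

/* Stand-in for post_alloc_hook(): the per-allocation setup that used
 * to be split between prep_new_page() and split_free_page(). */
static void toy_post_alloc_hook(struct toy_page *head)
{
	head->refcount = 1;	/* set_page_refcounted() on the head */
	head->initialised = 1;	/* arch/poison/kasan/page-owner steps */
}

/* Stand-in for split_page(): give every tail page its own refcount. */
static void toy_split(struct toy_page *pages, unsigned int order)
{
	for (unsigned int i = 1; i < (1u << order); i++)
		pages[i].refcount = 1;
}

/* The shape a former split_free_page() caller would follow now:
 * isolate the block (not modelled here), run the post-allocation
 * hook, then split it into order-0 pages. */
static void claim_and_split(struct toy_page *pages, unsigned int order)
{
	toy_post_alloc_hook(&pages[0]);
	if (order)
		toy_split(pages, order);
}

int main(void)
{
	struct toy_page block[MAX_ORDER_PAGES] = { { 0, 0 } };

	claim_and_split(block, 3);	/* order-3 block -> 8 order-0 pages */
	for (int i = 0; i < MAX_ORDER_PAGES; i++)
		printf("page %d refcount %d\n", i, block[i].refcount);
	return 0;
}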
@@ -2609,11 +2596,12 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
page = list_last_entry(list, struct page, lru);
else
page = list_first_entry(list, struct page, lru);
- } while (page && check_new_pcp(page));
- __dec_zone_state(zone, NR_ALLOC_BATCH);
- list_del(&page->lru);
- pcp->count--;
+ __dec_zone_state(zone, NR_ALLOC_BATCH);
+ list_del(&page->lru);
+ pcp->count--;
+
+ } while (check_new_pcp(page));
} else {
/*
* We most definitely don't want callers attempting to
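
The buffered_rmqueue() hunk above moves the batch accounting, list removal, and pcp->count decrement inside the do/while loop, so a page rejected by check_new_pcp() is taken off the per-cpu list before the next candidate is examined, rather than the loop re-reading the same bad entry. A small userspace model of that pop-validate-retry shape, with an invented is_bad() check standing in for check_new_pcp():

#include <stdbool.h>
#include <stdio.h>

/* Toy singly linked free list; values divisible by 7 play the role
 * of "bad" pages flagged by check_new_pcp(). */
struct node { int val; struct node *next; };

static bool is_bad(const struct node *n)
{
	return n->val % 7 == 0;	/* stand-in for check_new_pcp() */
}

/* Pop entries until a good one is found.  The removal and the count
 * accounting happen inside the loop, mirroring the fixed
 * buffered_rmqueue(): a bad entry is unlinked before the next one is
 * looked at, so the loop cannot spin on the same bad entry. */
static struct node *take_good(struct node **head, int *count)
{
	struct node *n;

	do {
		n = *head;
		if (n == NULL)
			return NULL;

		*head = n->next;	/* list_del() + pcp->count-- */
		(*count)--;
	} while (is_bad(n));

	return n;
}

int main(void)
{
	struct node c = { 5, NULL }, b = { 14, &c }, a = { 7, &b };
	struct node *head = &a;
	int count = 3;

	struct node *got = take_good(&head, &count);
	printf("got %d, %d left\n", got ? got->val : -1, count);
	return 0;
}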
@@ -3023,6 +3011,7 @@ reset_fair:
apply_fair = false;
fair_skipped = false;
reset_alloc_batches(ac->preferred_zoneref->zone);
+ z = ac->preferred_zoneref;
goto zonelist_scan;
}
@@ -3095,6 +3084,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
struct oom_control oc = {
.zonelist = ac->zonelist,
.nodemask = ac->nodemask,
+ .memcg = NULL,
.gfp_mask = gfp_mask,
.order = order,
};
@@ -3596,6 +3586,17 @@ retry:
*/
alloc_flags = gfp_to_alloc_flags(gfp_mask);
+ /*
+ * Reset the zonelist iterators if memory policies can be ignored.
+ * These allocations are high priority and system rather than user
+ * orientated.
+ */
+ if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) {
+ ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+ ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+ ac->high_zoneidx, ac->nodemask);
+ }
+
/* This is the last chance, in general, before the goto nopage. */
page = get_page_from_freelist(gfp_mask, order,
alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
@@ -3604,12 +3605,6 @@ retry:
/* Allocate without watermarks if the context allows */
if (alloc_flags & ALLOC_NO_WATERMARKS) {
- /*
- * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
- * the allocation is high priority and these type of
- * allocations are system rather than user orientated
- */
- ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
page = get_page_from_freelist(gfp_mask, order,
ALLOC_NO_WATERMARKS, ac);
if (page)
@@ -3808,7 +3803,11 @@ retry_cpuset:
/* Dirty zone balancing only done in the fast path */
ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
- /* The preferred zone is used for statistics later */
+ /*
+ * The preferred zone is used for statistics but crucially it is
+ * also used as the starting point for the zonelist iterator. It
+ * may get reset for allocations that ignore memory policies.
+ */
ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
ac.high_zoneidx, ac.nodemask);
if (!ac.preferred_zoneref) {
@@ -3849,6 +3848,14 @@ no_zone:
}
out:
+ if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page) {
+ if (unlikely(memcg_kmem_charge(page, gfp_mask, order))) {
+ __free_pages(page, order);
+ page = NULL;
+ } else
+ __SetPageKmemcg(page);
+ }
+
if (kmemcheck_enabled && page)
kmemcheck_pagealloc_alloc(page, order, gfp_mask);
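
The new block at the out: label charges __GFP_ACCOUNT allocations to the current memory cgroup inside __alloc_pages_nodemask() itself, tagging successful pages with PageKmemcg so the matching uncharge added to free_pages_prepare() earlier in this diff can find them; if the charge fails, the page is freed and the allocation fails. A rough userspace model of that charge-on-success / uncharge-on-free pairing, with invented charge()/uncharge() helpers and a fixed quota used only for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy "memcg" quota measured in pages. */
static long quota = 4;

struct alloc { void *mem; unsigned int order; bool accounted; };

static bool charge(unsigned int order)
{
	long nr = 1L << order;

	if (quota < nr)
		return false;	/* over the limit: charge fails */
	quota -= nr;
	return true;
}

static void uncharge(unsigned int order)
{
	quota += 1L << order;
}

/* Mirrors the out: block: allocate first, then try to charge; on
 * failure free the memory and report the allocation as failed, on
 * success remember that this allocation must be uncharged later
 * (the PageKmemcg flag plays that role in the kernel). */
static bool accounted_alloc(struct alloc *a, unsigned int order)
{
	a->mem = malloc((size_t)4096 << order);
	a->order = order;
	a->accounted = false;
	if (a->mem == NULL)
		return false;
	if (!charge(order)) {
		free(a->mem);
		a->mem = NULL;
		return false;
	}
	a->accounted = true;
	return true;
}

/* Mirrors the free_pages_prepare() side: uncharge only what was charged. */
static void accounted_free(struct alloc *a)
{
	if (a->accounted)
		uncharge(a->order);
	free(a->mem);
	a->mem = NULL;
}

int main(void)
{
	struct alloc a, b;

	printf("order-2: %s\n", accounted_alloc(&a, 2) ? "ok" : "fail");
	printf("order-2: %s\n", accounted_alloc(&b, 2) ? "ok" : "fail");
	accounted_free(&a);
	printf("quota left: %ld\n", quota);
	return 0;
}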
@@ -4004,56 +4011,6 @@ void __free_page_frag(void *addr)
}
EXPORT_SYMBOL(__free_page_frag);
-/*
- * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
- * of the current memory cgroup if __GFP_ACCOUNT is set, other than that it is
- * equivalent to alloc_pages.
- *
- * It should be used when the caller would like to use kmalloc, but since the
- * allocation is large, it has to fall back to the page allocator.
- */
-struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
-{
- struct page *page;
-
- page = alloc_pages(gfp_mask, order);
- if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
- __free_pages(page, order);
- page = NULL;
- }
- return page;
-}
-
-struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
-{
- struct page *page;
-
- page = alloc_pages_node(nid, gfp_mask, order);
- if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
- __free_pages(page, order);
- page = NULL;
- }
- return page;
-}
-
-/*
- * __free_kmem_pages and free_kmem_pages will free pages allocated with
- * alloc_kmem_pages.
- */
-void __free_kmem_pages(struct page *page, unsigned int order)
-{
- memcg_kmem_uncharge(page, order);
- __free_pages(page, order);
-}
-
-void free_kmem_pages(unsigned long addr, unsigned int order)
-{
- if (addr != 0) {
- VM_BUG_ON(!virt_addr_valid((void *)addr));
- __free_kmem_pages(virt_to_page((void *)addr), order);
- }
-}
-
static void *make_alloc_exact(unsigned long addr, unsigned int order,
size_t size)
{
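
With the charging folded into the page allocator above, the dedicated alloc_kmem_pages()/free_kmem_pages() wrappers removed here become redundant: a caller that wants a large allocation charged to the kmem counter can pass __GFP_ACCOUNT to the ordinary entry points. A hedged kernel-style illustration of the caller-side change (a hypothetical caller written for this note, not taken from the diff):

#include <linux/gfp.h>

/*
 * Illustration only: where code previously used alloc_kmem_pages()
 * and __free_kmem_pages(), it now requests accounting explicitly
 * with __GFP_ACCOUNT and uses the ordinary alloc/free entry points.
 */
static struct page *grab_accounted_buffer(unsigned int order)
{
	/* charged to the current memcg's kmem counter on success */
	return alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, order);
}

static void drop_accounted_buffer(struct page *page, unsigned int order)
{
	/* the uncharge now happens inside free_pages_prepare() */
	__free_pages(page, order);
}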
@@ -4355,6 +4312,9 @@ void show_free_areas(unsigned int filter)
" unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ " anon_thp: %lu shmem_thp: %lu shmem_pmdmapped: %lu\n"
+#endif
" free:%lu free_pcp:%lu free_cma:%lu\n",
global_page_state(NR_ACTIVE_ANON),
global_page_state(NR_INACTIVE_ANON),
@@ -4372,6 +4332,11 @@ void show_free_areas(unsigned int filter)
global_page_state(NR_SHMEM),
global_page_state(NR_PAGETABLE),
global_page_state(NR_BOUNCE),
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ global_page_state(NR_ANON_THPS) * HPAGE_PMD_NR,
+ global_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR,
+ global_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR,
+#endif
global_page_state(NR_FREE_PAGES),
free_pcp,
global_page_state(NR_FREE_CMA_PAGES));
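
The THP counters added to show_free_areas() are tracked in huge-page units, so they are scaled by HPAGE_PMD_NR to report base pages here, and the per-zone hunk further below additionally passes them through K() to print kilobytes. A quick userspace check of that arithmetic for an assumed geometry of 4 KiB base pages and 2 MiB PMD-sized huge pages (HPAGE_PMD_NR = 512); the sizes are assumptions for the example, not taken from the diff:

#include <stdio.h>

/* Assumed geometry: 4 KiB base pages, 2 MiB PMD-mapped huge pages. */
#define PAGE_SIZE	4096UL
#define HPAGE_PMD_SIZE	(2UL * 1024 * 1024)
#define HPAGE_PMD_NR	(HPAGE_PMD_SIZE / PAGE_SIZE)

/* Same conversion as the kernel's K() macro: pages -> kilobytes. */
static unsigned long K(unsigned long pages)
{
	return pages << (12 - 10);	/* PAGE_SHIFT - 10 for 4 KiB pages */
}

int main(void)
{
	unsigned long nr_anon_thps = 3;	/* counter kept in huge-page units */
	unsigned long base_pages = nr_anon_thps * HPAGE_PMD_NR;

	printf("anon_thp: %lu huge pages = %lu base pages = %lukB\n",
	       nr_anon_thps, base_pages, K(base_pages));
	return 0;
}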
@@ -4406,6 +4371,11 @@ void show_free_areas(unsigned int filter)
" writeback:%lukB"
" mapped:%lukB"
" shmem:%lukB"
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ " shmem_thp: %lukB"
+ " shmem_pmdmapped: %lukB"
+ " anon_thp: %lukB"
+#endif
" slab_reclaimable:%lukB"
" slab_unreclaimable:%lukB"
" kernel_stack:%lukB"
@@ -4438,6 +4408,12 @@ void show_free_areas(unsigned int filter)
K(zone_page_state(zone, NR_WRITEBACK)),
K(zone_page_state(zone, NR_FILE_MAPPED)),
K(zone_page_state(zone, NR_SHMEM)),
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ K(zone_page_state(zone, NR_SHMEM_THPS) * HPAGE_PMD_NR),
+ K(zone_page_state(zone, NR_SHMEM_PMDMAPPED)
+ * HPAGE_PMD_NR),
+ K(zone_page_state(zone, NR_ANON_THPS) * HPAGE_PMD_NR),
+#endif
K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
zone_page_state(zone, NR_KERNEL_STACK) *
@@ -6448,15 +6424,18 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
sizeof(arch_zone_lowest_possible_pfn));
memset(arch_zone_highest_possible_pfn, 0,
sizeof(arch_zone_highest_possible_pfn));
- arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
- arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
- for (i = 1; i < MAX_NR_ZONES; i++) {
+
+ start_pfn = find_min_pfn_with_active_regions();
+
+ for (i = 0; i < MAX_NR_ZONES; i++) {
if (i == ZONE_MOVABLE)
continue;
- arch_zone_lowest_possible_pfn[i] =
- arch_zone_highest_possible_pfn[i-1];
- arch_zone_highest_possible_pfn[i] =
- max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
+
+ end_pfn = max(max_zone_pfn[i], start_pfn);
+ arch_zone_lowest_possible_pfn[i] = start_pfn;
+ arch_zone_highest_possible_pfn[i] = end_pfn;
+
+ start_pfn = end_pfn;
}
arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
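
The rewritten free_area_init_nodes() loop computes each zone's [start, end) PFN range by carrying one zone's end_pfn forward as the next zone's start_pfn, clamping with max() so a zone whose configured max_zone_pfn lies below the running start simply comes out empty rather than going backwards (ZONE_MOVABLE is still skipped and handled separately). A compact userspace rendering of the same loop, with made-up zone limits chosen only to show the empty-zone case:

#include <stdio.h>

#define MAX_NR_ZONES 4	/* e.g. DMA, DMA32, NORMAL, MOVABLE */
#define ZONE_MOVABLE 3

static unsigned long max(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

int main(void)
{
	/* Made-up per-zone upper limits in PFNs; a limit below the
	 * running start (DMA32's 0x800 here) yields an empty zone. */
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0x1000, 0x800, 0x40000, 0 };
	unsigned long lowest[MAX_NR_ZONES] = { 0 };
	unsigned long highest[MAX_NR_ZONES] = { 0 };
	unsigned long start_pfn = 0x100;	/* find_min_pfn_with_active_regions() */
	unsigned long end_pfn;

	for (int i = 0; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;

		end_pfn = max(max_zone_pfn[i], start_pfn);
		lowest[i] = start_pfn;
		highest[i] = end_pfn;

		start_pfn = end_pfn;	/* next zone begins where this one ends */
	}

	for (int i = 0; i < MAX_NR_ZONES; i++)
		printf("zone %d: [%#lx, %#lx)\n", i, lowest[i], highest[i]);
	return 0;
}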