Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	41
1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2f901a6e13d2..a81f2a8556c8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2288,7 +2288,7 @@ found:
 }
 
 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
-static int set_max_huge_pages(struct hstate *h, unsigned long count,
+static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 			      nodemask_t *nodes_allowed)
 {
 	unsigned long min_count, ret;
@@ -2296,6 +2296,26 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count,
 	spin_lock(&hugetlb_lock);
 
 	/*
+	 * Check for a node specific request.
+	 * Changing node specific huge page count may require a corresponding
+	 * change to the global count. In any case, the passed node mask
+	 * (nodes_allowed) will restrict alloc/free to the specified node.
+	 */
+	if (nid != NUMA_NO_NODE) {
+		unsigned long old_count = count;
+
+		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
+		/*
+		 * User may have specified a large count value which caused the
+		 * above calculation to overflow. In this case, they wanted
+		 * to allocate as many huge pages as possible. Set count to
+		 * largest possible value to align with their intention.
+		 */
+		if (count < old_count)
+			count = ULONG_MAX;
+	}
+
+	/*
 	 * Gigantic pages runtime allocation depend on the capability for large
 	 * page range allocation.
 	 * If the system does not provide this feature, return an error when
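
The wraparound check added in the hunk above relies on unsigned arithmetic: if adding the global/per-node delta to a very large user-supplied count wraps past ULONG_MAX, the sum ends up smaller than the starting value. Below is a minimal standalone sketch of the same idiom; the function and variable names are illustrative only and are not kernel code.

#include <limits.h>
#include <stdio.h>

/*
 * Illustrative (non-kernel) sketch of the saturation idiom used in
 * set_max_huge_pages() above: adding the global/per-node delta to a very
 * large user-supplied count can wrap around; when the sum becomes smaller
 * than the starting value, the wrap is detected and the count saturates
 * at ULONG_MAX, preserving the "as many pages as possible" intent.
 */
static unsigned long adjust_count(unsigned long count,
				  unsigned long global_pages,
				  unsigned long node_pages)
{
	unsigned long old_count = count;

	count += global_pages - node_pages;	/* may wrap */
	if (count < old_count)			/* wrap detected */
		count = ULONG_MAX;
	return count;
}

int main(void)
{
	/* e.g. the user wrote ULONG_MAX - 1 to a per-node nr_hugepages file */
	printf("%lu\n", adjust_count(ULONG_MAX - 1, 100, 2));	/* prints ULONG_MAX */
	return 0;
}
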
@@ -2446,15 +2466,22 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
 		}
 	} else if (nodes_allowed) {
 		/*
-		 * per node hstate attribute: adjust count to global,
-		 * but restrict alloc/free to the specified node.
+		 * Node specific request. count adjustment happens in
+		 * set_max_huge_pages() after acquiring hugetlb_lock.
 		 */
-		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
 		init_nodemask_of_node(nodes_allowed, nid);
-	} else
-		nodes_allowed = &node_states[N_MEMORY];
+	} else {
+		/*
+		 * Node specific request, but we could not allocate the few
+		 * words required for a node mask. We are unlikely to hit
+		 * this condition. Since we can not pass down the appropriate
+		 * node mask, just return ENOMEM.
+		 */
+		err = -ENOMEM;
+		goto out;
+	}
 
-	err = set_max_huge_pages(h, count, nodes_allowed);
+	err = set_max_huge_pages(h, count, nid, nodes_allowed);
 
 out:
 	if (nodes_allowed != &node_states[N_MEMORY])
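
For context, the per-node attribute handled by __nr_hugepages_store_common() is exposed through sysfs under /sys/devices/system/node/nodeN/hugepages/. The snippet below is a small userspace sketch of writing it; the node number and huge page size are assumptions for illustration and should be adjusted for the target system.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* assumed example: node 0, 2 MB huge pages */
	const char *path =
		"/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages";
	const char *val = "4\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}
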