 include/linux/mmzone.h |  5
 mm/sparse.c            | 22
 2 files changed, 25 insertions(+), 2 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e82fc1a52cd0..d6120fa69116 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -509,6 +509,10 @@ struct mem_section {
* pages. However, it is stored with some other magic.
* (see sparse.c::sparse_init_one_section())
*
+ * Additionally during early boot we encode node id of
+ * the location of the section here to guide allocation.
+ * (see sparse.c::memory_present())
+ *
* Making it a UL at least makes someone do a cast
* before using it wrong.
*/
@@ -548,6 +552,7 @@ extern int __section_nr(struct mem_section* ms);
#define SECTION_HAS_MEM_MAP (1UL<<1)
#define SECTION_MAP_LAST_BIT (1UL<<2)
#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
+#define SECTION_NID_SHIFT 2
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
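
For reference, a minimal user-space sketch of how the new SECTION_NID_SHIFT lets section_mem_map carry a node id above the existing flag bits during early boot. This is not part of the patch; the constants mirror the definitions above, while main() and the printed values are purely illustrative.

#include <stdio.h>

#define SECTION_MARKED_PRESENT  (1UL<<0)
#define SECTION_HAS_MEM_MAP     (1UL<<1)
#define SECTION_MAP_LAST_BIT    (1UL<<2)
#define SECTION_MAP_MASK        (~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT       2

int main(void)
{
        int nid = 3;
        /* Early boot: node id sits above the flag bits, present bit below. */
        unsigned long early = ((unsigned long)nid << SECTION_NID_SHIFT) |
                              SECTION_MARKED_PRESENT;

        printf("encoded=%#lx nid=%lu present=%lu\n",
               early,
               early >> SECTION_NID_SHIFT,
               early & SECTION_MARKED_PRESENT);
        return 0;
}

Because the node id sits entirely above SECTION_NID_SHIFT, shifting right recovers it without any explicit masking of the flag bits.
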
diff --git a/mm/sparse.c b/mm/sparse.c
index 100040c0dfb6..e0a3fe48aa37 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -99,6 +99,22 @@ int __section_nr(struct mem_section* ms)
return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
+/*
+ * During early boot, before section_mem_map is used for an actual
+ * mem_map, we use section_mem_map to store the section's NUMA
+ * node. This keeps us from having to use another data structure. The
+ * node information is cleared just before we store the real mem_map.
+ */
+static inline unsigned long sparse_encode_early_nid(int nid)
+{
+ return (nid << SECTION_NID_SHIFT);
+}
+
+static inline int sparse_early_nid(struct mem_section *section)
+{
+ return (section->section_mem_map >> SECTION_NID_SHIFT);
+}
+
/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
@@ -113,7 +129,8 @@ void memory_present(int nid, unsigned long start, unsigned long end)
ms = __nr_to_section(section);
if (!ms->section_mem_map)
- ms->section_mem_map = SECTION_MARKED_PRESENT;
+ ms->section_mem_map = sparse_encode_early_nid(nid) |
+ SECTION_MARKED_PRESENT;
}
}
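
Below is a standalone sketch of the early marking that memory_present() now performs: every present section records its node id alongside the present flag. It is illustrative only; PAGES_PER_DEMO_SECTION, NR_DEMO_SECTIONS and the flat demo_map[] array are made-up stand-ins for the kernel's real section machinery.

#include <stdio.h>

#define SECTION_MARKED_PRESENT  (1UL<<0)
#define SECTION_NID_SHIFT       2
#define PAGES_PER_DEMO_SECTION  (1UL<<15)       /* assumed section size */
#define NR_DEMO_SECTIONS        8

static unsigned long demo_map[NR_DEMO_SECTIONS];

/* Simplified model of memory_present(): tag each section with its node. */
static void demo_memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        for (pfn = start; pfn < end; pfn += PAGES_PER_DEMO_SECTION) {
                unsigned long nr = pfn / PAGES_PER_DEMO_SECTION;

                if (!demo_map[nr])
                        demo_map[nr] = ((unsigned long)nid << SECTION_NID_SHIFT) |
                                       SECTION_MARKED_PRESENT;
        }
}

int main(void)
{
        int i;

        demo_memory_present(1, 0, 2 * PAGES_PER_DEMO_SECTION);
        for (i = 0; i < NR_DEMO_SECTIONS; i++)
                printf("section %d: nid=%lu present=%lu\n", i,
                       demo_map[i] >> SECTION_NID_SHIFT,
                       demo_map[i] & SECTION_MARKED_PRESENT);
        return 0;
}
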
@@ -164,6 +181,7 @@ static int sparse_init_one_section(struct mem_section *ms,
if (!valid_section(ms))
return -EINVAL;
+ ms->section_mem_map &= ~SECTION_MAP_MASK;
ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);
return 1;
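
A small illustrative sketch of why the new "&= ~SECTION_MAP_MASK" line is needed: the early-boot node id lives in the same upper bits that the encoded mem_map will occupy, so it must be cleared first while the low flag bits are preserved. The encoded value below is a made-up placeholder, not the kernel's real encoding.

#include <stdio.h>

#define SECTION_MARKED_PRESENT  (1UL<<0)
#define SECTION_HAS_MEM_MAP     (1UL<<1)
#define SECTION_MAP_LAST_BIT    (1UL<<2)
#define SECTION_MAP_MASK        (~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT       2

int main(void)
{
        /* Section marked present on node 2 during early boot. */
        unsigned long map = (2UL << SECTION_NID_SHIFT) | SECTION_MARKED_PRESENT;
        /* Illustrative stand-in for an encoded mem_map value plus its flag. */
        unsigned long encoded = 0x1000000UL | SECTION_HAS_MEM_MAP;

        map &= ~SECTION_MAP_MASK;       /* drop the early nid, keep flags */
        map |= encoded;                 /* then install the mapping       */

        printf("flags=%#lx upper=%#lx\n",
               map & ~SECTION_MAP_MASK, map & SECTION_MAP_MASK);
        return 0;
}
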
@@ -172,8 +190,8 @@ static int sparse_init_one_section(struct mem_section *ms,
static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
{
struct page *map;
- int nid = early_pfn_to_nid(section_nr_to_pfn(pnum));
struct mem_section *ms = __nr_to_section(pnum);
+ int nid = sparse_early_nid(ms);
map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
if (map)