From 60305db9884515ca063474e262b454f6da04e4e2 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Thu, 22 Jan 2015 10:01:40 +0000
Subject: arm64/efi: move virtmap init to early initcall

Now that the create_mapping() code in mm/mmu.c is able to support
setting up kernel page tables at initcall time, we can move the whole
virtmap creation to arm64_enable_runtime_services() instead of having
a distinct stage during early boot. This also allows us to drop the
arm64-specific EFI_VIRTMAP flag.

Signed-off-by: Ard Biesheuvel
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/efi.c | 111 +++++++++++++++++++++++++++++-------------------------
 1 file changed, 53 insertions(+), 58 deletions(-)

(limited to 'arch/arm64/kernel/efi.c')

diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index c9cb0fbe7aa4..b42c7b480e1e 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -38,6 +38,19 @@ struct efi_memory_map memmap;
 
 static u64 efi_system_table;
 
+static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
+
+static struct mm_struct efi_mm = {
+	.mm_rb			= RB_ROOT,
+	.pgd			= efi_pgd,
+	.mm_users		= ATOMIC_INIT(2),
+	.mm_count		= ATOMIC_INIT(1),
+	.mmap_sem		= __RWSEM_INITIALIZER(efi_mm.mmap_sem),
+	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
+	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
+	INIT_MM_CONTEXT(efi_mm)
+};
+
 static int uefi_debug __initdata;
 static int __init uefi_debug_setup(char *str)
 {
@@ -213,6 +226,45 @@ void __init efi_init(void)
 		return;
 
 	reserve_regions();
+	early_memunmap(memmap.map, params.mmap_size);
+}
+
+static bool __init efi_virtmap_init(void)
+{
+	efi_memory_desc_t *md;
+
+	for_each_efi_memory_desc(&memmap, md) {
+		u64 paddr, npages, size;
+		pgprot_t prot;
+
+		if (!(md->attribute & EFI_MEMORY_RUNTIME))
+			continue;
+		if (md->virt_addr == 0)
+			return false;
+
+		paddr = md->phys_addr;
+		npages = md->num_pages;
+		memrange_efi_to_native(&paddr, &npages);
+		size = npages << PAGE_SHIFT;
+
+		pr_info("  EFI remap 0x%016llx => %p\n",
+			md->phys_addr, (void *)md->virt_addr);
+
+		/*
+		 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+		 * executable, everything else can be mapped with the XN bits
+		 * set.
+		 */
+		if (!is_normal_ram(md))
+			prot = __pgprot(PROT_DEVICE_nGnRE);
+		else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+			prot = PAGE_KERNEL_EXEC;
+		else
+			prot = PAGE_KERNEL;
+
+		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+	}
+	return true;
 }
 
 /*
@@ -254,7 +306,7 @@ static int __init arm64_enable_runtime_services(void)
 	}
 	set_bit(EFI_SYSTEM_TABLES, &efi.flags);
 
-	if (!efi_enabled(EFI_VIRTMAP)) {
+	if (!efi_virtmap_init()) {
 		pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
 		return -1;
 	}
@@ -283,19 +335,6 @@ static int __init arm64_dmi_init(void)
 }
 core_initcall(arm64_dmi_init);
 
-static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
-
-static struct mm_struct efi_mm = {
-	.mm_rb			= RB_ROOT,
-	.pgd			= efi_pgd,
-	.mm_users		= ATOMIC_INIT(2),
-	.mm_count		= ATOMIC_INIT(1),
-	.mmap_sem		= __RWSEM_INITIALIZER(efi_mm.mmap_sem),
-	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
-	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
-	INIT_MM_CONTEXT(efi_mm)
-};
-
 static void efi_set_pgd(struct mm_struct *mm)
 {
 	cpu_switch_mm(mm->pgd, mm);
@@ -315,47 +354,3 @@ void efi_virtmap_unload(void)
 	efi_set_pgd(current->active_mm);
 	preempt_enable();
 }
-
-void __init efi_virtmap_init(void)
-{
-	efi_memory_desc_t *md;
-
-	if (!efi_enabled(EFI_BOOT))
-		return;
-
-	for_each_efi_memory_desc(&memmap, md) {
-		u64 paddr, npages, size;
-		pgprot_t prot;
-
-		if (!(md->attribute & EFI_MEMORY_RUNTIME))
-			continue;
-		if (WARN(md->virt_addr == 0,
-			 "UEFI virtual mapping incomplete or missing -- no entry found for 0x%llx\n",
-			 md->phys_addr))
-			return;
-
-		paddr = md->phys_addr;
-		npages = md->num_pages;
-		memrange_efi_to_native(&paddr, &npages);
-		size = npages << PAGE_SHIFT;
-
-		pr_info("  EFI remap 0x%016llx => %p\n",
-			md->phys_addr, (void *)md->virt_addr);
-
-		/*
-		 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
-		 * executable, everything else can be mapped with the XN bits
-		 * set.
-		 */
-		if (!is_normal_ram(md))
-			prot = __pgprot(PROT_DEVICE_nGnRE);
-		else if (md->type == EFI_RUNTIME_SERVICES_CODE)
-			prot = PAGE_KERNEL_EXEC;
-		else
-			prot = PAGE_KERNEL;
-
-		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
-	}
-	set_bit(EFI_VIRTMAP, &efi.flags);
-	early_memunmap(memmap.map, memmap.map_end - memmap.map);
-}
-- 
cgit v1.2.3-55-g7522