Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile          13
-rw-r--r--  arch/arm/kernel/bios32.c           9
-rw-r--r--  arch/arm/kernel/entry-common.S     4
-rw-r--r--  arch/arm/kernel/entry-header.S   124
-rw-r--r--  arch/arm/kernel/entry-v7m.S      143
-rw-r--r--  arch/arm/kernel/head-nommu.S      10
-rw-r--r--  arch/arm/kernel/psci.c             7
-rw-r--r--  arch/arm/kernel/psci_smp.c        84
-rw-r--r--  arch/arm/kernel/setup.c           26
-rw-r--r--  arch/arm/kernel/traps.c            8
10 files changed, 412 insertions(+), 16 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5f3338eacad2..f4285b5ffb05 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -15,7 +15,7 @@ CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.
-obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \
+obj-y := elf.o entry-common.o irq.o opcodes.o \
process.o ptrace.o return_address.o sched_clock.o \
setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
@@ -23,6 +23,12 @@ obj-$(CONFIG_ATAGS) += atags_parse.o
obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
+ifeq ($(CONFIG_CPU_V7M),y)
+obj-y += entry-v7m.o
+else
+obj-y += entry-armv.o
+endif
+
obj-$(CONFIG_OC_ETM) += etm.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
@@ -82,6 +88,9 @@ obj-$(CONFIG_DEBUG_LL) += debug.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
-obj-$(CONFIG_ARM_PSCI) += psci.o
+ifeq ($(CONFIG_ARM_PSCI),y)
+obj-y += psci.o
+obj-$(CONFIG_SMP) += psci_smp.o
+endif
extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index b2ed73c45489..261fcc826169 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -445,7 +445,8 @@ static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
return 0;
}
-static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
+static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
+ struct list_head *head)
{
struct pci_sys_data *sys = NULL;
int ret;
@@ -480,7 +481,7 @@ static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
if (hw->scan)
sys->bus = hw->scan(nr, sys);
else
- sys->bus = pci_scan_root_bus(NULL, sys->busnr,
+ sys->bus = pci_scan_root_bus(parent, sys->busnr,
hw->ops, sys, &sys->resources);
if (!sys->bus)
@@ -497,7 +498,7 @@ static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
}
}
-void pci_common_init(struct hw_pci *hw)
+void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
{
struct pci_sys_data *sys;
LIST_HEAD(head);
@@ -505,7 +506,7 @@ void pci_common_init(struct hw_pci *hw)
pci_add_flags(PCI_REASSIGN_ALL_RSRC);
if (hw->preinit)
hw->preinit();
- pcibios_init_hw(hw, &head);
+ pcibios_init_hw(parent, hw, &head);
if (hw->postinit)
hw->postinit();
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index bc5bc0a97131..85a72b0809ca 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -350,6 +350,9 @@ ENDPROC(ftrace_stub)
.align 5
ENTRY(vector_swi)
+#ifdef CONFIG_CPU_V7M
+ v7m_exception_entry
+#else
sub sp, sp, #S_FRAME_SIZE
stmia sp, {r0 - r12} @ Calling r0 - r12
ARM( add r8, sp, #S_PC )
@@ -360,6 +363,7 @@ ENTRY(vector_swi)
str lr, [sp, #S_PC] @ Save calling PC
str r8, [sp, #S_PSR] @ Save CPSR
str r0, [sp, #S_OLD_R0] @ Save OLD_R0
+#endif
zero_fp
/*
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 160f3376ba6d..de23a9beed13 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -5,6 +5,7 @@
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
+#include <asm/v7m.h>
@ Bad Abort numbers
@ -----------------
@@ -44,6 +45,116 @@
#endif
.endm
+#ifdef CONFIG_CPU_V7M
+/*
+ * ARMv7-M exception entry/exit macros.
+ *
+ * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
+ * automatically saved on the current stack (32 bytes) before
+ * switching to the exception stack (SP_main).
+ *
+ * If exception is taken while in user mode, SP_main is
+ * empty. Otherwise, SP_main is aligned to 64 bit automatically
+ * (CCR.STKALIGN set).
+ *
+ * Linux assumes that the interrupts are disabled when entering an
+ * exception handler and it may BUG if this is not the case. Interrupts
+ * are disabled during entry and reenabled in the exit macro.
+ *
+ * v7m_exception_slow_exit is used when returning from SVC or PendSV.
+ * When returning to kernel mode, we don't return from exception.
+ */
+ .macro v7m_exception_entry
+ @ determine the location of the registers saved by the core during
+ @ exception entry. Depending on the mode the cpu was in when the
+ @ exception happened, that is either on the main or the process stack.
+ @ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
+ @ was used.
+ tst lr, #EXC_RET_STACK_MASK
+ mrsne r12, psp
+ moveq r12, sp
+
+ @ we cannot rely on r0-r3 and r12 matching the value saved in the
+ @ exception frame because of tail-chaining. So these have to be
+ @ reloaded.
+ ldmia r12!, {r0-r3}
+
+ @ Linux expects to have irqs off. Do it here before taking stack space
+ cpsid i
+
+ sub sp, #S_FRAME_SIZE-S_IP
+ stmdb sp!, {r0-r11}
+
+ @ load saved r12, lr, return address and xPSR.
+ @ r0-r7 are used for signals and never touched from now on. Clobbering
+ @ r8-r12 is OK.
+ mov r9, r12
+ ldmia r9!, {r8, r10-r12}
+
+ @ calculate the original stack pointer value.
+ @ r9 currently points to the memory location just above the auto saved
+ @ xPSR.
+ @ The cpu might automatically 8-byte align the stack. Bit 9
+ @ of the saved xPSR specifies if stack aligning took place. In this case
+ @ another 32-bit value is included in the stack.
+
+ tst r12, V7M_xPSR_FRAMEPTRALIGN
+ addne r9, r9, #4
+
+ @ store saved r12 using str to have a register to hold the base for stm
+ str r8, [sp, #S_IP]
+ add r8, sp, #S_SP
+ @ store r13-r15, xPSR
+ stmia r8!, {r9-r12}
+ @ store old_r0
+ str r0, [r8]
+ .endm
+
+ /*
+ * PENDSV and SVCALL are configured to have the same exception
+ * priorities. As a kernel thread runs at SVCALL execution priority it
+ * can never be preempted and so we will never have to return to a
+ * kernel thread here.
+ */
+ .macro v7m_exception_slow_exit ret_r0
+ cpsid i
+ ldr lr, =EXC_RET_THREADMODE_PROCESSSTACK
+
+ @ read original r12, sp, lr, pc and xPSR
+ add r12, sp, #S_IP
+ ldmia r12, {r1-r5}
+
+ @ an exception frame is always 8-byte aligned. To tell the hardware if
+ @ the sp to be restored is aligned or not set bit 9 of the saved xPSR
+ @ accordingly.
+ tst r2, #4
+ subne r2, r2, #4
+ orrne r5, V7M_xPSR_FRAMEPTRALIGN
+ biceq r5, V7M_xPSR_FRAMEPTRALIGN
+
+ @ write basic exception frame
+ stmdb r2!, {r1, r3-r5}
+ ldmia sp, {r1, r3-r5}
+ .if \ret_r0
+ stmdb r2!, {r0, r3-r5}
+ .else
+ stmdb r2!, {r1, r3-r5}
+ .endif
+
+ @ restore process sp
+ msr psp, r2
+
+ @ restore original r4-r11
+ ldmia sp!, {r0-r11}
+
+ @ restore main sp
+ add sp, sp, #S_FRAME_SIZE-S_IP
+
+ cpsie i
+ bx lr
+ .endm
+#endif /* CONFIG_CPU_V7M */
+
@
@ Store/load the USER SP and LR registers by switching to the SYS
@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
@@ -165,6 +276,18 @@
rfeia sp!
.endm
+#ifdef CONFIG_CPU_V7M
+ /*
+ * Note we don't need to do clrex here as clearing the local monitor is
+ * part of each exception entry and exit sequence.
+ */
+ .macro restore_user_regs, fast = 0, offset = 0
+ .if \offset
+ add sp, #\offset
+ .endif
+ v7m_exception_slow_exit ret_r0 = \fast
+ .endm
+#else /* ifdef CONFIG_CPU_V7M */
.macro restore_user_regs, fast = 0, offset = 0
clrex @ clear the exclusive monitor
mov r2, sp
@@ -181,6 +304,7 @@
add sp, sp, #S_FRAME_SIZE - S_SP
movs pc, lr @ return & move spsr_svc into cpsr
.endm
+#endif /* ifdef CONFIG_CPU_V7M / else */
.macro get_thread_info, rd
mov \rd, sp
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
new file mode 100644
index 000000000000..e00621f1403f
--- /dev/null
+++ b/arch/arm/kernel/entry-v7m.S
@@ -0,0 +1,143 @@
+/*
+ * linux/arch/arm/kernel/entry-v7m.S
+ *
+ * Copyright (C) 2008 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Low-level vector interface routines for the ARMv7-M architecture
+ */
+#include <asm/memory.h>
+#include <asm/glue.h>
+#include <asm/thread_notify.h>
+#include <asm/v7m.h>
+
+#include <mach/entry-macro.S>
+
+#include "entry-header.S"
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+#error "CONFIG_TRACE_IRQFLAGS not supported on the current ARMv7M implementation"
+#endif
+
+__invalid_entry:
+ v7m_exception_entry
+ adr r0, strerr
+ mrs r1, ipsr
+ mov r2, lr
+ bl printk
+ mov r0, sp
+ bl show_regs
+1: b 1b
+ENDPROC(__invalid_entry)
+
+strerr: .asciz "\nUnhandled exception: IPSR = %08lx LR = %08lx\n"
+
+ .align 2
+__irq_entry:
+ v7m_exception_entry
+
+ @
+ @ Invoke the IRQ handler
+ @
+ mrs r0, ipsr
+ ldr r1, =V7M_xPSR_EXCEPTIONNO
+ and r0, r1
+ sub r0, #16
+ mov r1, sp
+ stmdb sp!, {lr}
+ @ routine called with r0 = irq number, r1 = struct pt_regs *
+ bl nvic_do_IRQ
+
+ pop {lr}
+ @
+ @ Check for any pending work if returning to user
+ @
+ ldr r1, =BASEADDR_V7M_SCB
+ ldr r0, [r1, V7M_SCB_ICSR]
+ tst r0, V7M_SCB_ICSR_RETTOBASE
+ beq 2f
+
+ get_thread_info tsk
+ ldr r2, [tsk, #TI_FLAGS]
+ tst r2, #_TIF_WORK_MASK
+ beq 2f @ no work pending
+ mov r0, #V7M_SCB_ICSR_PENDSVSET
+ str r0, [r1, V7M_SCB_ICSR] @ raise PendSV
+
+2:
+ @ registers r0-r3 and r12 are automatically restored on exception
+ @ return. r4-r7 were not clobbered in v7m_exception_entry so for
+ @ correctness they don't need to be restored. So only r8-r11 must be
+ @ restored here. The easiest way to do so is to restore r0-r7, too.
+ ldmia sp!, {r0-r11}
+ add sp, #S_FRAME_SIZE-S_IP
+ cpsie i
+ bx lr
+ENDPROC(__irq_entry)
+
+__pendsv_entry:
+ v7m_exception_entry
+
+ ldr r1, =BASEADDR_V7M_SCB
+ mov r0, #V7M_SCB_ICSR_PENDSVCLR
+ str r0, [r1, V7M_SCB_ICSR] @ clear PendSV
+
+ @ execute the pending work, including reschedule
+ get_thread_info tsk
+ mov why, #0
+ b ret_to_user
+ENDPROC(__pendsv_entry)
+
+/*
+ * Register switch for ARMv7-M processors.
+ * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
+ * previous and next are guaranteed not to be the same.
+ */
+ENTRY(__switch_to)
+ .fnstart
+ .cantunwind
+ add ip, r1, #TI_CPU_SAVE
+ stmia ip!, {r4 - r11} @ Store most regs on stack
+ str sp, [ip], #4
+ str lr, [ip], #4
+ mov r5, r0
+ add r4, r2, #TI_CPU_SAVE
+ ldr r0, =thread_notify_head
+ mov r1, #THREAD_NOTIFY_SWITCH
+ bl atomic_notifier_call_chain
+ mov ip, r4
+ mov r0, r5
+ ldmia ip!, {r4 - r11} @ Load all regs saved previously
+ ldr sp, [ip]
+ ldr pc, [ip, #4]!
+ .fnend
+ENDPROC(__switch_to)
+
+ .data
+ .align 8
+/*
+ * Vector table (64 words => 256 bytes natural alignment)
+ */
+ENTRY(vector_table)
+ .long 0 @ 0 - Reset stack pointer
+ .long __invalid_entry @ 1 - Reset
+ .long __invalid_entry @ 2 - NMI
+ .long __invalid_entry @ 3 - HardFault
+ .long __invalid_entry @ 4 - MemManage
+ .long __invalid_entry @ 5 - BusFault
+ .long __invalid_entry @ 6 - UsageFault
+ .long __invalid_entry @ 7 - Reserved
+ .long __invalid_entry @ 8 - Reserved
+ .long __invalid_entry @ 9 - Reserved
+ .long __invalid_entry @ 10 - Reserved
+ .long vector_swi @ 11 - SVCall
+ .long __invalid_entry @ 12 - Debug Monitor
+ .long __invalid_entry @ 13 - Reserved
+ .long __pendsv_entry @ 14 - PendSV
+ .long __invalid_entry @ 15 - SysTick
+ .rept 64 - 16
+ .long __irq_entry @ 16..64 - External Interrupts
+ .endr
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 6a2e09c952c7..8812ce88f7a1 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -19,6 +19,7 @@
#include <asm/asm-offsets.h>
#include <asm/cp15.h>
#include <asm/thread_info.h>
+#include <asm/v7m.h>
/*
* Kernel startup entry point.
@@ -50,10 +51,13 @@ ENTRY(stext)
setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
@ and irqs disabled
-#ifndef CONFIG_CPU_CP15
- ldr r9, =CONFIG_PROCESSOR_ID
-#else
+#if defined(CONFIG_CPU_CP15)
mrc p15, 0, r9, c0, c0 @ get processor id
+#elif defined(CONFIG_CPU_V7M)
+ ldr r9, =BASEADDR_V7M_SCB
+ ldr r9, [r9, V7M_SCB_CPUID]
+#else
+ ldr r9, =CONFIG_PROCESSOR_ID
#endif
bl __lookup_processor_type @ r5=procinfo r9=cpuid
movs r10, r5 @ invalid processor (r5=0)?
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
index 36531643cc2c..46931880093d 100644
--- a/arch/arm/kernel/psci.c
+++ b/arch/arm/kernel/psci.c
@@ -158,7 +158,7 @@ static const struct of_device_id psci_of_match[] __initconst = {
{},
};
-static int __init psci_init(void)
+void __init psci_init(void)
{
struct device_node *np;
const char *method;
@@ -166,7 +166,7 @@ static int __init psci_init(void)
np = of_find_matching_node(NULL, psci_of_match);
if (!np)
- return 0;
+ return;
pr_info("probing function IDs from device-tree\n");
@@ -206,6 +206,5 @@ static int __init psci_init(void)
out_put_node:
of_node_put(np);
- return 0;
+ return;
}
-early_initcall(psci_init);
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
new file mode 100644
index 000000000000..23a11424c568
--- /dev/null
+++ b/arch/arm/kernel/psci_smp.c
@@ -0,0 +1,84 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+
+#include <asm/psci.h>
+#include <asm/smp_plat.h>
+
+/*
+ * psci_smp assumes that the following is true about PSCI:
+ *
+ * cpu_suspend Suspend the execution on a CPU
+ * @state we don't currently describe affinity levels, so just pass 0.
+ * @entry_point the first instruction to be executed on return
+ * returns 0 success, < 0 on failure
+ *
+ * cpu_off Power down a CPU
+ * @state we don't currently describe affinity levels, so just pass 0.
+ * no return on successful call
+ *
+ * cpu_on Power up a CPU
+ * @cpuid cpuid of target CPU, as from MPIDR
+ * @entry_point the first instruction to be executed on return
+ * returns 0 success, < 0 on failure
+ *
+ * migrate Migrate the context to a different CPU
+ * @cpuid cpuid of target CPU, as from MPIDR
+ * returns 0 success, < 0 on failure
+ *
+ */
+
+extern void secondary_startup(void);
+
+static int __cpuinit psci_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
+{
+ if (psci_ops.cpu_on)
+ return psci_ops.cpu_on(cpu_logical_map(cpu),
+ __pa(secondary_startup));
+ return -ENODEV;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void __ref psci_cpu_die(unsigned int cpu)
+{
+ const struct psci_power_state ps = {
+ .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
+ };
+
+ if (psci_ops.cpu_off)
+ psci_ops.cpu_off(ps);
+
+ /* We should never return */
+ panic("psci: cpu %d failed to shutdown\n", cpu);
+}
+#else
+#define psci_cpu_die NULL
+#endif
+
+bool __init psci_smp_available(void)
+{
+ /* is cpu_on available at least? */
+ return (psci_ops.cpu_on != NULL);
+}
+
+struct smp_operations __initdata psci_smp_ops = {
+ .smp_boot_secondary = psci_boot_secondary,
+ .cpu_die = psci_cpu_die,
+};
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b4b1d397592b..1c8278de6c46 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -37,6 +37,7 @@
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
+#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@@ -128,7 +129,9 @@ struct stack {
u32 und[3];
} ____cacheline_aligned;
+#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
+#endif
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);
@@ -207,7 +210,7 @@ static const char *proc_arch[] = {
"5TEJ",
"6TEJ",
"7",
- "?(11)",
+ "7M",
"?(12)",
"?(13)",
"?(14)",
@@ -216,6 +219,12 @@ static const char *proc_arch[] = {
"?(17)",
};
+#ifdef CONFIG_CPU_V7M
+static int __get_cpu_architecture(void)
+{
+ return CPU_ARCH_ARMv7M;
+}
+#else
static int __get_cpu_architecture(void)
{
int cpu_arch;
@@ -248,6 +257,7 @@ static int __get_cpu_architecture(void)
return cpu_arch;
}
+#endif
int __pure cpu_architecture(void)
{
@@ -293,7 +303,9 @@ static void __init cacheid_init(void)
{
unsigned int arch = cpu_architecture();
- if (arch >= CPU_ARCH_ARMv6) {
+ if (arch == CPU_ARCH_ARMv7M) {
+ cacheid = 0;
+ } else if (arch >= CPU_ARCH_ARMv6) {
unsigned int cachetype = read_cpuid_cachetype();
if ((cachetype & (7 << 29)) == 4 << 29) {
/* ARMv7 register format */
@@ -392,6 +404,7 @@ static void __init feat_v6_fixup(void)
*/
void notrace cpu_init(void)
{
+#ifndef CONFIG_CPU_V7M
unsigned int cpu = smp_processor_id();
struct stack *stk = &stacks[cpu];
@@ -442,6 +455,7 @@ void notrace cpu_init(void)
"I" (offsetof(struct stack, und[0])),
PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
: "r14");
+#endif
}
u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
@@ -796,9 +810,15 @@ void __init setup_arch(char **cmdline_p)
unflatten_device_tree();
arm_dt_init_cpu_maps();
+ psci_init();
#ifdef CONFIG_SMP
if (is_smp()) {
- smp_set_ops(mdesc->smp);
+ if (!mdesc->smp_init || !mdesc->smp_init()) {
+ if (psci_smp_available())
+ smp_set_ops(&psci_smp_ops);
+ else if (mdesc->smp)
+ smp_set_ops(mdesc->smp);
+ }
smp_init_cpus();
}
#endif
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 18b32e8e4497..486e12a0f26a 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -812,6 +812,7 @@ static void __init kuser_get_tls_init(unsigned long vectors)
void __init early_trap_init(void *vectors_base)
{
+#ifndef CONFIG_CPU_V7M
unsigned long vectors = (unsigned long)vectors_base;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
@@ -843,4 +844,11 @@ void __init early_trap_init(void *vectors_base)
flush_icache_range(vectors, vectors + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+#else /* ifndef CONFIG_CPU_V7M */
+ /*
+ * on V7-M there is no need to copy the vector table to a dedicated
+ * memory area. The address is configurable and so a table in the kernel
+ * image can be used.
+ */
+#endif
}