From 77404d81cadf192cc1261d6269f622a06b83cdd5 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 28 Apr 2015 13:44:00 -0400 Subject: ARM: MCPM: remove backward compatibility code Now that no one uses the old callbacks anymore, let's remove them and associated support code. Signed-off-by: Nicolas Pitre Acked-by: Dave Martin --- arch/arm/common/mcpm_entry.c | 45 +++++--------------------------------------- 1 file changed, 5 insertions(+), 40 deletions(-) (limited to 'arch/arm/common') diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c index 5f8a52ac7edf..0908f96278c4 100644 --- a/arch/arm/common/mcpm_entry.c +++ b/arch/arm/common/mcpm_entry.c @@ -78,16 +78,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster) bool cpu_is_down, cluster_is_down; int ret = 0; + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); if (!platform_ops) return -EUNATCH; /* try not to shadow power_up errors */ might_sleep(); - /* backward compatibility callback */ - if (platform_ops->power_up) - return platform_ops->power_up(cpu, cluster); - - pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); - /* * Since this is called with IRQs enabled, and no arch_spin_lock_irq * variant exists, we need to disable IRQs manually here. @@ -128,29 +123,17 @@ void mcpm_cpu_power_down(void) bool cpu_going_down, last_man; phys_reset_t phys_reset; + mpidr = read_cpuid_mpidr(); + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); if (WARN_ON_ONCE(!platform_ops)) return; BUG_ON(!irqs_disabled()); - /* - * Do this before calling into the power_down method, - * as it might not always be safe to do afterwards. - */ setup_mm_for_reboot(); - /* backward compatibility callback */ - if (platform_ops->power_down) { - platform_ops->power_down(); - goto not_dead; - } - - mpidr = read_cpuid_mpidr(); - cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); - cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); - pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); - __mcpm_cpu_going_down(cpu, cluster); - arch_spin_lock(&mcpm_lock); BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP); @@ -187,7 +170,6 @@ void mcpm_cpu_power_down(void) if (cpu_going_down) wfi(); -not_dead: /* * It is possible for a power_up request to happen concurrently * with a power_down request for the same CPU. In this case the @@ -224,17 +206,6 @@ void mcpm_cpu_suspend(u64 expected_residency) if (WARN_ON_ONCE(!platform_ops)) return; - /* backward compatibility callback */ - if (platform_ops->suspend) { - phys_reset_t phys_reset; - BUG_ON(!irqs_disabled()); - setup_mm_for_reboot(); - platform_ops->suspend(expected_residency); - phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); - phys_reset(virt_to_phys(mcpm_entry_point)); - BUG(); - } - /* Some platforms might have to enable special resume modes, etc. 
*/ if (platform_ops->cpu_suspend_prepare) { unsigned int mpidr = read_cpuid_mpidr(); @@ -256,12 +227,6 @@ int mcpm_cpu_powered_up(void) if (!platform_ops) return -EUNATCH; - /* backward compatibility callback */ - if (platform_ops->powered_up) { - platform_ops->powered_up(); - return 0; - } - mpidr = read_cpuid_mpidr(); cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); -- cgit v1.2.3-55-g7522 From 7cc8b991cdc985aaa73bf9c429c810cd442fb74d Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 28 Apr 2015 14:11:07 -0400 Subject: ARM: MCPM: make internal helpers private to the core code This concerns the following helpers: __mcpm_cpu_going_down() __mcpm_cpu_down() __mcpm_outbound_enter_critical() __mcpm_outbound_leave_critical() __mcpm_cluster_state() They are and should only be used by the core code now. Therefore their declarations are removed from mcpm.h and their definitions are made static, hence the need to move them before their users which accounts for the bulk of this patch. This left the mcpm_sync_struct definition at an odd location, therefore it is moved as well with some comment clarifications. Signed-off-by: Nicolas Pitre Acked-by: Dave Martin --- arch/arm/common/mcpm_entry.c | 229 ++++++++++++++++++++++--------------------- arch/arm/include/asm/mcpm.h | 52 +++++----- 2 files changed, 138 insertions(+), 143 deletions(-) (limited to 'arch/arm/common') diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c index 0908f96278c4..c5fe2e33e6c3 100644 --- a/arch/arm/common/mcpm_entry.c +++ b/arch/arm/common/mcpm_entry.c @@ -20,6 +20,121 @@ #include #include + +struct sync_struct mcpm_sync; + +/* + * __mcpm_cpu_going_down: Indicates that the cpu is being torn down. + * This must be called at the point of committing to teardown of a CPU. + * The CPU cache (SCTRL.C bit) is expected to still be active. + */ +static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster) +{ + mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; + sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); +} + +/* + * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the + * cluster can be torn down without disrupting this CPU. + * To avoid deadlocks, this must be called before a CPU is powered down. + * The CPU cache (SCTRL.C bit) is expected to be off. + * However L2 cache might or might not be active. + */ +static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster) +{ + dmb(); + mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; + sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); + sev(); +} + +/* + * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section. + * @state: the final state of the cluster: + * CLUSTER_UP: no destructive teardown was done and the cluster has been + * restored to the previous state (CPU cache still active); or + * CLUSTER_DOWN: the cluster has been torn-down, ready for power-off + * (CPU cache disabled, L2 cache either enabled or disabled). + */ +static void __mcpm_outbound_leave_critical(unsigned int cluster, int state) +{ + dmb(); + mcpm_sync.clusters[cluster].cluster = state; + sync_cache_w(&mcpm_sync.clusters[cluster].cluster); + sev(); +} + +/* + * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section. + * This function should be called by the last man, after local CPU teardown + * is complete. CPU cache expected to be active. 
+ * + * Returns: + * false: the critical section was not entered because an inbound CPU was + * observed, or the cluster is already being set up; + * true: the critical section was entered: it is now safe to tear down the + * cluster. + */ +static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster) +{ + unsigned int i; + struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster]; + + /* Warn inbound CPUs that the cluster is being torn down: */ + c->cluster = CLUSTER_GOING_DOWN; + sync_cache_w(&c->cluster); + + /* Back out if the inbound cluster is already in the critical region: */ + sync_cache_r(&c->inbound); + if (c->inbound == INBOUND_COMING_UP) + goto abort; + + /* + * Wait for all CPUs to get out of the GOING_DOWN state, so that local + * teardown is complete on each CPU before tearing down the cluster. + * + * If any CPU has been woken up again from the DOWN state, then we + * shouldn't be taking the cluster down at all: abort in that case. + */ + sync_cache_r(&c->cpus); + for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) { + int cpustate; + + if (i == cpu) + continue; + + while (1) { + cpustate = c->cpus[i].cpu; + if (cpustate != CPU_GOING_DOWN) + break; + + wfe(); + sync_cache_r(&c->cpus[i].cpu); + } + + switch (cpustate) { + case CPU_DOWN: + continue; + + default: + goto abort; + } + } + + return true; + +abort: + __mcpm_outbound_leave_critical(cluster, CLUSTER_UP); + return false; +} + +static int __mcpm_cluster_state(unsigned int cluster) +{ + sync_cache_r(&mcpm_sync.clusters[cluster].cluster); + return mcpm_sync.clusters[cluster].cluster; +} + extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER]; void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr) @@ -299,120 +414,6 @@ int __init mcpm_loopback(void (*cache_disable)(void)) #endif -struct sync_struct mcpm_sync; - -/* - * __mcpm_cpu_going_down: Indicates that the cpu is being torn down. - * This must be called at the point of committing to teardown of a CPU. - * The CPU cache (SCTRL.C bit) is expected to still be active. - */ -void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster) -{ - mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; - sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); -} - -/* - * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the - * cluster can be torn down without disrupting this CPU. - * To avoid deadlocks, this must be called before a CPU is powered down. - * The CPU cache (SCTRL.C bit) is expected to be off. - * However L2 cache might or might not be active. - */ -void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster) -{ - dmb(); - mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; - sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); - sev(); -} - -/* - * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section. - * @state: the final state of the cluster: - * CLUSTER_UP: no destructive teardown was done and the cluster has been - * restored to the previous state (CPU cache still active); or - * CLUSTER_DOWN: the cluster has been torn-down, ready for power-off - * (CPU cache disabled, L2 cache either enabled or disabled). - */ -void __mcpm_outbound_leave_critical(unsigned int cluster, int state) -{ - dmb(); - mcpm_sync.clusters[cluster].cluster = state; - sync_cache_w(&mcpm_sync.clusters[cluster].cluster); - sev(); -} - -/* - * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section. 
- * This function should be called by the last man, after local CPU teardown - * is complete. CPU cache expected to be active. - * - * Returns: - * false: the critical section was not entered because an inbound CPU was - * observed, or the cluster is already being set up; - * true: the critical section was entered: it is now safe to tear down the - * cluster. - */ -bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster) -{ - unsigned int i; - struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster]; - - /* Warn inbound CPUs that the cluster is being torn down: */ - c->cluster = CLUSTER_GOING_DOWN; - sync_cache_w(&c->cluster); - - /* Back out if the inbound cluster is already in the critical region: */ - sync_cache_r(&c->inbound); - if (c->inbound == INBOUND_COMING_UP) - goto abort; - - /* - * Wait for all CPUs to get out of the GOING_DOWN state, so that local - * teardown is complete on each CPU before tearing down the cluster. - * - * If any CPU has been woken up again from the DOWN state, then we - * shouldn't be taking the cluster down at all: abort in that case. - */ - sync_cache_r(&c->cpus); - for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) { - int cpustate; - - if (i == cpu) - continue; - - while (1) { - cpustate = c->cpus[i].cpu; - if (cpustate != CPU_GOING_DOWN) - break; - - wfe(); - sync_cache_r(&c->cpus[i].cpu); - } - - switch (cpustate) { - case CPU_DOWN: - continue; - - default: - goto abort; - } - } - - return true; - -abort: - __mcpm_outbound_leave_critical(cluster, CLUSTER_UP); - return false; -} - -int __mcpm_cluster_state(unsigned int cluster) -{ - sync_cache_r(&mcpm_sync.clusters[cluster].cluster); - return mcpm_sync.clusters[cluster].cluster; -} - extern unsigned long mcpm_power_up_setup_phys; int __init mcpm_sync_init( diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h index e2118c941dbf..6a40d5f8db60 100644 --- a/arch/arm/include/asm/mcpm.h +++ b/arch/arm/include/asm/mcpm.h @@ -245,35 +245,6 @@ struct mcpm_platform_ops { */ int __init mcpm_platform_register(const struct mcpm_platform_ops *ops); -/* Synchronisation structures for coordinating safe cluster setup/teardown: */ - -/* - * When modifying this structure, make sure you update the MCPM_SYNC_ defines - * to match. - */ -struct mcpm_sync_struct { - /* individual CPU states */ - struct { - s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE); - } cpus[MAX_CPUS_PER_CLUSTER]; - - /* cluster state */ - s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE); - - /* inbound-side state */ - s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE); -}; - -struct sync_struct { - struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS]; -}; - -void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster); -void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster); -void __mcpm_outbound_leave_critical(unsigned int cluster, int state); -bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster); -int __mcpm_cluster_state(unsigned int cluster); - /** * mcpm_sync_init - Initialize the cluster synchronization support * @@ -312,6 +283,29 @@ int __init mcpm_loopback(void (*cache_disable)(void)); void __init mcpm_smp_set_ops(void); +/* + * Synchronisation structures for coordinating safe cluster setup/teardown. + * This is private to the MCPM core code and shared between C and assembly. + * When modifying this structure, make sure you update the MCPM_SYNC_ defines + * to match. 
+ */ +struct mcpm_sync_struct { + /* individual CPU states */ + struct { + s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE); + } cpus[MAX_CPUS_PER_CLUSTER]; + + /* cluster state */ + s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE); + + /* inbound-side state */ + s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE); +}; + +struct sync_struct { + struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS]; +}; + #else /* -- cgit v1.2.3-55-g7522 From 1c2c7d51c8101ab3c5d8585713d2dcd52d77d33e Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 28 Apr 2015 14:51:36 -0400 Subject: ARM: MCPM: add references to the available documentation in the code Signed-off-by: Nicolas Pitre Acked-by: Dave Martin --- arch/arm/common/mcpm_entry.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm/common') diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c index c5fe2e33e6c3..492467587c58 100644 --- a/arch/arm/common/mcpm_entry.c +++ b/arch/arm/common/mcpm_entry.c @@ -20,6 +20,11 @@ #include #include +/* + * The public API for this code is documented in arch/arm/include/asm/mcpm.h. + * For a comprehensive description of the main algorithm used here, please + * see Documentation/arm/cluster-pm-race-avoidance.txt. + */ struct sync_struct mcpm_sync; -- cgit v1.2.3-55-g7522 From 7895f73169ade9a74940ae6b0b4ee82faf286861 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 28 Apr 2015 15:51:19 -0400 Subject: ARM: MCPM: remove residency argument from mcpm_cpu_suspend() This is currently unused. If a suspend must be limited to CPU level only by preventing the last man from triggering a cluster level suspend then this should be determined according to many other criteria the MCPM layer is currently not aware of. It is unlikely that mcpm_cpu_suspend() would be the proper conduit for that information anyway. Signed-off-by: Nicolas Pitre Acked-by: Dave Martin --- arch/arm/common/mcpm_entry.c | 2 +- arch/arm/include/asm/mcpm.h | 15 +++++---------- arch/arm/mach-exynos/suspend.c | 8 +------- drivers/cpuidle/cpuidle-big_little.c | 8 +------- 4 files changed, 8 insertions(+), 25 deletions(-) (limited to 'arch/arm/common') diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c index 492467587c58..a923524d1040 100644 --- a/arch/arm/common/mcpm_entry.c +++ b/arch/arm/common/mcpm_entry.c @@ -321,7 +321,7 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster) return ret; } -void mcpm_cpu_suspend(u64 expected_residency) +void mcpm_cpu_suspend(void) { if (WARN_ON_ONCE(!platform_ops)) return; diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h index 6a40d5f8db60..acd4983d9b1f 100644 --- a/arch/arm/include/asm/mcpm.h +++ b/arch/arm/include/asm/mcpm.h @@ -137,17 +137,12 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster); /** * mcpm_cpu_suspend - bring the calling CPU in a suspended state * - * @expected_residency: duration in microseconds the CPU is expected - * to remain suspended, or 0 if unknown/infinity. - * - * The calling CPU is suspended. The expected residency argument is used - * as a hint by the platform specific backend to implement the appropriate - * sleep state level according to the knowledge it has on wake-up latency - * for the given hardware. + * The calling CPU is suspended. This is similar to mcpm_cpu_power_down() + * except for possible extra platform specific configuration steps to allow + * an asynchronous wake-up e.g. with a pending interrupt. 
* * If this CPU is found to be the "last man standing" in the cluster - * then the cluster may be prepared for power-down too, if the expected - * residency makes it worthwhile. + * then the cluster may be prepared for power-down too. * * This must be called with interrupts disabled. * @@ -157,7 +152,7 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster); * This will return if mcpm_platform_register() has not been called * previously in which case the caller should take appropriate action. */ -void mcpm_cpu_suspend(u64 expected_residency); +void mcpm_cpu_suspend(void); /** * mcpm_cpu_powered_up - housekeeping workafter a CPU has been powered up diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index 3e6aea7f83af..372bd0b099c1 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -311,13 +311,7 @@ static int exynos5420_cpu_suspend(unsigned long arg) if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) { mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume); - - /* - * Residency value passed to mcpm_cpu_suspend back-end - * has to be given clear semantics. Set to 0 as a - * temporary value. - */ - mcpm_cpu_suspend(0); + mcpm_cpu_suspend(); } pr_info("Failed to suspend the system\n"); diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c index 40c34faffe59..db2ede565f1a 100644 --- a/drivers/cpuidle/cpuidle-big_little.c +++ b/drivers/cpuidle/cpuidle-big_little.c @@ -108,13 +108,7 @@ static int notrace bl_powerdown_finisher(unsigned long arg) unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); mcpm_set_entry_vector(cpu, cluster, cpu_resume); - - /* - * Residency value passed to mcpm_cpu_suspend back-end - * has to be given clear semantics. Set to 0 as a - * temporary value. - */ - mcpm_cpu_suspend(0); + mcpm_cpu_suspend(); /* return value != 0 means failure */ return 1; -- cgit v1.2.3-55-g7522 From 14327c662822e5e874cb971a7162067519300ca8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Apr 2015 14:17:25 +0100 Subject: ARM: replace BSYM() with badr assembly macro BSYM() was invented to allow us to work around a problem with the assembler, where local symbols resolved by the assembler for the 'adr' instruction did not take account of their ISA. Since we don't want BSYM() used elsewhere, replace BSYM() with a new macro 'badr', which is like the 'adr' pseudo-op, but with the BSYM() mechanics integrated into it. This ensures that the BSYM()-ification is only used in conjunction with 'adr'. 
Acked-by: Dave Martin Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/boot/compressed/head.S | 4 ++-- arch/arm/common/mcpm_head.S | 2 +- arch/arm/include/asm/assembler.h | 17 ++++++++++++++++- arch/arm/include/asm/entry-macro-multi.S | 4 ++-- arch/arm/include/asm/unified.h | 2 -- arch/arm/kernel/entry-armv.S | 12 ++++++------ arch/arm/kernel/entry-common.S | 6 +++--- arch/arm/kernel/entry-ftrace.S | 2 +- arch/arm/kernel/head-nommu.S | 6 +++--- arch/arm/kernel/head.S | 8 ++++---- arch/arm/kernel/sleep.S | 2 +- arch/arm/lib/call_with_stack.S | 2 +- arch/arm/mm/proc-v7m.S | 2 +- 13 files changed, 41 insertions(+), 28 deletions(-) (limited to 'arch/arm/common') diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 2c45b5709fa4..06e983f59980 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -130,7 +130,7 @@ start: .endr ARM( mov r0, r0 ) ARM( b 1f ) - THUMB( adr r12, BSYM(1f) ) + THUMB( badr r12, 1f ) THUMB( bx r12 ) .word _magic_sig @ Magic numbers to help the loader @@ -447,7 +447,7 @@ dtb_check_done: bl cache_clean_flush - adr r0, BSYM(restart) + badr r0, restart add r0, r0, r6 mov pc, r0 diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S index e02db4b81a66..08b3bb9bc6a2 100644 --- a/arch/arm/common/mcpm_head.S +++ b/arch/arm/common/mcpm_head.S @@ -49,7 +49,7 @@ ENTRY(mcpm_entry_point) ARM_BE8(setend be) - THUMB( adr r12, BSYM(1f) ) + THUMB( badr r12, 1f ) THUMB( bx r12 ) THUMB( .thumb ) 1: diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 186270b3e194..4abe57279c66 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -177,6 +177,21 @@ restore_irqs_notrace \oldcpsr .endm +/* + * Assembly version of "adr rd, BSYM(sym)". This should only be used to + * reference local symbols in the same assembly file which are to be + * resolved by the assembler. Other usage is undefined. + */ + .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo + .macro badr\c, rd, sym +#ifdef CONFIG_THUMB2_KERNEL + adr\c \rd, \sym + 1 +#else + adr\c \rd, \sym +#endif + .endm + .endr + /* * Get current thread_info. */ @@ -326,7 +341,7 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) bne 1f orr \reg, \reg, #PSR_A_BIT - adr lr, BSYM(2f) + badr lr, 2f msr spsr_cxsf, \reg __MSR_ELR_HYP(14) __ERET diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S index 469a2b30fa27..609184f522ee 100644 --- a/arch/arm/include/asm/entry-macro-multi.S +++ b/arch/arm/include/asm/entry-macro-multi.S @@ -10,7 +10,7 @@ @ @ routine called with r0 = irq number, r1 = struct pt_regs * @ - adrne lr, BSYM(1b) + badrne lr, 1b bne asm_do_IRQ #ifdef CONFIG_SMP @@ -23,7 +23,7 @@ ALT_SMP(test_for_ipi r0, r2, r6, lr) ALT_UP_B(9997f) movne r1, sp - adrne lr, BSYM(1b) + badrne lr, 1b bne do_IPI #endif 9997: diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h index 200f9a7cd623..a91ae499614c 100644 --- a/arch/arm/include/asm/unified.h +++ b/arch/arm/include/asm/unified.h @@ -45,7 +45,6 @@ #define THUMB(x...) x #ifdef __ASSEMBLY__ #define W(instr) instr.w -#define BSYM(sym) sym + 1 #else #define WASM(instr) #instr ".w" #endif @@ -59,7 +58,6 @@ #define THUMB(x...) 
#ifdef __ASSEMBLY__ #define W(instr) instr -#define BSYM(sym) sym #else #define WASM(instr) #instr #endif diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 570306c49406..f8f7398c74c2 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -40,7 +40,7 @@ #ifdef CONFIG_MULTI_IRQ_HANDLER ldr r1, =handle_arch_irq mov r0, sp - adr lr, BSYM(9997f) + badr lr, 9997f ldr pc, [r1] #else arch_irq_handler_default @@ -273,7 +273,7 @@ __und_svc: str r4, [sp, #S_PC] orr r0, r9, r0, lsl #16 #endif - adr r9, BSYM(__und_svc_finish) + badr r9, __und_svc_finish mov r2, r4 bl call_fpe @@ -469,7 +469,7 @@ __und_usr: @ instruction, or the more conventional lr if we are to treat @ this as a real undefined instruction @ - adr r9, BSYM(ret_from_exception) + badr r9, ret_from_exception @ IRQs must be enabled before attempting to read the instruction from @ user space since that could cause a page/translation fault if the @@ -486,7 +486,7 @@ __und_usr: @ r2 = PC value for the following instruction (:= regs->ARM_pc) @ r4 = PC value for the faulting instruction @ lr = 32-bit undefined instruction function - adr lr, BSYM(__und_usr_fault_32) + badr lr, __und_usr_fault_32 b call_fpe __und_usr_thumb: @@ -522,7 +522,7 @@ ARM_BE8(rev16 r0, r0) @ little endian instruction add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update orr r0, r0, r5, lsl #16 - adr lr, BSYM(__und_usr_fault_32) + badr lr, __und_usr_fault_32 @ r0 = the two 16-bit Thumb instructions which caused the exception @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc) @ r4 = PC value for the first 16-bit Thumb instruction @@ -716,7 +716,7 @@ __und_usr_fault_32: __und_usr_fault_16: mov r1, #2 1: mov r0, sp - adr lr, BSYM(ret_from_exception) + badr lr, ret_from_exception b __und_fault ENDPROC(__und_usr_fault_32) ENDPROC(__und_usr_fault_16) diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index f8ccc21fa032..6ab159384667 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -88,7 +88,7 @@ ENTRY(ret_from_fork) bl schedule_tail cmp r5, #0 movne r0, r4 - adrne lr, BSYM(1f) + badrne lr, 1f retne r5 1: get_thread_info tsk b ret_slow_syscall @@ -196,7 +196,7 @@ local_restart: bne __sys_trace cmp scno, #NR_syscalls @ check upper syscall limit - adr lr, BSYM(ret_fast_syscall) @ return address + badr lr, ret_fast_syscall @ return address ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine add r1, sp, #S_OFF @@ -231,7 +231,7 @@ __sys_trace: add r0, sp, #S_OFF bl syscall_trace_enter - adr lr, BSYM(__sys_trace_return) @ return address + badr lr, __sys_trace_return @ return address mov scno, r0 @ syscall number (possibly new) add r1, sp, #S_R0 + S_OFF @ pointer to regs cmp scno, #NR_syscalls @ check upper syscall limit diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S index fe57c73e70a4..c73c4030ca5d 100644 --- a/arch/arm/kernel/entry-ftrace.S +++ b/arch/arm/kernel/entry-ftrace.S @@ -87,7 +87,7 @@ 1: mcount_get_lr r1 @ lr of instrumented func mcount_adjust_addr r0, lr @ instrumented function - adr lr, BSYM(2f) + badr lr, 2f mov pc, r2 2: mcount_exit .endm diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index aebfbf79a1a3..b6f3cb6333e4 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -46,7 +46,7 @@ ENTRY(stext) .arm ENTRY(stext) - THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. 
+ THUMB( badr r9, 1f ) @ Kernel is always entered in ARM. THUMB( bx r9 ) @ If this is a Thumb-2 kernel, THUMB( .thumb ) @ switch to Thumb now. THUMB(1: ) @@ -79,7 +79,7 @@ ENTRY(stext) #endif ldr r13, =__mmap_switched @ address to jump to after @ initialising sctlr - adr lr, BSYM(1f) @ return (PIC) address + badr lr, 1f @ return (PIC) address ldr r12, [r10, #PROCINFO_INITFUNC] add r12, r12, r10 ret r12 @@ -115,7 +115,7 @@ ENTRY(secondary_startup) bl __setup_mpu @ Initialize the MPU #endif - adr lr, BSYM(__after_proc_init) @ return address + badr lr, __after_proc_init @ return address mov r13, r12 @ __secondary_switched address ldr r12, [r10, #PROCINFO_INITFUNC] add r12, r12, r10 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 3637973a9708..ab3c478aaced 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -80,7 +80,7 @@ ENTRY(stext) ARM_BE8(setend be ) @ ensure we are in BE8 mode - THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. + THUMB( badr r9, 1f ) @ Kernel is always entered in ARM. THUMB( bx r9 ) @ If this is a Thumb-2 kernel, THUMB( .thumb ) @ switch to Thumb now. THUMB(1: ) @@ -136,7 +136,7 @@ ENTRY(stext) */ ldr r13, =__mmap_switched @ address to jump to after @ mmu has been enabled - adr lr, BSYM(1f) @ return (PIC) address + badr lr, 1f @ return (PIC) address mov r8, r4 @ set TTBR1 to swapper_pg_dir ldr r12, [r10, #PROCINFO_INITFUNC] add r12, r12, r10 @@ -348,7 +348,7 @@ __turn_mmu_on_loc: .text ENTRY(secondary_startup_arm) .arm - THUMB( adr r9, BSYM(1f) ) @ Kernel is entered in ARM. + THUMB( badr r9, 1f ) @ Kernel is entered in ARM. THUMB( bx r9 ) @ If this is a Thumb-2 kernel, THUMB( .thumb ) @ switch to Thumb now. THUMB(1: ) @@ -384,7 +384,7 @@ ENTRY(secondary_startup) ldr r4, [r7, lr] @ get secondary_data.pgdir add r7, r7, #4 ldr r8, [r7, lr] @ get secondary_data.swapper_pg_dir - adr lr, BSYM(__enable_mmu) @ return address + badr lr, __enable_mmu @ return address mov r13, r12 @ __secondary_switched address ldr r12, [r10, #PROCINFO_INITFUNC] add r12, r12, r10 @ initialise processor diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 7d37bfc50830..76bb3128e135 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -81,7 +81,7 @@ ENTRY(__cpu_suspend) mov r1, r4 @ size of save block add r0, sp, #8 @ pointer to save block bl __cpu_suspend_save - adr lr, BSYM(cpu_suspend_abort) + badr lr, cpu_suspend_abort ldmfd sp!, {r0, pc} @ call suspend fn ENDPROC(__cpu_suspend) .ltorg diff --git a/arch/arm/lib/call_with_stack.S b/arch/arm/lib/call_with_stack.S index ed1a421813cb..bf3a40889205 100644 --- a/arch/arm/lib/call_with_stack.S +++ b/arch/arm/lib/call_with_stack.S @@ -35,7 +35,7 @@ ENTRY(call_with_stack) mov r2, r0 mov r0, r1 - adr lr, BSYM(1f) + badr lr, 1f ret r2 1: ldr lr, [sp] diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S index e08e1f2bab76..67d9209077c6 100644 --- a/arch/arm/mm/proc-v7m.S +++ b/arch/arm/mm/proc-v7m.S @@ -98,7 +98,7 @@ __v7m_setup: str r5, [r0, V7M_SCB_SHPR3] @ set PendSV priority @ SVC to run the kernel in this mode - adr r1, BSYM(1f) + badr r1, 1f ldr r5, [r12, #11 * 4] @ read the SVC vector entry str r1, [r12, #11 * 4] @ write the temporary SVC vector entry mov r6, lr @ save LR -- cgit v1.2.3-55-g7522 From 1e5f0519f4cbf8b8830b88039e16222f186a4ab4 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Mon, 18 May 2015 16:29:04 +0100 Subject: ARM: 8365/1: introduce sp804_timer_disable and remove arm_timer.h inclusion The header asm/hardware/arm_timer.h is included in various 
machine specific files to access TIMER_CTRL and initialise to a known state. This patch introduces a new function sp804_timer_disable to disable the SP804 timers and uses the same for initialising the timers to known(off) state, thereby removing the dependency on the header asm/hardware/arm_timer.h This change is in prepartion to move sp804 timer support out of arch/arm so that it can be used on ARM64 platforms. Cc: Daniel Lezcano Cc: Arnd Bergmann Cc: Olof Johansson Acked-by: Thomas Gleixner Signed-off-by: Sudeep Holla Signed-off-by: Russell King --- arch/arm/common/timer-sp.c | 5 +++++ arch/arm/include/asm/hardware/timer-sp.h | 1 + arch/arm/mach-integrator/integrator_ap.c | 1 - arch/arm/mach-realview/core.c | 9 ++++----- arch/arm/mach-versatile/core.c | 9 ++++----- 5 files changed, 14 insertions(+), 11 deletions(-) (limited to 'arch/arm/common') diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c index 19211324772f..000aea3722bc 100644 --- a/arch/arm/common/timer-sp.c +++ b/arch/arm/common/timer-sp.c @@ -71,6 +71,11 @@ static u64 notrace sp804_read(void) return ~readl_relaxed(sched_clock_base + TIMER_VALUE); } +void __init sp804_timer_disable(void __iomem *base) +{ + writel(0, base + TIMER_CTRL); +} + void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base, const char *name, struct clk *clk, diff --git a/arch/arm/include/asm/hardware/timer-sp.h b/arch/arm/include/asm/hardware/timer-sp.h index bb28af7c32de..05eaefa46742 100644 --- a/arch/arm/include/asm/hardware/timer-sp.h +++ b/arch/arm/include/asm/hardware/timer-sp.h @@ -4,6 +4,7 @@ void __sp804_clocksource_and_sched_clock_init(void __iomem *, const char *, struct clk *, int); void __sp804_clockevents_init(void __iomem *, unsigned int, struct clk *, const char *); +void sp804_timer_disable(void __iomem *); static inline void sp804_clocksource_init(void __iomem *base, const char *name) { diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c index 30003ba447a5..5b0e363fe5ba 100644 --- a/arch/arm/mach-integrator/integrator_ap.c +++ b/arch/arm/mach-integrator/integrator_ap.c @@ -37,7 +37,6 @@ #include #include -#include #include #include /* HZ */ #include diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index c309593abdb2..c611f489bdd2 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include @@ -381,10 +380,10 @@ void __init realview_timer_init(unsigned int timer_irq) /* * Initialise to a known state (all timers off) */ - writel(0, timer0_va_base + TIMER_CTRL); - writel(0, timer1_va_base + TIMER_CTRL); - writel(0, timer2_va_base + TIMER_CTRL); - writel(0, timer3_va_base + TIMER_CTRL); + sp804_timer_disable(timer0_va_base); + sp804_timer_disable(timer1_va_base); + sp804_timer_disable(timer2_va_base); + sp804_timer_disable(timer3_va_base); sp804_clocksource_init(timer3_va_base, "timer3"); sp804_clockevents_init(timer0_va_base, timer_irq, "timer0"); diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 6ea09fe53426..f98c1961be6a 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -42,7 +42,6 @@ #include #include -#include #include #include @@ -798,10 +797,10 @@ void __init versatile_timer_init(void) /* * Initialise to a known state (all timers off) */ - writel(0, TIMER0_VA_BASE + TIMER_CTRL); - writel(0, TIMER1_VA_BASE + TIMER_CTRL); - writel(0, TIMER2_VA_BASE + TIMER_CTRL); - writel(0, 
TIMER3_VA_BASE + TIMER_CTRL); + sp804_timer_disable(TIMER0_VA_BASE); + sp804_timer_disable(TIMER1_VA_BASE); + sp804_timer_disable(TIMER2_VA_BASE); + sp804_timer_disable(TIMER3_VA_BASE); sp804_clocksource_init(TIMER3_VA_BASE, "timer3"); sp804_clockevents_init(TIMER0_VA_BASE, IRQ_TIMERINT0_1, "timer0"); -- cgit v1.2.3-55-g7522 From 0b7402dce445ba0d11401c2cb806e8fc260c9e49 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Mon, 18 May 2015 16:29:40 +0100 Subject: ARM: 8366/1: move Dual-Timer SP804 driver to drivers/clocksource The ARM Dual-Timer SP804 module is peripheral found not only on ARM32 platforms but also on ARM64 platforms. This patch moves the driver out of arch/arm to driver/clocksource so that it can be used on ARM64 platforms also. Cc: Daniel Lezcano Cc: Rob Herring Cc: Arnd Bergmann Cc: Catalin Marinas Cc: Olof Johansson Acked-by: Thomas Gleixner Signed-off-by: Sudeep Holla Signed-off-by: Russell King --- arch/arm/Kconfig | 5 - arch/arm/common/Makefile | 1 - arch/arm/common/timer-sp.c | 309 ----------------------------- arch/arm/include/asm/hardware/arm_timer.h | 35 ---- arch/arm/include/asm/hardware/timer-sp.h | 24 --- arch/arm/mach-nspire/nspire.c | 2 - arch/arm/mach-realview/core.c | 4 +- arch/arm/mach-versatile/core.c | 3 +- drivers/clocksource/Kconfig | 5 + drivers/clocksource/Makefile | 1 + drivers/clocksource/timer-integrator-ap.c | 3 +- drivers/clocksource/timer-sp.h | 30 +++ drivers/clocksource/timer-sp804.c | 310 ++++++++++++++++++++++++++++++ include/clocksource/timer-sp804.h | 28 +++ 14 files changed, 380 insertions(+), 380 deletions(-) delete mode 100644 arch/arm/common/timer-sp.c delete mode 100644 arch/arm/include/asm/hardware/arm_timer.h delete mode 100644 arch/arm/include/asm/hardware/timer-sp.h create mode 100644 drivers/clocksource/timer-sp.h create mode 100644 drivers/clocksource/timer-sp804.c create mode 100644 include/clocksource/timer-sp804.h (limited to 'arch/arm/common') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index d0950ce75f3e..34b728583dae 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -975,11 +975,6 @@ config PLAT_PXA config PLAT_VERSATILE bool -config ARM_TIMER_SP804 - bool - select CLKSRC_MMIO - select CLKSRC_OF if OF - source "arch/arm/firmware/Kconfig" source arch/arm/mm/Kconfig diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile index 70b1eff477b3..6ee5959a813b 100644 --- a/arch/arm/common/Makefile +++ b/arch/arm/common/Makefile @@ -11,7 +11,6 @@ obj-$(CONFIG_SHARP_LOCOMO) += locomo.o obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o obj-$(CONFIG_SHARP_SCOOP) += scoop.o obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o -obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o CFLAGS_REMOVE_mcpm_entry.o = -pg AFLAGS_mcpm_head.o := -march=armv7-a diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c deleted file mode 100644 index 000aea3722bc..000000000000 --- a/arch/arm/common/timer-sp.c +++ /dev/null @@ -1,309 +0,0 @@ -/* - * linux/arch/arm/common/timer-sp.c - * - * Copyright (C) 1999 - 2003 ARM Limited - * Copyright (C) 2000 Deep Blue Solutions Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -static long __init sp804_get_clock_rate(struct clk *clk) -{ - long rate; - int err; - - err = clk_prepare(clk); - if (err) { - pr_err("sp804: clock failed to prepare: %d\n", err); - clk_put(clk); - return err; - } - - err = clk_enable(clk); - if (err) { - pr_err("sp804: clock failed to enable: %d\n", err); - clk_unprepare(clk); - clk_put(clk); - return err; - } - - rate = clk_get_rate(clk); - if (rate < 0) { - pr_err("sp804: clock failed to get rate: %ld\n", rate); - clk_disable(clk); - clk_unprepare(clk); - clk_put(clk); - } - - return rate; -} - -static void __iomem *sched_clock_base; - -static u64 notrace sp804_read(void) -{ - return ~readl_relaxed(sched_clock_base + TIMER_VALUE); -} - -void __init sp804_timer_disable(void __iomem *base) -{ - writel(0, base + TIMER_CTRL); -} - -void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base, - const char *name, - struct clk *clk, - int use_sched_clock) -{ - long rate; - - if (!clk) { - clk = clk_get_sys("sp804", name); - if (IS_ERR(clk)) { - pr_err("sp804: clock not found: %d\n", - (int)PTR_ERR(clk)); - return; - } - } - - rate = sp804_get_clock_rate(clk); - - if (rate < 0) - return; - - /* setup timer 0 as free-running clocksource */ - writel(0, base + TIMER_CTRL); - writel(0xffffffff, base + TIMER_LOAD); - writel(0xffffffff, base + TIMER_VALUE); - writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC, - base + TIMER_CTRL); - - clocksource_mmio_init(base + TIMER_VALUE, name, - rate, 200, 32, clocksource_mmio_readl_down); - - if (use_sched_clock) { - sched_clock_base = base; - sched_clock_register(sp804_read, 32, rate); - } -} - - -static void __iomem *clkevt_base; -static unsigned long clkevt_reload; - -/* - * IRQ handler for the timer - */ -static irqreturn_t sp804_timer_interrupt(int irq, void *dev_id) -{ - struct clock_event_device *evt = dev_id; - - /* clear the interrupt */ - writel(1, clkevt_base + TIMER_INTCLR); - - evt->event_handler(evt); - - return IRQ_HANDLED; -} - -static void sp804_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) -{ - unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE; - - writel(ctrl, clkevt_base + TIMER_CTRL); - - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - writel(clkevt_reload, clkevt_base + TIMER_LOAD); - ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE; - break; - - case CLOCK_EVT_MODE_ONESHOT: - /* period set, and timer enabled in 'next_event' hook */ - ctrl |= TIMER_CTRL_ONESHOT; - break; - - case CLOCK_EVT_MODE_UNUSED: - case CLOCK_EVT_MODE_SHUTDOWN: - default: - break; - } - - writel(ctrl, clkevt_base + TIMER_CTRL); -} - -static int sp804_set_next_event(unsigned long next, - struct clock_event_device *evt) -{ - unsigned long ctrl = readl(clkevt_base + TIMER_CTRL); - - writel(next, clkevt_base + TIMER_LOAD); - writel(ctrl | TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL); - - return 0; -} - -static struct clock_event_device sp804_clockevent = { - .features 
= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | - CLOCK_EVT_FEAT_DYNIRQ, - .set_mode = sp804_set_mode, - .set_next_event = sp804_set_next_event, - .rating = 300, -}; - -static struct irqaction sp804_timer_irq = { - .name = "timer", - .flags = IRQF_TIMER | IRQF_IRQPOLL, - .handler = sp804_timer_interrupt, - .dev_id = &sp804_clockevent, -}; - -void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name) -{ - struct clock_event_device *evt = &sp804_clockevent; - long rate; - - if (!clk) - clk = clk_get_sys("sp804", name); - if (IS_ERR(clk)) { - pr_err("sp804: %s clock not found: %d\n", name, - (int)PTR_ERR(clk)); - return; - } - - rate = sp804_get_clock_rate(clk); - if (rate < 0) - return; - - clkevt_base = base; - clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ); - evt->name = name; - evt->irq = irq; - evt->cpumask = cpu_possible_mask; - - writel(0, base + TIMER_CTRL); - - setup_irq(irq, &sp804_timer_irq); - clockevents_config_and_register(evt, rate, 0xf, 0xffffffff); -} - -static void __init sp804_of_init(struct device_node *np) -{ - static bool initialized = false; - void __iomem *base; - int irq; - u32 irq_num = 0; - struct clk *clk1, *clk2; - const char *name = of_get_property(np, "compatible", NULL); - - base = of_iomap(np, 0); - if (WARN_ON(!base)) - return; - - /* Ensure timers are disabled */ - writel(0, base + TIMER_CTRL); - writel(0, base + TIMER_2_BASE + TIMER_CTRL); - - if (initialized || !of_device_is_available(np)) - goto err; - - clk1 = of_clk_get(np, 0); - if (IS_ERR(clk1)) - clk1 = NULL; - - /* Get the 2nd clock if the timer has 3 timer clocks */ - if (of_count_phandle_with_args(np, "clocks", "#clock-cells") == 3) { - clk2 = of_clk_get(np, 1); - if (IS_ERR(clk2)) { - pr_err("sp804: %s clock not found: %d\n", np->name, - (int)PTR_ERR(clk2)); - clk2 = NULL; - } - } else - clk2 = clk1; - - irq = irq_of_parse_and_map(np, 0); - if (irq <= 0) - goto err; - - of_property_read_u32(np, "arm,sp804-has-irq", &irq_num); - if (irq_num == 2) { - __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name); - __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1); - } else { - __sp804_clockevents_init(base, irq, clk1 , name); - __sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE, - name, clk2, 1); - } - initialized = true; - - return; -err: - iounmap(base); -} -CLOCKSOURCE_OF_DECLARE(sp804, "arm,sp804", sp804_of_init); - -static void __init integrator_cp_of_init(struct device_node *np) -{ - static int init_count = 0; - void __iomem *base; - int irq; - const char *name = of_get_property(np, "compatible", NULL); - struct clk *clk; - - base = of_iomap(np, 0); - if (WARN_ON(!base)) - return; - clk = of_clk_get(np, 0); - if (WARN_ON(IS_ERR(clk))) - return; - - /* Ensure timer is disabled */ - writel(0, base + TIMER_CTRL); - - if (init_count == 2 || !of_device_is_available(np)) - goto err; - - if (!init_count) - __sp804_clocksource_and_sched_clock_init(base, name, clk, 0); - else { - irq = irq_of_parse_and_map(np, 0); - if (irq <= 0) - goto err; - - __sp804_clockevents_init(base, irq, clk, name); - } - - init_count++; - return; -err: - iounmap(base); -} -CLOCKSOURCE_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init); diff --git a/arch/arm/include/asm/hardware/arm_timer.h b/arch/arm/include/asm/hardware/arm_timer.h deleted file mode 100644 index d6030ff599db..000000000000 --- a/arch/arm/include/asm/hardware/arm_timer.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef __ASM_ARM_HARDWARE_ARM_TIMER_H -#define 
__ASM_ARM_HARDWARE_ARM_TIMER_H - -/* - * ARM timer implementation, found in Integrator, Versatile and Realview - * platforms. Not all platforms support all registers and bits in these - * registers, so we mark them with A for Integrator AP, C for Integrator - * CP, V for Versatile and R for Realview. - * - * Integrator AP has 16-bit timers, Integrator CP, Versatile and Realview - * can have 16-bit or 32-bit selectable via a bit in the control register. - * - * Every SP804 contains two identical timers. - */ -#define TIMER_1_BASE 0x00 -#define TIMER_2_BASE 0x20 - -#define TIMER_LOAD 0x00 /* ACVR rw */ -#define TIMER_VALUE 0x04 /* ACVR ro */ -#define TIMER_CTRL 0x08 /* ACVR rw */ -#define TIMER_CTRL_ONESHOT (1 << 0) /* CVR */ -#define TIMER_CTRL_32BIT (1 << 1) /* CVR */ -#define TIMER_CTRL_DIV1 (0 << 2) /* ACVR */ -#define TIMER_CTRL_DIV16 (1 << 2) /* ACVR */ -#define TIMER_CTRL_DIV256 (2 << 2) /* ACVR */ -#define TIMER_CTRL_IE (1 << 5) /* VR */ -#define TIMER_CTRL_PERIODIC (1 << 6) /* ACVR */ -#define TIMER_CTRL_ENABLE (1 << 7) /* ACVR */ - -#define TIMER_INTCLR 0x0c /* ACVR wo */ -#define TIMER_RIS 0x10 /* CVR ro */ -#define TIMER_MIS 0x14 /* CVR ro */ -#define TIMER_BGLOAD 0x18 /* CVR rw */ - -#endif diff --git a/arch/arm/include/asm/hardware/timer-sp.h b/arch/arm/include/asm/hardware/timer-sp.h deleted file mode 100644 index 05eaefa46742..000000000000 --- a/arch/arm/include/asm/hardware/timer-sp.h +++ /dev/null @@ -1,24 +0,0 @@ -struct clk; - -void __sp804_clocksource_and_sched_clock_init(void __iomem *, - const char *, struct clk *, int); -void __sp804_clockevents_init(void __iomem *, unsigned int, - struct clk *, const char *); -void sp804_timer_disable(void __iomem *); - -static inline void sp804_clocksource_init(void __iomem *base, const char *name) -{ - __sp804_clocksource_and_sched_clock_init(base, name, NULL, 0); -} - -static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base, - const char *name) -{ - __sp804_clocksource_and_sched_clock_init(base, name, NULL, 1); -} - -static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name) -{ - __sp804_clockevents_init(base, irq, NULL, name); - -} diff --git a/arch/arm/mach-nspire/nspire.c b/arch/arm/mach-nspire/nspire.c index 3445a5686805..34c2a1b32e7d 100644 --- a/arch/arm/mach-nspire/nspire.c +++ b/arch/arm/mach-nspire/nspire.c @@ -22,8 +22,6 @@ #include #include -#include - #include "mmio.h" #include "clcd.h" diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index c611f489bdd2..44575edc44b1 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c @@ -35,6 +35,8 @@ #include #include +#include + #include #include #include @@ -44,10 +46,8 @@ #include #include - #include #include -#include #include diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index f98c1961be6a..23a04fe5d2ad 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -41,6 +41,8 @@ #include #include +#include + #include #include #include @@ -51,7 +53,6 @@ #include #include #include -#include #include diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 51d7865fdddb..0f1c0e7f86da 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -132,6 +132,11 @@ config ARM_GLOBAL_TIMER help This options enables support for the ARM global timer unit +config ARM_TIMER_SP804 + bool "Support for Dual Timer SP804 module" + select CLKSRC_MMIO + select CLKSRC_OF if OF + config 
CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK bool depends on ARM_GLOBAL_TIMER diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 5b85f6adb258..5ca59f9b377f 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_MTK_TIMER) += mtk_timer.o obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o +obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c index b9efd30513d5..d7d21e4dcef0 100644 --- a/drivers/clocksource/timer-integrator-ap.c +++ b/drivers/clocksource/timer-integrator-ap.c @@ -26,7 +26,8 @@ #include #include #include -#include + +#include "timer-sp.h" static void __iomem * sched_clk_base; diff --git a/drivers/clocksource/timer-sp.h b/drivers/clocksource/timer-sp.h new file mode 100644 index 000000000000..050d88561e9c --- /dev/null +++ b/drivers/clocksource/timer-sp.h @@ -0,0 +1,30 @@ +/* + * ARM timer implementation, found in Integrator, Versatile and Realview + * platforms. Not all platforms support all registers and bits in these + * registers, so we mark them with A for Integrator AP, C for Integrator + * CP, V for Versatile and R for Realview. + * + * Integrator AP has 16-bit timers, Integrator CP, Versatile and Realview + * can have 16-bit or 32-bit selectable via a bit in the control register. + * + * Every SP804 contains two identical timers. + */ +#define TIMER_1_BASE 0x00 +#define TIMER_2_BASE 0x20 + +#define TIMER_LOAD 0x00 /* ACVR rw */ +#define TIMER_VALUE 0x04 /* ACVR ro */ +#define TIMER_CTRL 0x08 /* ACVR rw */ +#define TIMER_CTRL_ONESHOT (1 << 0) /* CVR */ +#define TIMER_CTRL_32BIT (1 << 1) /* CVR */ +#define TIMER_CTRL_DIV1 (0 << 2) /* ACVR */ +#define TIMER_CTRL_DIV16 (1 << 2) /* ACVR */ +#define TIMER_CTRL_DIV256 (2 << 2) /* ACVR */ +#define TIMER_CTRL_IE (1 << 5) /* VR */ +#define TIMER_CTRL_PERIODIC (1 << 6) /* ACVR */ +#define TIMER_CTRL_ENABLE (1 << 7) /* ACVR */ + +#define TIMER_INTCLR 0x0c /* ACVR wo */ +#define TIMER_RIS 0x10 /* CVR ro */ +#define TIMER_MIS 0x14 /* CVR ro */ +#define TIMER_BGLOAD 0x18 /* CVR rw */ diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c new file mode 100644 index 000000000000..ca02503f17d1 --- /dev/null +++ b/drivers/clocksource/timer-sp804.c @@ -0,0 +1,310 @@ +/* + * linux/drivers/clocksource/timer-sp.c + * + * Copyright (C) 1999 - 2003 ARM Limited + * Copyright (C) 2000 Deep Blue Solutions Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "timer-sp.h" + +static long __init sp804_get_clock_rate(struct clk *clk) +{ + long rate; + int err; + + err = clk_prepare(clk); + if (err) { + pr_err("sp804: clock failed to prepare: %d\n", err); + clk_put(clk); + return err; + } + + err = clk_enable(clk); + if (err) { + pr_err("sp804: clock failed to enable: %d\n", err); + clk_unprepare(clk); + clk_put(clk); + return err; + } + + rate = clk_get_rate(clk); + if (rate < 0) { + pr_err("sp804: clock failed to get rate: %ld\n", rate); + clk_disable(clk); + clk_unprepare(clk); + clk_put(clk); + } + + return rate; +} + +static void __iomem *sched_clock_base; + +static u64 notrace sp804_read(void) +{ + return ~readl_relaxed(sched_clock_base + TIMER_VALUE); +} + +void __init sp804_timer_disable(void __iomem *base) +{ + writel(0, base + TIMER_CTRL); +} + +void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base, + const char *name, + struct clk *clk, + int use_sched_clock) +{ + long rate; + + if (!clk) { + clk = clk_get_sys("sp804", name); + if (IS_ERR(clk)) { + pr_err("sp804: clock not found: %d\n", + (int)PTR_ERR(clk)); + return; + } + } + + rate = sp804_get_clock_rate(clk); + + if (rate < 0) + return; + + /* setup timer 0 as free-running clocksource */ + writel(0, base + TIMER_CTRL); + writel(0xffffffff, base + TIMER_LOAD); + writel(0xffffffff, base + TIMER_VALUE); + writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC, + base + TIMER_CTRL); + + clocksource_mmio_init(base + TIMER_VALUE, name, + rate, 200, 32, clocksource_mmio_readl_down); + + if (use_sched_clock) { + sched_clock_base = base; + sched_clock_register(sp804_read, 32, rate); + } +} + + +static void __iomem *clkevt_base; +static unsigned long clkevt_reload; + +/* + * IRQ handler for the timer + */ +static irqreturn_t sp804_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + + /* clear the interrupt */ + writel(1, clkevt_base + TIMER_INTCLR); + + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static void sp804_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE; + + writel(ctrl, clkevt_base + TIMER_CTRL); + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + writel(clkevt_reload, clkevt_base + TIMER_LOAD); + ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE; + break; + + case CLOCK_EVT_MODE_ONESHOT: + /* period set, and timer enabled in 'next_event' hook */ + ctrl |= TIMER_CTRL_ONESHOT; + break; + + case CLOCK_EVT_MODE_UNUSED: + case CLOCK_EVT_MODE_SHUTDOWN: + default: + break; + } + + writel(ctrl, clkevt_base + TIMER_CTRL); +} + +static int sp804_set_next_event(unsigned long next, + struct clock_event_device *evt) +{ + unsigned long ctrl = readl(clkevt_base + TIMER_CTRL); + + writel(next, clkevt_base + TIMER_LOAD); + writel(ctrl | TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL); + + return 0; +} + +static struct clock_event_device sp804_clockevent = { + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_DYNIRQ, + .set_mode = sp804_set_mode, + .set_next_event = sp804_set_next_event, + .rating = 300, +}; + +static struct irqaction sp804_timer_irq = { + .name = "timer", + 
.flags = IRQF_TIMER | IRQF_IRQPOLL, + .handler = sp804_timer_interrupt, + .dev_id = &sp804_clockevent, +}; + +void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name) +{ + struct clock_event_device *evt = &sp804_clockevent; + long rate; + + if (!clk) + clk = clk_get_sys("sp804", name); + if (IS_ERR(clk)) { + pr_err("sp804: %s clock not found: %d\n", name, + (int)PTR_ERR(clk)); + return; + } + + rate = sp804_get_clock_rate(clk); + if (rate < 0) + return; + + clkevt_base = base; + clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ); + evt->name = name; + evt->irq = irq; + evt->cpumask = cpu_possible_mask; + + writel(0, base + TIMER_CTRL); + + setup_irq(irq, &sp804_timer_irq); + clockevents_config_and_register(evt, rate, 0xf, 0xffffffff); +} + +static void __init sp804_of_init(struct device_node *np) +{ + static bool initialized = false; + void __iomem *base; + int irq; + u32 irq_num = 0; + struct clk *clk1, *clk2; + const char *name = of_get_property(np, "compatible", NULL); + + base = of_iomap(np, 0); + if (WARN_ON(!base)) + return; + + /* Ensure timers are disabled */ + writel(0, base + TIMER_CTRL); + writel(0, base + TIMER_2_BASE + TIMER_CTRL); + + if (initialized || !of_device_is_available(np)) + goto err; + + clk1 = of_clk_get(np, 0); + if (IS_ERR(clk1)) + clk1 = NULL; + + /* Get the 2nd clock if the timer has 3 timer clocks */ + if (of_count_phandle_with_args(np, "clocks", "#clock-cells") == 3) { + clk2 = of_clk_get(np, 1); + if (IS_ERR(clk2)) { + pr_err("sp804: %s clock not found: %d\n", np->name, + (int)PTR_ERR(clk2)); + clk2 = NULL; + } + } else + clk2 = clk1; + + irq = irq_of_parse_and_map(np, 0); + if (irq <= 0) + goto err; + + of_property_read_u32(np, "arm,sp804-has-irq", &irq_num); + if (irq_num == 2) { + __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name); + __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1); + } else { + __sp804_clockevents_init(base, irq, clk1 , name); + __sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE, + name, clk2, 1); + } + initialized = true; + + return; +err: + iounmap(base); +} +CLOCKSOURCE_OF_DECLARE(sp804, "arm,sp804", sp804_of_init); + +static void __init integrator_cp_of_init(struct device_node *np) +{ + static int init_count = 0; + void __iomem *base; + int irq; + const char *name = of_get_property(np, "compatible", NULL); + struct clk *clk; + + base = of_iomap(np, 0); + if (WARN_ON(!base)) + return; + clk = of_clk_get(np, 0); + if (WARN_ON(IS_ERR(clk))) + return; + + /* Ensure timer is disabled */ + writel(0, base + TIMER_CTRL); + + if (init_count == 2 || !of_device_is_available(np)) + goto err; + + if (!init_count) + __sp804_clocksource_and_sched_clock_init(base, name, clk, 0); + else { + irq = irq_of_parse_and_map(np, 0); + if (irq <= 0) + goto err; + + __sp804_clockevents_init(base, irq, clk, name); + } + + init_count++; + return; +err: + iounmap(base); +} +CLOCKSOURCE_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init); diff --git a/include/clocksource/timer-sp804.h b/include/clocksource/timer-sp804.h new file mode 100644 index 000000000000..1f8a1caa7cb4 --- /dev/null +++ b/include/clocksource/timer-sp804.h @@ -0,0 +1,28 @@ +#ifndef __CLKSOURCE_TIMER_SP804_H +#define __CLKSOURCE_TIMER_SP804_H + +struct clk; + +void __sp804_clocksource_and_sched_clock_init(void __iomem *, + const char *, struct clk *, int); +void __sp804_clockevents_init(void __iomem *, unsigned int, + struct clk *, const char *); +void sp804_timer_disable(void __iomem *); + +static 
inline void sp804_clocksource_init(void __iomem *base, const char *name) +{ + __sp804_clocksource_and_sched_clock_init(base, name, NULL, 0); +} + +static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base, + const char *name) +{ + __sp804_clocksource_and_sched_clock_init(base, name, NULL, 1); +} + +static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name) +{ + __sp804_clockevents_init(base, irq, NULL, name); + +} +#endif -- cgit v1.2.3-55-g7522
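Editorial note, not part of the patch series above: purely as an illustration of the relocated SP804 interface after the last patch, here is a minimal sketch of how a board file might now consume <clocksource/timer-sp804.h> instead of the removed asm/hardware headers. The function names sp804_timer_disable(), sp804_clocksource_init() and sp804_clockevents_init(), and the 0x20 offset of the second timer in the dual-timer block, come from the headers shown in the diffs; the physical address, IRQ number and the board_sp804_init() wrapper are invented placeholders, not anything defined by these patches.

#include <linux/init.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <clocksource/timer-sp804.h>

/* Hypothetical board parameters -- placeholders only. */
#define BOARD_SP804_PHYS	0x10011000UL
#define BOARD_SP804_IRQ		36

static void __init board_sp804_init(void)
{
	void __iomem *base = ioremap(BOARD_SP804_PHYS, SZ_4K);

	if (!base)
		return;

	/* Put both timers of the dual-timer block into a known (off) state. */
	sp804_timer_disable(base);
	sp804_timer_disable(base + 0x20);	/* second timer, TIMER_2_BASE */

	/* Second timer as free-running clocksource, first as clockevent. */
	sp804_clocksource_init(base + 0x20, "timer1");
	sp804_clockevents_init(base, BOARD_SP804_IRQ, "timer0");
}

This mirrors what realview_timer_init() and versatile_timer_init() do in the patches above, only with the new include path and with the mapping details reduced to placeholders.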