Diffstat (limited to 'drivers/clocksource')
-rw-r--r--  drivers/clocksource/Kconfig                                              22
-rw-r--r--  drivers/clocksource/Makefile                                              3
-rw-r--r--  drivers/clocksource/arm_arch_timer.c                                    143
-rw-r--r--  drivers/clocksource/timer-atmel-tcb.c (renamed from drivers/clocksource/tcb_clksrc.c)  126
-rw-r--r--  drivers/clocksource/timer-fsl-ftm.c                                      15
-rw-r--r--  drivers/clocksource/timer-ixp4xx.c                                      282
-rw-r--r--  drivers/clocksource/timer-milbeaut.c                                     66
-rw-r--r--  drivers/clocksource/timer-oxnas-rps.c                                     2
-rw-r--r--  drivers/clocksource/timer-sun4i.c                                         5
-rw-r--r--  drivers/clocksource/timer-tegra20.c                                      63
-rw-r--r--  drivers/clocksource/timer-ti-dm.c                                        29
11 files changed, 529 insertions, 227 deletions
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 171502a356aa..6bcaa4e2e72c 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -69,6 +69,13 @@ config FTTMR010_TIMER
Enables support for the Faraday Technology timer block
FTTMR010.
+config IXP4XX_TIMER
+ bool "Intel XScale IXP4xx timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ help
+ Enables support for the Intel XScale IXP4xx SoC timer.
+
config ROCKCHIP_TIMER
bool "Rockchip timer driver" if COMPILE_TEST
depends on ARM || ARM64
@@ -145,6 +152,7 @@ config VT8500_TIMER
config NPCM7XX_TIMER
bool "NPCM7xx timer driver" if COMPILE_TEST
depends on HAS_IOMEM
+ select TIMER_OF
select CLKSRC_MMIO
help
Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
@@ -379,7 +387,7 @@ config ARM_GLOBAL_TIMER
This options enables support for the ARM global timer unit
config ARM_TIMER_SP804
- bool "Support for Dual Timer SP804 module"
+ bool "Support for Dual Timer SP804 module" if COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && CLKDEV_LOOKUP
select CLKSRC_MMIO
select TIMER_OF if OF
@@ -399,8 +407,11 @@ config ARMV7M_SYSTICK
This options enables support for the ARMv7M system timer unit
config ATMEL_PIT
+ bool "Atmel PIT support" if COMPILE_TEST
+ depends on HAS_IOMEM
select TIMER_OF if OF
- def_bool SOC_AT91SAM9 || SOC_SAMA5
+ help
+ Support for the Periodic Interval Timer found on Atmel SoCs.
config ATMEL_ST
bool "Atmel ST timer support" if COMPILE_TEST
@@ -410,6 +421,13 @@ config ATMEL_ST
help
Support for the Atmel ST timer.
+config ATMEL_TCB_CLKSRC
+ bool "Atmel TC Block timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select TIMER_OF if OF
+ help
+ Support for Timer Counter Blocks on Atmel SoCs.
+
config CLKSRC_EXYNOS_MCT
bool "Exynos multi core timer driver" if COMPILE_TEST
depends on ARM || ARM64
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index be6e0fbc7489..236858fa7fbf 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_TIMER_OF) += timer-of.o
obj-$(CONFIG_TIMER_PROBE) += timer-probe.o
obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o
obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o
-obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
+obj-$(CONFIG_ATMEL_TCB_CLKSRC) += timer-atmel-tcb.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += timer-cs5535.o
@@ -20,6 +20,7 @@ obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o
+obj-$(CONFIG_IXP4XX_TIMER) += timer-ixp4xx.o
obj-$(CONFIG_ROCKCHIP_TIMER) += timer-rockchip.o
obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index aa4ec53281ce..b2a951a798e2 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -9,7 +9,7 @@
* published by the Free Software Foundation.
*/
-#define pr_fmt(fmt) "arm_arch_timer: " fmt
+#define pr_fmt(fmt) "arch_timer: " fmt
#include <linux/init.h>
#include <linux/kernel.h>
@@ -33,9 +33,6 @@
#include <clocksource/arm_arch_timer.h>
-#undef pr_fmt
-#define pr_fmt(fmt) "arch_timer: " fmt
-
#define CNTTIDR 0x08
#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
@@ -152,6 +149,26 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
return val;
}
+static u64 arch_counter_get_cntpct_stable(void)
+{
+ return __arch_counter_get_cntpct_stable();
+}
+
+static u64 arch_counter_get_cntpct(void)
+{
+ return __arch_counter_get_cntpct();
+}
+
+static u64 arch_counter_get_cntvct_stable(void)
+{
+ return __arch_counter_get_cntvct_stable();
+}
+
+static u64 arch_counter_get_cntvct(void)
+{
+ return __arch_counter_get_cntvct();
+}
+
/*
* Default to cp15 based access because arm64 uses this function for
* sched_clock() before DT is probed and the cp15 method is guaranteed
@@ -319,13 +336,6 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
}
#endif
-#ifdef CONFIG_ARM64_ERRATUM_1188873
-static u64 notrace arm64_1188873_read_cntvct_el0(void)
-{
- return read_sysreg(cntvct_el0);
-}
-#endif
-
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
/*
* The low bits of the counter registers are indeterminate while bit 10 or
@@ -372,8 +382,7 @@ static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
-DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
-EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
+static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
struct clock_event_device *clk)
@@ -457,14 +466,6 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
},
#endif
-#ifdef CONFIG_ARM64_ERRATUM_1188873
- {
- .match_type = ate_match_local_cap_id,
- .id = (void *)ARM64_WORKAROUND_1188873,
- .desc = "ARM erratum 1188873",
- .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
- },
-#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
{
.match_type = ate_match_dt,
@@ -552,11 +553,8 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
per_cpu(timer_unstable_counter_workaround, i) = wa;
}
- /*
- * Use the locked version, as we're called from the CPU
- * hotplug framework. Otherwise, we end-up in deadlock-land.
- */
- static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);
+ if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
+ atomic_set(&timer_unstable_counter_workaround_in_use, 1);
/*
* Don't use the vdso fastpath if errata require using the
@@ -573,7 +571,7 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
void *arg)
{
- const struct arch_timer_erratum_workaround *wa;
+ const struct arch_timer_erratum_workaround *wa, *__wa;
ate_match_fn_t match_fn = NULL;
bool local = false;
@@ -597,53 +595,32 @@ static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type t
if (!wa)
return;
- if (needs_unstable_timer_counter_workaround()) {
- const struct arch_timer_erratum_workaround *__wa;
- __wa = __this_cpu_read(timer_unstable_counter_workaround);
- if (__wa && wa != __wa)
- pr_warn("Can't enable workaround for %s (clashes with %s\n)",
- wa->desc, __wa->desc);
+ __wa = __this_cpu_read(timer_unstable_counter_workaround);
+ if (__wa && wa != __wa)
+ pr_warn("Can't enable workaround for %s (clashes with %s\n)",
+ wa->desc, __wa->desc);
- if (__wa)
- return;
- }
+ if (__wa)
+ return;
arch_timer_enable_workaround(wa, local);
pr_info("Enabling %s workaround for %s\n",
local ? "local" : "global", wa->desc);
}
-#define erratum_handler(fn, r, ...) \
-({ \
- bool __val; \
- if (needs_unstable_timer_counter_workaround()) { \
- const struct arch_timer_erratum_workaround *__wa; \
- __wa = __this_cpu_read(timer_unstable_counter_workaround); \
- if (__wa && __wa->fn) { \
- r = __wa->fn(__VA_ARGS__); \
- __val = true; \
- } else { \
- __val = false; \
- } \
- } else { \
- __val = false; \
- } \
- __val; \
-})
-
static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
- const struct arch_timer_erratum_workaround *wa;
+ return has_erratum_handler(read_cntvct_el0);
+}
- wa = __this_cpu_read(timer_unstable_counter_workaround);
- return wa && wa->read_cntvct_el0;
+static bool arch_timer_counter_has_wa(void)
+{
+ return atomic_read(&timer_unstable_counter_workaround_in_use);
}
#else
#define arch_timer_check_ool_workaround(t,a) do { } while(0)
-#define erratum_set_next_event_tval_virt(...) ({BUG(); 0;})
-#define erratum_set_next_event_tval_phys(...) ({BUG(); 0;})
-#define erratum_handler(fn, r, ...) ({false;})
#define arch_timer_this_cpu_has_cntvct_wa() ({false;})
+#define arch_timer_counter_has_wa() ({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
static __always_inline irqreturn_t timer_handler(const int access,
@@ -736,11 +713,6 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
static int arch_timer_set_next_event_virt(unsigned long evt,
struct clock_event_device *clk)
{
- int ret;
-
- if (erratum_handler(set_next_event_virt, ret, evt, clk))
- return ret;
-
set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
return 0;
}
@@ -748,11 +720,6 @@ static int arch_timer_set_next_event_virt(unsigned long evt,
static int arch_timer_set_next_event_phys(unsigned long evt,
struct clock_event_device *clk)
{
- int ret;
-
- if (erratum_handler(set_next_event_phys, ret, evt, clk))
- return ret;
-
set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
return 0;
}
@@ -777,6 +744,10 @@ static void __arch_timer_setup(unsigned type,
clk->features = CLOCK_EVT_FEAT_ONESHOT;
if (type == ARCH_TIMER_TYPE_CP15) {
+ typeof(clk->set_next_event) sne;
+
+ arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
+
if (arch_timer_c3stop)
clk->features |= CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";
@@ -787,20 +758,20 @@ static void __arch_timer_setup(unsigned type,
case ARCH_TIMER_VIRT_PPI:
clk->set_state_shutdown = arch_timer_shutdown_virt;
clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
- clk->set_next_event = arch_timer_set_next_event_virt;
+ sne = erratum_handler(set_next_event_virt);
break;
case ARCH_TIMER_PHYS_SECURE_PPI:
case ARCH_TIMER_PHYS_NONSECURE_PPI:
case ARCH_TIMER_HYP_PPI:
clk->set_state_shutdown = arch_timer_shutdown_phys;
clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
- clk->set_next_event = arch_timer_set_next_event_phys;
+ sne = erratum_handler(set_next_event_phys);
break;
default:
BUG();
}
- arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
+ clk->set_next_event = sne;
} else {
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
@@ -833,7 +804,11 @@ static void arch_timer_evtstrm_enable(int divider)
cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
| ARCH_TIMER_VIRT_EVT_EN;
arch_timer_set_cntkctl(cntkctl);
+#ifdef CONFIG_ARM64
+ cpu_set_named_feature(EVTSTRM);
+#else
elf_hwcap |= HWCAP_EVTSTRM;
+#endif
#ifdef CONFIG_COMPAT
compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
@@ -998,12 +973,22 @@ static void __init arch_counter_register(unsigned type)
/* Register the CP15 based counter if we have one */
if (type & ARCH_TIMER_TYPE_CP15) {
+ u64 (*rd)(void);
+
if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
- arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
- arch_timer_read_counter = arch_counter_get_cntvct;
- else
- arch_timer_read_counter = arch_counter_get_cntpct;
+ arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
+ if (arch_timer_counter_has_wa())
+ rd = arch_counter_get_cntvct_stable;
+ else
+ rd = arch_counter_get_cntvct;
+ } else {
+ if (arch_timer_counter_has_wa())
+ rd = arch_counter_get_cntpct_stable;
+ else
+ rd = arch_counter_get_cntpct;
+ }
+ arch_timer_read_counter = rd;
clocksource_counter.archdata.vdso_direct = vdso_default;
} else {
arch_timer_read_counter = arch_counter_get_cntvct_mem;
@@ -1055,7 +1040,11 @@ static int arch_timer_cpu_pm_notify(struct notifier_block *self,
} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
+#ifdef CONFIG_ARM64
+ if (cpu_have_named_feature(EVTSTRM))
+#else
if (elf_hwcap & HWCAP_EVTSTRM)
+#endif
cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
return NOTIFY_OK;
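
The hunks above drop the old bool-returning erratum_handler() macro and the arch_timer_read_ool_enabled static key: workaround callbacks are now resolved once, when __arch_timer_setup() assigns set_next_event and when arch_counter_register() picks a counter accessor. The function-returning erratum_handler() and has_erratum_handler() helpers referenced here are defined in arch header changes outside this drivers/clocksource view; the following is only an assumed sketch of that lookup pattern, not the header diff itself.

/*
 * Assumed sketch of erratum_handler()/has_erratum_handler(): consult the
 * per-CPU workaround pointer (declared in the diff above) once, and hand
 * back either the erratum hook or the default arch_timer_* function.
 * The real definitions live in the arch headers, not in this file.
 */
#define has_erratum_handler(h)						\
({									\
	const struct arch_timer_erratum_workaround *__wa;		\
	__wa = __this_cpu_read(timer_unstable_counter_workaround);	\
	(__wa && __wa->h);						\
})

#define erratum_handler(h)						\
({									\
	const struct arch_timer_erratum_workaround *__wa;		\
	__wa = __this_cpu_read(timer_unstable_counter_workaround);	\
	(__wa && __wa->h) ? __wa->h : arch_timer_##h;			\
})
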
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/timer-atmel-tcb.c
index f987027ca566..6ed31f9def7e 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/timer-atmel-tcb.c
@@ -9,9 +9,11 @@
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
-#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
#include <linux/syscore_ops.h>
-#include <linux/atmel_tc.h>
+#include <soc/at91/atmel_tcb.h>
/*
@@ -28,13 +30,6 @@
* source, used in either periodic or oneshot mode. This runs
* at 32 KiHZ, and can handle delays of up to two seconds.
*
- * A boot clocksource and clockevent source are also currently needed,
- * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
- * this code can be used when init_timers() is called, well before most
- * devices are set up. (Some low end AT91 parts, which can run uClinux,
- * have only the timers in one TC block... they currently don't support
- * the tclib code, because of that initialization issue.)
- *
* REVISIT behavior during system suspend states... we should disable
* all clocks and save the power. Easily done for clockevent devices,
* but clocksources won't necessarily get the needed notifications.
@@ -112,7 +107,6 @@ static void tc_clksrc_resume(struct clocksource *cs)
}
static struct clocksource clksrc = {
- .name = "tcb_clksrc",
.rating = 200,
.read = tc_get_cycles,
.mask = CLOCKSOURCE_MASK(32),
@@ -121,6 +115,16 @@ static struct clocksource clksrc = {
.resume = tc_clksrc_resume,
};
+static u64 notrace tc_sched_clock_read(void)
+{
+ return tc_get_cycles(&clksrc);
+}
+
+static u64 notrace tc_sched_clock_read32(void)
+{
+ return tc_get_cycles32(&clksrc);
+}
+
#ifdef CONFIG_GENERIC_CLOCKEVENTS
struct tc_clkevt_device {
@@ -214,7 +218,6 @@ static int tc_next_event(unsigned long delta, struct clock_event_device *d)
static struct tc_clkevt_device clkevt = {
.clkevt = {
- .name = "tc_clkevt",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
/* Should be lower than at91rm9200's system timer */
@@ -330,39 +333,74 @@ static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_id
writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}
-static int __init tcb_clksrc_init(void)
-{
- static char bootinfo[] __initdata
- = KERN_DEBUG "%s: tc%d at %d.%03d MHz\n";
+static const u8 atmel_tcb_divisors[5] = { 2, 8, 32, 128, 0, };
+
+static const struct of_device_id atmel_tcb_of_match[] = {
+ { .compatible = "atmel,at91rm9200-tcb", .data = (void *)16, },
+ { .compatible = "atmel,at91sam9x5-tcb", .data = (void *)32, },
+ { /* sentinel */ }
+};
- struct platform_device *pdev;
- struct atmel_tc *tc;
+static int __init tcb_clksrc_init(struct device_node *node)
+{
+ struct atmel_tc tc;
struct clk *t0_clk;
+ const struct of_device_id *match;
+ u64 (*tc_sched_clock)(void);
u32 rate, divided_rate = 0;
int best_divisor_idx = -1;
int clk32k_divisor_idx = -1;
+ int bits;
int i;
int ret;
- tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK);
- if (!tc) {
- pr_debug("can't alloc TC for clocksource\n");
- return -ENODEV;
+ /* Protect against multiple calls */
+ if (tcaddr)
+ return 0;
+
+ tc.regs = of_iomap(node->parent, 0);
+ if (!tc.regs)
+ return -ENXIO;
+
+ t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
+ if (IS_ERR(t0_clk))
+ return PTR_ERR(t0_clk);
+
+ tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
+ if (IS_ERR(tc.slow_clk))
+ return PTR_ERR(tc.slow_clk);
+
+ tc.clk[0] = t0_clk;
+ tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
+ if (IS_ERR(tc.clk[1]))
+ tc.clk[1] = t0_clk;
+ tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
+ if (IS_ERR(tc.clk[2]))
+ tc.clk[2] = t0_clk;
+
+ tc.irq[2] = of_irq_get(node->parent, 2);
+ if (tc.irq[2] <= 0) {
+ tc.irq[2] = of_irq_get(node->parent, 0);
+ if (tc.irq[2] <= 0)
+ return -EINVAL;
}
- tcaddr = tc->regs;
- pdev = tc->pdev;
- t0_clk = tc->clk[0];
+ match = of_match_node(atmel_tcb_of_match, node->parent);
+ bits = (uintptr_t)match->data;
+
+ for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
+ writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));
+
ret = clk_prepare_enable(t0_clk);
if (ret) {
pr_debug("can't enable T0 clk\n");
- goto err_free_tc;
+ return ret;
}
/* How fast will we be counting? Pick something over 5 MHz. */
rate = (u32) clk_get_rate(t0_clk);
- for (i = 0; i < 5; i++) {
- unsigned divisor = atmel_tc_divisors[i];
+ for (i = 0; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
+ unsigned divisor = atmel_tcb_divisors[i];
unsigned tmp;
/* remember 32 KiHz clock for later */
@@ -381,27 +419,31 @@ static int __init tcb_clksrc_init(void)
best_divisor_idx = i;
}
-
- printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
- divided_rate / 1000000,
+ clksrc.name = kbasename(node->parent->full_name);
+ clkevt.clkevt.name = kbasename(node->parent->full_name);
+ pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
((divided_rate % 1000000) + 500) / 1000);
- if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
+ tcaddr = tc.regs;
+
+ if (bits == 32) {
/* use apropriate function to read 32 bit counter */
clksrc.read = tc_get_cycles32;
/* setup ony channel 0 */
- tcb_setup_single_chan(tc, best_divisor_idx);
+ tcb_setup_single_chan(&tc, best_divisor_idx);
+ tc_sched_clock = tc_sched_clock_read32;
} else {
- /* tclib will give us three clocks no matter what the
+ /* we have three clocks no matter what the
* underlying platform supports.
*/
- ret = clk_prepare_enable(tc->clk[1]);
+ ret = clk_prepare_enable(tc.clk[1]);
if (ret) {
pr_debug("can't enable T1 clk\n");
goto err_disable_t0;
}
/* setup both channel 0 & 1 */
- tcb_setup_dual_chan(tc, best_divisor_idx);
+ tcb_setup_dual_chan(&tc, best_divisor_idx);
+ tc_sched_clock = tc_sched_clock_read;
}
/* and away we go! */
@@ -410,24 +452,26 @@ static int __init tcb_clksrc_init(void)
goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
- ret = setup_clkevents(tc, clk32k_divisor_idx);
+ ret = setup_clkevents(&tc, clk32k_divisor_idx);
if (ret)
goto err_unregister_clksrc;
+ sched_clock_register(tc_sched_clock, 32, divided_rate);
+
return 0;
err_unregister_clksrc:
clocksource_unregister(&clksrc);
err_disable_t1:
- if (!tc->tcb_config || tc->tcb_config->counter_width != 32)
- clk_disable_unprepare(tc->clk[1]);
+ if (bits != 32)
+ clk_disable_unprepare(tc.clk[1]);
err_disable_t0:
clk_disable_unprepare(t0_clk);
-err_free_tc:
- atmel_tc_free(tc);
+ tcaddr = NULL;
+
return ret;
}
-arch_initcall(tcb_clksrc_init);
+TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
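
With the TIMER_OF_DECLARE() conversion above, the TCB clocksource probes from the device tree and derives its counting rate by walking the TC prescalers, keeping the largest divisor whose output still exceeds the 5 MHz target mentioned in the hunk, while remembering the 32 kHz slot for the clockevent. A simplified standalone sketch of that selection rule follows; the 132 MHz MCK rate is an assumed example and the in-kernel loop's quirks are not reproduced.

#include <stdio.h>

static const unsigned int atmel_tcb_divisors[5] = { 2, 8, 32, 128, 0 };

int main(void)
{
	unsigned int rate = 132000000;		/* assumed MCK rate for the example */
	unsigned int divided_rate = 0;
	int best_divisor_idx = -1;
	int clk32k_divisor_idx = -1;

	for (int i = 0; i < 5; i++) {
		unsigned int divisor = atmel_tcb_divisors[i];
		unsigned int tmp;

		/* divisor 0 marks the 32 kHz slow clock, kept for the clockevent */
		if (!divisor) {
			clk32k_divisor_idx = i;
			continue;
		}

		tmp = rate / divisor;
		/* keep dividing while the result still exceeds 5 MHz */
		if (best_divisor_idx >= 0 && tmp < 5 * 1000 * 1000)
			continue;

		divided_rate = tmp;
		best_divisor_idx = i;
	}

	/* 132 MHz: divisor 2 -> 66 MHz, divisor 8 -> 16.5 MHz, divisor 32 is too slow */
	printf("divisor %u -> %u Hz, 32 kHz slot %d\n",
	       atmel_tcb_divisors[best_divisor_idx], divided_rate, clk32k_divisor_idx);
	return 0;
}
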
diff --git a/drivers/clocksource/timer-fsl-ftm.c b/drivers/clocksource/timer-fsl-ftm.c
index 846d18daf893..e1c34b2f53a5 100644
--- a/drivers/clocksource/timer-fsl-ftm.c
+++ b/drivers/clocksource/timer-fsl-ftm.c
@@ -19,20 +19,9 @@
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
+#include <linux/fsl/ftm.h>
-#define FTM_SC 0x00
-#define FTM_SC_CLK_SHIFT 3
-#define FTM_SC_CLK_MASK (0x3 << FTM_SC_CLK_SHIFT)
-#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_SHIFT)
-#define FTM_SC_PS_MASK 0x7
-#define FTM_SC_TOIE BIT(6)
-#define FTM_SC_TOF BIT(7)
-
-#define FTM_CNT 0x04
-#define FTM_MOD 0x08
-#define FTM_CNTIN 0x4C
-
-#define FTM_PS_MAX 7
+#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_MASK_SHIFT)
struct ftm_clock_device {
void __iomem *clksrc_base;
diff --git a/drivers/clocksource/timer-ixp4xx.c b/drivers/clocksource/timer-ixp4xx.c
new file mode 100644
index 000000000000..5c2190b654cd
--- /dev/null
+++ b/drivers/clocksource/timer-ixp4xx.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IXP4 timer driver
+ * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on arch/arm/mach-ixp4xx/common.c
+ * Copyright 2002 (C) Intel Corporation
+ * Copyright 2003-2004 (C) MontaVista, Software, Inc.
+ * Copyright (C) Deepak Saxena <dsaxena@plexity.net>
+ */
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+/* Goes away with OF conversion */
+#include <linux/platform_data/timer-ixp4xx.h>
+
+/*
+ * Constants to make it easy to access Timer Control/Status registers
+ */
+#define IXP4XX_OSTS_OFFSET 0x00 /* Continuous Timestamp */
+#define IXP4XX_OST1_OFFSET 0x04 /* Timer 1 Timestamp */
+#define IXP4XX_OSRT1_OFFSET 0x08 /* Timer 1 Reload */
+#define IXP4XX_OST2_OFFSET 0x0C /* Timer 2 Timestamp */
+#define IXP4XX_OSRT2_OFFSET 0x10 /* Timer 2 Reload */
+#define IXP4XX_OSWT_OFFSET 0x14 /* Watchdog Timer */
+#define IXP4XX_OSWE_OFFSET 0x18 /* Watchdog Enable */
+#define IXP4XX_OSWK_OFFSET 0x1C /* Watchdog Key */
+#define IXP4XX_OSST_OFFSET 0x20 /* Timer Status */
+
+/*
+ * Timer register values and bit definitions
+ */
+#define IXP4XX_OST_ENABLE 0x00000001
+#define IXP4XX_OST_ONE_SHOT 0x00000002
+/* Low order bits of reload value ignored */
+#define IXP4XX_OST_RELOAD_MASK 0x00000003
+#define IXP4XX_OST_DISABLED 0x00000000
+#define IXP4XX_OSST_TIMER_1_PEND 0x00000001
+#define IXP4XX_OSST_TIMER_2_PEND 0x00000002
+#define IXP4XX_OSST_TIMER_TS_PEND 0x00000004
+#define IXP4XX_OSST_TIMER_WDOG_PEND 0x00000008
+#define IXP4XX_OSST_TIMER_WARM_RESET 0x00000010
+
+#define IXP4XX_WDT_KEY 0x0000482E
+#define IXP4XX_WDT_RESET_ENABLE 0x00000001
+#define IXP4XX_WDT_IRQ_ENABLE 0x00000002
+#define IXP4XX_WDT_COUNT_ENABLE 0x00000004
+
+struct ixp4xx_timer {
+ void __iomem *base;
+ unsigned int tick_rate;
+ u32 latch;
+ struct clock_event_device clkevt;
+#ifdef CONFIG_ARM
+ struct delay_timer delay_timer;
+#endif
+};
+
+/*
+ * A local singleton used by sched_clock and delay timer reads, which are
+ * fast and stateless
+ */
+static struct ixp4xx_timer *local_ixp4xx_timer;
+
+static inline struct ixp4xx_timer *
+to_ixp4xx_timer(struct clock_event_device *evt)
+{
+ return container_of(evt, struct ixp4xx_timer, clkevt);
+}
+
+static u64 notrace ixp4xx_read_sched_clock(void)
+{
+ return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
+}
+
+static u64 ixp4xx_clocksource_read(struct clocksource *c)
+{
+ return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
+}
+
+static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id)
+{
+ struct ixp4xx_timer *tmr = dev_id;
+ struct clock_event_device *evt = &tmr->clkevt;
+
+ /* Clear Pending Interrupt */
+ __raw_writel(IXP4XX_OSST_TIMER_1_PEND,
+ tmr->base + IXP4XX_OSST_OFFSET);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static int ixp4xx_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ /* Keep enable/oneshot bits */
+ val &= IXP4XX_OST_RELOAD_MASK;
+ __raw_writel((cycles & ~IXP4XX_OST_RELOAD_MASK) | val,
+ tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_shutdown(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ val &= ~IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_set_oneshot(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+
+ __raw_writel(IXP4XX_OST_ENABLE | IXP4XX_OST_ONE_SHOT,
+ tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_set_periodic(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = tmr->latch & ~IXP4XX_OST_RELOAD_MASK;
+ val |= IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_resume(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ val |= IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+/*
+ * IXP4xx timer tick
+ * We use OS timer1 on the CPU for the timer tick and the timestamp
+ * counter as a source of real clock ticks to account for missed jiffies.
+ */
+static __init int ixp4xx_timer_register(void __iomem *base,
+ int timer_irq,
+ unsigned int timer_freq)
+{
+ struct ixp4xx_timer *tmr;
+ int ret;
+
+ tmr = kzalloc(sizeof(*tmr), GFP_KERNEL);
+ if (!tmr)
+ return -ENOMEM;
+ tmr->base = base;
+ tmr->tick_rate = timer_freq;
+
+ /*
+ * The timer register doesn't allow to specify the two least
+ * significant bits of the timeout value and assumes them being zero.
+ * So make sure the latch is the best value with the two least
+ * significant bits unset.
+ */
+ tmr->latch = DIV_ROUND_CLOSEST(timer_freq,
+ (IXP4XX_OST_RELOAD_MASK + 1) * HZ)
+ * (IXP4XX_OST_RELOAD_MASK + 1);
+
+ local_ixp4xx_timer = tmr;
+
+ /* Reset/disable counter */
+ __raw_writel(0, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ /* Clear any pending interrupt on timer 1 */
+ __raw_writel(IXP4XX_OSST_TIMER_1_PEND,
+ tmr->base + IXP4XX_OSST_OFFSET);
+
+ /* Reset time-stamp counter */
+ __raw_writel(0, tmr->base + IXP4XX_OSTS_OFFSET);
+
+ clocksource_mmio_init(NULL, "OSTS", timer_freq, 200, 32,
+ ixp4xx_clocksource_read);
+
+ tmr->clkevt.name = "ixp4xx timer1";
+ tmr->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ tmr->clkevt.rating = 200;
+ tmr->clkevt.set_state_shutdown = ixp4xx_shutdown;
+ tmr->clkevt.set_state_periodic = ixp4xx_set_periodic;
+ tmr->clkevt.set_state_oneshot = ixp4xx_set_oneshot;
+ tmr->clkevt.tick_resume = ixp4xx_resume;
+ tmr->clkevt.set_next_event = ixp4xx_set_next_event;
+ tmr->clkevt.cpumask = cpumask_of(0);
+ tmr->clkevt.irq = timer_irq;
+ ret = request_irq(timer_irq, ixp4xx_timer_interrupt,
+ IRQF_TIMER, "IXP4XX-TIMER1", tmr);
+ if (ret) {
+ pr_crit("no timer IRQ\n");
+ return -ENODEV;
+ }
+ clockevents_config_and_register(&tmr->clkevt, timer_freq,
+ 0xf, 0xfffffffe);
+
+ sched_clock_register(ixp4xx_read_sched_clock, 32, timer_freq);
+
+ return 0;
+}
+
+/**
+ * ixp4xx_timer_setup() - Timer setup function to be called from boardfiles
+ * @timerbase: physical base of timer block
+ * @timer_irq: Linux IRQ number for the timer
+ * @timer_freq: Fixed frequency of the timer
+ */
+void __init ixp4xx_timer_setup(resource_size_t timerbase,
+ int timer_irq,
+ unsigned int timer_freq)
+{
+ void __iomem *base;
+
+ base = ioremap(timerbase, 0x100);
+ if (!base) {
+ pr_crit("IXP4xx: can't remap timer\n");
+ return;
+ }
+ ixp4xx_timer_register(base, timer_irq, timer_freq);
+}
+EXPORT_SYMBOL_GPL(ixp4xx_timer_setup);
+
+#ifdef CONFIG_OF
+static __init int ixp4xx_of_timer_init(struct device_node *np)
+{
+ void __iomem *base;
+ int irq;
+ int ret;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_crit("IXP4xx: can't remap timer\n");
+ return -ENODEV;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ\n");
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+
+ /* TODO: get some fixed clocks into the device tree */
+ ret = ixp4xx_timer_register(base, irq, 66666000);
+ if (ret)
+ goto out_unmap;
+ return 0;
+
+out_unmap:
+ iounmap(base);
+ return ret;
+}
+TIMER_OF_DECLARE(ixp4xx, "intel,ixp4xx-timer", ixp4xx_of_timer_init);
+#endif
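
The reload register of the IXP4xx timer reuses its two low bits as enable/one-shot control bits, so ixp4xx_timer_register() above rounds the periodic latch to the nearest multiple of four. A standalone worked example of that arithmetic, assuming HZ=100 and the 66666000 Hz rate the OF probe path passes in:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
#define IXP4XX_OST_RELOAD_MASK	0x00000003
#define HZ			100	/* assumed tick rate for the example */

int main(void)
{
	unsigned int timer_freq = 66666000;
	unsigned int latch;

	/* round timer_freq / HZ to the nearest multiple of 4 */
	latch = DIV_ROUND_CLOSEST(timer_freq, (IXP4XX_OST_RELOAD_MASK + 1) * HZ)
		* (IXP4XX_OST_RELOAD_MASK + 1);

	/* 66666000 / 400 = 166665, so the latch is 666660 (low two bits clear) */
	printf("latch = %u\n", latch);
	return 0;
}
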
diff --git a/drivers/clocksource/timer-milbeaut.c b/drivers/clocksource/timer-milbeaut.c
index f2019a88e3ee..fa9fb4eacade 100644
--- a/drivers/clocksource/timer-milbeaut.c
+++ b/drivers/clocksource/timer-milbeaut.c
@@ -26,8 +26,8 @@
#define MLB_TMR_TMCSR_CSL_DIV2 0
#define MLB_TMR_DIV_CNT 2
-#define MLB_TMR_SRC_CH (1)
-#define MLB_TMR_EVT_CH (0)
+#define MLB_TMR_SRC_CH 1
+#define MLB_TMR_EVT_CH 0
#define MLB_TMR_SRC_CH_OFS (MLB_TMR_REGSZPCH * MLB_TMR_SRC_CH)
#define MLB_TMR_EVT_CH_OFS (MLB_TMR_REGSZPCH * MLB_TMR_EVT_CH)
@@ -43,6 +43,8 @@
#define MLB_TMR_EVT_TMRLR2_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMRLR2_OFS)
#define MLB_TIMER_RATING 500
+#define MLB_TIMER_ONESHOT 0
+#define MLB_TIMER_PERIODIC 1
static irqreturn_t mlb_timer_interrupt(int irq, void *dev_id)
{
@@ -59,27 +61,53 @@ static irqreturn_t mlb_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int mlb_set_state_periodic(struct clock_event_device *clk)
+static void mlb_evt_timer_start(struct timer_of *to, bool periodic)
{
- struct timer_of *to = to_timer_of(clk);
u32 val = MLB_TMR_TMCSR_CSL_DIV2;
+ val |= MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_TRG | MLB_TMR_TMCSR_INTE;
+ if (periodic)
+ val |= MLB_TMR_TMCSR_RELD;
writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+}
- writel_relaxed(to->of_clk.period, timer_of_base(to) +
- MLB_TMR_EVT_TMRLR1_OFS);
- val |= MLB_TMR_TMCSR_RELD | MLB_TMR_TMCSR_CNTE |
- MLB_TMR_TMCSR_TRG | MLB_TMR_TMCSR_INTE;
+static void mlb_evt_timer_stop(struct timer_of *to)
+{
+ u32 val = readl_relaxed(timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+
+ val &= ~MLB_TMR_TMCSR_CNTE;
writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+}
+
+static void mlb_evt_timer_register_count(struct timer_of *to, unsigned long cnt)
+{
+ writel_relaxed(cnt, timer_of_base(to) + MLB_TMR_EVT_TMRLR1_OFS);
+}
+
+static int mlb_set_state_periodic(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ mlb_evt_timer_stop(to);
+ mlb_evt_timer_register_count(to, to->of_clk.period);
+ mlb_evt_timer_start(to, MLB_TIMER_PERIODIC);
return 0;
}
static int mlb_set_state_oneshot(struct clock_event_device *clk)
{
struct timer_of *to = to_timer_of(clk);
- u32 val = MLB_TMR_TMCSR_CSL_DIV2;
- writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+ mlb_evt_timer_stop(to);
+ mlb_evt_timer_start(to, MLB_TIMER_ONESHOT);
+ return 0;
+}
+
+static int mlb_set_state_shutdown(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ mlb_evt_timer_stop(to);
return 0;
}
@@ -88,22 +116,21 @@ static int mlb_clkevt_next_event(unsigned long event,
{
struct timer_of *to = to_timer_of(clk);
- writel_relaxed(event, timer_of_base(to) + MLB_TMR_EVT_TMRLR1_OFS);
- writel_relaxed(MLB_TMR_TMCSR_CSL_DIV2 |
- MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_INTE |
- MLB_TMR_TMCSR_TRG, timer_of_base(to) +
- MLB_TMR_EVT_TMCSR_OFS);
+ mlb_evt_timer_stop(to);
+ mlb_evt_timer_register_count(to, event);
+ mlb_evt_timer_start(to, MLB_TIMER_ONESHOT);
return 0;
}
static int mlb_config_clock_source(struct timer_of *to)
{
- writel_relaxed(0, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
- writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMR_OFS);
+ u32 val = MLB_TMR_TMCSR_CSL_DIV2;
+
+ writel_relaxed(val, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR1_OFS);
writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR2_OFS);
- writel_relaxed(BIT(4) | BIT(1) | BIT(0), timer_of_base(to) +
- MLB_TMR_SRC_TMCSR_OFS);
+ val |= MLB_TMR_TMCSR_RELD | MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_TRG;
+ writel_relaxed(val, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
return 0;
}
@@ -123,6 +150,7 @@ static struct timer_of to = {
.features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT,
.set_state_oneshot = mlb_set_state_oneshot,
.set_state_periodic = mlb_set_state_periodic,
+ .set_state_shutdown = mlb_set_state_shutdown,
.set_next_event = mlb_clkevt_next_event,
},
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
index eed6feff8b5f..30c6f4ce672b 100644
--- a/drivers/clocksource/timer-oxnas-rps.c
+++ b/drivers/clocksource/timer-oxnas-rps.c
@@ -296,4 +296,4 @@ err_alloc:
TIMER_OF_DECLARE(ox810se_rps,
"oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
TIMER_OF_DECLARE(ox820_rps,
- "oxsemi,ox820se-rps-timer", oxnas_rps_timer_init);
+ "oxsemi,ox820-rps-timer", oxnas_rps_timer_init);
diff --git a/drivers/clocksource/timer-sun4i.c b/drivers/clocksource/timer-sun4i.c
index 6e0180aaf784..65f38f6ca714 100644
--- a/drivers/clocksource/timer-sun4i.c
+++ b/drivers/clocksource/timer-sun4i.c
@@ -186,7 +186,8 @@ static int __init sun4i_timer_init(struct device_node *node)
*/
if (of_machine_is_compatible("allwinner,sun4i-a10") ||
of_machine_is_compatible("allwinner,sun5i-a13") ||
- of_machine_is_compatible("allwinner,sun5i-a10s"))
+ of_machine_is_compatible("allwinner,sun5i-a10s") ||
+ of_machine_is_compatible("allwinner,suniv-f1c100s"))
sched_clock_register(sun4i_timer_sched_read, 32,
timer_of_rate(&to));
@@ -218,3 +219,5 @@ static int __init sun4i_timer_init(struct device_node *node)
}
TIMER_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
sun4i_timer_init);
+TIMER_OF_DECLARE(suniv, "allwinner,suniv-f1c100s-timer",
+ sun4i_timer_init);
diff --git a/drivers/clocksource/timer-tegra20.c b/drivers/clocksource/timer-tegra20.c
index fdb3d795a409..919b3568c495 100644
--- a/drivers/clocksource/timer-tegra20.c
+++ b/drivers/clocksource/timer-tegra20.c
@@ -60,9 +60,6 @@
static u32 usec_config;
static void __iomem *timer_reg_base;
#ifdef CONFIG_ARM
-static void __iomem *rtc_base;
-static struct timespec64 persistent_ts;
-static u64 persistent_ms, last_persistent_ms;
static struct delay_timer tegra_delay_timer;
#endif
@@ -199,40 +196,30 @@ static unsigned long tegra_delay_timer_read_counter_long(void)
return readl(timer_reg_base + TIMERUS_CNTR_1US);
}
+static struct timer_of suspend_rtc_to = {
+ .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
+};
+
/*
* tegra_rtc_read - Reads the Tegra RTC registers
* Care must be taken that this funciton is not called while the
* tegra_rtc driver could be executing to avoid race conditions
* on the RTC shadow register
*/
-static u64 tegra_rtc_read_ms(void)
+static u64 tegra_rtc_read_ms(struct clocksource *cs)
{
- u32 ms = readl(rtc_base + RTC_MILLISECONDS);
- u32 s = readl(rtc_base + RTC_SHADOW_SECONDS);
+ u32 ms = readl(timer_of_base(&suspend_rtc_to) + RTC_MILLISECONDS);
+ u32 s = readl(timer_of_base(&suspend_rtc_to) + RTC_SHADOW_SECONDS);
return (u64)s * MSEC_PER_SEC + ms;
}
-/*
- * tegra_read_persistent_clock64 - Return time from a persistent clock.
- *
- * Reads the time from a source which isn't disabled during PM, the
- * 32k sync timer. Convert the cycles elapsed since last read into
- * nsecs and adds to a monotonically increasing timespec64.
- * Care must be taken that this funciton is not called while the
- * tegra_rtc driver could be executing to avoid race conditions
- * on the RTC shadow register
- */
-static void tegra_read_persistent_clock64(struct timespec64 *ts)
-{
- u64 delta;
-
- last_persistent_ms = persistent_ms;
- persistent_ms = tegra_rtc_read_ms();
- delta = persistent_ms - last_persistent_ms;
-
- timespec64_add_ns(&persistent_ts, delta * NSEC_PER_MSEC);
- *ts = persistent_ts;
-}
+static struct clocksource suspend_rtc_clocksource = {
+ .name = "tegra_suspend_timer",
+ .rating = 200,
+ .read = tegra_rtc_read_ms,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
+};
#endif
static int tegra_timer_common_init(struct device_node *np, struct timer_of *to)
@@ -385,25 +372,15 @@ out:
static int __init tegra20_init_rtc(struct device_node *np)
{
- struct clk *clk;
+ int ret;
- rtc_base = of_iomap(np, 0);
- if (!rtc_base) {
- pr_err("Can't map RTC registers\n");
- return -ENXIO;
- }
+ ret = timer_of_init(np, &suspend_rtc_to);
+ if (ret)
+ return ret;
- /*
- * rtc registers are used by read_persistent_clock, keep the rtc clock
- * enabled
- */
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk))
- pr_warn("Unable to get rtc-tegra clock\n");
- else
- clk_prepare_enable(clk);
+ clocksource_register_hz(&suspend_rtc_clocksource, 1000);
- return register_persistent_clock(tegra_read_persistent_clock64);
+ return 0;
}
TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
#endif
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 3352da6ed61f..e40b55a7086f 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -585,34 +585,6 @@ static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
return 0;
}
-/* Optimized set_load which removes costly spin wait in timer_start */
-static int omap_dm_timer_set_load_start(struct omap_dm_timer *timer,
- int autoreload, unsigned int load)
-{
- u32 l;
-
- if (unlikely(!timer))
- return -EINVAL;
-
- omap_dm_timer_enable(timer);
-
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- if (autoreload) {
- l |= OMAP_TIMER_CTRL_AR;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
- } else {
- l &= ~OMAP_TIMER_CTRL_AR;
- }
- l |= OMAP_TIMER_CTRL_ST;
-
- __omap_dm_timer_load_start(timer, l, load, timer->posted);
-
- /* Save the context */
- timer->context.tclr = l;
- timer->context.tldr = load;
- timer->context.tcrr = load;
- return 0;
-}
static int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
unsigned int match)
{
@@ -998,5 +970,4 @@ module_platform_driver(omap_dm_timer_driver);
MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");