path: root/arch/powerpc/kernel/time.c
author  Nathan Lynch  2007-05-23 02:51:25 +0200
committer  Paul Mackerras  2007-06-25 08:58:11 +0200
commit  df211c8a478e16232ebbc16e650b19163183fcb6 (patch)
tree  90972e63f445e0edad7a195a025ad3fffa9283f3  /arch/powerpc/kernel/time.c
parent  [POWERPC] Don't link timer.o for powerpc systems using generic rtc (diff)
[POWERPC] Remove spinlock from struct cpu_purr_data
cpu_purr_data is a per-cpu array used to account for stolen time on partitioned systems. It used to be the case that cpus accessed each others' cpu_purr_data, so each entry was protected by a spinlock. However, the code was reworked ("Simplify stolen time calculation") with the result that each cpu accesses its own cpu_purr_data and not those of other cpus. This means we can get rid of the spinlock as long as we're careful to disable interrupts when accessing cpu_purr_data in process context.

Signed-off-by: Nathan Lynch <ntl@pobox.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
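The locking rule the patch relies on is the standard per-cpu pattern: data touched only by its owner cpu needs no lock, only local interrupt disabling around any process-context access so the timer interrupt cannot interleave with it on the same cpu. The sketch below illustrates that pattern in kernel C of this era; the struct and function names (my_percpu_stats, my_stats_irq_update, my_stats_reset) are hypothetical and not part of this commit.

/* Illustrative sketch only -- hypothetical per-cpu stats, not from this commit. */
#include <linux/percpu.h>
#include <linux/irqflags.h>
#include <linux/types.h>

struct my_percpu_stats {
	u64 last_tb;	/* last timebase value seen by this cpu */
	u64 events;
};

static DEFINE_PER_CPU(struct my_percpu_stats, my_stats);

/*
 * Timer-interrupt path: interrupts are already off, and no other cpu
 * ever reads or writes this cpu's entry, so plain accesses are safe
 * without any lock.
 */
static void my_stats_irq_update(u64 tb)
{
	struct my_percpu_stats *s = &__get_cpu_var(my_stats);

	s->last_tb = tb;
	s->events++;
}

/*
 * Process-context path (e.g. cpu online): disable local interrupts so
 * the timer interrupt cannot run half-way through the update on this cpu.
 */
static void my_stats_reset(u64 tb)
{
	unsigned long flags;
	struct my_percpu_stats *s;

	local_irq_save(flags);
	s = &__get_cpu_var(my_stats);
	s->last_tb = tb;
	s->events = 0;
	local_irq_restore(flags);
}

Dropping the spinlock this way also removes the need to initialize one lock per possible cpu, which is why snapshot_timebases() in the diff below loses its for_each_possible_cpu() loop.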
Diffstat (limited to 'arch/powerpc/kernel/time.c')
-rw-r--r--  arch/powerpc/kernel/time.c  |  24
1 files changed, 15 insertions, 9 deletions
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 2c8564d54e4d..0a8a820672f4 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -222,19 +222,28 @@ struct cpu_purr_data {
int initialized; /* thread is running */
u64 tb; /* last TB value read */
u64 purr; /* last PURR value read */
- spinlock_t lock;
};
+/*
+ * Each entry in the cpu_purr_data array is manipulated only by its
+ * "owner" cpu -- usually in the timer interrupt but also occasionally
+ * in process context for cpu online. As long as cpus do not touch
+ * each others' cpu_purr_data, disabling local interrupts is
+ * sufficient to serialize accesses.
+ */
static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
static void snapshot_tb_and_purr(void *data)
{
+ unsigned long flags;
struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
+ local_irq_save(flags);
p->tb = mftb();
p->purr = mfspr(SPRN_PURR);
wmb();
p->initialized = 1;
+ local_irq_restore(flags);
}
/*
@@ -242,15 +251,14 @@ static void snapshot_tb_and_purr(void *data)
*/
void snapshot_timebases(void)
{
- int cpu;
-
if (!cpu_has_feature(CPU_FTR_PURR))
return;
- for_each_possible_cpu(cpu)
- spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock);
on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
}
+/*
+ * Must be called with interrupts disabled.
+ */
void calculate_steal_time(void)
{
u64 tb, purr;
@@ -262,7 +270,6 @@ void calculate_steal_time(void)
pme = &per_cpu(cpu_purr_data, smp_processor_id());
if (!pme->initialized)
return; /* this can happen in early boot */
- spin_lock(&pme->lock);
tb = mftb();
purr = mfspr(SPRN_PURR);
stolen = (tb - pme->tb) - (purr - pme->purr);
@@ -270,7 +277,6 @@ void calculate_steal_time(void)
account_steal_time(current, stolen);
pme->tb = tb;
pme->purr = purr;
- spin_unlock(&pme->lock);
}
/*
@@ -284,12 +290,12 @@ static void snapshot_purr(void)
if (!cpu_has_feature(CPU_FTR_PURR))
return;
+ local_irq_save(flags);
pme = &per_cpu(cpu_purr_data, smp_processor_id());
- spin_lock_irqsave(&pme->lock, flags);
pme->tb = mftb();
pme->purr = mfspr(SPRN_PURR);
pme->initialized = 1;
- spin_unlock_irqrestore(&pme->lock, flags);
+ local_irq_restore(flags);
}
#endif /* CONFIG_PPC_SPLPAR */
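For context on the arithmetic left untouched in calculate_steal_time() above: the timebase (TB) advances at a fixed rate in wall-clock time, while PURR advances only while this hardware thread is actually dispatched, so (TB delta) - (PURR delta) is roughly the time taken by the hypervisor or the sibling SMT thread. A minimal userspace sketch of that calculation, with made-up snapshot values (hypothetical, not kernel code):

/* Hypothetical TB/PURR snapshots; real values come from mftb()/mfspr(SPRN_PURR). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t prev_tb = 1000000, prev_purr = 1000000;	/* previous snapshot */
	uint64_t tb = 1500000, purr = 1300000;			/* current snapshot */

	/* Same expression as calculate_steal_time(): wall-clock ticks
	 * minus ticks this thread actually got to run. */
	int64_t stolen = (int64_t)((tb - prev_tb) - (purr - prev_purr));

	if (stolen > 0)
		printf("stolen ticks: %lld\n", (long long)stolen);	/* prints 200000 */
	return 0;
}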