author		Ingo Molnar		2008-05-12 21:20:46 +0200
committer	Thomas Gleixner		2008-05-23 20:42:55 +0200
commit		cdd31cd2d7a0dcbec2cce3974f7129dd4fc8c879 (patch)
tree		5da4691e9f2ed0850a505975216694928a6545c4 /kernel
parent		ftrace: fast, scalable, synchronized timestamps (diff)
ftrace: remove-idx-sync
remove idx syncing - it's expensive on SMP.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
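For context on the cost being removed: the old path tagged every trace entry with atomic_inc_return() on one global counter, so every tracing CPU performed an atomic read-modify-write on the same cache line per event. A minimal userspace sketch (not the kernel code) contrasting the two recording schemes; C11 atomics and clock_gettime() stand in for the kernel's atomic_t and ftrace_now():

#include <stdatomic.h>
#include <stdint.h>
#include <time.h>

static atomic_ulong tracer_counter;	/* shared: one cache line for all CPUs */

static uint64_t now_ns(void)		/* stand-in for ftrace_now(cpu) */
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

struct entry {
	uint64_t	t;	/* timestamp */
	unsigned long	idx;	/* old scheme only */
};

/*
 * Old scheme: every event does an atomic RMW on the shared counter, so
 * the counter's cache line bounces between all tracing CPUs.
 */
static void record_old(struct entry *e)
{
	e->idx = atomic_fetch_add(&tracer_counter, 1) + 1;	/* ~ atomic_inc_return() */
	e->t = now_ns();
}

/* New scheme: a local clock read only -- no shared write, no bouncing. */
static void record_new(struct entry *e)
{
	e->t = now_ns();
}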
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/trace.c	40
-rw-r--r--	kernel/trace/trace.h	2
2 files changed, 5 insertions(+), 37 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9a931c7c2da3..ce8ceb8aea6a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -99,7 +99,6 @@ notrace cycle_t ftrace_now(int cpu)
	return time;
 }

-static atomic_t tracer_counter;
 static struct trace_array global_trace;

 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
@@ -661,7 +660,6 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
	pc = preempt_count();

-	entry->idx = atomic_inc_return(&tracer_counter);
	entry->preempt_count = pc & 0xff;
	entry->pid = tsk->pid;
	entry->t = ftrace_now(raw_smp_processor_id());
@@ -757,8 +755,10 @@ find_next_entry(struct trace_iterator *iter, int *ent_cpu)
		if (!head_page(tr->data[cpu]))
			continue;
		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
-		if (ent &&
-		    (!next || (long)(next->idx - ent->idx) > 0)) {
+		/*
+		 * Pick the entry with the smallest timestamp:
+		 */
+		if (ent && (!next || ent->t < next->t)) {
			next = ent;
			next_cpu = cpu;
		}
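The hunk above switches find_next_entry() from comparing per-entry idx values to comparing raw timestamps: merging the per-CPU buffers becomes a linear pick-the-oldest scan. The same selection step as a standalone sketch (struct entry and NR_CPUS are simplified stand-ins, not the kernel types):

#include <stddef.h>
#include <stdint.h>

#define NR_CPUS 8

struct entry {
	uint64_t t;
};

/*
 * Pick the candidate with the smallest timestamp across all per-CPU
 * buffers -- the same comparison the new code above performs.
 */
static struct entry *pick_next(struct entry *head[NR_CPUS], int *ent_cpu)
{
	struct entry *next = NULL;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		struct entry *ent = head[cpu];

		if (ent && (!next || ent->t < next->t)) {
			next = ent;
			*ent_cpu = cpu;
		}
	}
	return next;
}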
@@ -800,8 +800,6 @@ trace_consume(struct trace_iterator *iter)
	if (data->trace_head == data->trace_tail &&
	    data->trace_head_idx == data->trace_tail_idx)
		data->trace_idx = 0;
-
-	trace_iterator_increment(iter);
 }

 static notrace void *
@@ -1160,33 +1158,6 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
	}
 }

-static notrace void sync_time_offset(struct trace_iterator *iter)
-{
-	struct trace_array_cpu *prev_array, *array;
-	struct trace_entry *prev_entry, *entry;
-	cycle_t prev_t, t;
-
-	entry = iter->ent;
-	prev_entry = iter->prev_ent;
-	if (!prev_entry)
-		return;
-
-	prev_array = iter->tr->data[iter->prev_cpu];
-	array = iter->tr->data[iter->cpu];
-
-	prev_t = prev_entry->t + prev_array->time_offset;
-	t = entry->t + array->time_offset;
-
-	/*
-	 * If time goes backwards we increase the offset of
-	 * the current array, to not have observable time warps.
-	 * This will quickly synchronize the time offsets of
-	 * multiple CPUs:
-	 */
-	if (t < prev_t)
-		array->time_offset += prev_t - t;
-}
-
 static notrace int
 print_trace_fmt(struct trace_iterator *iter)
 {
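The deleted sync_time_offset() existed to paper over unsynchronized per-CPU clocks; with the synchronized timestamps introduced by the parent commit it became dead weight. For reference, the compensation idea it implemented, as a standalone hedged sketch with simplified types:

#include <stdint.h>

struct cpu_buf {
	uint64_t time_offset;
};

/*
 * If the current entry's adjusted time is behind the previously printed
 * one, bump this CPU's offset so the reader never sees time go backwards.
 */
static void sync_offsets(struct cpu_buf *prev, uint64_t prev_raw_t,
			 struct cpu_buf *cur, uint64_t cur_raw_t)
{
	uint64_t prev_t = prev_raw_t + prev->time_offset;
	uint64_t t = cur_raw_t + cur->time_offset;

	if (t < prev_t)
		cur->time_offset += prev_t - t;
}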
@@ -1200,12 +1171,11 @@ print_trace_fmt(struct trace_iterator *iter)
	int S;
	int ret;

-	sync_time_offset(iter);
	entry = iter->ent;

	comm = trace_find_cmdline(iter->ent->pid);

-	t = ns2usecs(entry->t + iter->tr->data[iter->cpu]->time_offset);
+	t = ns2usecs(entry->t);
	usec_rem = do_div(t, 1000000ULL);
	secs = (unsigned long)t;
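In the printing path above, ns2usecs() converts the raw nanosecond timestamp to microseconds and do_div() splits the result into seconds plus a microsecond remainder (do_div() divides in place and returns the remainder). A userspace sketch of the same arithmetic using plain 64-bit division:

#include <stdint.h>
#include <stdio.h>

static void print_stamp(uint64_t t_ns)
{
	uint64_t t = t_ns / 1000;		/* ns2usecs(): ns -> us */
	unsigned long usec_rem = t % 1000000;	/* do_div() returns this remainder... */
	unsigned long secs = t / 1000000;	/* ...and leaves this quotient in t */

	printf("%5lu.%06lu\n", secs, usec_rem);
}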
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 30cad677e9d0..27fa2d06f499 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -38,7 +38,6 @@ struct trace_entry {
	char preempt_count;
	int pid;
	cycle_t t;
-	unsigned long idx;
	union {
		struct ftrace_entry fn;
		struct ctx_switch_entry ctx;
@@ -57,7 +56,6 @@ struct trace_array_cpu {
	atomic_t disabled;
	spinlock_t lock;
	struct lock_class_key lock_key;
-	cycle_t time_offset;

	/* these fields get copied into max-trace: */
	unsigned trace_head_idx;
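Net effect of the header change: struct trace_entry loses its idx field (sizeof(unsigned long), i.e. 8 bytes per entry on 64-bit), and struct trace_array_cpu loses the per-CPU time_offset. An illustrative before/after with simplified field types (the real structs carry more members, including the entry union):

#include <stdint.h>

struct trace_entry_old {
	char		preempt_count;
	int		pid;
	uint64_t	t;	/* cycle_t timestamp */
	unsigned long	idx;	/* removed: global ordering index */
};

struct trace_entry_new {
	char		preempt_count;
	int		pid;
	uint64_t	t;	/* the timestamp alone orders entries now */
};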