
[PATCHv3 5/5] cpuidle: coupled: add trace events



ccross at android

Apr 30, 2012, 1:09 PM

[PATCHv3 5/5] cpuidle: coupled: add trace events

Add trace events to allow debugging of coupled cpuidle. They can be
used to verify cpuidle performance, including time spent spinning and
time spent in safe states.

Reviewed-by: Santosh Shilimkar <santosh.shilimkar [at] ti>
Tested-by: Santosh Shilimkar <santosh.shilimkar [at] ti>
Reviewed-by: Kevin Hilman <khilman [at] ti>
Tested-by: Kevin Hilman <khilman [at] ti>
Signed-off-by: Colin Cross <ccross [at] android>
---
drivers/cpuidle/coupled.c | 29 +++++-
include/trace/events/cpuidle.h | 243 ++++++++++++++++++++++++++++++++++++++++
2 files changed, 270 insertions(+), 2 deletions(-)
create mode 100644 include/trace/events/cpuidle.h

v3:
* removed debugging code from cpuidle_coupled_parallel_barrier
so this patch can be merged to help with debugging new
coupled cpuidle drivers
* made tracing _rcuidle
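
A note on the _rcuidle suffix: RCU stops watching a CPU once it enters
the idle loop, but tracepoint callbacks rely on RCU, so tracepoints
fired from the idle path have to use the generated _rcuidle variants.
A simplified sketch of what such a wrapper does; the real one is
generated by DECLARE_TRACE in <linux/tracepoint.h>:

	/*
	 * Rough shape of the generated trace_coupled_enter_rcuidle():
	 * make RCU watch this CPU again for the duration of the
	 * callback, so that rcu_read_lock() inside the tracing code
	 * is actually valid.
	 */
	static inline void trace_coupled_enter_rcuidle(unsigned int cpu)
	{
		rcu_idle_exit();		/* RCU resumes watching this CPU */
		trace_coupled_enter(cpu);	/* fire the normal tracepoint */
		rcu_idle_enter();		/* return the CPU to RCU-idle */
	}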

diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 242dc7c..6b63d67 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -26,6 +26,11 @@
 
 #include "cpuidle.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpuidle.h>
+
+atomic_t cpuidle_trace_seq;
+
 /**
  * DOC: Coupled cpuidle states
  *
@@ -232,6 +237,7 @@ static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
 static void cpuidle_coupled_poked(void *info)
 {
 	int cpu = (unsigned long)info;
+	trace_coupled_poked_rcuidle(cpu);
 	cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);
 }
 
@@ -251,8 +257,10 @@ static void cpuidle_coupled_poke(int cpu)
 {
 	struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
 
-	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))
+	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask)) {
+		trace_coupled_poke_rcuidle(cpu);
 		__smp_call_function_single(cpu, csd, 0);
+	}
 }
 
 /**
@@ -361,28 +369,37 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
 	BUG_ON(atomic_read(&coupled->ready_count));
 	cpuidle_coupled_set_waiting(dev, coupled, next_state);
 
+	trace_coupled_enter_rcuidle(dev->cpu);
+
 retry:
 	/*
 	 * Wait for all coupled cpus to be idle, using the deepest state
 	 * allowed for a single cpu.
 	 */
 	while (!need_resched() && !cpuidle_coupled_cpus_waiting(coupled)) {
+		trace_coupled_safe_enter_rcuidle(dev->cpu);
 		entered_state = cpuidle_enter_state(dev, drv,
			dev->safe_state_index);
+		trace_coupled_safe_exit_rcuidle(dev->cpu);
 
+		trace_coupled_spin_rcuidle(dev->cpu);
 		local_irq_enable();
 		while (cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked_mask))
 			cpu_relax();
 		local_irq_disable();
+		trace_coupled_unspin_rcuidle(dev->cpu);
 	}
 
 	/* give a chance to process any remaining pokes */
+	trace_coupled_spin_rcuidle(dev->cpu);
 	local_irq_enable();
 	while (cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked_mask))
 		cpu_relax();
 	local_irq_disable();
+	trace_coupled_unspin_rcuidle(dev->cpu);
 
 	if (need_resched()) {
+		trace_coupled_abort_rcuidle(dev->cpu);
 		cpuidle_coupled_set_not_waiting(dev, coupled);
 		goto out;
 	}
@@ -401,29 +418,35 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
 	smp_mb__after_atomic_inc();
 	/* alive_count can't change while ready_count > 0 */
 	alive = atomic_read(&coupled->alive_count);
+	trace_coupled_spin_rcuidle(dev->cpu);
 	while (atomic_read(&coupled->ready_count) != alive) {
 		/* Check if any other cpus bailed out of idle. */
 		if (!cpuidle_coupled_cpus_waiting(coupled)) {
 			atomic_dec(&coupled->ready_count);
 			smp_mb__after_atomic_dec();
+			trace_coupled_detected_abort_rcuidle(dev->cpu);
 			goto retry;
 		}
 
 		cpu_relax();
 	}
+	trace_coupled_unspin_rcuidle(dev->cpu);
 
 	/* all cpus have acked the coupled state */
 	smp_rmb();
 
 	next_state = cpuidle_coupled_get_state(dev, coupled);
-
+	trace_coupled_idle_enter_rcuidle(dev->cpu);
 	entered_state = cpuidle_enter_state(dev, drv, next_state);
+	trace_coupled_idle_exit_rcuidle(dev->cpu);
 
 	cpuidle_coupled_set_not_waiting(dev, coupled);
 	atomic_dec(&coupled->ready_count);
 	smp_mb__after_atomic_dec();
 
 out:
+	trace_coupled_exit_rcuidle(dev->cpu);
+
 	/*
 	 * Normal cpuidle states are expected to return with irqs enabled.
 	 * That leads to an inefficiency where a cpu receiving an interrupt
@@ -445,8 +468,10 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
 	 * a cpu exits and re-enters the ready state because this cpu has
 	 * already decremented its waiting_count.
 	 */
+	trace_coupled_spin_rcuidle(dev->cpu);
 	while (atomic_read(&coupled->ready_count) != 0)
 		cpu_relax();
+	trace_coupled_unspin_rcuidle(dev->cpu);
 
 	smp_rmb();
 
diff --git a/include/trace/events/cpuidle.h b/include/trace/events/cpuidle.h
new file mode 100644
index 0000000..9b2cbbb
--- /dev/null
+++ b/include/trace/events/cpuidle.h
@@ -0,0 +1,243 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpuidle
+
+#if !defined(_TRACE_CPUIDLE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUIDLE_H
+
+#include <linux/atomic.h>
+#include <linux/tracepoint.h>
+
+extern atomic_t cpuidle_trace_seq;
+
+TRACE_EVENT(coupled_enter,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_exit,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_spin,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_unspin,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_safe_enter,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_safe_exit,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_idle_enter,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_idle_exit,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_abort,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_detected_abort,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_poke,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+TRACE_EVENT(coupled_poked,
+
+	TP_PROTO(unsigned int cpu),
+
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(unsigned int, seq)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
+	),
+
+	TP_printk("%u %u", __entry->seq, __entry->cpu)
+);
+
+#endif /* if !defined(_TRACE_CPUIDLE_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
--
1.7.7.3
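
A side note on the sequence number: ftrace records events into per-CPU
ring buffers, and the single atomic_inc_return() ticket taken in each
TP_fast_assign() is what lets the per-CPU streams be merged back into
one total order. A minimal user-space analogue of that scheme, using
C11 atomics and pthreads (all names here are illustrative, not from
the patch):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_uint seq;		/* analogue of cpuidle_trace_seq */

	struct event { unsigned int cpu, seq; };
	static struct event buf[2][4];	/* one buffer per "cpu" */

	static void *record(void *arg)
	{
		unsigned int cpu = (unsigned int)(unsigned long)arg;
		int i;

		for (i = 0; i < 4; i++) {
			/* like TP_fast_assign: take a global ticket per record */
			buf[cpu][i].cpu = cpu;
			buf[cpu][i].seq = atomic_fetch_add(&seq, 1) + 1;
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t t[2];
		unsigned long c;
		int i;

		for (c = 0; c < 2; c++)
			pthread_create(&t[c], NULL, record, (void *)c);
		for (c = 0; c < 2; c++)
			pthread_join(t[c], NULL);

		/*
		 * Each record carries a globally unique seq; sorting the
		 * combined records by .seq would yield one interleaved
		 * history across both "cpus".
		 */
		for (c = 0; c < 2; c++)
			for (i = 0; i < 4; i++)
				printf("cpu%u seq %u\n", buf[c][i].cpu, buf[c][i].seq);
		return 0;
	}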



rostedt at goodmis

May 3, 2012, 2:00 PM

Re: [PATCHv3 5/5] cpuidle: coupled: add trace events

On Mon, 2012-04-30 at 13:09 -0700, Colin Cross wrote:

> diff --git a/include/trace/events/cpuidle.h b/include/trace/events/cpuidle.h
> new file mode 100644
> index 0000000..9b2cbbb
> --- /dev/null
> +++ b/include/trace/events/cpuidle.h
> @@ -0,0 +1,243 @@
> +#undef TRACE_SYSTEM
> +#define TRACE_SYSTEM cpuidle
> +
> +#if !defined(_TRACE_CPUIDLE_H) || defined(TRACE_HEADER_MULTI_READ)
> +#define _TRACE_CPUIDLE_H
> +
> +#include <linux/atomic.h>
> +#include <linux/tracepoint.h>
> +
> +extern atomic_t cpuidle_trace_seq;
> +
> +TRACE_EVENT(coupled_enter,
> +
> +	TP_PROTO(unsigned int cpu),
> +
> +	TP_ARGS(cpu),
> +
> +	TP_STRUCT__entry(
> +		__field(unsigned int, cpu)
> +		__field(unsigned int, seq)
> +	),
> +
> +	TP_fast_assign(
> +		__entry->cpu = cpu;
> +		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
> +	),
> +
> +	TP_printk("%u %u", __entry->seq, __entry->cpu)
> +);
> +
> [... eleven more TRACE_EVENT() definitions, identical apart from the event name, snipped ...]

Egad! Please use DECLARE_EVENT_CLASS() and DEFINE_EVENT() when adding
events that are basically the same. Every TRACE_EVENT() can bloat the
kernel by 5k; using DEFINE_EVENT()s keeps each event to just a few
hundred bytes.

See include/trace/events/ext4.h for examples.
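
A sketch of that consolidation for this header (the class name
"coupled" simply mirrors the event prefix and is illustrative, not
something from the thread):

DECLARE_EVENT_CLASS(coupled,

	TP_PROTO(unsigned int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field(unsigned int, cpu)
		__field(unsigned int, seq)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->seq = atomic_inc_return(&cpuidle_trace_seq);
	),

	TP_printk("%u %u", __entry->seq, __entry->cpu)
);

DEFINE_EVENT(coupled, coupled_enter,
	TP_PROTO(unsigned int cpu),
	TP_ARGS(cpu)
);

DEFINE_EVENT(coupled, coupled_exit,
	TP_PROTO(unsigned int cpu),
	TP_ARGS(cpu)
);

/*
 * ...and one DEFINE_EVENT() each for coupled_spin, coupled_unspin,
 * coupled_safe_enter, coupled_safe_exit, coupled_idle_enter,
 * coupled_idle_exit, coupled_abort, coupled_detected_abort,
 * coupled_poke and coupled_poked.
 */

The class expands the record layout, assignment, and print logic once;
each DEFINE_EVENT() adds only a thin registration stub, which is where
the size saving comes from.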

-- Steve


> +
> +#endif /* if !defined(_TRACE_CPUIDLE_H) || defined(TRACE_HEADER_MULTI_READ) */
> +
> +/* This part must be outside protection */
> +#include <trace/define_trace.h>




ccross at android

May 3, 2012, 2:13 PM

Re: [PATCHv3 5/5] cpuidle: coupled: add trace events

On Thu, May 3, 2012 at 2:00 PM, Steven Rostedt <rostedt [at] goodmis> wrote:
> On Mon, 2012-04-30 at 13:09 -0700, Colin Cross wrote:
>
>> diff --git a/include/trace/events/cpuidle.h b/include/trace/events/cpuidle.h
>> new file mode 100644
>> index 0000000..9b2cbbb
>> --- /dev/null
>> +++ b/include/trace/events/cpuidle.h
>> @@ -0,0 +1,243 @@
>> [... full patch quote snipped ...]
>
> Egad! Please use DECLARE_EVENT_CLASS() and DEFINE_EVENT() when adding
> events that are basically the same. Every TRACE_EVENT() can bloat the
> kernel by 5k, using the DEFINE_EVENT()s keeps each event around just a
> few hundred bytes.
>
> See include/trace/events/ext4.h for examples.

Thanks, I'll take a look. There is no mention in Documentation/ or
samples/ of DECLARE_EVENT_CLASS() or DEFINE_EVENT(), nor any mention
of the cost of TRACE_EVENT().

Looking at the new power tracing code, I will also rework these events
to be more similar to the existing ones.

I suggest skipping this patch for 3.5, and I'll post an updated one for 3.6.
