
[2/2] sparc64: Fix stack dumping and tracing when function graph is enabled.

Message ID 20100421.032355.147982685.davem@davemloft.net
State Accepted
Delegated to: David Miller

Commit Message

David Miller April 21, 2010, 10:23 a.m. UTC
Like x86, when the function graph tracer is enabled, emit the ftrace
stub as well as the program counter it will be transformed back into.

We duplicate a lot of similar stack walking logic in 3 or 4 spots, so
eventually we should consolidate things like x86 does.

Thanks to Frederic Weisbecker for pointing this out.

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/kernel/perf_event.c |   14 ++++++++++++++
 arch/sparc/kernel/stacktrace.c |   23 ++++++++++++++++++++++-
 arch/sparc/kernel/traps_64.c   |   14 ++++++++++++++
 3 files changed, 50 insertions(+), 1 deletions(-)
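
The three hunks below all repeat the same lookup; as a condensed sketch (not part of the patch, the helper name is invented here for illustration), the shared logic amounts to:

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Sketch only: if a frame's saved return address points at the ftrace
 * return stub (on sparc the caller resumes at the saved %o7 + 8, hence
 * the "+ 8UL"), fetch the real return address the graph tracer stashed
 * in the task's ret_stack.  "graph" counts how many stub frames this
 * walk has already resolved.  Returns 0 if the frame is not a stub.
 */
static unsigned long graph_ret_addr(struct task_struct *t,
				    unsigned long pc, int *graph)
{
	int index = t->curr_ret_stack;

	if ((pc + 8UL) != (unsigned long) &return_to_handler)
		return 0;
	if (!t->ret_stack || index < *graph)
		return 0;

	pc = t->ret_stack[index - *graph].ret;
	(*graph)++;
	return pc;
}
#endif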

Comments

Frédéric Weisbecker April 22, 2010, 7:20 p.m. UTC | #1
On Wed, Apr 21, 2010 at 03:23:55AM -0700, David Miller wrote:
> 
> Like x86, when the function graph tracer is enabled, emit the ftrace
> stub as well as the program counter it will be transformed back into.
> 
> We duplicate a lot of similar stack walking logic in 3 or 4 spots, so
> eventually we should consolidate things like x86 does.



Yeah, in fact if we want a kind of arch-agnostic graph stack walker,
we could make it an iterator.

(Modifying print_ftrace_graph_addr a bit)

static unsigned long
walk_ftrace_graph_stack(unsigned long addr,
			struct task_struct *task, int *idx)
{
	unsigned long ret_addr;
	int index = task->curr_ret_stack;

	if (addr != (unsigned long)return_to_handler)
		return -ENOENT;

	if (!task->ret_stack || index < *idx)
		return -ENOENT;

	index -= *idx;
	ret_addr = task->ret_stack[index].ret;

	(*idx)++;

	return ret_addr;
}

Hmm?
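
For illustration, a hypothetical caller of that iterator inside an arch's
frame walk could look like the sketch below; save_frame() and the
save_entry callback are made-up placeholders, not existing kernel API,
while IS_ERR_VALUE() (from linux/err.h) catches the -ENOENT return:

/*
 * Hypothetical caller sketched around walk_ftrace_graph_stack() above.
 * graph_idx must persist across frames so that consecutive
 * return_to_handler frames consume consecutive ret_stack slots.
 */
static void save_frame(struct task_struct *task, unsigned long pc,
		       int *graph_idx, void (*save_entry)(unsigned long))
{
	unsigned long real;

	save_entry(pc);

	/* Resolve the graph tracer stub, if this frame is one. */
	real = walk_ftrace_graph_stack(pc, task, graph_idx);
	if (!IS_ERR_VALUE(real))
		save_entry(real);
}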

I can make it in tracing/core, but I fear it won't be available
for the sparc tree until .35.


Patch

diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index e277193..34ce49f 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -14,6 +14,7 @@ 
 
 #include <linux/perf_event.h>
 #include <linux/kprobes.h>
+#include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/kdebug.h>
 #include <linux/mutex.h>
@@ -1276,6 +1277,9 @@  static void perf_callchain_kernel(struct pt_regs *regs,
 				  struct perf_callchain_entry *entry)
 {
 	unsigned long ksp, fp;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	int graph = 0;
+#endif
 
 	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	callchain_store(entry, regs->tpc);
@@ -1303,6 +1307,16 @@  static void perf_callchain_kernel(struct pt_regs *regs,
 			fp = (unsigned long)sf->fp + STACK_BIAS;
 		}
 		callchain_store(entry, pc);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+			int index = current->curr_ret_stack;
+			if (current->ret_stack && index >= graph) {
+				pc = current->ret_stack[index - graph].ret;
+				callchain_store(entry, pc);
+				graph++;
+			}
+		}
+#endif
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
diff --git a/arch/sparc/kernel/stacktrace.c b/arch/sparc/kernel/stacktrace.c
index acb12f6..3e08153 100644
--- a/arch/sparc/kernel/stacktrace.c
+++ b/arch/sparc/kernel/stacktrace.c
@@ -1,6 +1,7 @@ 
 #include <linux/sched.h>
 #include <linux/stacktrace.h>
 #include <linux/thread_info.h>
+#include <linux/ftrace.h>
 #include <linux/module.h>
 #include <asm/ptrace.h>
 #include <asm/stacktrace.h>
@@ -12,6 +13,10 @@  static void __save_stack_trace(struct thread_info *tp,
 			       bool skip_sched)
 {
 	unsigned long ksp, fp;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	struct task_struct *t;
+	int graph = 0;
+#endif
 
 	if (tp == current_thread_info()) {
 		stack_trace_flush();
@@ -21,6 +26,9 @@  static void __save_stack_trace(struct thread_info *tp,
 	}
 
 	fp = ksp + STACK_BIAS;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	t = tp->task;
+#endif
 	do {
 		struct sparc_stackf *sf;
 		struct pt_regs *regs;
@@ -44,8 +52,21 @@  static void __save_stack_trace(struct thread_info *tp,
 
 		if (trace->skip > 0)
 			trace->skip--;
-		else if (!skip_sched || !in_sched_functions(pc))
+		else if (!skip_sched || !in_sched_functions(pc)) {
 			trace->entries[trace->nr_entries++] = pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+			if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+				int index = t->curr_ret_stack;
+				if (t->ret_stack && index >= graph) {
+					pc = t->ret_stack[index - graph].ret;
+					if (trace->nr_entries <
+					    trace->max_entries)
+						trace->entries[trace->nr_entries++] = pc;
+					graph++;
+				}
+			}
+#endif
+		}
 	} while (trace->nr_entries < trace->max_entries);
 }
 
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 9da57f0..42ad2ba 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -17,6 +17,7 @@ 
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/kdebug.h>
+#include <linux/ftrace.h>
 #include <linux/gfp.h>
 
 #include <asm/smp.h>
@@ -2154,6 +2155,9 @@  void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 	unsigned long fp, thread_base, ksp;
 	struct thread_info *tp;
 	int count = 0;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	int graph = 0;
+#endif
 
 	ksp = (unsigned long) _ksp;
 	if (!tsk)
@@ -2193,6 +2197,16 @@  void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 		}
 
 		printk(" [%016lx] %pS\n", pc, (void *) pc);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+			int index = tsk->curr_ret_stack;
+			if (tsk->ret_stack && index >= graph) {
+				pc = tsk->ret_stack[index - graph].ret;
+				printk(" [%016lx] %pS\n", pc, (void *) pc);
+				graph++;
+			}
+		}
+#endif
 	} while (++count < 16);
 }