Diffstat (limited to 'arch/s390/kernel/ftrace.c')
-rw-r--r--  arch/s390/kernel/ftrace.c | 79
1 file changed, 2 insertions, 77 deletions
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index fcb009d3edde..f0072125926c 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -21,9 +21,8 @@ void mcount_replace_code(void);
void ftrace_disable_code(void);
void ftrace_enable_insn(void);
-#ifdef CONFIG_64BIT
/*
- * The 64-bit mcount code looks like this:
+ * The mcount code looks like this:
* stg %r14,8(%r15) # offset 0
* larl %r1,<&counter> # offset 6
* brasl %r14,_mcount # offset 12
@@ -34,7 +33,7 @@ void ftrace_enable_insn(void);
* Note: we do not patch the first instruction to an unconditional branch,
* since that would break kprobes/jprobes. It is easier to leave the larl
* instruction in and only modify the second instruction.
- * The 64-bit enabled ftrace code block looks like this:
+ * The enabled ftrace code block looks like this:
* larl %r0,.+24 # offset 0
* > lg %r1,__LC_FTRACE_FUNC # offset 6
* br %r1 # offset 12
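
The enable side of this layout is not shown in these hunks, but the comment above is enough to sketch it: turning a call site on means overwriting only the 6-byte instruction at rec->ip (offset 6 of the block) with the pre-assembled ftrace_enable_insn template, leaving the first instruction alone so kprobes/jprobes keep working. A minimal sketch under those assumptions, using the symbols declared at the top of this file; sketch_make_call() is a hypothetical name, not code from the patch:

/*
 * Sketch only: flip one call site into the "enabled" state by rewriting
 * the single 6-byte instruction at rec->ip with the pre-assembled
 * "lg %r1,__LC_FTRACE_FUNC" template (ftrace_enable_insn). The first
 * instruction of the block is left untouched.
 */
static int sketch_make_call(struct dyn_ftrace *rec)
{
	if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
			       FTRACE_INSN_SIZE))
		return -EPERM;
	return 0;
}
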
@@ -71,65 +70,15 @@ asm(
#define MCOUNT_INSN_OFFSET 6
#define FTRACE_INSN_SIZE 6
-#else /* CONFIG_64BIT */
-/*
- * The 31-bit mcount code looks like this:
- * st %r14,4(%r15) # offset 0
- * > bras %r1,0f # offset 4
- * > .long _mcount # offset 8
- * > .long <&counter> # offset 12
- * > 0: l %r14,0(%r1) # offset 16
- * > l %r1,4(%r1) # offset 20
- * basr %r14,%r14 # offset 24
- * l %r14,4(%r15) # offset 26
- * Total length is 30 bytes. The twenty bytes starting from offset 4
- * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
- * The 31-bit enabled ftrace code block looks like this:
- * st %r14,4(%r15) # offset 0
- * > l %r14,__LC_FTRACE_FUNC # offset 4
- * > j 0f # offset 8
- * > .fill 12,1,0x07 # offset 12
- * 0: basr %r14,%r14 # offset 24
- * l %r14,4(%r14) # offset 26
- * The return points of the mcount/ftrace function have the same offset 26.
- * The 31-bit disabled ftrace code block looks like this:
- * st %r14,4(%r15) # offset 0
- * > j .+26 # offset 4
- * > j 0f # offset 8
- * > .fill 12,1,0x07 # offset 12
- * 0: basr %r14,%r14 # offset 24
- * l %r14,4(%r14) # offset 26
- * The j instruction branches to offset 30 to skip as many instructions
- * as possible.
- */
-asm(
- " .align 4\n"
- "ftrace_disable_code:\n"
- " j 1f\n"
- " j 0f\n"
- " .fill 12,1,0x07\n"
- "0: basr %r14,%r14\n"
- "1:\n"
- " .align 4\n"
- "ftrace_enable_insn:\n"
- " l %r14,"__stringify(__LC_FTRACE_FUNC)"\n");
-
-#define FTRACE_INSN_SIZE 4
-
-#endif /* CONFIG_64BIT */
-
-#ifdef CONFIG_64BIT
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
return 0;
}
-#endif
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
-#ifdef CONFIG_64BIT
/* Initial replacement of the whole mcount block */
if (addr == MCOUNT_ADDR) {
if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET,
@@ -138,7 +87,6 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return -EPERM;
return 0;
}
-#endif
if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
MCOUNT_INSN_SIZE))
return -EPERM;
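
Both directions of the patching come down to a probe_kernel_write() of one fixed template over the bytes at rec->ip, as the hunk above shows for the nop/disable case. Purely as an illustration (not part of the patch), the current state of a call site could be inspected by comparing those bytes against the enable template; is_ftrace_site_enabled() is a made-up helper and assumes the ftrace_enable_insn/FTRACE_INSN_SIZE definitions above:

/*
 * Illustration only: report whether the bytes at rec->ip currently hold
 * the "enabled" template. probe_kernel_read() is the read-side
 * counterpart of the probe_kernel_write() calls used in this file.
 */
static bool is_ftrace_site_enabled(struct dyn_ftrace *rec)
{
	unsigned char insn[FTRACE_INSN_SIZE];

	if (probe_kernel_read(insn, (void *) rec->ip, FTRACE_INSN_SIZE))
		return false;
	return !memcmp(insn, ftrace_enable_insn, FTRACE_INSN_SIZE);
}
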
@@ -196,8 +144,6 @@ out:
* the original offset to prepare_ftrace_return and put it back.
*/
-#ifdef CONFIG_64BIT
-
int ftrace_enable_ftrace_graph_caller(void)
{
static unsigned short offset = 0x0002;
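
For the function-graph caller the patch target is even smaller: judging from the removed 31-bit variant further down, enable and disable only rewrite the 16-bit field at ftrace_graph_caller + 2, i.e. the signed halfword offset of the relative branch that starts the stub (an RI-format branch is 4 bytes, with the offset in its last two bytes). An offset of 0x0002 means "branch to this instruction + 2 halfwords", which is simply the next instruction. Whether that state enables or disables the graph hook depends on how the assembly stub is laid out, which this diff does not show; in the kept code above it is the enabled state, in the removed 31-bit code below it is the disabled one. A byte-level illustration with made-up instruction contents:

/*
 * Illustration only, not from the patch: a 4-byte s390 relative branch
 * ("j", i.e. brc 15) with its halfword offset field set to 0x0002, so
 * it targets the instruction immediately following it.
 */
unsigned char patched_branch[4] = { 0xa7, 0xf4, 0x00, 0x02 };
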
@@ -216,25 +162,4 @@ int ftrace_disable_ftrace_graph_caller(void)
&offset, sizeof(offset));
}
-#else /* CONFIG_64BIT */
-
-int ftrace_enable_ftrace_graph_caller(void)
-{
- unsigned short offset;
-
- offset = ((void *) prepare_ftrace_return -
- (void *) ftrace_graph_caller) / 2;
- return probe_kernel_write((void *) ftrace_graph_caller + 2,
- &offset, sizeof(offset));
-}
-
-int ftrace_disable_ftrace_graph_caller(void)
-{
- static unsigned short offset = 0x0002;
-
- return probe_kernel_write((void *) ftrace_graph_caller + 2,
- &offset, sizeof(offset));
-}
-
-#endif /* CONFIG_64BIT */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
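
The division by 2 in the removed 31-bit ftrace_enable_ftrace_graph_caller() above is worth spelling out: s390 relative-branch instructions encode their target as a signed number of halfwords measured from the branch's own address, so a byte distance has to be halved before it fits the offset field. A standalone arithmetic sketch with made-up addresses (graph_caller and prepare_ret are stand-ins, not the real symbols):

#include <stdio.h>

int main(void)
{
	/* Hypothetical addresses, for illustration only. */
	unsigned long graph_caller = 0x00100400;	/* stand-in for ftrace_graph_caller */
	unsigned long prepare_ret  = 0x00100c00;	/* stand-in for prepare_ftrace_return */

	/* Byte distance halved -> halfword count, as in the removed code above. */
	unsigned short offset = (prepare_ret - graph_caller) / 2;

	printf("byte distance 0x%lx -> offset field 0x%04x\n",
	       prepare_ret - graph_caller, offset);
	return 0;
}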