/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_FTRACE
#define _ASM_POWERPC_FTRACE

#include <asm/types.h>

#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR		((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */

/* Ignore unused weak functions which will have larger offsets */
#if defined(CONFIG_MPROFILE_KERNEL) || defined(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)
#define FTRACE_MCOUNT_MAX_OFFSET	16
#elif defined(CONFIG_PPC32)
#define FTRACE_MCOUNT_MAX_OFFSET	8
#endif
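
/*
 * A minimal usage sketch (an assumption about how generic ftrace code is
 * expected to consume FTRACE_MCOUNT_MAX_OFFSET, not part of this header):
 * a call site is skipped when it sits more than this many bytes past the
 * start of its symbol, which is what an unused weak function whose mcount
 * location got attributed to the preceding symbol looks like:
 *
 *	unsigned long size, offset;
 *	char name[KSYM_NAME_LEN];
 *
 *	if (kallsyms_lookup(addr, &size, &offset, NULL, name) &&
 *	    offset > FTRACE_MCOUNT_MAX_OFFSET)
 *		return -EINVAL;		// ignore this mcount location
 */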

#ifndef __ASSEMBLY__
extern void _mcount(void);

unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
				    unsigned long sp);

struct module;
struct dyn_ftrace;
struct dyn_arch_ftrace {
#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
	/* pointer to the associated out-of-line stub */
	unsigned long ool_stub;
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
#define ftrace_need_init_nop()	(true)
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop

#include <linux/ftrace_regs.h>

static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
{
	/* We clear regs.msr in ftrace_call */
	return arch_ftrace_regs(fregs)->regs.msr ? &arch_ftrace_regs(fregs)->regs : NULL;
}
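
/*
 * Illustrative caller sketch (an assumption about typical use, not defined
 * here): a callback registered with FTRACE_OPS_FL_SAVE_REGS can check
 * whether a full pt_regs capture is actually available before touching it:
 *
 *	struct pt_regs *regs = arch_ftrace_get_regs(fregs);
 *
 *	if (!regs)
 *		return;		// only the argument subset was saved
 */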

#define arch_ftrace_fill_perf_regs(fregs, _regs) do {			\
		(_regs)->result = 0;					\
		(_regs)->nip = arch_ftrace_regs(fregs)->regs.nip;	\
		(_regs)->gpr[1] = arch_ftrace_regs(fregs)->regs.gpr[1];	\
		asm volatile("mfmsr %0" : "=r" ((_regs)->msr));		\
	} while (0)
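
/*
 * Sketch of the intended use (assumed): perf can populate a scratch pt_regs
 * from the lightweight ftrace_regs capture, good enough for the NIP, the
 * stack pointer in gpr[1] and a freshly read MSR:
 *
 *	struct pt_regs regs;
 *
 *	arch_ftrace_fill_perf_regs(fregs, &regs);
 */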

static __always_inline void
ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
				    unsigned long ip)
{
	regs_set_return_ip(&arch_ftrace_regs(fregs)->regs, ip);
}
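
/*
 * Illustrative use (assumed, in the style of a live-patch handler):
 * rewriting the saved NIP makes the traced function resume in a
 * replacement function instead of its original body:
 *
 *	ftrace_regs_set_instruction_pointer(fregs, (unsigned long)new_func);
 */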

static __always_inline unsigned long
ftrace_regs_get_return_address(struct ftrace_regs *fregs)
{
	return arch_ftrace_regs(fregs)->regs.link;
}

struct ftrace_ops;

#define ftrace_graph_func ftrace_graph_func
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
#endif /* CONFIG_FUNCTION_TRACER */

#ifndef __ASSEMBLY__
#ifdef CONFIG_FTRACE_SYSCALLS
/*
 * Some syscall entry functions on powerpc start with "ppc_" (fork and clone,
 * for instance) or with ppc32_/ppc64_, so the sys_ variant of the name must
 * be matched against those prefixed symbols as well.
 */
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	return !strcmp(sym, name) ||
		(!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name)) ||
		(!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4)) ||
		(!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) ||
		(!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
}
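
/*
 * Worked examples of the matching above (illustrative only):
 *
 *	arch_syscall_match_sym_name("ppc_fork", "sys_fork")			-> true
 *	arch_syscall_match_sym_name("ppc64_personality", "sys_personality")	-> true
 *	arch_syscall_match_sym_name("__se_sys_read", "sys_read")		-> true
 *	arch_syscall_match_sym_name("sys_read", "sys_write")			-> false
 */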
#endif /* CONFIG_FTRACE_SYSCALLS */

#if defined(CONFIG_PPC64) && defined(CONFIG_FUNCTION_TRACER)
#include <asm/paca.h>

static inline void this_cpu_disable_ftrace(void)
{
	get_paca()->ftrace_enabled = 0;
}

static inline void this_cpu_enable_ftrace(void)
{
	get_paca()->ftrace_enabled = 1;
}

/* Set the per-CPU ftrace_enabled flag (disabling may not be implemented) */
static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled)
{
	get_paca()->ftrace_enabled = ftrace_enabled;
}

static inline u8 this_cpu_get_ftrace_enabled(void)
{
	return get_paca()->ftrace_enabled;
}
#else /* CONFIG_PPC64 && CONFIG_FUNCTION_TRACER */
static inline void this_cpu_disable_ftrace(void) { }
static inline void this_cpu_enable_ftrace(void) { }
static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) { }
static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }
#endif /* CONFIG_PPC64 && CONFIG_FUNCTION_TRACER */
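
/*
 * A minimal usage sketch (assumed): code that must not recurse into the
 * tracer on the local CPU can mask ftrace there and restore the previous
 * state afterwards:
 *
 *	u8 saved = this_cpu_get_ftrace_enabled();
 *
 *	this_cpu_disable_ftrace();
 *	... work that must not be traced on this CPU ...
 *	this_cpu_set_ftrace_enabled(saved);
 */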

#ifdef CONFIG_FUNCTION_TRACER
extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
struct ftrace_ool_stub {
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
	struct ftrace_ops *ftrace_op;
#endif
	u32 insn[4];
} __aligned(sizeof(unsigned long));
extern struct ftrace_ool_stub ftrace_ool_stub_text_end[], ftrace_ool_stub_text[],
			      ftrace_ool_stub_inittext[];
extern unsigned int ftrace_ool_stub_text_end_count, ftrace_ool_stub_text_count,
		    ftrace_ool_stub_inittext_count;
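
/*
 * Rough picture (an assumption, for orientation only): each traced call site
 * gets one ftrace_ool_stub of up to four instructions, optionally preceded
 * by a pointer to its ftrace_ops when CALL_OPS is enabled, and
 * dyn_arch_ftrace::ool_stub above records which stub belongs to a given
 * dyn_ftrace record:
 *
 *	struct ftrace_ool_stub *stub = (void *)rec->arch.ool_stub;	// hypothetical lookup
 */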
#endif /* CONFIG_PPC_FTRACE_OUT_OF_LINE */
void ftrace_free_init_tramp(void);
unsigned long ftrace_call_adjust(unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * When an ftrace registered caller is tracing a function that is also set by a
 * register_ftrace_direct() call, it needs to be differentiated in the
 * ftrace_caller trampoline so that the direct call can be invoked after the
 * other ftrace ops. To do this, place the direct caller in the orig_gpr3 field
 * of pt_regs. This tells ftrace_caller that there's a direct caller.
 */
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr)
{
	struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;

	regs->orig_gpr3 = addr;
}
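
/*
 * Illustrative sketch (assumed caller, with hypothetical helper names): an
 * ops callback shared with a direct trampoline would forward the trampoline
 * address so that ftrace_caller can branch to it last:
 *
 *	static void my_ops_func(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *ops, struct ftrace_regs *fregs)
 *	{
 *		unsigned long addr = my_lookup_direct(ops, ip);	// hypothetical
 *
 *		if (addr)
 *			arch_ftrace_set_direct_caller(fregs, addr);
 *	}
 */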
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
#else /* !CONFIG_FUNCTION_TRACER */
static inline void ftrace_free_init_tramp(void) { }
static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; }
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_FTRACE */