/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_UPROBES_H
#define _LINUX_UPROBES_H
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/seqlock.h>

struct uprobe;
struct vm_area_struct;
struct mm_struct;
struct inode;
struct notifier_block;
struct page;

/*
 * Allowed return values from a uprobe consumer's handler callback, with the
 * following meanings:
 *
 * UPROBE_HANDLER_REMOVE
 * - Remove the uprobe breakpoint from current->mm.
 * UPROBE_HANDLER_IGNORE
 * - Ignore the ret_handler callback for this consumer.
 */
#define UPROBE_HANDLER_REMOVE		1
#define UPROBE_HANDLER_IGNORE		2

#define MAX_URETPROBE_DEPTH		64

#define UPROBE_NO_TRAMPOLINE_VADDR	(~0UL)

struct uprobe_consumer {
	/*
	 * handler() can return UPROBE_HANDLER_REMOVE to signal the need to
	 * unregister the uprobe for the current process. If
	 * UPROBE_HANDLER_REMOVE is returned, the filter() callback has to be
	 * implemented as well, and it should return false to "confirm" the
	 * decision to uninstall the uprobe for the current process. If
	 * filter() is omitted or returns true, UPROBE_HANDLER_REMOVE is
	 * effectively ignored.
	 */
	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data);
	int (*ret_handler)(struct uprobe_consumer *self,
				unsigned long func,
				struct pt_regs *regs, __u64 *data);
	bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm);

	struct list_head cons_node;

	__u64 id;	/* set when uprobe_consumer is registered */
};
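
/*
 * Illustrative sketch (not kernel code; names such as my_handler, my_filter
 * and my_target_mm are hypothetical): a consumer that keeps the probe only in
 * one mm of interest, following the handler()/filter() contract documented
 * above.
 *
 *	static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs,
 *			      __u64 *data)
 *	{
 *		if (current->mm != my_target_mm)
 *			return UPROBE_HANDLER_REMOVE;	// ask to detach from this mm
 *		pr_debug("uprobe hit at %lx\n", instruction_pointer(regs));
 *		return 0;				// keep the breakpoint
 *	}
 *
 *	static bool my_filter(struct uprobe_consumer *self, struct mm_struct *mm)
 *	{
 *		return mm == my_target_mm;		// false confirms the removal
 *	}
 *
 *	static struct uprobe_consumer my_uc = {
 *		.handler = my_handler,
 *		.filter  = my_filter,
 *	};
 */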

#ifdef CONFIG_UPROBES
#include <asm/uprobes.h>

enum uprobe_task_state {
	UTASK_RUNNING,
	UTASK_SSTEP,
	UTASK_SSTEP_ACK,
	UTASK_SSTEP_TRAPPED,
};

/* The state of a hybrid-lifetime uprobe inside struct return_instance */
enum hprobe_state {
	HPROBE_LEASED,		/* uretprobes_srcu-protected uprobe */
	HPROBE_STABLE,		/* refcounted uprobe */
	HPROBE_GONE,		/* NULL uprobe, SRCU expired, refcount failed */
	HPROBE_CONSUMED,	/* uprobe "consumed" by uretprobe handler */
};

/*
 * Hybrid lifetime uprobe. Represents a uprobe instance that could be either
 * SRCU-protected (with SRCU protection potentially timing out), refcounted
 * using uprobe->ref, or there could be no valid uprobe (NULL).
 *
 * hprobe's internal state is set up such that the background timer thread can
 * atomically "downgrade" a temporarily RCU-protected uprobe into a refcounted
 * one (or no uprobe, if refcounting failed).
 *
 * The *stable* pointer always points to the uprobe (or could be NULL if there
 * was no valid underlying uprobe to begin with).
 *
 * The *leased* pointer is the key to achieving race-free atomic lifetime state
 * transitions and can have three possible states:
 *   - either the same non-NULL value as *stable*, in which case uprobe is
 *     SRCU-protected;
 *   - NULL, in which case uprobe (if there is any) is refcounted;
 *   - special __UPROBE_DEAD value, which represents an uprobe that was SRCU
 *     protected initially, but the SRCU period timed out and we attempted to
 *     convert it to refcounted, but refcount_inc_not_zero() failed, because
 *     the uprobe effectively went away (the last consumer unsubscribed). In
 *     this case it's important to know that the *stable* pointer (which still
 *     holds a non-NULL uprobe pointer) shouldn't be used, because the lifetime
 *     of the underlying uprobe is not guaranteed anymore. __UPROBE_DEAD is
 *     just an internal marker and is handled transparently by the
 *     hprobe_fetch() helper.
 *
 * When the uprobe is SRCU-protected, we also record the srcu_idx value,
 * necessary for SRCU unlocking.
 *
 * See hprobe_expire() and hprobe_fetch() for the details of race-free uprobe
 * state transitions. It all hinges on an atomic xchg() over the *leased*
 * pointer. The *stable* pointer, once initially set, is not modified
 * concurrently.
 */
struct hprobe {
	enum hprobe_state state;
	int srcu_idx;
	struct uprobe *uprobe;
};

/*
 * uprobe_task: Metadata of a task while it singlesteps.
 */
struct uprobe_task {
	enum uprobe_task_state		state;

	unsigned int			depth;
	struct return_instance		*return_instances;

	struct return_instance		*ri_pool;
	struct timer_list		ri_timer;
	seqcount_t			ri_seqcount;

	union {
		struct {
			struct arch_uprobe_task	autask;
			unsigned long		vaddr;
		};

		struct {
			struct callback_head	dup_xol_work;
			unsigned long		dup_xol_addr;
		};
	};

	struct uprobe			*active_uprobe;
	unsigned long			xol_vaddr;

	struct arch_uprobe		*auprobe;
};

struct return_consumer {
	__u64	cookie;
	__u64	id;
};

struct return_instance {
	struct hprobe		hprobe;
	unsigned long		func;
	unsigned long		stack;		/* stack pointer */
	unsigned long		orig_ret_vaddr; /* original return address */
	bool			chained;	/* true, if instance is nested */
	int			cons_cnt;	/* total number of session consumers */

	struct return_instance	*next;		/* keep as stack */
	struct rcu_head		rcu;

	/* singular pre-allocated return_consumer instance for the common case */
	struct return_consumer	consumer;
	/*
	 * extra return_consumer instances for rare cases of multiple session consumers,
	 * contains (cons_cnt - 1) elements
	 */
	struct return_consumer	*extra_consumers;
} ____cacheline_aligned;
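
/*
 * Illustrative sketch (hypothetical helper, not part of this header): with
 * the layout above, session consumer i of a return_instance is the
 * pre-allocated ->consumer for i == 0 and ->extra_consumers[i - 1] for
 * 0 < i < cons_cnt:
 *
 *	static struct return_consumer *
 *	return_consumer_nth(struct return_instance *ri, int i)
 *	{
 *		return i ? &ri->extra_consumers[i - 1] : &ri->consumer;
 *	}
 */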

enum rp_check {
	RP_CHECK_CALL,
	RP_CHECK_CHAIN_CALL,
	RP_CHECK_RET,
};

struct xol_area;

struct uprobes_state {
	struct xol_area		*xol_area;
};

extern void __init uprobes_init(void);
extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern bool is_swbp_insn(uprobe_opcode_t *insn);
extern bool is_trap_insn(uprobe_opcode_t *insn);
extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern struct uprobe *uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool);
extern void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc);
extern void uprobe_unregister_sync(void);
extern int uprobe_mmap(struct vm_area_struct *vma);
extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void uprobe_start_dup_mmap(void);
extern void uprobe_end_dup_mmap(void);
extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
extern void uprobe_free_utask(struct task_struct *t);
extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs);
extern bool uprobe_deny_signal(void);
extern bool arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
extern void uprobe_clear_state(struct mm_struct *mm);
extern int  arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
extern int  arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern int  arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
extern int  arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
extern bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, struct pt_regs *regs);
extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
					 void *src, unsigned long len);
extern void uprobe_handle_trampoline(struct pt_regs *regs);
extern void *arch_uprobe_trampoline(unsigned long *psize);
extern unsigned long uprobe_get_trampoline_vaddr(void);
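
/*
 * Illustrative usage sketch (error paths trimmed; inode, offset and my_uc are
 * hypothetical, and 0 is passed for ref_ctr_offset in this sketch): attach a
 * consumer to an inode:offset location and tear it down again.
 * uprobe_register() returns either a valid uprobe pointer or an ERR_PTR()
 * value.
 *
 *	struct uprobe *u = uprobe_register(inode, offset, 0, &my_uc);
 *
 *	if (IS_ERR(u))
 *		return PTR_ERR(u);
 *	...
 *	uprobe_unregister_nosync(u, &my_uc);
 *	uprobe_unregister_sync();	// let in-flight handlers finish before freeing my_uc
 */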
#else /* !CONFIG_UPROBES */
struct uprobes_state {
};

static inline void uprobes_init(void)
{
}

#define uprobe_get_trap_addr(regs)	instruction_pointer(regs)

static inline struct uprobe *
uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
	return ERR_PTR(-ENOSYS);
}
static inline int
uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add)
{
	return -ENOSYS;
}
static inline void
uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
}
static inline void uprobe_unregister_sync(void)
{
}
static inline int uprobe_mmap(struct vm_area_struct *vma)
{
	return 0;
}
static inline void
uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
}
static inline void uprobe_start_dup_mmap(void)
{
}
static inline void uprobe_end_dup_mmap(void)
{
}
static inline void
uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
}
static inline void uprobe_notify_resume(struct pt_regs *regs)
{
}
static inline bool uprobe_deny_signal(void)
{
	return false;
}
static inline void uprobe_free_utask(struct task_struct *t)
{
}
static inline void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
}
static inline void uprobe_clear_state(struct mm_struct *mm)
{
}
#endif /* !CONFIG_UPROBES */
#endif	/* _LINUX_UPROBES_H */