// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2023 Loongson Technology Corporation Limited
 */
#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/hw_breakpoint.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>

#include <asm/hw_breakpoint.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[LOONGARCH_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[LOONGARCH_MAX_WRP]);

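/* Return the number of hardware slots available for the given breakpoint type. */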
int hw_breakpoint_slots(int type)
{
	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warn("unknown slot type: %d\n", type);
		return 0;
	}
}

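/*
 * Helper macros that expand to one switch case per slot (0-13), dispatching
 * to LOONGARCH_CSR_WATCH_READ/WRITE for the ADDR/MASK/CTRL/ASID config
 * registers.
 */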
#define READ_WB_REG_CASE(OFF, N, REG, T, VAL)		\
	case (OFF + N):					\
		LOONGARCH_CSR_WATCH_READ(N, REG, T, VAL);	\
		break

#define WRITE_WB_REG_CASE(OFF, N, REG, T, VAL)		\
	case (OFF + N):					\
		LOONGARCH_CSR_WATCH_WRITE(N, REG, T, VAL);	\
		break

#define GEN_READ_WB_REG_CASES(OFF, REG, T, VAL)		\
	READ_WB_REG_CASE(OFF, 0, REG, T, VAL);		\
	READ_WB_REG_CASE(OFF, 1, REG, T, VAL);		\
	READ_WB_REG_CASE(OFF, 2, REG, T, VAL);		\
	READ_WB_REG_CASE(OFF, 3, REG, T, VAL);		\
	READ_WB_REG_CASE(OFF, 4, REG, T, VAL);		\
	READ_WB_REG_CASE(OFF, 5, REG, T, VAL);		\
	READ_WB_REG_CASE(OFF, 6, REG, T, VAL);		\
	READ_WB_REG_CASE(OFF, 7, REG, T, VAL);		\
	READ_WB_REG_CASE(OFF, 8, REG, T, VAL);		\
	READ_WB_REG_CASE(OFF, 9, REG, T, VAL);		\
	READ_WB_REG_CASE(OFF, 10, REG, T, VAL);		\
	READ_WB_REG_CASE(OFF, 11, REG, T, VAL);		\
	READ_WB_REG_CASE(OFF, 12, REG, T, VAL);		\
	READ_WB_REG_CASE(OFF, 13, REG, T, VAL);

#define GEN_WRITE_WB_REG_CASES(OFF, REG, T, VAL)	\
	WRITE_WB_REG_CASE(OFF, 0, REG, T, VAL);		\
	WRITE_WB_REG_CASE(OFF, 1, REG, T, VAL);		\
	WRITE_WB_REG_CASE(OFF, 2, REG, T, VAL);		\
	WRITE_WB_REG_CASE(OFF, 3, REG, T, VAL);		\
	WRITE_WB_REG_CASE(OFF, 4, REG, T, VAL);		\
	WRITE_WB_REG_CASE(OFF, 5, REG, T, VAL);		\
	WRITE_WB_REG_CASE(OFF, 6, REG, T, VAL);		\
	WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL);		\
	WRITE_WB_REG_CASE(OFF, 8, REG, T, VAL);		\
	WRITE_WB_REG_CASE(OFF, 9, REG, T, VAL);		\
	WRITE_WB_REG_CASE(OFF, 10, REG, T, VAL);	\
	WRITE_WB_REG_CASE(OFF, 11, REG, T, VAL);	\
	WRITE_WB_REG_CASE(OFF, 12, REG, T, VAL);	\
	WRITE_WB_REG_CASE(OFF, 13, REG, T, VAL);

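/*
 * Read a breakpoint/watchpoint config register: @reg selects ADDR/MASK/CTRL/ASID,
 * @n is the slot index, @t selects the instruction (0) or data (1) register bank.
 */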
static u64 read_wb_reg(int reg, int n, int t)
{
	u64 val = 0;

	switch (reg + n) {
	GEN_READ_WB_REG_CASES(CSR_CFG_ADDR, ADDR, t, val);
	GEN_READ_WB_REG_CASES(CSR_CFG_MASK, MASK, t, val);
	GEN_READ_WB_REG_CASES(CSR_CFG_CTRL, CTRL, t, val);
	GEN_READ_WB_REG_CASES(CSR_CFG_ASID, ASID, t, val);
	default:
		pr_warn("Attempt to read from unknown breakpoint register %d\n", n);
	}

	return val;
}
NOKPROBE_SYMBOL(read_wb_reg);

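/* Write a breakpoint/watchpoint config register, mirroring read_wb_reg() above. */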
static void write_wb_reg(int reg, int n, int t, u64 val)
{
	switch (reg + n) {
	GEN_WRITE_WB_REG_CASES(CSR_CFG_ADDR, ADDR, t, val);
	GEN_WRITE_WB_REG_CASES(CSR_CFG_MASK, MASK, t, val);
	GEN_WRITE_WB_REG_CASES(CSR_CFG_CTRL, CTRL, t, val);
	GEN_WRITE_WB_REG_CASES(CSR_CFG_ASID, ASID, t, val);
	default:
		pr_warn("Attempt to write to unknown breakpoint register %d\n", n);
	}
}
NOKPROBE_SYMBOL(write_wb_reg);

enum hw_breakpoint_ops {
	HW_BREAKPOINT_INSTALL,
	HW_BREAKPOINT_UNINSTALL,
};

/*
 * hw_breakpoint_slot_setup - Find and setup a perf slot according to operations
 *
 * @slots: pointer to array of slots
 * @max_slots: max number of slots
 * @bp: perf_event to setup
 * @ops: operation to be carried out on the slot
 *
 * Return:
 *	slot index on success
 *	-ENOSPC if no slot is available/matches
 *	-EINVAL on wrong operations parameter
 */
static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
				    struct perf_event *bp, enum hw_breakpoint_ops ops)
{
	int i;
	struct perf_event **slot;

	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];
		switch (ops) {
		case HW_BREAKPOINT_INSTALL:
			if (!*slot) {
				*slot = bp;
				return i;
			}
			break;
		case HW_BREAKPOINT_UNINSTALL:
			if (*slot == bp) {
				*slot = NULL;
				return i;
			}
			break;
		default:
			pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
			return -EINVAL;
		}
	}

	return -ENOSPC;
}

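/*
 * Clear the per-thread breakpoint/watchpoint pointers when a thread is copied,
 * so the new task does not inherit the parent's hardware breakpoints.
 */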
void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(tsk->thread.hbp_break, 0, sizeof(tsk->thread.hbp_break));
	memset(tsk->thread.hbp_watch, 0, sizeof(tsk->thread.hbp_watch));
}

/*
 * Unregister breakpoints from this task and reset the pointers in the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
		if (t->hbp_break[i]) {
			unregister_hw_breakpoint(t->hbp_break[i]);
			t->hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < LOONGARCH_MAX_WRP; i++) {
		if (t->hbp_watch[i]) {
			unregister_hw_breakpoint(t->hbp_watch[i]);
			t->hbp_watch[i] = NULL;
		}
	}
}

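/*
 * Install or uninstall @bp in a free/matching hardware slot and program
 * (or clear) the corresponding FWPnCFG/MWPnCFG registers.
 */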
static int hw_breakpoint_control(struct perf_event *bp,
				 enum hw_breakpoint_ops ops)
{
	u32 ctrl, privilege;
	int i, max_slots, enable;
	struct pt_regs *regs;
	struct perf_event **slots;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	if (arch_check_bp_in_kernelspace(info))
		privilege = CTRL_PLV0_ENABLE;
	else
		privilege = CTRL_PLV3_ENABLE;

	/* Whether bp belongs to a task. */
	if (bp->hw.target)
		regs = task_pt_regs(bp->hw.target);

	if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = boot_cpu_data.watch_ireg_count;
	} else {
		/* Watchpoint */
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = boot_cpu_data.watch_dreg_count;
	}

	i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);

	if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
		return i;

	switch (ops) {
	case HW_BREAKPOINT_INSTALL:
		/* Set the FWPnCFG/MWPnCFG 1~4 register. */
		if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
			write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
			write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
			write_wb_reg(CSR_CFG_ASID, i, 0, 0);
			write_wb_reg(CSR_CFG_CTRL, i, 0, privilege);
		} else {
			write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
			write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
			write_wb_reg(CSR_CFG_ASID, i, 1, 0);
			ctrl = encode_ctrl_reg(info->ctrl);
			write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | privilege);
		}
		enable = csr_read64(LOONGARCH_CSR_CRMD);
		csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
		if (bp->hw.target && test_tsk_thread_flag(bp->hw.target, TIF_LOAD_WATCH))
			regs->csr_prmd |= CSR_PRMD_PWE;
		break;
	case HW_BREAKPOINT_UNINSTALL:
		/* Reset the FWPnCFG/MWPnCFG 1~4 register. */
		if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
			write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
			write_wb_reg(CSR_CFG_MASK, i, 0, 0);
			write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
			write_wb_reg(CSR_CFG_ASID, i, 0, 0);
		} else {
			write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
			write_wb_reg(CSR_CFG_MASK, i, 1, 0);
			write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
			write_wb_reg(CSR_CFG_ASID, i, 1, 0);
		}
		if (bp->hw.target)
			regs->csr_prmd &= ~CSR_PRMD_PWE;
		break;
	}

	return 0;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}

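/* Translate an arch breakpoint length encoding into a length in bytes. */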
static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case LOONGARCH_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case LOONGARCH_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case LOONGARCH_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case LOONGARCH_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	unsigned int len;
	unsigned long va;

	va = hw->address;
	len = get_hbp_len(hw->ctrl.len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case LOONGARCH_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case LOONGARCH_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case LOONGARCH_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case LOONGARCH_BREAKPOINT_LOAD | LOONGARCH_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case LOONGARCH_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case LOONGARCH_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case LOONGARCH_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case LOONGARCH_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp,
			      const struct perf_event_attr *attr,
			      struct arch_hw_breakpoint *hw)
{
	/* Type */
	switch (attr->bp_type) {
	case HW_BREAKPOINT_X:
		hw->ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		hw->ctrl.type = LOONGARCH_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		hw->ctrl.type = LOONGARCH_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		hw->ctrl.type = LOONGARCH_BREAKPOINT_LOAD | LOONGARCH_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (attr->bp_len) {
	case HW_BREAKPOINT_LEN_1:
		hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Address */
	hw->address = attr->bp_addr;

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	int ret;
	u64 alignment_mask;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp, attr, hw);
	if (ret)
		return ret;

	if (hw->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
		alignment_mask = 0x3;
		hw->address &= ~alignment_mask;
	}

	return 0;
}

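/*
 * Enable or disable all currently installed breakpoints (type == 0) or
 * watchpoints (type == 1) and update PRMD.PWE in @regs accordingly.
 */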
static void update_bp_registers(struct pt_regs *regs, int enable, int type)
{
	u32 ctrl;
	int i, max_slots;
	struct perf_event **slots;
	struct arch_hw_breakpoint *info;

	switch (type) {
	case 0:
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = boot_cpu_data.watch_ireg_count;
		break;
	case 1:
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = boot_cpu_data.watch_dreg_count;
		break;
	default:
		return;
	}

	for (i = 0; i < max_slots; ++i) {
		if (!slots[i])
			continue;

		info = counter_arch_bp(slots[i]);
		if (enable) {
			if ((info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) && (type == 0)) {
				write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
			} else {
				ctrl = read_wb_reg(CSR_CFG_CTRL, i, 1);
				if (info->ctrl.type == LOONGARCH_BREAKPOINT_LOAD)
					ctrl |= 0x1 << MWPnCFG3_LoadEn;
				if (info->ctrl.type == LOONGARCH_BREAKPOINT_STORE)
					ctrl |= 0x1 << MWPnCFG3_StoreEn;
				write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl);
			}
			regs->csr_prmd |= CSR_PRMD_PWE;
		} else {
			if ((info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) && (type == 0)) {
				write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
			} else {
				ctrl = read_wb_reg(CSR_CFG_CTRL, i, 1);
				if (info->ctrl.type == LOONGARCH_BREAKPOINT_LOAD)
					ctrl &= ~(0x1 << MWPnCFG3_LoadEn);
				if (info->ctrl.type == LOONGARCH_BREAKPOINT_STORE)
					ctrl &= ~(0x1 << MWPnCFG3_StoreEn);
				write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl);
			}
			regs->csr_prmd &= ~CSR_PRMD_PWE;
		}
	}
}
NOKPROBE_SYMBOL(update_bp_registers);

/*
 * Debug exception handlers.
 */
void breakpoint_handler(struct pt_regs *regs)
{
	int i;
	struct perf_event *bp, **slots;

	slots = this_cpu_ptr(bp_on_reg);

	for (i = 0; i < boot_cpu_data.watch_ireg_count; ++i) {
		if ((csr_read32(LOONGARCH_CSR_FWPS) & (0x1 << i))) {
			bp = slots[i];
			if (bp == NULL)
				continue;
			perf_bp_event(bp, regs);
			csr_write32(0x1 << i, LOONGARCH_CSR_FWPS);
			update_bp_registers(regs, 0, 0);
		}
	}
}
NOKPROBE_SYMBOL(breakpoint_handler);

void watchpoint_handler(struct pt_regs *regs)
{
	int i;
	struct perf_event *wp, **slots;

	slots = this_cpu_ptr(wp_on_reg);

	for (i = 0; i < boot_cpu_data.watch_dreg_count; ++i) {
		if ((csr_read32(LOONGARCH_CSR_MWPS) & (0x1 << i))) {
			wp = slots[i];
			if (wp == NULL)
				continue;
			perf_bp_event(wp, regs);
			csr_write32(0x1 << i, LOONGARCH_CSR_MWPS);
			update_bp_registers(regs, 0, 1);
		}
	}
}
NOKPROBE_SYMBOL(watchpoint_handler);

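/*
 * Probe the number of breakpoint/watchpoint registers at boot and copy the
 * counts from the boot CPU to all other CPUs.
 */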
static int __init arch_hw_breakpoint_init(void)
{
	int cpu;

	boot_cpu_data.watch_ireg_count = get_num_brps();
	boot_cpu_data.watch_dreg_count = get_num_wrps();

	pr_info("Found %d breakpoint and %d watchpoint registers.\n",
		boot_cpu_data.watch_ireg_count, boot_cpu_data.watch_dreg_count);

	for (cpu = 1; cpu < NR_CPUS; cpu++) {
		cpu_data[cpu].watch_ireg_count = boot_cpu_data.watch_ireg_count;
		cpu_data[cpu].watch_dreg_count = boot_cpu_data.watch_dreg_count;
	}

	return 0;
}
arch_initcall(arch_hw_breakpoint_init);

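/*
 * Context-switch hook: when the incoming task is single-stepping, set the
 * skip bit in FWPS if the resume address still matches breakpoint 0 and
 * enable PWE; otherwise re-enable the task's breakpoints and watchpoints.
 */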
void hw_breakpoint_thread_switch(struct task_struct *next)
{
	u64 addr, mask;
	struct pt_regs *regs = task_pt_regs(next);

	if (test_tsk_thread_flag(next, TIF_SINGLESTEP)) {
		addr = read_wb_reg(CSR_CFG_ADDR, 0, 0);
		mask = read_wb_reg(CSR_CFG_MASK, 0, 0);
		if (!((regs->csr_era ^ addr) & ~mask))
			csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
		regs->csr_prmd |= CSR_PRMD_PWE;
	} else {
		/* Update breakpoints */
		update_bp_registers(regs, 1, 0);
		/* Update watchpoints */
		update_bp_registers(regs, 1, 1);
	}
}

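/* Breakpoint events carry no hardware counter value, so there is nothing to read. */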
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}