// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/ptrace.h>
#include <linux/kdebug.h>
#include <linux/bug.h>
#include <linux/kgdb.h>
#include <linux/irqflags.h>
#include <linux/string.h>
#include <asm/cacheflush.h>
#include <asm/gdb_xml.h>
#include <asm/insn.h>

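/*
 * How a trap address is classified by kgdb_riscv_kgdbbreak(): not a kgdb
 * break at all, a breakpoint planted by the debugger, the compiled-in
 * breakpoint, or the temporary software single-step breakpoint.
 */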
enum {
	NOT_KGDB_BREAK = 0,
	KGDB_SW_BREAK,
	KGDB_COMPILED_BREAK,
	KGDB_SW_SINGLE_STEP
};

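/*
 * Address of the instruction that is temporarily replaced with a
 * breakpoint while software single-stepping, and its original encoding.
 */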
static unsigned long stepped_address;
static unsigned int stepped_opcode;

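/* Extract the 5-bit register index (x0-x31) at the given bit offset */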
static int decode_register_index(unsigned long opcode, int offset)
{
	return (opcode >> offset) & 0x1F;
}

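/* Extract a 3-bit compressed register index, which maps to x8-x15 */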
static int decode_register_index_short(unsigned long opcode, int offset)
{
	return ((opcode >> offset) & 0x7) + 8;
}

/* Calculate the address of the instruction that executes after a step */
static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
{
	unsigned long pc = regs->epc;
	unsigned long *regs_ptr = (unsigned long *)regs;
	unsigned int rs1_num, rs2_num;
	int op_code;

	if (get_kernel_nofault(op_code, (void *)pc))
		return -EINVAL;
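	/* Low two bits != 0b11 means a 16-bit (compressed) encoding */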
	if ((op_code & __INSN_LENGTH_MASK) != __INSN_LENGTH_GE_32) {
		if (riscv_insn_is_c_jalr(op_code) ||
		    riscv_insn_is_c_jr(op_code)) {
			rs1_num = decode_register_index(op_code, RVC_C2_RS1_OPOFF);
			*next_addr = regs_ptr[rs1_num];
		} else if (riscv_insn_is_c_j(op_code) ||
			   riscv_insn_is_c_jal(op_code)) {
			*next_addr = RVC_EXTRACT_JTYPE_IMM(op_code) + pc;
		} else if (riscv_insn_is_c_beqz(op_code)) {
			rs1_num = decode_register_index_short(op_code,
							      RVC_C1_RS1_OPOFF);
			if (!rs1_num || regs_ptr[rs1_num] == 0)
				*next_addr = RVC_EXTRACT_BTYPE_IMM(op_code) + pc;
			else
				*next_addr = pc + 2;
		} else if (riscv_insn_is_c_bnez(op_code)) {
			rs1_num =
			    decode_register_index_short(op_code, RVC_C1_RS1_OPOFF);
			if (rs1_num && regs_ptr[rs1_num] != 0)
				*next_addr = RVC_EXTRACT_BTYPE_IMM(op_code) + pc;
			else
				*next_addr = pc + 2;
		} else {
			*next_addr = pc + 2;
		}
	} else {
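		/* 32-bit encodings: conditional branches, jal, jalr and sret */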
		if ((op_code & __INSN_OPCODE_MASK) == __INSN_BRANCH_OPCODE) {
			bool result = false;
			long imm = RV_EXTRACT_BTYPE_IMM(op_code);
			unsigned long rs1_val = 0, rs2_val = 0;

			rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
			rs2_num = decode_register_index(op_code, RVG_RS2_OPOFF);
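			/*
			 * Slot 0 of pt_regs is epc rather than x0, so a zero
			 * register index is treated as the constant 0 here.
			 */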
			if (rs1_num)
				rs1_val = regs_ptr[rs1_num];
			if (rs2_num)
				rs2_val = regs_ptr[rs2_num];

			if (riscv_insn_is_beq(op_code))
				result = rs1_val == rs2_val;
			else if (riscv_insn_is_bne(op_code))
				result = rs1_val != rs2_val;
			else if (riscv_insn_is_blt(op_code))
				result = (long)rs1_val < (long)rs2_val;
			else if (riscv_insn_is_bge(op_code))
				result = (long)rs1_val >= (long)rs2_val;
			else if (riscv_insn_is_bltu(op_code))
				result = rs1_val < rs2_val;
			else if (riscv_insn_is_bgeu(op_code))
				result = rs1_val >= rs2_val;
			if (result)
				*next_addr = imm + pc;
			else
				*next_addr = pc + 4;
		} else if (riscv_insn_is_jal(op_code)) {
			*next_addr = RV_EXTRACT_JTYPE_IMM(op_code) + pc;
		} else if (riscv_insn_is_jalr(op_code)) {
			rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
			if (rs1_num)
				*next_addr = ((unsigned long *)regs)[rs1_num];
			*next_addr += RV_EXTRACT_ITYPE_IMM(op_code);
		} else if (riscv_insn_is_sret(op_code)) {
			*next_addr = pc;
		} else {
			*next_addr = pc + 4;
		}
	}
	return 0;
}

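/*
 * Single step in software: plant a breakpoint at the address the
 * instruction at regs->epc will hand control to, then resume.
 */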
static int do_single_step(struct pt_regs *regs)
{
	/* Determine where the target instruction will send us to */
	unsigned long addr = 0;
	int error = get_step_address(regs, &addr);

	if (error)
		return error;

	/* Store the op code in the stepped address */
	error = get_kernel_nofault(stepped_opcode, (void *)addr);
	if (error)
		return error;

	stepped_address = addr;

	/* Replace the op code with the break instruction */
	error = copy_to_kernel_nofault((void *)stepped_address,
				   arch_kgdb_ops.gdb_bpt_instr,
				   BREAK_INSTR_SIZE);
	/* Flush and return */
	if (!error) {
		flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
		kgdb_single_step = 1;
		atomic_set(&kgdb_cpu_doing_single_step,
			   raw_smp_processor_id());
	} else {
		stepped_address = 0;
		stepped_opcode = 0;
	}
	return error;
}

/* Undo a single step: restore the original instruction and clear state */
static void undo_single_step(struct pt_regs *regs)
{
	if (stepped_opcode != 0) {
		copy_to_kernel_nofault((void *)stepped_address,
				   (void *)&stepped_opcode, BREAK_INSTR_SIZE);
		flush_icache_range(stepped_address,
				   stepped_address + BREAK_INSTR_SIZE);
	}
	stepped_address = 0;
	stepped_opcode = 0;
	kgdb_single_step = 0;
	atomic_set(&kgdb_cpu_doing_single_step, -1);
}

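/*
 * Map each register exposed to gdb onto its slot in struct pt_regs;
 * x0 has no storage (offset -1) and always reads back as zero.
 */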
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
	{DBG_REG_ZERO, GDB_SIZEOF_REG, -1},
	{DBG_REG_RA, GDB_SIZEOF_REG, offsetof(struct pt_regs, ra)},
	{DBG_REG_SP, GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
	{DBG_REG_GP, GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)},
	{DBG_REG_TP, GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
	{DBG_REG_T0, GDB_SIZEOF_REG, offsetof(struct pt_regs, t0)},
	{DBG_REG_T1, GDB_SIZEOF_REG, offsetof(struct pt_regs, t1)},
	{DBG_REG_T2, GDB_SIZEOF_REG, offsetof(struct pt_regs, t2)},
	{DBG_REG_FP, GDB_SIZEOF_REG, offsetof(struct pt_regs, s0)},
	{DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, s1)},
	{DBG_REG_A0, GDB_SIZEOF_REG, offsetof(struct pt_regs, a0)},
	{DBG_REG_A1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
	{DBG_REG_A2, GDB_SIZEOF_REG, offsetof(struct pt_regs, a2)},
	{DBG_REG_A3, GDB_SIZEOF_REG, offsetof(struct pt_regs, a3)},
	{DBG_REG_A4, GDB_SIZEOF_REG, offsetof(struct pt_regs, a4)},
	{DBG_REG_A5, GDB_SIZEOF_REG, offsetof(struct pt_regs, a5)},
	{DBG_REG_A6, GDB_SIZEOF_REG, offsetof(struct pt_regs, a6)},
	{DBG_REG_A7, GDB_SIZEOF_REG, offsetof(struct pt_regs, a7)},
	{DBG_REG_S2, GDB_SIZEOF_REG, offsetof(struct pt_regs, s2)},
	{DBG_REG_S3, GDB_SIZEOF_REG, offsetof(struct pt_regs, s3)},
	{DBG_REG_S4, GDB_SIZEOF_REG, offsetof(struct pt_regs, s4)},
	{DBG_REG_S5, GDB_SIZEOF_REG, offsetof(struct pt_regs, s5)},
	{DBG_REG_S6, GDB_SIZEOF_REG, offsetof(struct pt_regs, s6)},
	{DBG_REG_S7, GDB_SIZEOF_REG, offsetof(struct pt_regs, s7)},
	{DBG_REG_S8, GDB_SIZEOF_REG, offsetof(struct pt_regs, s8)},
	{DBG_REG_S9, GDB_SIZEOF_REG, offsetof(struct pt_regs, s9)},
	{DBG_REG_S10, GDB_SIZEOF_REG, offsetof(struct pt_regs, s10)},
	{DBG_REG_S11, GDB_SIZEOF_REG, offsetof(struct pt_regs, s11)},
	{DBG_REG_T3, GDB_SIZEOF_REG, offsetof(struct pt_regs, t3)},
	{DBG_REG_T4, GDB_SIZEOF_REG, offsetof(struct pt_regs, t4)},
	{DBG_REG_T5, GDB_SIZEOF_REG, offsetof(struct pt_regs, t5)},
	{DBG_REG_T6, GDB_SIZEOF_REG, offsetof(struct pt_regs, t6)},
	{DBG_REG_EPC, GDB_SIZEOF_REG, offsetof(struct pt_regs, epc)},
	{DBG_REG_STATUS, GDB_SIZEOF_REG, offsetof(struct pt_regs, status)},
	{DBG_REG_BADADDR, GDB_SIZEOF_REG, offsetof(struct pt_regs, badaddr)},
	{DBG_REG_CAUSE, GDB_SIZEOF_REG, offsetof(struct pt_regs, cause)},
};

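/* Copy one register out for gdb; registers without storage read as zero */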
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return NULL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
		       dbg_reg_def[regno].size);
	else
		memset(mem, 0, dbg_reg_def[regno].size);
	return dbg_reg_def[regno].name;
}

int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return -EINVAL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
		       dbg_reg_def[regno].size);
	return 0;
}

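/*
 * Build a gdb register set for a task that is not currently running; only
 * the callee-saved state kept across a context switch (sp, the s registers
 * and ra) is available, everything else reads as zero.
 */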
void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{
	/* Initialize to zero */
	memset((char *)gdb_regs, 0, NUMREGBYTES);

	gdb_regs[DBG_REG_SP_OFF] = task->thread.sp;
	gdb_regs[DBG_REG_FP_OFF] = task->thread.s[0];
	gdb_regs[DBG_REG_S1_OFF] = task->thread.s[1];
	gdb_regs[DBG_REG_S2_OFF] = task->thread.s[2];
	gdb_regs[DBG_REG_S3_OFF] = task->thread.s[3];
	gdb_regs[DBG_REG_S4_OFF] = task->thread.s[4];
	gdb_regs[DBG_REG_S5_OFF] = task->thread.s[5];
	gdb_regs[DBG_REG_S6_OFF] = task->thread.s[6];
	gdb_regs[DBG_REG_S7_OFF] = task->thread.s[7];
	gdb_regs[DBG_REG_S8_OFF] = task->thread.s[8];
	gdb_regs[DBG_REG_S9_OFF] = task->thread.s[10];
	gdb_regs[DBG_REG_S10_OFF] = task->thread.s[11];
	gdb_regs[DBG_REG_EPC_OFF] = task->thread.ra;
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
	regs->epc = pc;
}

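/*
 * The compiled-in breakpoint; the kgdb_compiled_break label lets the trap
 * handler recognise it so the ebreak can be stepped over on resume.
 */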
noinline void arch_kgdb_breakpoint(void)
{
	asm(".global kgdb_compiled_break\n"
	    "kgdb_compiled_break: ebreak\n");
}

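/* Serve the target description and cpu XML that gdb requests via qXfer */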
void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
				char *remcom_out_buffer)
{
	if (!strncmp(remcom_in_buffer, gdb_xfer_read_target,
		     sizeof(gdb_xfer_read_target)))
		strcpy(remcom_out_buffer, riscv_gdb_stub_target_desc);
	else if (!strncmp(remcom_in_buffer, gdb_xfer_read_cpuxml,
			  sizeof(gdb_xfer_read_cpuxml)))
		strcpy(remcom_out_buffer, riscv_gdb_stub_cpuxml);
}

static inline void kgdb_arch_update_addr(struct pt_regs *regs,
					 char *remcom_in_buffer)
{
	unsigned long addr;
	char *ptr;

	ptr = &remcom_in_buffer[1];
	if (kgdb_hex2long(&ptr, &addr))
		regs->epc = addr;
}

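/*
 * Handle the remote protocol packets that need architecture help:
 * continue ('c') and step ('s') may carry an optional resume address,
 * while detach ('D') and kill ('k') simply resume execution.
 */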
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *regs)
{
	int err = 0;

	undo_single_step(regs);

	switch (remcom_in_buffer[0]) {
	case 'c':
	case 'D':
	case 'k':
		if (remcom_in_buffer[0] == 'c')
			kgdb_arch_update_addr(regs, remcom_in_buffer);
		break;
	case 's':
		kgdb_arch_update_addr(regs, remcom_in_buffer);
		err = do_single_step(regs);
		break;
	default:
		err = -1;
	}
	return err;
}

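/* Classify the breakpoint that caused a trap at @addr */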
static int kgdb_riscv_kgdbbreak(unsigned long addr)
{
	if (stepped_address == addr)
		return KGDB_SW_SINGLE_STEP;
	if (atomic_read(&kgdb_setting_breakpoint))
		if (addr == (unsigned long)&kgdb_compiled_break)
			return KGDB_COMPILED_BREAK;

	return kgdb_has_hit_break(addr);
}

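/*
 * Die notifier: enter the kgdb core for kernel-mode breakpoint and
 * single-step traps, and step over the compiled-in ebreak on resume.
 */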
static int kgdb_riscv_notify(struct notifier_block *self, unsigned long cmd,
			     void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long flags;
	int type;

	if (user_mode(regs))
		return NOTIFY_DONE;

	type = kgdb_riscv_kgdbbreak(regs->epc);
	if (type == NOT_KGDB_BREAK && cmd == DIE_TRAP)
		return NOTIFY_DONE;

	local_irq_save(flags);

	if (kgdb_handle_exception(type == KGDB_SW_SINGLE_STEP ? 0 : 1,
				  args->signr, cmd, regs))
		return NOTIFY_DONE;

	if (type == KGDB_COMPILED_BREAK)
		regs->epc += 4;

	local_irq_restore(flags);

	return NOTIFY_STOP;
}

static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_riscv_notify,
};

int kgdb_arch_init(void)
{
	register_die_notifier(&kgdb_notifier);

	return 0;
}

void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}

/*
 * Global data
 */
#ifdef CONFIG_RISCV_ISA_C
const struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr = {0x02, 0x90},	/* c.ebreak */
};
#else
const struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr = {0x73, 0x00, 0x10, 0x00},	/* ebreak */
};
#endif