// SPDX-License-Identifier: GPL-2.0-only
/*
 * LoongArch KGDB support
 *
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */

#include <linux/hw_breakpoint.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/processor.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/hw_breakpoint.h>
#include <asm/inst.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
#include <asm/sigcontext.h>
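
/*
 * State for software single-stepping: the original instruction found at
 * the step target and its address, so it can be restored afterwards.
 * kgdb_watch_activated tells the watchpoint exception path whether any
 * KGDB hardware breakpoints/watchpoints are currently armed.
 */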
int kgdb_watch_activated;
static unsigned int stepped_opcode;
static unsigned long stepped_address;
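
/*
 * GDB register descriptions. For the general-purpose registers, orig_a0,
 * pc and badv, the third field is a byte offset into struct pt_regs; for
 * the FP, FCC and FCSR entries it is instead the register index within
 * the FPU context (see dbg_get_reg()/dbg_set_reg() below).
 */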
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
	{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
	{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
	{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
	{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
	{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
	{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
	{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
	{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
	{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
	{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
	{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
	{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
	{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
	{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
	{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
	{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
	{ "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
	{ "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
	{ "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
	{ "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
	{ "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
	{ "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
	{ "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
	{ "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
	{ "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
	{ "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
	{ "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
	{ "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
	{ "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
	{ "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
	{ "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
	{ "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
	{ "orig_a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, orig_a0) },
	{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, csr_era) },
	{ "badv", GDB_SIZEOF_REG, offsetof(struct pt_regs, csr_badvaddr) },
	{ "f0", GDB_SIZEOF_REG, 0 },
	{ "f1", GDB_SIZEOF_REG, 1 },
	{ "f2", GDB_SIZEOF_REG, 2 },
	{ "f3", GDB_SIZEOF_REG, 3 },
	{ "f4", GDB_SIZEOF_REG, 4 },
	{ "f5", GDB_SIZEOF_REG, 5 },
	{ "f6", GDB_SIZEOF_REG, 6 },
	{ "f7", GDB_SIZEOF_REG, 7 },
	{ "f8", GDB_SIZEOF_REG, 8 },
	{ "f9", GDB_SIZEOF_REG, 9 },
	{ "f10", GDB_SIZEOF_REG, 10 },
	{ "f11", GDB_SIZEOF_REG, 11 },
	{ "f12", GDB_SIZEOF_REG, 12 },
	{ "f13", GDB_SIZEOF_REG, 13 },
	{ "f14", GDB_SIZEOF_REG, 14 },
	{ "f15", GDB_SIZEOF_REG, 15 },
	{ "f16", GDB_SIZEOF_REG, 16 },
	{ "f17", GDB_SIZEOF_REG, 17 },
	{ "f18", GDB_SIZEOF_REG, 18 },
	{ "f19", GDB_SIZEOF_REG, 19 },
	{ "f20", GDB_SIZEOF_REG, 20 },
	{ "f21", GDB_SIZEOF_REG, 21 },
	{ "f22", GDB_SIZEOF_REG, 22 },
	{ "f23", GDB_SIZEOF_REG, 23 },
	{ "f24", GDB_SIZEOF_REG, 24 },
	{ "f25", GDB_SIZEOF_REG, 25 },
	{ "f26", GDB_SIZEOF_REG, 26 },
	{ "f27", GDB_SIZEOF_REG, 27 },
	{ "f28", GDB_SIZEOF_REG, 28 },
	{ "f29", GDB_SIZEOF_REG, 29 },
	{ "f30", GDB_SIZEOF_REG, 30 },
	{ "f31", GDB_SIZEOF_REG, 31 },
	{ "fcc0", 1, 0 },
	{ "fcc1", 1, 1 },
	{ "fcc2", 1, 2 },
	{ "fcc3", 1, 3 },
	{ "fcc4", 1, 4 },
	{ "fcc5", 1, 5 },
	{ "fcc6", 1, 6 },
	{ "fcc7", 1, 7 },
	{ "fcsr", 4, 0 },
};
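
/*
 * Copy register 'regno' into 'mem' for GDB and return its name, or NULL
 * for an invalid register number. FP state is read from current's FPU
 * context after save_fp(); if the FPU is disabled (CSR.EUEN.FPEN clear),
 * 'mem' is left unmodified.
 */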
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	int reg_offset, reg_size;

	if (regno < 0 || regno >= DBG_MAX_REG_NUM)
		return NULL;

	reg_offset = dbg_reg_def[regno].offset;
	reg_size = dbg_reg_def[regno].size;

	if (reg_offset == -1)
		goto out;

	/* Handle general-purpose/orig_a0/pc/badv registers */
	if (regno <= DBG_PT_REGS_END) {
		memcpy(mem, (void *)regs + reg_offset, reg_size);
		goto out;
	}

	if (!(regs->csr_euen & CSR_EUEN_FPEN))
		goto out;

	save_fp(current);

	/* Handle FP registers */
	switch (regno) {
	case DBG_FCSR:				/* Process the fcsr */
		memcpy(mem, (void *)&current->thread.fpu.fcsr, reg_size);
		break;
	case DBG_FCC_BASE ... DBG_FCC_END:	/* Process the fcc */
		memcpy(mem, (void *)&current->thread.fpu.fcc + reg_offset, reg_size);
		break;
	case DBG_FPR_BASE ... DBG_FPR_END:	/* Process the fpr */
		memcpy(mem, (void *)&current->thread.fpu.fpr[reg_offset], reg_size);
		break;
	default:
		break;
	}

out:
	return dbg_reg_def[regno].name;
}
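
/*
 * Counterpart of dbg_get_reg(): write 'mem' into register 'regno'. FP
 * state is written into current's FPU context and reloaded into the
 * hardware via restore_fp() when the FPU is enabled.
 */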
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
	int reg_offset, reg_size;

	if (regno < 0 || regno >= DBG_MAX_REG_NUM)
		return -EINVAL;

	reg_offset = dbg_reg_def[regno].offset;
	reg_size = dbg_reg_def[regno].size;

	if (reg_offset == -1)
		return 0;

	/* Handle general-purpose/orig_a0/pc/badv registers */
	if (regno <= DBG_PT_REGS_END) {
		memcpy((void *)regs + reg_offset, mem, reg_size);
		return 0;
	}

	if (!(regs->csr_euen & CSR_EUEN_FPEN))
		return 0;

	/* Handle FP registers */
	switch (regno) {
	case DBG_FCSR:				/* Process the fcsr */
		memcpy((void *)&current->thread.fpu.fcsr, mem, reg_size);
		break;
	case DBG_FCC_BASE ... DBG_FCC_END:	/* Process the fcc */
		memcpy((void *)&current->thread.fpu.fcc + reg_offset, mem, reg_size);
		break;
	case DBG_FPR_BASE ... DBG_FPR_END:	/* Process the fpr */
		memcpy((void *)&current->thread.fpu.fpr[reg_offset], mem, reg_size);
		break;
	default:
		break;
	}

	restore_fp(current);

	return 0;
}

/*
 * Similar to regs_to_gdb_regs() except that the process is sleeping, so
 * we may not be able to get all of the info.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
	/* Initialize to zero */
	memset((char *)gdb_regs, 0, NUMREGBYTES);

	gdb_regs[DBG_LOONGARCH_RA] = p->thread.reg01;
	gdb_regs[DBG_LOONGARCH_TP] = (long)p;
	gdb_regs[DBG_LOONGARCH_SP] = p->thread.reg03;

	/* S0 - S8 */
	gdb_regs[DBG_LOONGARCH_S0] = p->thread.reg23;
	gdb_regs[DBG_LOONGARCH_S1] = p->thread.reg24;
	gdb_regs[DBG_LOONGARCH_S2] = p->thread.reg25;
	gdb_regs[DBG_LOONGARCH_S3] = p->thread.reg26;
	gdb_regs[DBG_LOONGARCH_S4] = p->thread.reg27;
	gdb_regs[DBG_LOONGARCH_S5] = p->thread.reg28;
	gdb_regs[DBG_LOONGARCH_S6] = p->thread.reg29;
	gdb_regs[DBG_LOONGARCH_S7] = p->thread.reg30;
	gdb_regs[DBG_LOONGARCH_S8] = p->thread.reg31;

	/*
	 * Use the return address (RA) as the PC, i.e. the moment right
	 * after returning from __switch_to().
	 */
	gdb_regs[DBG_LOONGARCH_PC] = p->thread.reg01;
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
	regs->csr_era = pc;
}
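
/*
 * The compiled-in breakpoint instruction. The kgdb_breakinst label lets
 * the die notifier below recognize this exact instruction and advance
 * the PC past it on resume.
 */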
void arch_kgdb_breakpoint(void)
{
	__asm__ __volatile__ (
		".globl kgdb_breakinst\n\t"
		"nop\n"
		"kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */
}

/*
 * Called via the die notifier chain before the kernel dies. If KGDB is
 * enabled, try to fall into the debugger.
 */
static int kgdb_loongarch_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;

	/* Userspace events, ignore. */
	if (user_mode(regs))
		return NOTIFY_DONE;

	if (!kgdb_io_module_registered)
		return NOTIFY_DONE;

	if (atomic_read(&kgdb_active) != -1)
		kgdb_nmicallback(smp_processor_id(), regs);

	if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
		return NOTIFY_DONE;

	if (atomic_read(&kgdb_setting_breakpoint))
		if (regs->csr_era == (unsigned long)&kgdb_breakinst)
			regs->csr_era += LOONGARCH_INSN_SIZE;

	return NOTIFY_STOP;
}

bool kgdb_breakpoint_handler(struct pt_regs *regs)
{
	struct die_args args = {
		.regs	= regs,
		.str	= "Break",
		.err	= BRK_KDB,
		.trapnr	= read_csr_excode(),
		.signr	= SIGTRAP,
	};

	return kgdb_loongarch_notify(NULL, DIE_TRAP, &args) == NOTIFY_STOP;
}

static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_loongarch_notify,
};
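
/*
 * A GDB continue/step packet may carry an optional resume address in hex
 * right after the command letter (e.g. "c" alone vs. "c<addr>"). If one
 * is present, parse it and resume execution from there.
 */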
static inline void kgdb_arch_update_addr(struct pt_regs *regs,
					 char *remcom_in_buffer)
{
	unsigned long addr;
	char *ptr;

	ptr = &remcom_in_buffer[1];
	if (kgdb_hex2long(&ptr, &addr))
		regs->csr_era = addr;
}

/* Calculate the address of the instruction that executes after a step */
static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
{
	char cj_val;
	unsigned int si, si_l, si_h, rd, rj, cj;
	unsigned long pc = instruction_pointer(regs);
	union loongarch_instruction *ip = (union loongarch_instruction *)pc;

	if (pc & 3) {
		pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
		return -EINVAL;
	}

	*next_addr = pc + LOONGARCH_INSN_SIZE;

	si_h = ip->reg0i26_format.immediate_h;
	si_l = ip->reg0i26_format.immediate_l;
	switch (ip->reg0i26_format.opcode) {
	case b_op:
		*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
		return 0;
	case bl_op:
		*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
		regs->regs[1] = pc + LOONGARCH_INSN_SIZE;
		return 0;
	}

	rj = ip->reg1i21_format.rj;
	cj = (rj & 0x07) + DBG_FCC_BASE;
	si_l = ip->reg1i21_format.immediate_l;
	si_h = ip->reg1i21_format.immediate_h;
	dbg_get_reg(cj, &cj_val, regs);
	switch (ip->reg1i21_format.opcode) {
	case beqz_op:
		if (regs->regs[rj] == 0)
			*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
		return 0;
	case bnez_op:
		if (regs->regs[rj] != 0)
			*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
		return 0;
	case bceqz_op: /* bceqz_op == bcnez_op: bits [4:3] of rj select the variant */
		if (((rj & 0x18) == 0x00) && !cj_val) /* bceqz */
			*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
		if (((rj & 0x18) == 0x08) && cj_val) /* bcnez */
			*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
		return 0;
	}

	rj = ip->reg2i16_format.rj;
	rd = ip->reg2i16_format.rd;
	si = ip->reg2i16_format.immediate;
	switch (ip->reg2i16_format.opcode) {
	case beq_op:
		if (regs->regs[rj] == regs->regs[rd])
			*next_addr = pc + sign_extend64(si << 2, 17);
		return 0;
	case bne_op:
		if (regs->regs[rj] != regs->regs[rd])
			*next_addr = pc + sign_extend64(si << 2, 17);
		return 0;
	case blt_op:
		if ((long)regs->regs[rj] < (long)regs->regs[rd])
			*next_addr = pc + sign_extend64(si << 2, 17);
		return 0;
	case bge_op:
		if ((long)regs->regs[rj] >= (long)regs->regs[rd])
			*next_addr = pc + sign_extend64(si << 2, 17);
		return 0;
	case bltu_op:
		if (regs->regs[rj] < regs->regs[rd])
			*next_addr = pc + sign_extend64(si << 2, 17);
		return 0;
	case bgeu_op:
		if (regs->regs[rj] >= regs->regs[rd])
			*next_addr = pc + sign_extend64(si << 2, 17);
		return 0;
	case jirl_op:
		/* Read rj before writing the link register: rd may equal rj */
		*next_addr = regs->regs[rj] + sign_extend64(si << 2, 17);
		regs->regs[rd] = pc + LOONGARCH_INSN_SIZE;
		return 0;
	}

	return 0;
}
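
/*
 * Single-stepping is implemented in software here: compute where the
 * current instruction will transfer control, save the instruction found
 * there, and plant a breakpoint in its place.
 */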
static int do_single_step(struct pt_regs *regs)
{
	int error = 0;
	unsigned long addr = 0; /* The address the current instruction will transfer control to */

	error = get_step_address(regs, &addr);
	if (error)
		return error;

	/* Save the original opcode found at the step target address */
	error = get_kernel_nofault(stepped_opcode, (void *)addr);
	if (error)
		return error;

	stepped_address = addr;

	/* Replace the opcode with the break instruction */
	error = copy_to_kernel_nofault((void *)stepped_address,
				       arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);

	if (error) {
		stepped_opcode = 0;
		stepped_address = 0;
	} else {
		kgdb_single_step = 1;
		atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id());
	}

	return error;
}

/* Undo a single step */
static void undo_single_step(struct pt_regs *regs)
{
	if (stepped_opcode) {
		copy_to_kernel_nofault((void *)stepped_address,
				       (void *)&stepped_opcode, BREAK_INSTR_SIZE);
		flush_icache_range(stepped_address, stepped_address + BREAK_INSTR_SIZE);
	}

	stepped_opcode = 0;
	stepped_address = 0;
	kgdb_single_step = 0;
	atomic_set(&kgdb_cpu_doing_single_step, -1);
}
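
/*
 * Handle the architecture-specific part of a GDB remote protocol packet:
 * 'D' (detach) and 'k' (kill) resume with watchpoint exceptions disabled,
 * while 'c' (continue) and 's' (step) resume at an optional new address
 * with CSR.PRMD.PWE set, so watchpoints stay live in the resumed context.
 */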
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *regs)
{
	int ret = 0;

	undo_single_step(regs);
	regs->csr_prmd |= CSR_PRMD_PWE;

	switch (remcom_in_buffer[0]) {
	case 'D':
	case 'k':
		regs->csr_prmd &= ~CSR_PRMD_PWE;
		fallthrough;
	case 'c':
		kgdb_arch_update_addr(regs, remcom_in_buffer);
		break;
	case 's':
		kgdb_arch_update_addr(regs, remcom_in_buffer);
		ret = do_single_step(regs);
		break;
	default:
		ret = -1;
	}

	return ret;
}

static struct hw_breakpoint {
	unsigned int		enabled;
	unsigned long		addr;
	int			len;
	int			type;
	struct perf_event	* __percpu *pev;
} breakinfo[LOONGARCH_MAX_BRP];

static int hw_break_reserve_slot(int breakno)
{
	int cpu, cnt = 0;
	struct perf_event **pevent;

	for_each_online_cpu(cpu) {
		cnt++;
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		if (dbg_reserve_bp_slot(*pevent))
			goto fail;
	}

	return 0;

fail:
	for_each_online_cpu(cpu) {
		cnt--;
		if (!cnt)
			break;
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		dbg_release_bp_slot(*pevent);
	}

	return -1;
}

static int hw_break_release_slot(int breakno)
{
	int cpu;
	struct perf_event **pevent;

	if (dbg_is_early)
		return 0;

	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		if (dbg_release_bp_slot(*pevent))
			/*
			 * The debugger is responsible for handling the retry on
			 * remove failure.
			 */
			return -1;
	}

	return 0;
}
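
/*
 * Record a new hardware breakpoint/watchpoint in a free breakinfo slot
 * and reserve a perf slot for it on every online CPU. It is actually
 * written to the debug registers later, by kgdb_correct_hw_break().
 */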
static int kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
	int i;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++)
		if (!breakinfo[i].enabled)
			break;

	if (i == LOONGARCH_MAX_BRP)
		return -1;

	switch (bptype) {
	case BP_HARDWARE_BREAKPOINT:
		breakinfo[i].type = HW_BREAKPOINT_X;
		break;
	case BP_READ_WATCHPOINT:
		breakinfo[i].type = HW_BREAKPOINT_R;
		break;
	case BP_WRITE_WATCHPOINT:
		breakinfo[i].type = HW_BREAKPOINT_W;
		break;
	case BP_ACCESS_WATCHPOINT:
		breakinfo[i].type = HW_BREAKPOINT_RW;
		break;
	default:
		return -1;
	}

	switch (len) {
	case 1:
		breakinfo[i].len = HW_BREAKPOINT_LEN_1;
		break;
	case 2:
		breakinfo[i].len = HW_BREAKPOINT_LEN_2;
		break;
	case 4:
		breakinfo[i].len = HW_BREAKPOINT_LEN_4;
		break;
	case 8:
		breakinfo[i].len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -1;
	}

	breakinfo[i].addr = addr;
	if (hw_break_reserve_slot(i)) {
		breakinfo[i].addr = 0;
		return -1;
	}
	breakinfo[i].enabled = 1;

	return 0;
}

static int kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
	int i;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++)
		if (breakinfo[i].addr == addr && breakinfo[i].enabled)
			break;

	if (i == LOONGARCH_MAX_BRP)
		return -1;

	if (hw_break_release_slot(i)) {
		pr_err("Cannot remove hw breakpoint at %lx\n", addr);
		return -1;
	}
	breakinfo[i].enabled = 0;

	return 0;
}

static void kgdb_disable_hw_break(struct pt_regs *regs)
{
	int i;
	int cpu = raw_smp_processor_id();
	struct perf_event *bp;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
		if (!breakinfo[i].enabled)
			continue;

		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (bp->attr.disabled == 1)
			continue;

		arch_uninstall_hw_breakpoint(bp);
		bp->attr.disabled = 1;
	}

	/* Disable hardware debugging while we are in kgdb */
	csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
}

static void kgdb_remove_all_hw_break(void)
{
	int i;
	int cpu = raw_smp_processor_id();
	struct perf_event *bp;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
		if (!breakinfo[i].enabled)
			continue;

		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (!bp->attr.disabled) {
			arch_uninstall_hw_breakpoint(bp);
			bp->attr.disabled = 1;
			continue;
		}

		if (hw_break_release_slot(i))
			pr_err("KGDB: hw bpt remove failed %lx\n", breakinfo[i].addr);
		breakinfo[i].enabled = 0;
	}

	csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
	kgdb_watch_activated = 0;
}
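
/*
 * Called when resuming from the debugger: (re)install every enabled but
 * currently disarmed breakpoint on this CPU, then set CSR.CRMD.WE if
 * anything is armed so that watchpoint exceptions can be raised.
 */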
static void kgdb_correct_hw_break(void)
{
	int i, activated = 0;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
		struct perf_event *bp;
		int val;
		int cpu = raw_smp_processor_id();

		if (!breakinfo[i].enabled)
			continue;

		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (bp->attr.disabled != 1)
			continue;

		bp->attr.bp_addr = breakinfo[i].addr;
		bp->attr.bp_len = breakinfo[i].len;
		bp->attr.bp_type = breakinfo[i].type;

		val = hw_breakpoint_arch_parse(bp, &bp->attr, counter_arch_bp(bp));
		if (val)
			return;

		val = arch_install_hw_breakpoint(bp);
		if (!val)
			bp->attr.disabled = 0;
		activated = 1;
	}

	csr_xchg32(activated ? CSR_CRMD_WE : 0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
	kgdb_watch_activated = activated;
}
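
/*
 * gdb_bpt_instr is "break 2" (BRK_KDB) as little-endian bytes: break_op
 * occupies bits [31:15] of the instruction word and the 15-bit break
 * code the bottom, so the word is (break_op << 15) | 2 and its third
 * byte in memory is break_op >> 1 (with the current opcode value this
 * should be the word 0x002a0002, i.e. {0x02, 0x00, 0x2a, 0x00}).
 */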
const struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr		= {0x02, 0x00, break_op >> 1, 0x00}, /* BRK_KDB = 2 */
	.flags			= KGDB_HW_BREAKPOINT,
	.set_hw_breakpoint	= kgdb_set_hw_break,
	.remove_hw_breakpoint	= kgdb_remove_hw_break,
	.disable_hw_break	= kgdb_disable_hw_break,
	.remove_all_hw_break	= kgdb_remove_all_hw_break,
	.correct_hw_break	= kgdb_correct_hw_break,
};

int kgdb_arch_init(void)
{
	return register_die_notifier(&kgdb_notifier);
}
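
/*
 * Pre-allocate a wide (per-CPU) perf breakpoint for every slot, using a
 * dummy, disabled attribute. The perf bp slots are released again right
 * away: KGDB manages the slots itself via hw_break_reserve_slot() and
 * only needs the perf_event structures.
 */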
void kgdb_arch_late(void)
{
	int i, cpu;
	struct perf_event_attr attr;
	struct perf_event **pevent;

	hw_breakpoint_init(&attr);

	attr.bp_addr = (unsigned long)kgdb_arch_init;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;
	attr.disabled = 1;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
		if (breakinfo[i].pev)
			continue;

		breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
		if (IS_ERR((void * __force)breakinfo[i].pev)) {
			pr_err("kgdb: Could not allocate hw breakpoints.\n");
			breakinfo[i].pev = NULL;
			return;
		}

		for_each_online_cpu(cpu) {
			pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
			if (pevent[0]->destroy) {
				pevent[0]->destroy = NULL;
				release_bp_slot(*pevent);
			}
		}
	}
}

void kgdb_arch_exit(void)
{
	int i;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
		if (breakinfo[i].pev) {
			unregister_wide_hw_breakpoint(breakinfo[i].pev);
			breakinfo[i].pev = NULL;
		}
	}

	unregister_die_notifier(&kgdb_notifier);
}