/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/kernel/entry-v7m.S
 *
 * Copyright (C) 2008 ARM Ltd.
 *
 * Low-level vector interface routines for the ARMv7-M architecture
 */
#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/thread_notify.h>
#include <asm/v7m.h>

#include "entry-header.S"

#ifdef CONFIG_TRACE_IRQFLAGS
#error "CONFIG_TRACE_IRQFLAGS not supported on the current ARMv7M implementation"
#endif

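/*
 * Catch-all handler for exceptions that have no dedicated handler
 * (faults, NMI, SysTick, ...): save the exception frame, print the
 * exception number (IPSR) and LR, dump the registers and loop forever.
 */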
__invalid_entry:
	v7m_exception_entry
#ifdef CONFIG_PRINTK
	adr	r0, strerr
	mrs	r1, ipsr
	mov	r2, lr
	bl	printk
#endif
	mov	r0, sp
	bl	show_regs
1:	b	1b
ENDPROC(__invalid_entry)

strerr:	.asciz	"\nUnhandled exception: IPSR = %08lx LR = %08lx\n"

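/*
 * Common entry for all external (NVIC) interrupts.  The active exception
 * number is taken from IPSR and handed to the NVIC irqchip driver; any
 * work needed before returning to user space is deferred to PendSV below.
 */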
	.align	2
__irq_entry:
	v7m_exception_entry

	@
	@ Invoke the IRQ handler
	@
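	@ IPSR holds the active exception number; external interrupts start
	@ at exception number 16, so subtracting 16 yields the NVIC IRQ
	@ number expected by nvic_handle_irq().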
	mrs	r0, ipsr
	ldr	r1, =V7M_xPSR_EXCEPTIONNO
	and	r0, r1
	sub	r0, #16
	mov	r1, sp
	stmdb	sp!, {lr}
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	bl	nvic_handle_irq

	pop	{lr}
	@
	@ Check for any pending work if returning to user
	@
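	@ RETTOBASE is set when no other exception is active, i.e. this
	@ handler will return directly to the interrupted thread.  Only then
	@ check the thread flags; if work is pending, pend PendSV so the work
	@ is handled from __pendsv_entry once this handler has returned.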
	ldr	r1, =BASEADDR_V7M_SCB
	ldr	r0, [r1, V7M_SCB_ICSR]
	tst	r0, V7M_SCB_ICSR_RETTOBASE
	beq	2f

	get_thread_info tsk
	ldr	r2, [tsk, #TI_FLAGS]
	tst	r2, #_TIF_WORK_MASK
	beq	2f			@ no work pending
	mov	r0, #V7M_SCB_ICSR_PENDSVSET
	str	r0, [r1, V7M_SCB_ICSR]	@ raise PendSV

2:
	@ registers r0-r3 and r12 are automatically restored on exception
	@ return. r4-r7 were not clobbered in v7m_exception_entry so for
	@ correctness they don't need to be restored. So only r8-r11 must be
	@ restored here. The easiest way to do so is to restore r0-r7, too.
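	@ Skipping the rest of the pt_regs frame returns sp to its value at
	@ exception entry; interrupts are then re-enabled and bx lr (with the
	@ EXC_RETURN value still in lr) performs the actual exception return.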
	ldmia	sp!, {r0-r11}
	add	sp, #PT_REGS_SIZE-S_IP
	cpsie	i
	bx	lr
ENDPROC(__irq_entry)

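/*
 * PendSV is pended from __irq_entry above when a return to user space has
 * work outstanding.  Clear the request and take the usual return-to-user
 * path, which handles rescheduling, signals and the other TIF work.
 */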
__pendsv_entry:
	v7m_exception_entry

	ldr	r1, =BASEADDR_V7M_SCB
	mov	r0, #V7M_SCB_ICSR_PENDSVCLR
	str	r0, [r1, V7M_SCB_ICSR]	@ clear PendSV

	@ execute the pending work, including reschedule
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
ENDPROC(__pendsv_entry)

/*
 * Register switch for ARMv7-M processors.
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	.fnstart
	.cantunwind
	add	ip, r1, #TI_CPU_SAVE
	stmia	ip!, {r4 - r11}		@ Store most regs on stack
	str	sp, [ip], #4
	str	lr, [ip], #4
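	@ r0 (previous task_struct) is the value __switch_to must return, so
	@ keep it in r5 across the notifier call; r4 keeps the pointer to the
	@ next thread's saved context for the restore below.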
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
	mov	ip, r4
	mov	r0, r5
	ldmia	ip!, {r4 - r11}		@ Load all regs saved previously
	ldr	sp, [ip]
	ldr	pc, [ip, #4]!
	.fnend
ENDPROC(__switch_to)

	.data
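/*
 * The vector table base must be aligned to a power of two no smaller than
 * the table itself: 16 system exceptions plus up to 112 IRQs fit in 128
 * words (512 bytes, .align 9); with more IRQs the next power of two,
 * 1024 bytes (.align 10), is used.
 */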
#if CONFIG_CPU_V7M_NUM_IRQ <= 112
	.align	9
#else
	.align	10
#endif

/*
 * Vector table (natural alignment needs to be ensured)
 */
ENTRY(vector_table)
	.long	0			@ 0 - Reset stack pointer
	.long	__invalid_entry		@ 1 - Reset
	.long	__invalid_entry		@ 2 - NMI
	.long	__invalid_entry		@ 3 - HardFault
	.long	__invalid_entry		@ 4 - MemManage
	.long	__invalid_entry		@ 5 - BusFault
	.long	__invalid_entry		@ 6 - UsageFault
	.long	__invalid_entry		@ 7 - Reserved
	.long	__invalid_entry		@ 8 - Reserved
	.long	__invalid_entry		@ 9 - Reserved
	.long	__invalid_entry		@ 10 - Reserved
	.long	vector_swi		@ 11 - SVCall
	.long	__invalid_entry		@ 12 - Debug Monitor
	.long	__invalid_entry		@ 13 - Reserved
	.long	__pendsv_entry		@ 14 - PendSV
	.long	__invalid_entry		@ 15 - SysTick
	.rept	CONFIG_CPU_V7M_NUM_IRQ
	.long	__irq_entry		@ External Interrupts
	.endr
	.align	2
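/*
 * Storage for the EXC_RETURN value used when returning to thread mode;
 * best-effort note: it is read by the exception exit path in
 * entry-header.S (see v7m_exception_slow_exit).
 */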
	.globl	exc_ret
exc_ret:
	.space	4