/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * Refer to Documentation/arm/cluster-pm-race-avoidance.rst
 * for details of the synchronisation algorithms used here.
 */

#include <linux/linkage.h>
#include <asm/mcpm.h>
#include <asm/assembler.h>

#include "vlock.h"

.if MCPM_SYNC_CLUSTER_CPUS
.error "cpus must be the first member of struct mcpm_sync_struct"
.endif

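/*
 * pr_dbg: print "CPU <r9> cluster <r10>: <string>" through the DEBUG_LL
 * printascii/printhex2 helpers.  It expands to nothing unless both
 * CONFIG_DEBUG_LL and DEBUG are defined; when it does expand, r0 and lr
 * are clobbered.
 */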
	.macro	pr_dbg	string
#if defined(CONFIG_DEBUG_LL) && defined(DEBUG)
	b	1901f
1902:	.asciz	"CPU"
1903:	.asciz	" cluster"
1904:	.asciz	": \string"
	.align
1901:	adr	r0, 1902b
	bl	printascii
	mov	r0, r9
	bl	printhex2
	adr	r0, 1903b
	bl	printascii
	mov	r0, r10
	bl	printhex2
	adr	r0, 1904b
	bl	printascii
#endif
	.endm

	.arm
	.align

ENTRY(mcpm_entry_point)

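	@ Entered in ARM state with the MMU off.  Switch data accesses to
	@ big-endian if this is a BE8 kernel, and drop into Thumb state if
	@ the kernel is built for Thumb-2, before doing anything else.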
 ARM_BE8(setend        be)
 THUMB(	badr	r12, 1f		)
 THUMB(	bx	r12		)
 THUMB(	.thumb			)
1:
	mrc	p15, 0, r0, c0, c0, 5		@ MPIDR
	ubfx	r9, r0, #0, #8			@ r9 = cpu
	ubfx	r10, r0, #8, #8			@ r10 = cluster
	mov	r3, #MAX_CPUS_PER_CLUSTER
	mla	r4, r3, r10, r9			@ r4 = canonical CPU index
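	@ i.e. r4 = cluster * MAX_CPUS_PER_CLUSTER + cpu, the index used
	@ below to select this CPU's entry vector and early poke slots.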
	cmp	r4, #(MAX_CPUS_PER_CLUSTER * MAX_NR_CLUSTERS)
	blo	2f

	/* We didn't expect this CPU.  Try to cheaply make it quiet. */
1:	wfi
	wfe
	b	1b

2:	pr_dbg	"kernel mcpm_entry_point\n"

	/*
	 * MMU is off so we need to get to various variables in a
	 * position independent way.
	 */
	adr	r5, 3f
	ldmia	r5, {r0, r6, r7, r8, r11}
	add	r0, r5, r0			@ r0 = mcpm_entry_early_pokes
	add	r6, r5, r6			@ r6 = mcpm_entry_vectors
	ldr	r7, [r5, r7]			@ r7 = mcpm_power_up_setup_phys
	add	r8, r5, r8			@ r8 = mcpm_sync
	add	r11, r5, r11			@ r11 = first_man_locks

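	@ Each CPU has an 8-byte slot in mcpm_entry_early_pokes holding an
	@ optional (address, value) pair; if the address is non-zero, the
	@ value is written to it here, before any synchronisation is done.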
	@ Perform an early poke, if any
	add	r0, r0, r4, lsl #3
	ldmia	r0, {r0, r1}
	teq	r0, #0
	strne	r1, [r0]

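	@ mcpm_sync is an array of struct mcpm_sync_struct, one per cluster,
	@ with the per-CPU states first (as asserted at the top of this file)
	@ followed by the cluster-wide "cluster" and "inbound" states.
	@ Index it by cluster number to get this cluster's block.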
	mov	r0, #MCPM_SYNC_CLUSTER_SIZE
	mla	r8, r0, r10, r8			@ r8 = sync cluster base

	@ Signal that this CPU is coming UP:
	mov	r0, #CPU_COMING_UP
	mov	r5, #MCPM_SYNC_CPU_SIZE
	mla	r5, r9, r5, r8			@ r5 = sync cpu address
	strb	r0, [r5]

	@ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
	@ state, because there is at least one active CPU (this CPU).

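	@ Try to become the cluster's "first man": one inbound CPU per
	@ cluster wins the voting lock and performs the cluster-level setup
	@ below, while the others wait at mcpm_setup_wait.  See vlock.S and
	@ the race avoidance document referenced above.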
	mov	r0, #VLOCK_SIZE
	mla	r11, r0, r10, r11		@ r11 = cluster first man lock
	mov	r0, r11
	mov	r1, r9				@ cpu
	bl	vlock_trylock			@ implies DMB

	cmp	r0, #0				@ failed to get the lock?
	bne	mcpm_setup_wait		@ wait for cluster setup if so

	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	cmp	r0, #CLUSTER_UP			@ cluster already up?
	bne	mcpm_setup			@ if not, set up the cluster

	@ Otherwise, release the first man lock and skip setup:
	mov	r0, r11
	bl	vlock_unlock
	b	mcpm_setup_complete

mcpm_setup:
	@ Control dependency implies strb not observable before previous ldrb.

	@ Signal that the cluster is being brought up:
	mov	r0, #INBOUND_COMING_UP
	strb	r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
	dmb

	@ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this
	@ point onwards will observe INBOUND_COMING_UP and abort.

	@ Wait for any previously-pending cluster teardown operations to abort
	@ or complete:
mcpm_teardown_wait:
	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	cmp	r0, #CLUSTER_GOING_DOWN
	bne	first_man_setup
	wfe
	b	mcpm_teardown_wait

first_man_setup:
	dmb

	@ If the outbound gave up before teardown started, skip cluster setup:

	cmp	r0, #CLUSTER_UP
	beq	mcpm_setup_leave

	@ power_up_setup is now responsible for setting up the cluster:

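	@ r7 holds the physical address of the platform's power_up_setup
	@ handler (stored in mcpm_power_up_setup_phys by mcpm_sync_init()),
	@ and r0 selects the affinity level: 1 for cluster-level work here,
	@ 0 for CPU-level work further down.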
	cmp	r7, #0
	mov	r0, #1		@ second (cluster) affinity level
	blxne	r7		@ Call power_up_setup if defined
	dmb

	mov	r0, #CLUSTER_UP
	strb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	dmb

mcpm_setup_leave:
	@ Leave the cluster setup critical section:

	mov	r0, #INBOUND_NOT_COMING_UP
	strb	r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
	dsb	st
	sev

	mov	r0, r11
	bl	vlock_unlock	@ implies DMB
	b	mcpm_setup_complete

	@ In the contended case, non-first men wait here for cluster setup
	@ to complete:
mcpm_setup_wait:
	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	cmp	r0, #CLUSTER_UP
	wfene
	bne	mcpm_setup_wait
	dmb

mcpm_setup_complete:
	@ If a platform-specific CPU setup hook is needed, it is
	@ called from here.

	cmp	r7, #0
	mov	r0, #0		@ first (CPU) affinity level
	blxne	r7		@ Call power_up_setup if defined
	dmb

	@ Mark the CPU as up:

	mov	r0, #CPU_UP
	strb	r0, [r5]

	@ Observability order of CPU_UP and opening of the gate does not matter.

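	@ Spin (sleeping in wfe) until a non-zero address appears in this
	@ CPU's slot of mcpm_entry_vectors, then branch to it.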
mcpm_entry_gated:
	ldr	r5, [r6, r4, lsl #2]		@ r5 = CPU entry vector
	cmp	r5, #0
	wfeeq
	beq	mcpm_entry_gated
	dmb

	pr_dbg	"released\n"
	bx	r5

	.align	2

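	@ Offsets of the objects this code needs, relative to the 3: anchor,
	@ so their addresses can be formed position-independently with the
	@ MMU off.  The mcpm_power_up_setup_phys entry is used as an ldr
	@ offset above, since that variable itself holds a pointer.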
3:	.word	mcpm_entry_early_pokes - .
	.word	mcpm_entry_vectors - 3b
	.word	mcpm_power_up_setup_phys - 3b
	.word	mcpm_sync - 3b
	.word	first_man_locks - 3b

ENDPROC(mcpm_entry_point)

	.bss

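	@ first_man_locks is aligned and padded out to whole cache writeback
	@ granules so that cache maintenance on a lock cannot corrupt the
	@ unrelated data around it; the entry vectors, early pokes and
	@ power_up_setup pointer that follow are the variables accessed
	@ above with the MMU off.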
	.align	CACHE_WRITEBACK_ORDER
	.type	first_man_locks, #object
first_man_locks:
	.space	VLOCK_SIZE * MAX_NR_CLUSTERS
	.align	CACHE_WRITEBACK_ORDER

	.type	mcpm_entry_vectors, #object
ENTRY(mcpm_entry_vectors)
	.space	4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER

	.type	mcpm_entry_early_pokes, #object
ENTRY(mcpm_entry_early_pokes)
	.space	8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER

	.type	mcpm_power_up_setup_phys, #object
ENTRY(mcpm_power_up_setup_phys)
	.space  4		@ set by mcpm_sync_init()