/*
 * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * Refer to Documentation/arm/cluster-pm-race-avoidance.txt
 * for details of the synchronisation algorithms used here.
 */

#include <linux/linkage.h>
#include <asm/mcpm.h>
#include <asm/assembler.h>

#include "vlock.h"

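@ The code below addresses each CPU's sync byte as r8 + cpu * MCPM_SYNC_CPU_SIZE
@ with no further offset, which is only correct if the cpus[] array sits at
@ offset 0 of struct mcpm_sync_struct; assert that at build time.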
.if MCPM_SYNC_CLUSTER_CPUS
.error "cpus must be the first member of struct mcpm_sync_struct"
.endif

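	@ pr_dbg prints "CPU <r9> cluster <r10>: <string>" through the
	@ DEBUG_LL helpers printascii/printhex2.  It compiles to nothing
	@ unless both CONFIG_DEBUG_LL and DEBUG are defined, and it clobbers
	@ at least r0 and lr, so it is only usable where those registers
	@ hold no live values.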
	.macro	pr_dbg	string
#if defined(CONFIG_DEBUG_LL) && defined(DEBUG)
	b	1901f
1902:	.asciz	"CPU"
1903:	.asciz	" cluster"
1904:	.asciz	": \string"
	.align
1901:	adr	r0, 1902b
	bl	printascii
	mov	r0, r9
	bl	printhex2
	adr	r0, 1903b
	bl	printascii
	mov	r0, r10
	bl	printhex2
	adr	r0, 1904b
	bl	printascii
#endif
	.endm

	.arm
	.align

ENTRY(mcpm_entry_point)

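	@ CPUs are sent here, out of reset or from a low power state, with the
	@ MMU and caches off; everything below is therefore position
	@ independent and makes no assumption about prior kernel state on this
	@ CPU.  The ARM_BE8/THUMB lines switch the CPU into the endianness and
	@ instruction set state the kernel was built for.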
 ARM_BE8(setend        be)
 THUMB(	adr	r12, BSYM(1f)	)
 THUMB(	bx	r12		)
 THUMB(	.thumb			)
1:
	mrc	p15, 0, r0, c0, c0, 5		@ MPIDR
	ubfx	r9, r0, #0, #8			@ r9 = cpu
	ubfx	r10, r0, #8, #8			@ r10 = cluster
	mov	r3, #MAX_CPUS_PER_CLUSTER
	mla	r4, r3, r10, r9			@ r4 = canonical CPU index
	cmp	r4, #(MAX_CPUS_PER_CLUSTER * MAX_NR_CLUSTERS)
	blo	2f

	/* We didn't expect this CPU.  Try to cheaply make it quiet. */
1:	wfi
	wfe
	b	1b

2:	pr_dbg	"kernel mcpm_entry_point\n"

	/*
	 * MMU is off so we need to get to various variables in a
	 * position independent way.
	 */
	adr	r5, 3f
	ldmia	r5, {r0, r6, r7, r8, r11}
	add	r0, r5, r0			@ r0 = mcpm_entry_early_pokes
	add	r6, r5, r6			@ r6 = mcpm_entry_vectors
	ldr	r7, [r5, r7]			@ r7 = mcpm_power_up_setup_phys
	add	r8, r5, r8			@ r8 = mcpm_sync
	add	r11, r5, r11			@ r11 = first_man_locks

	@ Perform an early poke, if any
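	@ Each poke slot holds an { address, value } pair, presumably
	@ installed by the platform through mcpm_set_early_poke(), so that
	@ e.g. a mailbox can be written or a resource released as early as
	@ possible, before any of the synchronisation below.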
	add	r0, r0, r4, lsl #3
	ldmia	r0, {r0, r1}
	teq	r0, #0
	strne	r1, [r0]

	mov	r0, #MCPM_SYNC_CLUSTER_SIZE
	mla	r8, r0, r10, r8			@ r8 = sync cluster base

	@ Signal that this CPU is coming UP:
	mov	r0, #CPU_COMING_UP
	mov	r5, #MCPM_SYNC_CPU_SIZE
	mla	r5, r9, r5, r8			@ r5 = sync cpu address
	strb	r0, [r5]

	@ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
	@ state, because there is at least one active CPU (this CPU).

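	@ Try to take this cluster's "first man" lock.  vlock_trylock is the
	@ voting lock from vlock.S (see Documentation/arm/vlocks.txt), built
	@ on plain byte accesses so it works while this CPU's cache is still
	@ off; it returns 0 in r0 when the lock was taken.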
	mov	r0, #VLOCK_SIZE
	mla	r11, r0, r10, r11		@ r11 = cluster first man lock
	mov	r0, r11
	mov	r1, r9				@ cpu
	bl	vlock_trylock			@ implies DMB

	cmp	r0, #0				@ failed to get the lock?
	bne	mcpm_setup_wait		@ wait for cluster setup if so

	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	cmp	r0, #CLUSTER_UP			@ cluster already up?
	bne	mcpm_setup			@ if not, set up the cluster

	@ Otherwise, release the first man lock and skip setup:
	mov	r0, r11
	bl	vlock_unlock
	b	mcpm_setup_complete

mcpm_setup:
	@ Control dependency implies strb not observable before previous ldrb.

	@ Signal that the cluster is being brought up:
	mov	r0, #INBOUND_COMING_UP
	strb	r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
	dmb

	@ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this
	@ point onwards will observe INBOUND_COMING_UP and abort.

	@ Wait for any previously-pending cluster teardown operations to abort
	@ or complete:
mcpm_teardown_wait:
	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	cmp	r0, #CLUSTER_GOING_DOWN
	bne	first_man_setup
	wfe
	b	mcpm_teardown_wait
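	@ The outbound side resolves CLUSTER_GOING_DOWN to either CLUSTER_DOWN
	@ or back to CLUSTER_UP and then issues sev (see
	@ __mcpm_outbound_leave_critical() on the C side), which wakes the wfe
	@ above so the new state can be re-examined.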

first_man_setup:
	dmb

	@ If the outbound gave up before teardown started, skip cluster setup:

	cmp	r0, #CLUSTER_UP
	beq	mcpm_setup_leave

	@ power_up_setup is now responsible for setting up the cluster:

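	@ The hook is called here with r0 = 1, the cluster affinity level,
	@ where the platform typically (re)enables cluster-wide coherency
	@ (e.g. CCI snoops) before the cluster is declared CLUSTER_UP below.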
	cmp	r7, #0
	mov	r0, #1		@ second (cluster) affinity level
	blxne	r7		@ Call power_up_setup if defined
	dmb

	mov	r0, #CLUSTER_UP
	strb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	dmb

mcpm_setup_leave:
	@ Leave the cluster setup critical section:

	mov	r0, #INBOUND_NOT_COMING_UP
	strb	r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
	dsb	st
	sev
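	@ The dsb makes the INBOUND_NOT_COMING_UP store visible before the
	@ sev, which wakes any CPU currently wfe-ing on this cluster's sync
	@ state.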

	mov	r0, r11
	bl	vlock_unlock	@ implies DMB
	b	mcpm_setup_complete

	@ In the contended case, non-first men wait here for cluster setup
	@ to complete:
mcpm_setup_wait:
	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
	cmp	r0, #CLUSTER_UP
	wfene
	bne	mcpm_setup_wait
	dmb

mcpm_setup_complete:
	@ If a platform-specific CPU setup hook is needed, it is
	@ called from here.

	cmp	r7, #0
	mov	r0, #0		@ first (CPU) affinity level
	blxne	r7		@ Call power_up_setup if defined
	dmb

	@ Mark the CPU as up:

	mov	r0, #CPU_UP
	strb	r0, [r5]

	@ Observability order of CPU_UP and opening of the gate does not matter.

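	@ The gate: spin (throttled by wfe) until the C side publishes a
	@ non-NULL physical entry address for this CPU, presumably through
	@ mcpm_set_entry_vector(), then branch to it.  The waker is expected
	@ to follow the vector update with an event or wakeup IPI.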
mcpm_entry_gated:
	ldr	r5, [r6, r4, lsl #2]		@ r5 = CPU entry vector
	cmp	r5, #0
	wfeeq
	beq	mcpm_entry_gated
	dmb

	pr_dbg	"released\n"
	bx	r5

	.align	2

3:	.word	mcpm_entry_early_pokes - .
	.word	mcpm_entry_vectors - 3b
	.word	mcpm_power_up_setup_phys - 3b
	.word	mcpm_sync - 3b
	.word	first_man_locks - 3b
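	@ The table above stores link-time offsets relative to itself (label
	@ 3), so the adr/add sequence near the top of mcpm_entry_point can
	@ recover the absolute addresses with the MMU off.
	@ mcpm_power_up_setup_phys is special: its entry locates the variable,
	@ whose value (filled in by mcpm_sync_init()) is then fetched with ldr.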

ENDPROC(mcpm_entry_point)

	.bss

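	@ first_man_locks is written by CPUs whose caches may be off, so it is
	@ isolated with .align CACHE_WRITEBACK_ORDER on both sides, presumably
	@ so that cache maintenance on these addresses cannot touch unrelated
	@ data sharing the same writeback granule.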
	.align	CACHE_WRITEBACK_ORDER
	.type	first_man_locks, #object
first_man_locks:
	.space	VLOCK_SIZE * MAX_NR_CLUSTERS
	.align	CACHE_WRITEBACK_ORDER

	.type	mcpm_entry_vectors, #object
ENTRY(mcpm_entry_vectors)
	.space	4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER

	.type	mcpm_entry_early_pokes, #object
ENTRY(mcpm_entry_early_pokes)
	.space	8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER

	.type	mcpm_power_up_setup_phys, #object
ENTRY(mcpm_power_up_setup_phys)
	.space  4		@ set by mcpm_sync_init()
234