/*
 * Coherency fabric: low level functions
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * This file implements the assembly function to add a CPU to the
 * coherency fabric. This function is called by each of the secondary
 * CPUs during their early boot in an SMP kernel, which is why it has
 * to be callable from assembly. It can also be called by the primary
 * CPU from C code during its boot.
 */
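
/*
 * C-side view of the entry points below, sketched here for
 * illustration only (the authoritative prototypes live with the C
 * callers):
 *
 *	void ll_add_cpu_to_smp_group(void);
 *	int  ll_enable_coherency(void);
 *	void ll_disable_coherency(void);
 *
 * ll_get_coherency_base() and ll_get_coherency_cpumask() return their
 * results in r1 and r3 rather than following the C calling
 * convention, so they are not meant to be called directly from C.
 */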

#include <linux/linkage.h>
#define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
#define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4

#include <asm/assembler.h>
#include <asm/cp15.h>

	.text
/*
 * Returns the coherency base address in r1 (r0 is untouched), or 0 if
 * the coherency fabric is not enabled.
 */
ENTRY(ll_get_coherency_base)
	mrc	p15, 0, r1, c1, c0, 0	@ Read SCTLR
	tst	r1, #CR_M		@ Check whether the MMU is enabled
	bne	1f

	/*
	 * MMU is disabled, use the physical address of the coherency
	 * base. However, if the coherency fabric isn't mapped (i.e.
	 * its virtual address is zero), it means coherency is not
	 * enabled, so we return 0.
	 */
	ldr	r1, =coherency_base
	cmp	r1, #0
	beq	2f
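	/*
	 * Load coherency_phys_base through a PC-relative offset: label
	 * 3 at the end of this file stores the distance from itself to
	 * coherency_phys_base, so the value can be read relative to
	 * the running code without relying on the MMU mappings.
	 */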
	adr	r1, 3f
	ldr	r3, [r1]
	ldr	r1, [r1, r3]
	b	2f
1:
	/*
	 * MMU is enabled, use the virtual address of the coherency
	 * base.
	 */
	ldr	r1, =coherency_base
	ldr	r1, [r1]
2:
	ret	lr
ENDPROC(ll_get_coherency_base)

/*
 * Returns the coherency CPU mask in r3 (r0 is untouched). This
 * coherency CPU mask can be used with the coherency fabric
 * configuration and control registers. Note that the mask is already
 * endian-swapped as appropriate so that the calling functions do not
 * have to care about endianness issues while accessing the coherency
 * fabric registers.
 */
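/*
 * For illustration: on the CPU whose MPIDR Aff0 field is 2, the code
 * below computes (1 << 24) << 2, i.e. bit 26 of the coherency fabric
 * configuration/control registers.
 */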
ENTRY(ll_get_coherency_cpumask)
	mrc	p15, 0, r3, c0, c0, 5	@ Read MPIDR
	and	r3, r3, #15		@ Extract the CPU ID from Aff0
	mov	r2, #(1 << 24)
	lsl	r3, r2, r3		@ r3 = 1 << (24 + CPU ID)
ARM_BE8(rev	r3, r3)
	ret	lr
ENDPROC(ll_get_coherency_cpumask)

/*
 * ll_add_cpu_to_smp_group(), ll_enable_coherency() and
 * ll_disable_coherency() use the ldrex/strex instructions, possibly
 * while the MMU is disabled. The Armada XP SoC has an exclusive
 * monitor that tracks transactions to Device and/or SO memory, so
 * exclusive transactions remain functional even when the MMU is
 * disabled.
 */

ENTRY(ll_add_cpu_to_smp_group)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency fabric is not enabled */
	cmp	r1, #0
	reteq	r0
	bl	ll_get_coherency_cpumask
	mov	lr, r0
	add	r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET
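	/*
	 * Atomically set this CPU's bit in the coherency fabric
	 * configuration register.
	 */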
1:
	ldrex	r2, [r0]
	orr	r2, r2, r3
	strex	r1, r2, [r0]
	cmp	r1, #0
	bne	1b
	ret	lr
ENDPROC(ll_add_cpu_to_smp_group)

ENTRY(ll_enable_coherency)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency fabric is not enabled */
	cmp	r1, #0
	reteq	r0
	bl	ll_get_coherency_cpumask
	mov	lr, r0
	add	r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
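	/*
	 * Atomically set this CPU's bit in the coherency fabric
	 * control register to enable coherency for this CPU.
	 */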
1:
	ldrex	r2, [r0]
	orr	r2, r2, r3
	strex	r1, r2, [r0]
	cmp	r1, #0
	bne	1b
	dsb
	mov	r0, #0
	ret	lr
ENDPROC(ll_enable_coherency)

ENTRY(ll_disable_coherency)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency fabric is not enabled */
	cmp	r1, #0
	reteq	r0
	bl	ll_get_coherency_cpumask
	mov	lr, r0
	add	r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
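	/*
	 * Atomically clear this CPU's bit in the coherency fabric
	 * control register.
	 */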
1:
	ldrex	r2, [r0]
	bic	r2, r2, r3
	strex	r1, r2, [r0]
	cmp	r1, #0
	bne	1b
	dsb
	ret	lr
ENDPROC(ll_disable_coherency)

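/*
 * PC-relative offset to coherency_phys_base, used by
 * ll_get_coherency_base() when the MMU is off.
 */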
	.align 2
3:
	.long	coherency_phys_base - .