xref: /freebsd/sys/arm64/arm64/cpufunc_asm.S (revision 5f757f3ff9144b609b3c433dfd370cc6bdc191ad)
/*-
 * Copyright (c) 2014 Robin Randhawa
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/errno.h>
#include <machine/asm.h>
#include <machine/param.h>

#include "assym.inc"
/*
 * FIXME:
 * Need big.LITTLE awareness at some point.
 * Using arm64_p[id]cache_line_size may not be the best option.
 * Need better SMP awareness.
 */
	.text
	.align	2

.Lpage_mask:
	.word	PAGE_MASK

/*
 * Macro to handle a cache maintenance operation over a range.  It takes
 * the start address in x0 and the length in x1, and clobbers x0, x1, x2,
 * x3, and x4.  "dcop" is the dc (data cache) operation applied by VA;
 * when "ic" is non-zero, the "icop" ic (instruction cache) operation is
 * applied to the same range as well.
 */
.macro cache_handle_range dcop = 0, ic = 0, icop = 0
.if \ic == 0
	adrp	x3, dcache_line_size	/* Load the D cache line size */
	ldr	x3, [x3, :lo12:dcache_line_size]
.else
	adrp	x3, idcache_line_size	/* Load the I & D cache line size */
	ldr	x3, [x3, :lo12:idcache_line_size]
.endif
	sub	x4, x3, #1		/* Get the address mask */
	and	x2, x0, x4		/* Get the low bits of the address */
	add	x1, x1, x2		/* Add these to the size */
	bic	x0, x0, x4		/* Clear the low bits of the address */
.if \ic != 0
	mov	x2, x0			/* Save the address */
	mov	x4, x1			/* Save the size */
.endif
1:
	dc	\dcop, x0
	add	x0, x0, x3		/* Move to the next line */
	subs	x1, x1, x3		/* Reduce the size */
	b.hi	1b			/* Check if we are done */
	dsb	ish
.if \ic != 0
2:
	ic	\icop, x2
	add	x2, x2, x3		/* Move to the next line */
	subs	x4, x4, x3		/* Reduce the size */
	b.hi	2b			/* Check if we are done */
	dsb	ish
	isb
.endif
.endm
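
/*
 * Example invocation (a sketch mirroring the callers below): clean the
 * D-cache to the PoU and invalidate the I-cache by VA over the range
 * given in x0/x1:
 *
 *	cache_handle_range	dcop = cvau, ic = 1, icop = ivau
 */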

ENTRY(arm64_nullop)
	ret
END(arm64_nullop)

/*
 * Generic functions to read/modify/write the internal coprocessor registers
 */

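/*
 * arm64_tlb_flushID: invalidate all stage 1, EL1 TLB entries.  On SMP
 * kernels the invalidation is broadcast to the inner shareable domain
 * (tlbi vmalle1is); otherwise only the local TLB is invalidated.
 */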
ENTRY(arm64_tlb_flushID)
	dsb	ishst
#ifdef SMP
	tlbi	vmalle1is
#else
	tlbi	vmalle1
#endif
	dsb	ish
	isb
	ret
END(arm64_tlb_flushID)

/*
 * void arm64_dcache_wb_range(void *, vm_size_t)
 *
 * Write back (clean) the D-cache by VA over the given range (dc cvac).
 */
ENTRY(arm64_dcache_wb_range)
	cache_handle_range	dcop = cvac
	ret
END(arm64_dcache_wb_range)

/*
 * void arm64_dcache_wbinv_range(void *, vm_size_t)
 *
 * Write back and invalidate the D-cache by VA over the given range
 * (dc civac).
 */
ENTRY(arm64_dcache_wbinv_range)
	cache_handle_range	dcop = civac
	ret
END(arm64_dcache_wbinv_range)

/*
 * void arm64_dcache_inv_range(void *, vm_size_t)
 *
 * Invalidate the D-cache by VA over the given range (dc ivac).
 *
 * Note: we must not invalidate everything; if the range is too big we
 * must use a write-back and invalidate of the entire cache instead.
 */
ENTRY(arm64_dcache_inv_range)
	cache_handle_range	dcop = ivac
	ret
END(arm64_dcache_inv_range)

/*
 * void arm64_dic_idc_icache_sync_range(void *, vm_size_t)
 *
 * When the CTR_EL0.IDC bit is set, cleaning to the PoU becomes a dsb.
 * When the CTR_EL0.DIC bit is set, icache invalidation becomes an isb.
 * With both bits set, a dsb followed by an isb is therefore sufficient.
 */
ENTRY(arm64_dic_idc_icache_sync_range)
	dsb	ishst
	isb
	ret
END(arm64_dic_idc_icache_sync_range)

/*
 * void arm64_idc_aliasing_icache_sync_range(void *, vm_size_t)
 *
 * When the CTR_EL0.IDC bit is set, cleaning to the PoU becomes a dsb, so
 * only the aliasing I-cache still needs to be invalidated (ic ialluis).
 */
ENTRY(arm64_idc_aliasing_icache_sync_range)
	dsb	ishst
	ic	ialluis
	dsb	ish
	isb
	ret
END(arm64_idc_aliasing_icache_sync_range)

/*
 * void arm64_aliasing_icache_sync_range(void *, vm_size_t)
 */
ENTRY(arm64_aliasing_icache_sync_range)
	/*
	 * XXX Temporary solution - the I-cache flush should be range-based
	 * for a PIPT cache, or IALLUIS for VIVT or VIPT caches.
	 */
/*	cache_handle_range	dcop = cvau, ic = 1, icop = ivau */
	cache_handle_range	dcop = cvau
	ic	ialluis
	dsb	ish
	isb
	ret
END(arm64_aliasing_icache_sync_range)

/*
 * int arm64_icache_sync_range_checked(void *, vm_size_t)
 *
 * As arm64_aliasing_icache_sync_range, but returns 0 on success or EFAULT
 * if a fault occurs during the cache maintenance.
 */
ENTRY(arm64_icache_sync_range_checked)
	adr	x5, cache_maint_fault
	SET_FAULT_HANDLER(x5, x6)
	/* XXX: See the comment in arm64_aliasing_icache_sync_range */
	cache_handle_range	dcop = cvau
	ic	ialluis
	dsb	ish
	isb
	SET_FAULT_HANDLER(xzr, x6)
	mov	x0, #0
	ret
END(arm64_icache_sync_range_checked)

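/*
 * cache_maint_fault: fault handler installed by
 * arm64_icache_sync_range_checked via SET_FAULT_HANDLER.  If the cache
 * maintenance above faults, control is transferred here; the handler is
 * cleared and EFAULT is returned to the caller.
 */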
ENTRY(cache_maint_fault)
	SET_FAULT_HANDLER(xzr, x1)
	mov	x0, #EFAULT
	ret
END(cache_maint_fault)
193