/*-
 * Copyright (c) 2014 Robin Randhawa
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/elf_common.h>
#include <sys/errno.h>

#include <machine/asm.h>
#include <machine/param.h>

#include "assym.inc"
/*
 * FIXME:
 * Need big.LITTLE awareness at some point.
 * Using arm64_p[id]cache_line_size may not be the best option.
 * Need better SMP awareness.
 */
	.text
	.align	2

.Lpage_mask:
	.word	PAGE_MASK

/*
 * Macro to handle the cache. This takes the start address in x0 and the
 * length in x1. It will corrupt x0, x1, x2, x3, and x4.
 */
.macro cache_handle_range dcop = 0, ic = 0, icop = 0
.if \ic == 0
	adrp	x3, dcache_line_size	/* Load the D cache line size */
	ldr	x3, [x3, :lo12:dcache_line_size]
.else
	adrp	x3, idcache_line_size	/* Load the I & D cache line size */
	ldr	x3, [x3, :lo12:idcache_line_size]
.endif
	sub	x4, x3, #1		/* Get the address mask */
	and	x2, x0, x4		/* Get the low bits of the address */
	add	x1, x1, x2		/* Add these to the size */
	bic	x0, x0, x4		/* Clear the low bits of the address */
.if \ic != 0
	mov	x2, x0			/* Save the address */
	mov	x4, x1			/* Save the size */
.endif
1:
	dc	\dcop, x0
	add	x0, x0, x3		/* Move to the next line */
	subs	x1, x1, x3		/* Reduce the size */
	b.hi	1b			/* Loop if we are not done */
	dsb	ish
.if \ic != 0
2:
	ic	\icop, x2
	add	x2, x2, x3		/* Move to the next line */
	subs	x4, x4, x3		/* Reduce the size */
	b.hi	2b			/* Loop if we are not done */
	dsb	ish
	isb
.endif
.endm
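
/*
 * Usage sketch (illustrative only, mirroring the callers below): with the
 * start address in x0 and the length in x1,
 *
 *	cache_handle_range	dcop = cvac
 *
 * cleans the range to the point of coherency, while
 *
 *	cache_handle_range	dcop = cvau, ic = 1, icop = ivau
 *
 * would also invalidate the corresponding I-cache lines.
 */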

ENTRY(arm64_nullop)
	ret
END(arm64_nullop)

/*
 * Generic functions to read/modify/write the internal coprocessor registers
 */

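/*
 * void arm64_tlb_flushID(void)
 *
 * Flush the entire TLB: the SMP kernel broadcasts the invalidate to the
 * Inner Shareable domain, the UP kernel only invalidates the local CPU.
 */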
ENTRY(arm64_tlb_flushID)
	dsb	ishst
#ifdef SMP
	tlbi	vmalle1is
#else
	tlbi	vmalle1
#endif
	dsb	ish
	isb
	ret
END(arm64_tlb_flushID)

/*
 * void arm64_dcache_wb_range(void *, vm_size_t)
 */
ENTRY(arm64_dcache_wb_range)
	cache_handle_range	dcop = cvac
	ret
END(arm64_dcache_wb_range)

/*
 * void arm64_dcache_wbinv_range(void *, vm_size_t)
 */
ENTRY(arm64_dcache_wbinv_range)
	cache_handle_range	dcop = civac
	ret
END(arm64_dcache_wbinv_range)

/*
 * void arm64_dcache_inv_range(void *, vm_size_t)
 *
 * Note: we must not invalidate everything, as that would discard dirty
 * data. If the range is too big, we must use a wb-inv of the entire
 * cache instead.
 */
ENTRY(arm64_dcache_inv_range)
	cache_handle_range	dcop = ivac
	ret
END(arm64_dcache_inv_range)

/*
 * void arm64_dic_idc_icache_sync_range(void *, vm_size_t)
 * When the CTR_EL0.IDC bit is set, cleaning to the PoU becomes a dsb.
 * When the CTR_EL0.DIC bit is set, icache invalidation becomes an isb.
 */
ENTRY(arm64_dic_idc_icache_sync_range)
	dsb	ishst
	isb
	ret
END(arm64_dic_idc_icache_sync_range)

/*
 * void arm64_idc_aliasing_icache_sync_range(void *, vm_size_t)
 * When the CTR_EL0.IDC bit is set, cleaning to the PoU becomes a dsb.
 */
ENTRY(arm64_idc_aliasing_icache_sync_range)
	dsb	ishst
	ic	ialluis
	dsb	ish
	isb
	ret
END(arm64_idc_aliasing_icache_sync_range)

/*
 * void arm64_aliasing_icache_sync_range(void *, vm_size_t)
 */
ENTRY(arm64_aliasing_icache_sync_range)
	/*
	 * XXX Temporary solution: the I-cache flush should be range-based
	 * for a PIPT cache, or use IALLUIS for VIVT or VIPT caches.
	 */
/*	cache_handle_range	dcop = cvau, ic = 1, icop = ivau */
	cache_handle_range	dcop = cvau
	ic	ialluis
	dsb	ish
	isb
	ret
END(arm64_aliasing_icache_sync_range)

/*
 * int arm64_icache_sync_range_checked(void *, vm_size_t)
 */
ENTRY(arm64_icache_sync_range_checked)
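	/*
	 * Install cache_maint_fault as the fault handler so that a bad
	 * address in the maintenance sequence below makes this function
	 * return EFAULT rather than taking an unhandled data abort.
	 */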
	adr	x5, cache_maint_fault
	SET_FAULT_HANDLER(x5, x6)
	/* XXX: See the comment in arm64_aliasing_icache_sync_range */
	cache_handle_range	dcop = cvau
	ic	ialluis
	dsb	ish
	isb
	SET_FAULT_HANDLER(xzr, x6)
	mov	x0, #0
	ret
END(arm64_icache_sync_range_checked)

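/*
 * Fault landing pad for arm64_icache_sync_range_checked above: installed
 * via SET_FAULT_HANDLER, it clears the handler again and returns EFAULT.
 */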
ENTRY(cache_maint_fault)
	SET_FAULT_HANDLER(xzr, x1)
	mov	x0, #EFAULT
	ret
END(cache_maint_fault)

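/*
 * Emit the AArch64 GNU property note for this object, advertising the
 * feature bits in GNU_PROPERTY_AARCH64_FEATURE_1_VAL (e.g. BTI).
 */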
GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)