xref: /freebsd/sys/arm64/arm64/cpufunc_asm.S (revision 28f4385e45a2681c14bd04b83fe1796eaefe8265)
/*-
 * Copyright (c) 2014 Robin Randhawa
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/errno.h>
#include <machine/asm.h>
#include <machine/param.h>

#include "assym.inc"

__FBSDID("$FreeBSD$");

/*
 * FIXME:
 * Need big.LITTLE awareness at some point.
 * Using arm64_p[id]cache_line_size may not be the best option.
 * Need better SMP awareness.
 */
	.text
	.align	2

.Lpage_mask:
	.word	PAGE_MASK

/*
 * Macro to handle the cache. This takes the start address in x0 and the
 * length in x1. It will corrupt x0, x1, x2, x3, and x4.
 */
.macro cache_handle_range dcop = 0, ic = 0, icop = 0
.if \ic == 0
	ldr	x3, =dcache_line_size	/* Load the D cache line size */
.else
	ldr	x3, =idcache_line_size	/* Load the I & D cache line size */
.endif
	ldr	x3, [x3]
	sub	x4, x3, #1		/* Get the address mask */
	and	x2, x0, x4		/* Get the low bits of the address */
	add	x1, x1, x2		/* Add these to the size */
	bic	x0, x0, x4		/* Clear the low bits of the address */
1:
	dc	\dcop, x0
	dsb	ish
.if \ic != 0
	ic	\icop, x0
	dsb	ish
.endif
	add	x0, x0, x3		/* Move to the next line */
	subs	x1, x1, x3		/* Reduce the size */
	b.hi	1b			/* Check if we are done */
.if \ic != 0
	isb
.endif
.endm
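
/*
 * Illustrative only (a sketch, not part of the build): a maintenance
 * routine built on the macro above follows the same pattern as the
 * functions below, e.g. a hypothetical clean-by-VA helper:
 *
 *	ENTRY(arm64_dcache_wb_range_example)	(hypothetical name)
 *		cache_handle_range	dcop = cvac
 *		ret
 *	END(arm64_dcache_wb_range_example)
 *
 * The dcop/icop arguments name the dc/ic operations applied to each line
 * in the range.  dcache_line_size and idcache_line_size are assumed to
 * hold the smallest D and combined I/D cache line sizes, presumably
 * derived from CTR_EL0 during early boot.
 */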

ENTRY(arm64_nullop)
	ret
END(arm64_nullop)

/*
 * Generic functions to read/modify/write the system registers.
 */

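/*
 * void arm64_setttb(vm_offset_t)
 *
 * Install a new translation table base in ttbr0_el1.  The barriers ensure
 * prior memory accesses complete before the switch and that the write is
 * visible before any following instructions execute.  (Prototype assumed;
 * see machine/cpufunc.h.)
 */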
ENTRY(arm64_setttb)
	dsb	ish
	msr	ttbr0_el1, x0
	dsb	ish
	isb
	ret
END(arm64_setttb)

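/*
 * void arm64_tlb_flushID(void)
 *
 * Invalidate all stage 1 EL1 TLB entries; on SMP kernels the "is" form
 * broadcasts the invalidation to the Inner Shareable domain.  (Prototype
 * assumed; see machine/cpufunc.h.)
 */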
ENTRY(arm64_tlb_flushID)
#ifdef SMP
	tlbi	vmalle1is
#else
	tlbi	vmalle1
#endif
	dsb	ish
	isb
	ret
END(arm64_tlb_flushID)

/*
 * void arm64_dcache_wb_range(vm_offset_t, vm_size_t)
 */
ENTRY(arm64_dcache_wb_range)
	cache_handle_range	dcop = cvac
	ret
END(arm64_dcache_wb_range)

/*
 * void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t)
 */
ENTRY(arm64_dcache_wbinv_range)
	cache_handle_range	dcop = civac
	ret
END(arm64_dcache_wbinv_range)

/*
 * void arm64_dcache_inv_range(vm_offset_t, vm_size_t)
 *
 * Note that we must not invalidate the whole cache here, as that could
 * discard dirty data we do not own.  If the range is too big to walk by
 * VA we must use wb-inv of the entire cache instead.
 */
ENTRY(arm64_dcache_inv_range)
	cache_handle_range	dcop = ivac
	ret
END(arm64_dcache_inv_range)

/*
 * void arm64_idcache_wbinv_range(vm_offset_t, vm_size_t)
 */
ENTRY(arm64_idcache_wbinv_range)
	cache_handle_range	dcop = civac, ic = 1, icop = ivau
	ret
END(arm64_idcache_wbinv_range)

/*
 * void arm64_icache_sync_range(vm_offset_t, vm_size_t)
 */
ENTRY(arm64_icache_sync_range)
	/*
	 * XXX Temporary solution: the I-cache flush should be range-based
	 * for a PIPT cache, with IALLUIS reserved for VIVT or VIPT caches.
	 */
/*	cache_handle_range	dcop = cvau, ic = 1, icop = ivau */
	cache_handle_range	dcop = cvau
	ic	ialluis
	dsb	ish
	isb
	ret
END(arm64_icache_sync_range)

/*
 * int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t)
 */
ENTRY(arm64_icache_sync_range_checked)
	adr	x5, cache_maint_fault
	SET_FAULT_HANDLER(x5, x6)
	/* XXX: See comment in arm64_icache_sync_range */
	cache_handle_range	dcop = cvau
	ic	ialluis
	dsb	ish
	isb
	SET_FAULT_HANDLER(xzr, x6)
	mov	x0, #0
	ret
END(arm64_icache_sync_range_checked)

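/*
 * Fault handler installed by arm64_icache_sync_range_checked above: if a
 * fault is taken during the maintenance sequence, the handler is cleared
 * and EFAULT is returned to the caller instead of 0.
 */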
ENTRY(cache_maint_fault)
	SET_FAULT_HANDLER(xzr, x1)
	mov	x0, #EFAULT
	ret
END(cache_maint_fault)
181