/*-
 * Copyright (c) 2014 Robin Randhawa
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <machine/asm.h>
#include <machine/param.h>
__FBSDID("$FreeBSD$");

/*
 * FIXME:
 * Need big.LITTLE awareness at some point.
 * Using arm64_p[id]cache_line_size may not be the best option.
 * Need better SMP awareness.
 */
	.text
	.align	2

.Lpage_mask:
	.word	PAGE_MASK

/*
 * Macro to handle the cache. This takes the start address in x0 and the
 * length in x1. It will corrupt x0, x1, x2, x3, and x4.
 */
.macro cache_handle_range dcop = 0, ic = 0, icop = 0
.if \ic == 0
	ldr	x3, =dcache_line_size	/* Load the D cache line size */
.else
	ldr	x3, =idcache_line_size	/* Load the I & D cache line size */
.endif
	ldr	x3, [x3]
	sub	x4, x3, #1		/* Get the address mask */
	and	x2, x0, x4		/* Get the low bits of the address */
	add	x1, x1, x2		/* Add these to the size */
	bic	x0, x0, x4		/* Clear the low bits of the address */
1:
.if \ic != 0
	ic	\icop, x0
.endif
	dc	\dcop, x0
	add	x0, x0, x3		/* Move to the next line */
	subs	x1, x1, x3		/* Reduce the size */
	b.hi	1b			/* Check if we are done */
.if \ic != 0
	isb
.endif
	dsb	ish
	ret
.endm
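
/*
 * Worked example (illustrative only): with a 64 byte line size, a call
 * with x0 = 0x10043 and x1 = 0x80 computes a mask of 0x3f in x4, so
 * x2 = 0x03, the length in x1 grows to 0x83 and x0 is rounded down to
 * 0x10040.  The loop then operates on the lines at 0x10040, 0x10080 and
 * 0x100c0 before the count drops below one line and b.hi falls through,
 * which covers every byte of the requested range.
 */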

ENTRY(arm64_nullop)
	ret
END(arm64_nullop)

/*
 * Generic functions to read/modify/write the internal system registers.
 */

ENTRY(arm64_setttb)
	dsb	ish			/* Complete outstanding table updates */
	msr	ttbr0_el1, x0		/* Install the new translation table base */
	dsb	ish
	isb				/* Synchronize the new context */
	ret
END(arm64_setttb)

ENTRY(arm64_tlb_flushID)
#ifdef SMP
	tlbi	vmalle1is		/* Invalidate all entries, Inner Shareable */
#else
	tlbi	vmalle1			/* Invalidate all entries, local core only */
#endif
	dsb	ish
	isb
	ret
END(arm64_tlb_flushID)

ENTRY(arm64_tlb_flushID_SE)
	ldr	x1, .Lpage_mask
	bic	x0, x0, x1		/* Mask off the page offset */
#ifdef SMP
	tlbi	vae1is, x0		/* Invalidate the entry, Inner Shareable */
#else
	tlbi	vae1, x0		/* Invalidate the entry, local core only */
#endif
	dsb	ish
	isb
	ret
END(arm64_tlb_flushID_SE)

/*
 * void arm64_dcache_wb_range(vm_offset_t, vm_size_t)
 */
ENTRY(arm64_dcache_wb_range)
	cache_handle_range	dcop = cvac
END(arm64_dcache_wb_range)
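
/*
 * Illustrative sketch only, not part of the original file.  A
 * hypothetical C caller that has just filled a buffer and needs the data
 * written out to memory could use the routine above roughly as follows
 * ("buf" and "len" are invented names for the example):
 *
 *	static void
 *	example_publish(void *buf, size_t len)
 *	{
 *		arm64_dcache_wb_range((vm_offset_t)buf, (vm_size_t)len);
 *	}
 */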

/*
 * void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t)
 */
ENTRY(arm64_dcache_wbinv_range)
	cache_handle_range	dcop = civac
END(arm64_dcache_wbinv_range)

/*
 * void arm64_dcache_inv_range(vm_offset_t, vm_size_t)
 *
 * Note, we must not fall back to invalidating the entire cache: that
 * would discard dirty lines belonging to unrelated data.  If the range
 * is too big we must use a wb-inv of the entire cache instead.
 */
ENTRY(arm64_dcache_inv_range)
	cache_handle_range	dcop = ivac
END(arm64_dcache_inv_range)
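
/*
 * Illustrative sketch only, not part of the original file.  A
 * hypothetical C caller about to read a buffer that a device has just
 * written would discard any stale cached copies of exactly that range
 * ("buf" and "len" are invented names for the example):
 *
 *	static void
 *	example_consume(void *buf, size_t len)
 *	{
 *		arm64_dcache_inv_range((vm_offset_t)buf, (vm_size_t)len);
 *	}
 */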

/*
 * void arm64_idcache_wbinv_range(vm_offset_t, vm_size_t)
 */
ENTRY(arm64_idcache_wbinv_range)
	cache_handle_range	dcop = civac, ic = 1, icop = ivau
END(arm64_idcache_wbinv_range)

/*
 * void arm64_icache_sync_range(vm_offset_t, vm_size_t)
 */
ENTRY(arm64_icache_sync_range)
	cache_handle_range	dcop = cvac, ic = 1, icop = ivau
END(arm64_icache_sync_range)