xref: /freebsd/sys/arm64/arm64/cpufunc_asm.S (revision c2e0d56f5e493a8514324fd5e062ddc99a68b599)
1e5acd89cSAndrew Turner/*-
2e5acd89cSAndrew Turner * Copyright (c) 2014 Robin Randhawa
3e5acd89cSAndrew Turner * Copyright (c) 2015 The FreeBSD Foundation
4e5acd89cSAndrew Turner * All rights reserved.
5e5acd89cSAndrew Turner *
6e5acd89cSAndrew Turner * Portions of this software were developed by Andrew Turner
7e5acd89cSAndrew Turner * under sponsorship from the FreeBSD Foundation
8e5acd89cSAndrew Turner *
9e5acd89cSAndrew Turner * Redistribution and use in source and binary forms, with or without
10e5acd89cSAndrew Turner * modification, are permitted provided that the following conditions
11e5acd89cSAndrew Turner * are met:
12e5acd89cSAndrew Turner * 1. Redistributions of source code must retain the above copyright
13e5acd89cSAndrew Turner *    notice, this list of conditions and the following disclaimer.
14e5acd89cSAndrew Turner * 2. Redistributions in binary form must reproduce the above copyright
15e5acd89cSAndrew Turner *    notice, this list of conditions and the following disclaimer in the
16e5acd89cSAndrew Turner *    documentation and/or other materials provided with the distribution.
17e5acd89cSAndrew Turner *
18e5acd89cSAndrew Turner * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19e5acd89cSAndrew Turner * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20e5acd89cSAndrew Turner * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21e5acd89cSAndrew Turner * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22e5acd89cSAndrew Turner * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23e5acd89cSAndrew Turner * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24e5acd89cSAndrew Turner * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25e5acd89cSAndrew Turner * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26e5acd89cSAndrew Turner * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27e5acd89cSAndrew Turner * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28e5acd89cSAndrew Turner * SUCH DAMAGE.
29e5acd89cSAndrew Turner *
30e5acd89cSAndrew Turner */
31e5acd89cSAndrew Turner
32*c2e0d56fSAndrew Turner#include <sys/elf_common.h>
33e8d5909cSOlivier Houchard#include <sys/errno.h>
34*c2e0d56fSAndrew Turner
35e5acd89cSAndrew Turner#include <machine/asm.h>
36e5acd89cSAndrew Turner#include <machine/param.h>
37e8d5909cSOlivier Houchard
38e8d5909cSOlivier Houchard#include "assym.inc"
39e5acd89cSAndrew Turner/*
40e5acd89cSAndrew Turner * FIXME:
41e5acd89cSAndrew Turner * Need big.LITTLE awareness at some point.
42e5acd89cSAndrew Turner * Using arm64_p[id]cache_line_size may not be the best option.
43e5acd89cSAndrew Turner * Need better SMP awareness.
44e5acd89cSAndrew Turner */
45e5acd89cSAndrew Turner	.text
46e5acd89cSAndrew Turner	.align	2
47e5acd89cSAndrew Turner
/* NOTE(review): .Lpage_mask is not referenced anywhere in this file; confirm it is still needed before removing. */
48e5acd89cSAndrew Turner.Lpage_mask:
49e5acd89cSAndrew Turner	.word	PAGE_MASK
50e5acd89cSAndrew Turner
51e5acd89cSAndrew Turner/*
52e5acd89cSAndrew Turner * Macro to handle the cache. This takes the start address in x0, length
53d8d0bf06SAlan Cox * in x1. It will corrupt x0, x1, x2, x3, and x4.
54e5acd89cSAndrew Turner */
/*
 * Parameters:
 *   dcop - D-cache maintenance op passed to "dc" (e.g. cvac, civac, ivac)
 *   ic   - when non-zero, also walk the range a second time with "ic"
 *   icop - I-cache maintenance op passed to "ic" (only used when ic != 0)
 *
 * The start address is rounded down to a cache-line boundary and the
 * length rounded up accordingly, so every line touching [x0, x0+x1)
 * is covered.
 */
55e5acd89cSAndrew Turner.macro cache_handle_range dcop = 0, ic = 0, icop = 0
56e5acd89cSAndrew Turner.if \ic == 0
577eb26be9SAndrew Turner	adrp	x3, dcache_line_size	/* Load the D cache line size */
587eb26be9SAndrew Turner	ldr	x3, [x3, :lo12:dcache_line_size]
59e5acd89cSAndrew Turner.else
607eb26be9SAndrew Turner	adrp	x3, idcache_line_size	/* Load the I & D cache line size */
617eb26be9SAndrew Turner	ldr	x3, [x3, :lo12:idcache_line_size]
62e5acd89cSAndrew Turner.endif
63e5acd89cSAndrew Turner	sub	x4, x3, #1		/* Get the address mask */
64e5acd89cSAndrew Turner	and	x2, x0, x4		/* Get the low bits of the address */
65e5acd89cSAndrew Turner	add	x1, x1, x2		/* Add these to the size */
66e5acd89cSAndrew Turner	bic	x0, x0, x4		/* Clear the low bit of the address */
67d8d0bf06SAlan Cox.if \ic != 0
68d8d0bf06SAlan Cox	mov	x2, x0			/* Save the address */
69d8d0bf06SAlan Cox	mov	x4, x1			/* Save the size */
70d8d0bf06SAlan Cox.endif
71e5acd89cSAndrew Turner1:
72b8bbefedSZbigniew Bodek	dc	\dcop, x0		/* Apply the D-cache op to this line */
73e5acd89cSAndrew Turner	add	x0, x0, x3		/* Move to the next line */
74e5acd89cSAndrew Turner	subs	x1, x1, x3		/* Reduce the size */
75e5acd89cSAndrew Turner	b.hi	1b			/* Check if we are done */
76d8d0bf06SAlan Cox	dsb	ish			/* Wait for the D-cache ops to complete */
77e5acd89cSAndrew Turner.if \ic != 0
78d8d0bf06SAlan Cox2:
79d8d0bf06SAlan Cox	ic	\icop, x2		/* Apply the I-cache op to this line */
80d8d0bf06SAlan Cox	add	x2, x2, x3		/* Move to the next line */
81d8d0bf06SAlan Cox	subs	x4, x4, x3		/* Reduce the size */
82d8d0bf06SAlan Cox	b.hi	2b			/* Check if we are done */
83d8d0bf06SAlan Cox	dsb	ish			/* Wait for the I-cache ops to complete */
84e5acd89cSAndrew Turner	isb				/* Synchronize the fetched instruction stream */
85e5acd89cSAndrew Turner.endif
86e5acd89cSAndrew Turner.endm
87e5acd89cSAndrew Turner
/*
 * void arm64_nullop(void)
 *
 * No-op: returns immediately.
 */
88e5acd89cSAndrew TurnerENTRY(arm64_nullop)
89e5acd89cSAndrew Turner	ret
90e5acd89cSAndrew TurnerEND(arm64_nullop)
91e5acd89cSAndrew Turner
92e5acd89cSAndrew Turner/*
93e5acd89cSAndrew Turner * Generic functions to read/modify/write the system registers
94e5acd89cSAndrew Turner */
95e5acd89cSAndrew Turner
/*
 * void arm64_tlb_flushID(void)
 *
 * Invalidate all stage 1, EL1 TLB entries.  On SMP kernels the
 * invalidate is broadcast to the inner shareable domain; otherwise
 * only the local CPU's TLB is invalidated.
 */
96e5acd89cSAndrew TurnerENTRY(arm64_tlb_flushID)
978308d2a2SAndrew Turner	dsb	ishst			/* Complete prior stores first */
98e5acd89cSAndrew Turner#ifdef SMP
99e5acd89cSAndrew Turner	tlbi	vmalle1is		/* Invalidate, inner shareable */
100e5acd89cSAndrew Turner#else
101e5acd89cSAndrew Turner	tlbi	vmalle1			/* Invalidate, local CPU only */
102e5acd89cSAndrew Turner#endif
103e5acd89cSAndrew Turner	dsb	ish			/* Wait for the invalidate to finish */
104e5acd89cSAndrew Turner	isb
105e5acd89cSAndrew Turner	ret
106e5acd89cSAndrew TurnerEND(arm64_tlb_flushID)
107e5acd89cSAndrew Turner
108e5acd89cSAndrew Turner/*
1091e3f42b6SJohn Baldwin * void arm64_dcache_wb_range(void *, vm_size_t)
110e5acd89cSAndrew Turner */
111e5acd89cSAndrew TurnerENTRY(arm64_dcache_wb_range)
112e5acd89cSAndrew Turner	cache_handle_range	dcop = cvac	/* Clean (write back) by VA to PoC */
113cd0c606fSAndrew Turner	ret
114e5acd89cSAndrew TurnerEND(arm64_dcache_wb_range)
115e5acd89cSAndrew Turner
116e5acd89cSAndrew Turner/*
1171e3f42b6SJohn Baldwin * void arm64_dcache_wbinv_range(void *, vm_size_t)
118e5acd89cSAndrew Turner */
119e5acd89cSAndrew TurnerENTRY(arm64_dcache_wbinv_range)
120e5acd89cSAndrew Turner	cache_handle_range	dcop = civac	/* Clean & invalidate by VA to PoC */
121cd0c606fSAndrew Turner	ret
122e5acd89cSAndrew TurnerEND(arm64_dcache_wbinv_range)
123e5acd89cSAndrew Turner
124e5acd89cSAndrew Turner/*
1251e3f42b6SJohn Baldwin * void arm64_dcache_inv_range(void *, vm_size_t)
126e5acd89cSAndrew Turner *
127e5acd89cSAndrew Turner * Note, we must not invalidate everything.  If the range is too big we
128e5acd89cSAndrew Turner * must use wb-inv of the entire cache.
129e5acd89cSAndrew Turner */
130e5acd89cSAndrew TurnerENTRY(arm64_dcache_inv_range)
131e5acd89cSAndrew Turner	cache_handle_range	dcop = ivac	/* Invalidate by VA to PoC */
132cd0c606fSAndrew Turner	ret
133e5acd89cSAndrew TurnerEND(arm64_dcache_inv_range)
134e5acd89cSAndrew Turner
135e5acd89cSAndrew Turner/*
1361e3f42b6SJohn Baldwin * void arm64_dic_idc_icache_sync_range(void *, vm_size_t)
137ad020198SAndrew Turner * When the CTR_EL0.IDC bit is set cleaning to PoU becomes a dsb.
138ad020198SAndrew Turner * When the CTR_EL0.DIC bit is set icache invalidation becomes an isb.
139e5acd89cSAndrew Turner */
140ad020198SAndrew TurnerENTRY(arm64_dic_idc_icache_sync_range)
141ad020198SAndrew Turner	dsb	ishst			/* IDC: dsb stands in for the clean to PoU */
142ad020198SAndrew Turner	isb				/* DIC: isb stands in for the I-cache inval */
143ad020198SAndrew Turner	ret
144ad020198SAndrew TurnerEND(arm64_dic_idc_icache_sync_range)
145ad020198SAndrew Turner
146ad020198SAndrew Turner/*
1471e3f42b6SJohn Baldwin * void arm64_idc_aliasing_icache_sync_range(void *, vm_size_t)
1487a060a88SAndrew Turner * When the CTR_EL0.IDC bit is set cleaning to PoU becomes a dsb.
1497a060a88SAndrew Turner */
1507a060a88SAndrew TurnerENTRY(arm64_idc_aliasing_icache_sync_range)
1517a060a88SAndrew Turner	dsb	ishst			/* IDC: dsb stands in for the clean to PoU */
1527a060a88SAndrew Turner	ic	ialluis			/* Invalidate entire I-cache, inner shareable */
1537a060a88SAndrew Turner	dsb	ish			/* Wait for the invalidate to finish */
1547a060a88SAndrew Turner	isb
1557a060a88SAndrew Turner	ret
1567a060a88SAndrew TurnerEND(arm64_idc_aliasing_icache_sync_range)
1577a060a88SAndrew Turner
1587a060a88SAndrew Turner/*
1591e3f42b6SJohn Baldwin * void arm64_aliasing_icache_sync_range(void *, vm_size_t)
160ad020198SAndrew Turner */
161ad020198SAndrew TurnerENTRY(arm64_aliasing_icache_sync_range)
16289b090f1SMichal Meloun	/*
16389b090f1SMichal Meloun	 * XXX Temporary solution - I-cache flush should be range based for
16489b090f1SMichal Meloun	 * PIPT cache or IALLUIS for VIVT or VIPT caches
16589b090f1SMichal Meloun	 */
16689b090f1SMichal Meloun/*	cache_handle_range	dcop = cvau, ic = 1, icop = ivau */
16789b090f1SMichal Meloun	cache_handle_range	dcop = cvau	/* Clean D-cache by VA to PoU */
16889b090f1SMichal Meloun	ic	ialluis			/* Invalidate entire I-cache, inner shareable */
16989b090f1SMichal Meloun	dsb	ish			/* Wait for the invalidate to finish */
170cd0c606fSAndrew Turner	isb
171cd0c606fSAndrew Turner	ret
172ad020198SAndrew TurnerEND(arm64_aliasing_icache_sync_range)
1739cd27257SOlivier Houchard
1749cd27257SOlivier Houchard/*
1751e3f42b6SJohn Baldwin * int arm64_icache_sync_range_checked(void *, vm_size_t)
1769cd27257SOlivier Houchard */
/*
 * Same maintenance sequence as arm64_aliasing_icache_sync_range, but
 * runs with a fault handler installed.  Returns 0 on success, or
 * EFAULT (via cache_maint_fault) if a maintenance instruction faults.
 */
1779cd27257SOlivier HouchardENTRY(arm64_icache_sync_range_checked)
1789cd27257SOlivier Houchard	adr	x5, cache_maint_fault
1799cd27257SOlivier Houchard	SET_FAULT_HANDLER(x5, x6)	/* Catch faults from the ops below */
1809cd27257SOlivier Houchard	/* XXX: See comment in arm64_aliasing_icache_sync_range */
1819cd27257SOlivier Houchard	cache_handle_range	dcop = cvau
1829cd27257SOlivier Houchard	ic	ialluis
1839cd27257SOlivier Houchard	dsb	ish
184cd0c606fSAndrew Turner	isb
1859cd27257SOlivier Houchard	SET_FAULT_HANDLER(xzr, x6)	/* Clear the fault handler */
1869cd27257SOlivier Houchard	mov	x0, #0			/* Success */
1879cd27257SOlivier Houchard	ret
1889cd27257SOlivier HouchardEND(arm64_icache_sync_range_checked)
1899cd27257SOlivier Houchard
/*
 * Landing pad installed by arm64_icache_sync_range_checked: clears the
 * fault handler and returns EFAULT to that function's caller.
 */
1909cd27257SOlivier HouchardENTRY(cache_maint_fault)
1919cd27257SOlivier Houchard	SET_FAULT_HANDLER(xzr, x1)	/* Clear the fault handler */
1929cd27257SOlivier Houchard	mov	x0, #EFAULT
1939cd27257SOlivier Houchard	ret
1949cd27257SOlivier HouchardEND(cache_maint_fault)
195*c2e0d56fSAndrew Turner
/* NOTE(review): presumably emits the .note.gnu.property section (AArch64 feature flags such as BTI/PAC) — macro is defined in machine/asm.h; confirm there. */
196*c2e0d56fSAndrew TurnerGNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
197