/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef __arm__
#include <arm/cpufunc.h>
#else /* !__arm__ */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

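/*
 * Trap into the debugger by executing a BRK #0 instruction, which
 * raises a breakpoint exception.
 */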
static __inline void
breakpoint(void)
{

	__asm("brk #0");
}

#ifdef _KERNEL
#include <machine/armreg.h>

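/*
 * Mask debug exceptions by setting the DAIF.D bit.  dbg_disable()
 * returns the previous DAIF value so the caller can later restore it,
 * e.g. by writing it back with intr_restore().
 */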
static __inline register_t
dbg_disable(void)
{
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #(" __XSTRING(DAIF_D) ") \n"
	    : "=&r" (ret));

	return (ret);
}

static __inline void
dbg_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_D) ")");
}

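/*
 * Interrupt control.  intr_disable() masks the exception classes
 * covered by DAIF_INTR and returns the previous DAIF value, which
 * intr_restore() writes back.  An illustrative critical section:
 *
 *	register_t daif;
 *
 *	daif = intr_disable();
 *	... code that must not be interrupted ...
 *	intr_restore(daif);
 */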
static __inline register_t
intr_disable(void)
{
	/* DAIF is a 32-bit register */
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #(" __XSTRING(DAIF_INTR) ") \n"
	    : "=&r" (ret));

	return (ret);
}

static __inline void
intr_restore(register_t s)
{

	WRITE_SPECIALREG(daif, s);
}

static __inline void
intr_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_INTR) ")");
}

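/*
 * Control the masking of SError (asynchronous external abort)
 * exceptions via the DAIF.A bit.
 */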
static __inline void
serror_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_A) ")");
}

static __inline void
serror_disable(void)
{

	__asm __volatile("msr daifset, #(" __XSTRING(DAIF_A) ")");
}

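/*
 * Read the CPU identification registers: MIDR_EL1 (Main ID Register)
 * and MPIDR_EL1 (Multiprocessor Affinity Register).
 */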
static __inline register_t
get_midr(void)
{
	uint64_t midr;

	midr = READ_SPECIALREG(midr_el1);

	return (midr);
}

static __inline register_t
get_mpidr(void)
{
	uint64_t mpidr;

	mpidr = READ_SPECIALREG(mpidr_el1);

	return (mpidr);
}

static __inline void
clrex(void)
{

	/*
	 * The "memory" clobber acts as a compiler barrier; without it
	 * the compiler could reorder memory accesses around the
	 * monitor clear.
	 */
	__asm __volatile("clrex" : : : "memory");
}

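/*
 * Install a new user address space root in TTBR0_EL1; the ISB makes
 * the write visible to subsequent instructions.  No TLB maintenance
 * is performed here.
 */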
static __inline void
set_ttbr0(uint64_t ttbr0)
{

	__asm __volatile(
	    "msr ttbr0_el1, %0 \n"
	    "isb               \n"
	    :
	    : "r" (ttbr0));
}

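/*
 * Invalidate the instruction caches: "ic ialluis" acts on all CPUs in
 * the Inner Shareable domain, while "ic iallu" (paired with the weaker
 * "dsb nsh" barrier) only affects the local CPU.
 */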
static __inline void
invalidate_icache(void)
{

	__asm __volatile(
	    "ic ialluis        \n"
	    "dsb ish           \n"
	    "isb               \n");
}

static __inline void
invalidate_local_icache(void)
{

	__asm __volatile(
	    "ic iallu          \n"
	    "dsb nsh           \n"
	    "isb               \n");
}

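/*
 * WFET/WFIT (FEAT_WFxT): wait for an event/interrupt, or until the
 * virtual counter passes the supplied timeout value.  The raw system
 * register encodings are used so that assemblers lacking FEAT_WFxT
 * support still accept the instructions.
 */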
static __inline void
wfet(uint64_t val)
{
	__asm __volatile(
	    "msr s0_3_c1_c0_0, %0\n"
	    :
	    : "r" (val)
	    : "memory");
}

static __inline void
wfit(uint64_t val)
{
	__asm __volatile(
	    "msr s0_3_c1_c0_1, %0\n"
	    :
	    : "r" (val)
	    : "memory");
}

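/*
 * Cache properties discovered at boot, typically from the CPU's cache
 * identification registers.
 */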
extern bool icache_aliasing;
extern bool icache_vmid;

extern int64_t dcache_line_size;
extern int64_t icache_line_size;
extern int64_t idcache_line_size;
extern int64_t dczva_line_size;

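/*
 * Map the MI cpu_*() cache and TLB maintenance names onto their arm64
 * implementations.
 */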
#define	cpu_nullop()			arm64_nullop()
#define	cpufunc_nullop()		arm64_nullop()

#define	cpu_tlb_flushID()		arm64_tlb_flushID()

#define	cpu_dcache_wbinv_range(a, s)	arm64_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s)	arm64_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s)	arm64_dcache_wb_range((a), (s))

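/*
 * arm64_icache_sync_range points at the I-cache synchronization
 * routine matching the detected cache behaviour; the candidate
 * implementations are among the functions declared below.
 */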
extern void (*arm64_icache_sync_range)(void *, vm_size_t);

#define	cpu_icache_sync_range(a, s)	arm64_icache_sync_range((a), (s))
#define	cpu_icache_sync_range_checked(a, s) arm64_icache_sync_range_checked((a), (s))

void arm64_nullop(void);
void arm64_tlb_flushID(void);
void arm64_dic_idc_icache_sync_range(void *, vm_size_t);
void arm64_idc_aliasing_icache_sync_range(void *, vm_size_t);
void arm64_aliasing_icache_sync_range(void *, vm_size_t);
int arm64_icache_sync_range_checked(void *, vm_size_t);
void arm64_dcache_wbinv_range(void *, vm_size_t);
void arm64_dcache_inv_range(void *, vm_size_t);
void arm64_dcache_wb_range(void *, vm_size_t);
bool arm64_get_writable_addr(void *, void **);

#endif	/* _KERNEL */
#endif	/* _MACHINE_CPUFUNC_H_ */

#endif /* !__arm__ */