/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

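/*
 * breakpoint() executes a "brk #0" instruction, raising a breakpoint
 * exception (e.g. to drop into the kernel debugger).
 */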
static __inline void
breakpoint(void)
{

	__asm("brk #0");
}

#ifdef _KERNEL

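/*
 * Defining HAVE_INLINE_* tells the generic kernel headers that these
 * bit-search routines are provided inline here, so no out-of-line
 * versions need to be declared.  The ffs*() variants reduce to compiler
 * builtins; the fls*() variants are computed from count-leading-zeros.
 */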
#define	HAVE_INLINE_FFS

static __inline __pure2 int
ffs(int mask)
{

	return (__builtin_ffs(mask));
}

#define	HAVE_INLINE_FFSL

static __inline __pure2 int
ffsl(long mask)
{

	return (__builtin_ffsl(mask));
}

#define	HAVE_INLINE_FFSLL

static __inline __pure2 int
ffsll(long long mask)
{

	return (__builtin_ffsll(mask));
}

#define	HAVE_INLINE_FLS

static __inline __pure2 int
fls(int mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clz((u_int)mask));
}

#define	HAVE_INLINE_FLSL

static __inline __pure2 int
flsl(long mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clzl((u_long)mask));
}

#define	HAVE_INLINE_FLSLL

static __inline __pure2 int
flsll(long long mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clzll((unsigned long long)mask));
}

#include <machine/armreg.h>

void pan_enable(void);

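/*
 * The DAIF exception-mask bits are manipulated with the immediate forms
 * of "msr daifset"/"msr daifclr": bit 3 masks Debug exceptions, bit 2
 * SError, bit 1 IRQ and bit 0 FIQ.  Hence #8 below toggles the debug
 * mask and #2 (in the intr_* routines) the IRQ mask.
 */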
static __inline register_t
dbg_disable(void)
{
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #8 \n"
	    : "=&r" (ret));

	return (ret);
}

static __inline void
dbg_enable(void)
{

	__asm __volatile("msr daifclr, #8");
}

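/*
 * intr_disable() returns the previous DAIF state so that callers can
 * bracket a critical section and restore the mask exactly as it was,
 * e.g. (illustrative sketch):
 *
 *	register_t s;
 *
 *	s = intr_disable();
 *	...
 *	intr_restore(s);
 */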
static __inline register_t
intr_disable(void)
{
	/* DAIF is a 32-bit register */
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #2 \n"
	    : "=&r" (ret));

	return (ret);
}

static __inline void
intr_restore(register_t s)
{

	WRITE_SPECIALREG(daif, s);
}

static __inline void
intr_enable(void)
{

	__asm __volatile("msr daifclr, #2");
}

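/*
 * MIDR_EL1 identifies the CPU (implementer, part number, revision);
 * MPIDR_EL1 reports the core's affinity values, used to tell the
 * individual cores of an SMP system apart.
 */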
static __inline register_t
get_midr(void)
{
	uint64_t midr;

	midr = READ_SPECIALREG(midr_el1);

	return (midr);
}

static __inline register_t
get_mpidr(void)
{
	uint64_t mpidr;

	mpidr = READ_SPECIALREG(mpidr_el1);

	return (mpidr);
}

static __inline void
clrex(void)
{

	/*
	 * The "memory" clobber serves as a compiler barrier; without it
	 * the clearing of the exclusive monitor might be reordered and
	 * occur too late.
	 */
	__asm __volatile("clrex" : : : "memory");
}

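/*
 * Install a new translation table base for the low (user) VA range.
 * The "isb" ensures the new TTBR0_EL1 value is in effect before any
 * following instructions execute.
 */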
static __inline void
set_ttbr0(uint64_t ttbr0)
{

	__asm __volatile(
	    "msr ttbr0_el1, %0 \n"
	    "isb               \n"
	    :
	    : "r" (ttbr0));
}

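/*
 * "ic iallu" invalidates the entire instruction cache of the local CPU
 * (to the point of unification); "dsb nsh"/"isb" wait for completion
 * and discard any instructions already fetched.
 */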
static __inline void
invalidate_local_icache(void)
{

	__asm __volatile(
	    "ic iallu          \n"
	    "dsb nsh           \n"
	    "isb               \n");
}

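/*
 * Cache characteristics determined at boot: whether the instruction
 * cache may alias (requiring extra maintenance), whether it is tagged
 * by VMID, and the various cache line sizes (including the "dc zva"
 * block size).
 */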
extern bool icache_aliasing;
extern bool icache_vmid;

extern int64_t dcache_line_size;
extern int64_t icache_line_size;
extern int64_t idcache_line_size;
extern int64_t dczva_line_size;

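/*
 * The cpu_*() cache and TLB maintenance interfaces used by the MI code
 * map onto the arm64_*() implementations declared below.
 */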
#define	cpu_nullop()			arm64_nullop()
#define	cpufunc_nullop()		arm64_nullop()

#define	cpu_tlb_flushID()		arm64_tlb_flushID()

#define	cpu_dcache_wbinv_range(a, s)	arm64_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s)	arm64_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s)	arm64_dcache_wb_range((a), (s))

#define	cpu_idcache_wbinv_range(a, s)	arm64_idcache_wbinv_range((a), (s))
#define	cpu_icache_sync_range(a, s)	arm64_icache_sync_range((a), (s))
#define cpu_icache_sync_range_checked(a, s) arm64_icache_sync_range_checked((a), (s))

void arm64_nullop(void);
void arm64_tlb_flushID(void);
void arm64_icache_sync_range(vm_offset_t, vm_size_t);
int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t);
void arm64_idcache_wbinv_range(vm_offset_t, vm_size_t);
void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
void arm64_dcache_wb_range(vm_offset_t, vm_size_t);

#endif	/* _KERNEL */
#endif	/* _MACHINE_CPUFUNC_H_ */