/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifdef __arm__
#include <arm/cpufunc.h>
#else /* !__arm__ */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

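/* Enter the debugger by raising a breakpoint ("brk #0") exception. */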
static __inline void
breakpoint(void)
{

	__asm("brk #0");
}

#ifdef _KERNEL
#include <machine/armreg.h>

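/*
 * Enable the Privileged Access Never (PAN) feature on CPUs that
 * support it; implemented in the arm64 machine-dependent code.
 */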
void pan_enable(void);

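/*
 * Mask debug exceptions (DAIF.D) and return the previous DAIF value so
 * the caller can later restore the old state.
 */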
static __inline register_t
dbg_disable(void)
{
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #(" __XSTRING(DAIF_D) ") \n"
	    : "=&r" (ret));

	return (ret);
}

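/* Unmask debug exceptions by clearing DAIF.D. */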
static __inline void
dbg_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_D) ")");
}

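/*
 * Mask interrupts (the DAIF_INTR bits) and return the previous DAIF
 * value for a later intr_restore().
 */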
static __inline register_t
intr_disable(void)
{
	/* DAIF is a 32-bit register */
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #(" __XSTRING(DAIF_INTR) ") \n"
	    : "=&r" (ret));

	return (ret);
}

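/* Restore the DAIF state previously saved by intr_disable(). */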
static __inline void
intr_restore(register_t s)
{

	WRITE_SPECIALREG(daif, s);
}

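/* Unmask interrupts by clearing the DAIF_INTR bits. */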
static __inline void
intr_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_INTR) ")");
}
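
/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * the value returned by intr_disable() is what intr_restore() expects,
 * so a critical section typically looks like
 *
 *	register_t daif;
 *
 *	daif = intr_disable();
 *	... code that must not be interrupted ...
 *	intr_restore(daif);
 */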
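/* Unmask SError (asynchronous abort) exceptions by clearing DAIF.A. */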
static __inline void
serror_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_A) ")");
}

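/* Read MIDR_EL1, the Main ID register identifying the CPU model. */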
static __inline register_t
get_midr(void)
{
	uint64_t midr;

	midr = READ_SPECIALREG(midr_el1);

	return (midr);
}

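/* Read MPIDR_EL1, the Multiprocessor Affinity register. */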
static __inline register_t
get_mpidr(void)
{
	uint64_t mpidr;

	mpidr = READ_SPECIALREG(mpidr_el1);

	return (mpidr);
}

static __inline void
clrex(void)
{

	/*
	 * The "memory" clobber is a compiler barrier; without it the
	 * monitor clear might be reordered and occur too late.
	 */
	__asm __volatile("clrex" : : : "memory");
}

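/*
 * Install a new ttbr0_el1 translation table base and synchronize with
 * an ISB so the change takes effect before subsequent instructions.
 */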
static __inline void
set_ttbr0(uint64_t ttbr0)
{

	__asm __volatile(
	    "msr ttbr0_el1, %0 \n"
	    "isb               \n"
	    :
	    : "r" (ttbr0));
}

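/*
 * Invalidate all instruction caches in the Inner Shareable domain
 * ("ic ialluis"), i.e. on all CPUs, then synchronize with DSB and ISB.
 */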
static __inline void
invalidate_icache(void)
{

	__asm __volatile(
	    "ic ialluis        \n"
	    "dsb ish           \n"
	    "isb               \n");
}

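/*
 * Invalidate the instruction cache of the local CPU only ("ic iallu"),
 * then synchronize with DSB and ISB.
 */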
static __inline void
invalidate_local_icache(void)
{

	__asm __volatile(
	    "ic iallu          \n"
	    "dsb nsh           \n"
	    "isb               \n");
}

extern bool icache_aliasing;
extern bool icache_vmid;

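/*
 * Cache line sizes used by the cache maintenance routines below;
 * dczva_line_size is the block size zeroed by the "dc zva" instruction.
 */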
extern int64_t dcache_line_size;
extern int64_t icache_line_size;
extern int64_t idcache_line_size;
extern int64_t dczva_line_size;

#define	cpu_nullop()			arm64_nullop()
#define	cpufunc_nullop()		arm64_nullop()

#define	cpu_tlb_flushID()		arm64_tlb_flushID()

#define	cpu_dcache_wbinv_range(a, s)	arm64_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s)	arm64_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s)	arm64_dcache_wb_range((a), (s))

extern void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t);

#define	cpu_icache_sync_range(a, s)	arm64_icache_sync_range((a), (s))
#define	cpu_icache_sync_range_checked(a, s) arm64_icache_sync_range_checked((a), (s))
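
/*
 * A usage sketch (illustrative only): after new instructions have been
 * written to memory, for example when loading code or patching a
 * trampoline, they are made visible to instruction fetch with
 *
 *	cpu_icache_sync_range(va, size);
 *
 * where va and size are placeholders for the modified virtual address
 * range.
 */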

void arm64_nullop(void);
void arm64_tlb_flushID(void);
void arm64_dic_idc_icache_sync_range(vm_offset_t, vm_size_t);
void arm64_idc_aliasing_icache_sync_range(vm_offset_t, vm_size_t);
void arm64_aliasing_icache_sync_range(vm_offset_t, vm_size_t);
int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t);
void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
void arm64_dcache_wb_range(vm_offset_t, vm_size_t);
bool arm64_get_writable_addr(vm_offset_t, vm_offset_t *);

#endif	/* _KERNEL */
#endif	/* _MACHINE_CPUFUNC_H_ */

#endif /* !__arm__ */