/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef __arm__
#include <arm/cpufunc.h>
#else /* !__arm__ */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

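/* Enter the debugger by raising a breakpoint exception (BRK #0). */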
static __inline void
breakpoint(void)
{

	__asm("brk #0");
}

#ifdef _KERNEL
#include <machine/armreg.h>

void pan_enable(void);

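/*
 * Mask debug exceptions (set DAIF.D) and return the previous DAIF value so
 * the caller can later restore it.
 */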
static __inline register_t
dbg_disable(void)
{
	/* DAIF is a 32-bit register */
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #(" __XSTRING(DAIF_D) ") \n"
	    : "=&r" (ret));

	return (ret);
}

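/* Unmask debug exceptions (clear DAIF.D). */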
static __inline void
dbg_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_D) ")");
}

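/*
 * Mask interrupt delivery (set the DAIF_INTR bits) and return the previous
 * DAIF value for a later intr_restore().
 */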
static __inline register_t
intr_disable(void)
{
	/* DAIF is a 32-bit register */
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #(" __XSTRING(DAIF_INTR) ") \n"
	    : "=&r" (ret));

	return (ret);
}

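/* Restore the DAIF state previously returned by intr_disable(). */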
static __inline void
intr_restore(register_t s)
{

	WRITE_SPECIALREG(daif, s);
}

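/* Unmask interrupt delivery (clear the DAIF_INTR bits). */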
static __inline void
intr_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_INTR) ")");
}
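
/*
 * A minimal usage sketch for the helpers above (illustrative only, not part
 * of this header):
 *
 *	register_t daif;
 *
 *	daif = intr_disable();
 *	... code that must run with interrupts masked ...
 *	intr_restore(daif);
 */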
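/* Unmask SError (asynchronous abort) delivery by clearing DAIF.A. */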
static __inline void
serror_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_A) ")");
}

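/* Read the Main ID Register (MIDR_EL1), which identifies the CPU model. */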
static __inline register_t
get_midr(void)
{
	uint64_t midr;

	midr = READ_SPECIALREG(midr_el1);

	return (midr);
}

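/* Read the Multiprocessor Affinity Register (MPIDR_EL1). */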
static __inline register_t
get_mpidr(void)
{
	uint64_t mpidr;

	mpidr = READ_SPECIALREG(mpidr_el1);

	return (mpidr);
}

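/* Clear the local exclusive monitor. */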
static __inline void
clrex(void)
{

	/*
	 * The "memory" clobber acts as a compiler barrier; without it the
	 * monitor clear might be reordered and occur too late.
	 */
	__asm __volatile("clrex" : : : "memory");
}

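/* Install a new TTBR0_EL1 translation table base, followed by an isb. */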
static __inline void
set_ttbr0(uint64_t ttbr0)
{

	__asm __volatile(
	    "msr ttbr0_el1, %0 \n"
	    "isb               \n"
	    :
	    : "r" (ttbr0));
}

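/* Invalidate all instruction caches in the Inner Shareable domain. */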
static __inline void
invalidate_icache(void)
{

	__asm __volatile(
	    "ic ialluis        \n"
	    "dsb ish           \n"
	    "isb               \n");
}

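/* Invalidate the instruction caches of the local CPU only. */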
static __inline void
invalidate_local_icache(void)
{

	__asm __volatile(
	    "ic iallu          \n"
	    "dsb nsh           \n"
	    "isb               \n");
}

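/* Instruction cache properties detected at boot. */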
extern bool icache_aliasing;
extern bool icache_vmid;

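/* Cache line and DC ZVA block sizes used by the maintenance routines below. */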
extern int64_t dcache_line_size;
extern int64_t icache_line_size;
extern int64_t idcache_line_size;
extern int64_t dczva_line_size;

#define	cpu_nullop()			arm64_nullop()
#define	cpufunc_nullop()		arm64_nullop()

#define	cpu_tlb_flushID()		arm64_tlb_flushID()

#define	cpu_dcache_wbinv_range(a, s)	arm64_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s)	arm64_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s)	arm64_dcache_wb_range((a), (s))

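/*
 * I-cache synchronization routine, set during boot to the variant declared
 * below that matches the CPU's cache behaviour.
 */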
extern void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t);

#define	cpu_icache_sync_range(a, s)	arm64_icache_sync_range((a), (s))
#define	cpu_icache_sync_range_checked(a, s) arm64_icache_sync_range_checked((a), (s))

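/* Low-level cache and TLB maintenance primitives and related helpers. */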
void arm64_nullop(void);
void arm64_tlb_flushID(void);
void arm64_dic_idc_icache_sync_range(vm_offset_t, vm_size_t);
void arm64_idc_aliasing_icache_sync_range(vm_offset_t, vm_size_t);
void arm64_aliasing_icache_sync_range(vm_offset_t, vm_size_t);
int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t);
void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
void arm64_dcache_wb_range(vm_offset_t, vm_size_t);
bool arm64_get_writable_addr(vm_offset_t, vm_offset_t *);

#endif	/* _KERNEL */
#endif	/* _MACHINE_CPUFUNC_H_ */

#endif /* !__arm__ */