/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef __arm__
#include <arm/cpufunc.h>
#else /* !__arm__ */

#ifndef _MACHINE_CPUFUNC_H_
#define _MACHINE_CPUFUNC_H_

static __inline void
breakpoint(void)
{

	__asm("brk #0");
}

#ifdef _KERNEL
#include <machine/armreg.h>

static __inline register_t
dbg_disable(void)
{
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif \n"
	    "msr daifset, #(" __XSTRING(DAIF_D) ") \n"
	    : "=&r" (ret));

	return (ret);
}

static __inline void
dbg_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_D) ")");
}

static __inline register_t
intr_disable(void)
{
	/* DAIF is a 32-bit register */
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif \n"
	    "msr daifset, #(" __XSTRING(DAIF_INTR) ") \n"
	    : "=&r" (ret));

	return (ret);
}

static __inline void
intr_restore(register_t s)
{

	WRITE_SPECIALREG(daif, s);
}

static __inline void
intr_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_INTR) ")");
}
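
/*
 * Usage sketch (illustrative, not part of the original header; 'daif' is a
 * placeholder name): callers normally save the DAIF state returned by
 * intr_disable() and pass it back to intr_restore() once the critical
 * section is over:
 *
 *	register_t daif;
 *
 *	daif = intr_disable();
 *	... code that must run with interrupts masked ...
 *	intr_restore(daif);
 */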

static __inline void
serror_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_A) ")");
}

static __inline register_t
get_midr(void)
{
	uint64_t midr;

	midr = READ_SPECIALREG(midr_el1);

	return (midr);
}

static __inline register_t
get_mpidr(void)
{
	uint64_t mpidr;

	mpidr = READ_SPECIALREG(mpidr_el1);

	return (mpidr);
}

static __inline void
clrex(void)
{

	/*
	 * The "memory" clobber acts as a compiler barrier; without it the
	 * monitor clear could be reordered past later memory accesses.
	 */
	__asm __volatile("clrex" : : : "memory");
}

static __inline void
set_ttbr0(uint64_t ttbr0)
{

	__asm __volatile(
	    "msr ttbr0_el1, %0 \n"
	    "isb \n"
	    :
	    : "r" (ttbr0));
}

static __inline void
invalidate_icache(void)
{

	__asm __volatile(
	    "ic ialluis \n"
	    "dsb ish \n"
	    "isb \n");
}

static __inline void
invalidate_local_icache(void)
{

	__asm __volatile(
	    "ic iallu \n"
	    "dsb nsh \n"
	    "isb \n");
}

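/*
 * The two helpers below issue the WFET and WFIT instructions (wait for
 * event/interrupt with timeout), written via their system-register
 * encodings; presumably this keeps the file assembling with toolchains
 * that do not know the FEAT_WFxT mnemonics.
 */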
static __inline void
wfet(uint64_t val)
{
	__asm __volatile(
	    "msr s0_3_c1_c0_0, %0\n"
	    :
	    : "r" ((val))
	    : "memory");
}

static __inline void
wfit(uint64_t val)
{
	__asm __volatile(
	    "msr s0_3_c1_c0_1, %0\n"
	    :
	    : "r" ((val))
	    : "memory");
}

extern bool icache_aliasing;
extern bool icache_vmid;

extern int64_t dcache_line_size;
extern int64_t icache_line_size;
extern int64_t idcache_line_size;
extern int64_t dczva_line_size;

#define cpu_nullop() arm64_nullop()
#define cpufunc_nullop() arm64_nullop()

#define cpu_tlb_flushID() arm64_tlb_flushID()

#define cpu_dcache_wbinv_range(a, s) arm64_dcache_wbinv_range((a), (s))
#define cpu_dcache_inv_range(a, s) arm64_dcache_inv_range((a), (s))
#define cpu_dcache_wb_range(a, s) arm64_dcache_wb_range((a), (s))

extern void (*arm64_icache_sync_range)(void *, vm_size_t);

#define cpu_icache_sync_range(a, s) arm64_icache_sync_range((a), (s))
#define cpu_icache_sync_range_checked(a, s) arm64_icache_sync_range_checked((a), (s))
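
/*
 * Illustrative usage sketch (an assumption, not taken from this header;
 * 'addr' and 'size' are placeholder names): after new instructions have
 * been written to memory, the range is made coherent for instruction
 * fetch with the wrapper above:
 *
 *	cpu_icache_sync_range(addr, size);
 */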

void arm64_nullop(void);
void arm64_tlb_flushID(void);
void arm64_dic_idc_icache_sync_range(void *, vm_size_t);
void arm64_idc_aliasing_icache_sync_range(void *, vm_size_t);
void arm64_aliasing_icache_sync_range(void *, vm_size_t);
int arm64_icache_sync_range_checked(void *, vm_size_t);
void arm64_dcache_wbinv_range(void *, vm_size_t);
void arm64_dcache_inv_range(void *, vm_size_t);
void arm64_dcache_wb_range(void *, vm_size_t);
bool arm64_get_writable_addr(void *, void **);

#endif /* _KERNEL */
#endif /* _MACHINE_CPUFUNC_H_ */

#endif /* !__arm__ */