/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

static __inline void
breakpoint(void)
{

	__asm("brk #0");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS

static __inline __pure2 int
ffs(int mask)
{

	return (__builtin_ffs(mask));
}

#define	HAVE_INLINE_FFSL

static __inline __pure2 int
ffsl(long mask)
{

	return (__builtin_ffsl(mask));
}

#define	HAVE_INLINE_FFSLL

static __inline __pure2 int
ffsll(long long mask)
{

	return (__builtin_ffsll(mask));
}

#define	HAVE_INLINE_FLS

static __inline __pure2 int
fls(int mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clz((u_int)mask));
}

#define	HAVE_INLINE_FLSL

static __inline __pure2 int
flsl(long mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clzl((u_long)mask));
}

#define	HAVE_INLINE_FLSLL

static __inline __pure2 int
flsll(long long mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clzll((unsigned long long)mask));
}

#include <machine/armreg.h>

void pan_enable(void);

/* Mask debug exceptions, returning the previous DAIF state. */
static __inline register_t
dbg_disable(void)
{
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #(" __XSTRING(DAIF_D) ") \n"
	    : "=&r" (ret));

	return (ret);
}

/* Unmask debug exceptions. */
static __inline void
dbg_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_D) ")");
}

/* Mask interrupts, returning the previous DAIF state. */
static __inline register_t
intr_disable(void)
{
	/* DAIF is a 32-bit register */
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #(" __XSTRING(DAIF_INTR) ") \n"
	    : "=&r" (ret));

	return (ret);
}

/* Restore the DAIF state saved by intr_disable(). */
static __inline void
intr_restore(register_t s)
{

	WRITE_SPECIALREG(daif, s);
}

/* Unmask interrupts. */
static __inline void
intr_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_INTR) ")");
}

/* Unmask SError (asynchronous abort) exceptions. */
static __inline void
serror_enable(void)
{

	__asm __volatile("msr daifclr, #(" __XSTRING(DAIF_A) ")");
}

static __inline register_t
get_midr(void)
{
	uint64_t midr;

	midr = READ_SPECIALREG(midr_el1);

	return (midr);
}

static __inline register_t
get_mpidr(void)
{
	uint64_t mpidr;

	mpidr = READ_SPECIALREG(mpidr_el1);

	return (mpidr);
}

static __inline void
clrex(void)
{

	/*
	 * Ensure a compiler barrier, otherwise the monitor clear might
	 * occur too late for us.
	 */
	__asm __volatile("clrex" : : : "memory");
}

/* Install a new ttbr0_el1 page table base and synchronise. */
static __inline void
set_ttbr0(uint64_t ttbr0)
{

	__asm __volatile(
	    "msr ttbr0_el1, %0 \n"
	    "isb               \n"
	    :
	    : "r" (ttbr0));
}

/* Invalidate the instruction cache across the Inner Shareable domain. */
static __inline void
invalidate_icache(void)
{

	__asm __volatile(
	    "ic ialluis        \n"
	    "dsb ish           \n"
	    "isb               \n");
}

/* Invalidate the instruction cache on the local CPU only. */
static __inline void
invalidate_local_icache(void)
{

	__asm __volatile(
	    "ic iallu          \n"
	    "dsb nsh           \n"
	    "isb               \n");
}

extern bool icache_aliasing;
extern bool icache_vmid;

extern int64_t dcache_line_size;
extern int64_t icache_line_size;
extern int64_t idcache_line_size;
extern int64_t dczva_line_size;

#define	cpu_nullop()			arm64_nullop()
#define	cpufunc_nullop()		arm64_nullop()

#define	cpu_tlb_flushID()		arm64_tlb_flushID()

#define	cpu_dcache_wbinv_range(a, s)	arm64_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s)	arm64_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s)	arm64_dcache_wb_range((a), (s))

extern void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t);

#define	cpu_icache_sync_range(a, s)	arm64_icache_sync_range((a), (s))
#define	cpu_icache_sync_range_checked(a, s) \
	arm64_icache_sync_range_checked((a), (s))

void arm64_nullop(void);
void arm64_tlb_flushID(void);
void arm64_dic_idc_icache_sync_range(vm_offset_t, vm_size_t);
void arm64_idc_aliasing_icache_sync_range(vm_offset_t, vm_size_t);
void arm64_aliasing_icache_sync_range(vm_offset_t, vm_size_t);
int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t);
void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
void arm64_dcache_wb_range(vm_offset_t, vm_size_t);
bool arm64_get_writable_addr(vm_offset_t, vm_offset_t *);

#endif	/* _KERNEL */
#endif	/* _MACHINE_CPUFUNC_H_ */