/* $NetBSD: cpufunc.h,v 1.29 2003/09/06 09:08:35 rearnsha Exp $ */

/*-
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.h
 *
 * Prototypes for cpu, mmu and tlb related functions.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_CPUFUNC_H_
#define _MACHINE_CPUFUNC_H_

#ifdef _KERNEL

#include <sys/types.h>
#include <machine/armreg.h>
#include <machine/cpuconf.h>

static __inline void
breakpoint(void)
{
	__asm(".word 0xe7ffffff");
}

struct cpu_functions {

	/* CPU functions */

	void	(*cf_cpwait)		(void);

	/* MMU functions */

	u_int	(*cf_control)		(u_int bic, u_int eor);
	void	(*cf_setttb)		(u_int ttb);

	/* TLB functions */

	void	(*cf_tlb_flushID)	(void);
	void	(*cf_tlb_flushID_SE)	(u_int va);
	void	(*cf_tlb_flushD)	(void);
	void	(*cf_tlb_flushD_SE)	(u_int va);

	/*
	 * Cache operations:
	 *
	 * We define the following primitives:
	 *
	 *	icache_sync_all		Synchronize I-cache
	 *	icache_sync_range	Synchronize I-cache range
	 *
	 *	dcache_wbinv_all	Write-back and Invalidate D-cache
	 *	dcache_wbinv_range	Write-back and Invalidate D-cache range
	 *	dcache_inv_range	Invalidate D-cache range
	 *	dcache_wb_range		Write-back D-cache range
	 *
	 *	idcache_wbinv_all	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache
	 *	idcache_wbinv_range	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache range
	 *
	 * Note that the ARM term for "write-back" is "clean".  We use
	 * the term "write-back" since it's a more common way to describe
	 * the operation.
	 *
	 * There are some rules that must be followed:
	 *
	 *	ID-cache Invalidate All:
	 *		Unlike other functions, this one must never write back.
	 *		It is used to initialize the MMU when it is in an
	 *		unknown state (such as when it may have lines tagged
	 *		as valid that belong to a previous set of mappings).
	 *
	 *	I-cache Sync (all or range):
	 *		The goal is to synchronize the instruction stream,
	 *		so you may need to write back dirty D-cache blocks
	 *		first.  If a range is requested, and you can't
	 *		synchronize just a range, you have to hit the whole
	 *		thing.
	 *
	 *	D-cache Write-Back and Invalidate range:
	 *		If you can't WB-Inv a range, you must WB-Inv the
	 *		entire D-cache.
	 *
	 *	D-cache Invalidate:
	 *		If you can't Inv the D-cache, you must Write-Back
	 *		and Invalidate.  Code that uses this operation
	 *		MUST NOT assume that the D-cache will not be written
	 *		back to memory.
	 *
	 *	D-cache Write-Back:
	 *		If you can't Write-back without doing an Inv,
	 *		that's fine.  Then treat this as a WB-Inv.
	 *		Skipping the invalidate is merely an optimization.
	 *
	 *	All operations:
	 *		Valid virtual addresses must be passed to each
	 *		cache operation.
	 */
	void	(*cf_icache_sync_all)	(void);
	void	(*cf_icache_sync_range)	(vm_offset_t, vm_size_t);

	void	(*cf_dcache_wbinv_all)	(void);
	void	(*cf_dcache_wbinv_range) (vm_offset_t, vm_size_t);
	void	(*cf_dcache_inv_range)	(vm_offset_t, vm_size_t);
	void	(*cf_dcache_wb_range)	(vm_offset_t, vm_size_t);

	void	(*cf_idcache_inv_all)	(void);
	void	(*cf_idcache_wbinv_all)	(void);
	void	(*cf_idcache_wbinv_range) (vm_offset_t, vm_size_t);
	void	(*cf_l2cache_wbinv_all)	(void);
	void	(*cf_l2cache_wbinv_range) (vm_offset_t, vm_size_t);
	void	(*cf_l2cache_inv_range)	(vm_offset_t, vm_size_t);
	void	(*cf_l2cache_wb_range)	(vm_offset_t, vm_size_t);
	void	(*cf_l2cache_drain_writebuf) (void);

	/* Other functions */

	void	(*cf_drain_writebuf)	(void);

	void	(*cf_sleep)		(int mode);

	/* Soft functions */

	void	(*cf_context_switch)	(void);

	void	(*cf_setup)		(void);
};

extern struct cpu_functions cpufuncs;
extern u_int cputype;
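
/*
 * Illustration only (hypothetical CPU id, not part of this interface):
 * a port's set_cpufuncs() selects the table matching the running core
 * and copies it into the global above, e.g.
 *
 *	if (cputype == CPU_ID_FOO)
 *		cpufuncs = foo_cpufuncs;
 *
 * after which the cpu_*() wrapper macros below dispatch through it.
 */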

#define	cpu_cpwait()		cpufuncs.cf_cpwait()

#define	cpu_control(c, e)	cpufuncs.cf_control(c, e)
#define	cpu_setttb(t)		cpufuncs.cf_setttb(t)

#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
#define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
#define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)

#define	cpu_icache_sync_all()	cpufuncs.cf_icache_sync_all()
#define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define	cpu_dcache_wbinv_all()	cpufuncs.cf_dcache_wbinv_all()
#define	cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define	cpu_idcache_inv_all()	cpufuncs.cf_idcache_inv_all()
#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#define	cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
#define	cpu_l2cache_wbinv_all()	cpufuncs.cf_l2cache_wbinv_all()
#define	cpu_l2cache_wb_range(a, s) cpufuncs.cf_l2cache_wb_range((a), (s))
#define	cpu_l2cache_inv_range(a, s) cpufuncs.cf_l2cache_inv_range((a), (s))
#define	cpu_l2cache_wbinv_range(a, s) cpufuncs.cf_l2cache_wbinv_range((a), (s))
#define	cpu_l2cache_drain_writebuf() cpufuncs.cf_l2cache_drain_writebuf()

#define	cpu_drain_writebuf()	cpufuncs.cf_drain_writebuf()
#define	cpu_sleep(m)		cpufuncs.cf_sleep(m)

#define	cpu_setup()		cpufuncs.cf_setup()

int	set_cpufuncs		(void);
#define	ARCHITECTURE_NOT_PRESENT	1	/* known but not configured */
#define	ARCHITECTURE_NOT_SUPPORTED	2	/* not known */

void	cpufunc_nullop		(void);
u_int	cpu_ident		(void);
u_int	cpufunc_control		(u_int clear, u_int bic);
void	cpu_domains		(u_int domains);
u_int	cpu_faultstatus		(void);
u_int	cpu_faultaddress	(void);
u_int	cpu_pfr			(int);
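
/*
 * Illustration only: a minimal sketch of how a driver might apply the
 * cache rules above around a device-to-memory DMA transfer.  The
 * example_* names are hypothetical, the buffer is assumed to be
 * cache-line aligned and a multiple of arm_dcache_align in size, and
 * real code would use the busdma sync operations instead.
 */
static __inline void
example_dma_read_prepare(vm_offset_t buf, vm_size_t len)
{

	/*
	 * Push out and discard any cached lines before the device
	 * writes, so a later eviction cannot clobber the DMA data.
	 * Inner (L1) first, then outer (L2).
	 */
	cpu_dcache_wbinv_range(buf, len);
	cpu_l2cache_wbinv_range(buf, len);
}

static __inline void
example_dma_read_complete(vm_offset_t buf, vm_size_t len)
{

	/*
	 * Invalidate again once the device is done: the rules above
	 * allow dcache_inv_range to behave as write-back-and-invalidate,
	 * and lines may have been refilled speculatively meanwhile.
	 * Outer (L2) first this time, so L1 refills cannot pull in
	 * stale outer-cache data.
	 */
	cpu_l2cache_inv_range(buf, len);
	cpu_dcache_inv_range(buf, len);
}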

#if defined(CPU_FA526)
void	fa526_setup		(void);
void	fa526_setttb		(u_int ttb);
void	fa526_context_switch	(void);
void	fa526_cpu_sleep		(int);
void	fa526_tlb_flushID_SE	(u_int);

void	fa526_icache_sync_all	(void);
void	fa526_icache_sync_range(vm_offset_t start, vm_size_t end);
void	fa526_dcache_wbinv_all	(void);
void	fa526_dcache_wbinv_range(vm_offset_t start, vm_size_t end);
void	fa526_dcache_inv_range	(vm_offset_t start, vm_size_t end);
void	fa526_dcache_wb_range	(vm_offset_t start, vm_size_t end);
void	fa526_idcache_wbinv_all(void);
void	fa526_idcache_wbinv_range(vm_offset_t start, vm_size_t end);
#endif

#if defined(CPU_ARM9) || defined(CPU_ARM9E)
void	arm9_setttb		(u_int);
void	arm9_tlb_flushID_SE	(u_int va);
void	arm9_context_switch	(void);
#endif

#if defined(CPU_ARM9)
void	arm9_icache_sync_all	(void);
void	arm9_icache_sync_range	(vm_offset_t, vm_size_t);

void	arm9_dcache_wbinv_all	(void);
void	arm9_dcache_wbinv_range (vm_offset_t, vm_size_t);
void	arm9_dcache_inv_range	(vm_offset_t, vm_size_t);
void	arm9_dcache_wb_range	(vm_offset_t, vm_size_t);

void	arm9_idcache_wbinv_all	(void);
void	arm9_idcache_wbinv_range (vm_offset_t, vm_size_t);

void	arm9_setup		(void);

extern unsigned arm9_dcache_sets_max;
extern unsigned arm9_dcache_sets_inc;
extern unsigned arm9_dcache_index_max;
extern unsigned arm9_dcache_index_inc;
#endif

#if defined(CPU_ARM9E)
void	arm10_setup		(void);

u_int	sheeva_control_ext	(u_int, u_int);
void	sheeva_cpu_sleep	(int);
void	sheeva_setttb		(u_int);
void	sheeva_dcache_wbinv_range (vm_offset_t, vm_size_t);
void	sheeva_dcache_inv_range	(vm_offset_t, vm_size_t);
void	sheeva_dcache_wb_range	(vm_offset_t, vm_size_t);
void	sheeva_idcache_wbinv_range (vm_offset_t, vm_size_t);

void	sheeva_l2cache_wbinv_range (vm_offset_t, vm_size_t);
void	sheeva_l2cache_inv_range (vm_offset_t, vm_size_t);
void	sheeva_l2cache_wb_range	(vm_offset_t, vm_size_t);
void	sheeva_l2cache_wbinv_all (void);
#endif

#if defined(CPU_MV_PJ4B)
void	armv6_idcache_wbinv_all	(void);
#endif
#if defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
void	armv7_setttb		(u_int);
void	armv7_tlb_flushID	(void);
void	armv7_tlb_flushID_SE	(u_int);
void	armv7_icache_sync_all	(void);
void	armv7_icache_sync_range	(vm_offset_t, vm_size_t);
void	armv7_idcache_wbinv_range (vm_offset_t, vm_size_t);
void	armv7_idcache_inv_all	(void);
void	armv7_dcache_wbinv_all	(void);
void	armv7_idcache_wbinv_all	(void);
void	armv7_dcache_wbinv_range (vm_offset_t, vm_size_t);
void	armv7_dcache_inv_range	(vm_offset_t, vm_size_t);
void	armv7_dcache_wb_range	(vm_offset_t, vm_size_t);
void	armv7_cpu_sleep		(int);
void	armv7_setup		(void);
void	armv7_context_switch	(void);
void	armv7_drain_writebuf	(void);
void	armv7_sev		(void);
u_int	armv7_auxctrl		(u_int, u_int);

void	armadaxp_idcache_wbinv_all (void);

void	cortexa_setup		(void);
#endif
#if defined(CPU_MV_PJ4B)
void	pj4b_config		(void);
void	pj4bv7_setup		(void);
#endif
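
/*
 * Illustration only (hypothetical name): after storing instructions,
 * e.g. when planting a breakpoint or a trampoline, the new bytes must
 * reach the instruction stream before they may execute.  Per the
 * I-cache Sync rule above, the cpu_icache_sync_range() implementation
 * is expected to perform any D-cache write-back it needs.
 */
static __inline void
example_sync_new_code(vm_offset_t va, vm_size_t len)
{

	cpu_icache_sync_range(va, len);
}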

#if defined(CPU_ARM1176)
void	arm11_tlb_flushID	(void);
void	arm11_tlb_flushID_SE	(u_int);
void	arm11_tlb_flushD	(void);
void	arm11_tlb_flushD_SE	(u_int va);

void	arm11_context_switch	(void);

void	arm11_drain_writebuf	(void);

void	armv6_dcache_wbinv_range (vm_offset_t, vm_size_t);
void	armv6_dcache_inv_range	(vm_offset_t, vm_size_t);
void	armv6_dcache_wb_range	(vm_offset_t, vm_size_t);

void	armv6_idcache_inv_all	(void);

void	arm11x6_setttb		(u_int);
void	arm11x6_idcache_wbinv_all (void);
void	arm11x6_dcache_wbinv_all (void);
void	arm11x6_icache_sync_all	(void);
void	arm11x6_icache_sync_range (vm_offset_t, vm_size_t);
void	arm11x6_idcache_wbinv_range (vm_offset_t, vm_size_t);
void	arm11x6_setup		(void);
void	arm11x6_sleep		(int);	/* no ref. for errata */
#endif

#if defined(CPU_ARM9E)
void	armv5_ec_setttb(u_int);

void	armv5_ec_icache_sync_all(void);
void	armv5_ec_icache_sync_range(vm_offset_t, vm_size_t);

void	armv5_ec_dcache_wbinv_all(void);
void	armv5_ec_dcache_wbinv_range(vm_offset_t, vm_size_t);
void	armv5_ec_dcache_inv_range(vm_offset_t, vm_size_t);
void	armv5_ec_dcache_wb_range(vm_offset_t, vm_size_t);

void	armv5_ec_idcache_wbinv_all(void);
void	armv5_ec_idcache_wbinv_range(vm_offset_t, vm_size_t);
#endif

#if defined(CPU_ARM9) || defined(CPU_ARM9E) || \
  defined(CPU_XSCALE_80321) || \
  defined(CPU_FA526) || \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)

void	armv4_tlb_flushID	(void);
void	armv4_tlb_flushD	(void);
void	armv4_tlb_flushD_SE	(u_int va);

void	armv4_drain_writebuf	(void);
void	armv4_idcache_inv_all	(void);
#endif

#if defined(CPU_XSCALE_80321) || \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
void	xscale_cpwait		(void);

void	xscale_cpu_sleep	(int mode);

u_int	xscale_control		(u_int clear, u_int bic);

void	xscale_setttb		(u_int ttb);

void	xscale_tlb_flushID_SE	(u_int va);

void	xscale_cache_flushID	(void);
void	xscale_cache_flushI	(void);
void	xscale_cache_flushD	(void);
void	xscale_cache_flushD_SE	(u_int entry);

void	xscale_cache_cleanID	(void);
void	xscale_cache_cleanD	(void);
void	xscale_cache_cleanD_E	(u_int entry);

void	xscale_cache_clean_minidata (void);

void	xscale_cache_purgeID	(void);
void	xscale_cache_purgeID_E	(u_int entry);
void	xscale_cache_purgeD	(void);
void	xscale_cache_purgeD_E	(u_int entry);

void	xscale_cache_syncI	(void);
void	xscale_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
void	xscale_cache_cleanD_rng	(vm_offset_t start, vm_size_t end);
void	xscale_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
void	xscale_cache_purgeD_rng	(vm_offset_t start, vm_size_t end);
void	xscale_cache_syncI_rng	(vm_offset_t start, vm_size_t end);
void	xscale_cache_flushD_rng	(vm_offset_t start, vm_size_t end);

void	xscale_context_switch	(void);

void	xscale_setup		(void);
#endif	/* CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 ||
	   CPU_XSCALE_80219 || CPU_XSCALE_81342 */

#ifdef	CPU_XSCALE_81342

void	xscalec3_l2cache_purge	(void);
void	xscalec3_cache_purgeID	(void);
void	xscalec3_cache_purgeD	(void);
void	xscalec3_cache_cleanID	(void);
void	xscalec3_cache_cleanD	(void);
void	xscalec3_cache_syncI	(void);

void	xscalec3_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
void	xscalec3_cache_purgeD_rng (vm_offset_t start, vm_size_t end);
void	xscalec3_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
void	xscalec3_cache_cleanD_rng (vm_offset_t start, vm_size_t end);
void	xscalec3_cache_syncI_rng (vm_offset_t start, vm_size_t end);

void	xscalec3_l2cache_flush_rng (vm_offset_t, vm_size_t);
void	xscalec3_l2cache_clean_rng (vm_offset_t start, vm_size_t end);
void	xscalec3_l2cache_purge_rng (vm_offset_t start, vm_size_t end);

void	xscalec3_setttb		(u_int ttb);
void	xscalec3_context_switch	(void);

#endif /* CPU_XSCALE_81342 */

#define	setttb		cpu_setttb
#define	drain_writebuf	cpu_drain_writebuf

/*
 * Macros for manipulating CPU interrupts.
 */
#if __ARM_ARCH < 6
#define	__ARM_INTR_BITS		(PSR_I | PSR_F)
#else
#define	__ARM_INTR_BITS		(PSR_I | PSR_F | PSR_A)
#endif

static __inline uint32_t
__set_cpsr(uint32_t bic, uint32_t eor)
{
	uint32_t	tmp, ret;

	__asm __volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"bic     %1, %0, %2\n"	/* Clear bits */
		"eor     %1, %1, %3\n"	/* XOR bits */
		"msr     cpsr_xc, %1\n"	/* Set the CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (bic), "r" (eor) : "memory");

	return ret;
}

static __inline uint32_t
disable_interrupts(uint32_t mask)
{

	return (__set_cpsr(mask & __ARM_INTR_BITS, mask & __ARM_INTR_BITS));
}

static __inline uint32_t
enable_interrupts(uint32_t mask)
{

	return (__set_cpsr(mask & __ARM_INTR_BITS, 0));
}

static __inline uint32_t
restore_interrupts(uint32_t old_cpsr)
{

	return (__set_cpsr(__ARM_INTR_BITS, old_cpsr & __ARM_INTR_BITS));
}

static __inline register_t
intr_disable(void)
{

	return (disable_interrupts(PSR_I | PSR_F));
}

static __inline void
intr_restore(register_t s)
{

	restore_interrupts(s);
}
#undef __ARM_INTR_BITS
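
/*
 * Illustration only (hypothetical name): the usual pattern for a short
 * critical section is to save and mask the interrupt state, do the
 * work, then restore whatever was enabled before.
 */
static __inline void
example_critical_section(void)
{
	register_t s;

	s = intr_disable();		/* mask IRQ and FIQ, keep old CPSR */
	/* ... touch state shared with an interrupt handler ... */
	intr_restore(s);		/* restore the previous I/F bits only */
}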

/*
 * Functions to manipulate cpu r13
 * (in arm/arm32/setstack.S)
 */

void	set_stackptr	(u_int mode, u_int address);
u_int	get_stackptr	(u_int mode);

/*
 * Miscellany
 */

int	get_pc_str_offset	(void);

/*
 * CPU functions from locore.S
 */

void	cpu_reset	(void) __attribute__((__noreturn__));

/*
 * Cache info variables.
 */

/* PRIMARY CACHE VARIABLES */
extern int	arm_picache_size;
extern int	arm_picache_line_size;
extern int	arm_picache_ways;

extern int	arm_pdcache_size;	/* and unified */
extern int	arm_pdcache_line_size;
extern int	arm_pdcache_ways;

extern int	arm_pcache_type;
extern int	arm_pcache_unified;

extern int	arm_dcache_align;
extern int	arm_dcache_align_mask;

extern u_int	arm_cache_level;
extern u_int	arm_cache_loc;
extern u_int	arm_cache_type[14];

#endif	/* _KERNEL */
#endif	/* _MACHINE_CPUFUNC_H_ */

/* End of cpufunc.h */