/*
 *  arch/arm/include/asm/tlbflush.h
 *
 *  Copyright (C) 1999-2003 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H

#ifdef CONFIG_MMU

#include <asm/glue.h>

#define TLB_V3_PAGE	(1 << 0)
#define TLB_V4_U_PAGE	(1 << 1)
#define TLB_V4_D_PAGE	(1 << 2)
#define TLB_V4_I_PAGE	(1 << 3)
#define TLB_V6_U_PAGE	(1 << 4)
#define TLB_V6_D_PAGE	(1 << 5)
#define TLB_V6_I_PAGE	(1 << 6)

#define TLB_V3_FULL	(1 << 8)
#define TLB_V4_U_FULL	(1 << 9)
#define TLB_V4_D_FULL	(1 << 10)
#define TLB_V4_I_FULL	(1 << 11)
#define TLB_V6_U_FULL	(1 << 12)
#define TLB_V6_D_FULL	(1 << 13)
#define TLB_V6_I_FULL	(1 << 14)

#define TLB_V6_U_ASID	(1 << 16)
#define TLB_V6_D_ASID	(1 << 17)
#define TLB_V6_I_ASID	(1 << 18)

/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
#define TLB_V7_UIS_PAGE	(1 << 19)
#define TLB_V7_UIS_FULL	(1 << 20)
#define TLB_V7_UIS_ASID	(1 << 21)

#define TLB_BARRIER	(1 << 28)
#define TLB_L2CLEAN_FR	(1 << 29)		/* Feroceon */
#define TLB_DCLEAN	(1 << 30)
#define TLB_WB		(1 << 31)

/*
 *	MMU TLB Model
 *	=============
 *
 *	We have the following to choose from:
 *	  v3    - ARMv3
 *	  v4    - ARMv4 without write buffer
 *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
 *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
 *	  fr    - Feroceon (v4wbi with non-outer-cacheable page table walks)
 *	  fa    - Faraday (v4 with write buffer with UTLB)
 *	  v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
 *	  v7wbi - identical to v6wbi
 */
#undef _TLB
#undef MULTI_TLB

#ifdef CONFIG_SMP_ON_UP
#define MULTI_TLB 1
#endif
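/*
 * Illustrative sketch (not authoritative): each per-model block below
 * follows the same pattern.  The first configured model claims _TLB;
 * any further model finds _TLB already defined and sets MULTI_TLB
 * instead.  For example:
 *
 *	CONFIG_CPU_TLB_V7 alone                -> _TLB == v7wbi (direct calls)
 *	CONFIG_CPU_TLB_V6 + CONFIG_CPU_TLB_V7  -> MULTI_TLB (calls via cpu_tlb)
 */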
#define v3_tlb_flags	(TLB_V3_FULL | TLB_V3_PAGE)

#ifdef CONFIG_CPU_TLB_V3
# define v3_possible_flags	v3_tlb_flags
# define v3_always_flags	v3_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v3
# endif
#else
# define v3_possible_flags	0
# define v3_always_flags	(-1UL)
#endif

#define v4_tlb_flags	(TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_V4WT
# define v4_possible_flags	v4_tlb_flags
# define v4_always_flags	v4_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4
# endif
#else
# define v4_possible_flags	0
# define v4_always_flags	(-1UL)
#endif

#define fa_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
			 TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_FA
# define fa_possible_flags	fa_tlb_flags
# define fa_always_flags	fa_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB fa
# endif
#else
# define fa_possible_flags	0
# define fa_always_flags	(-1UL)
#endif

#define v4wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WBI
# define v4wbi_possible_flags	v4wbi_tlb_flags
# define v4wbi_always_flags	v4wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags	0
# define v4wbi_always_flags	(-1UL)
#endif

#define fr_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_FEROCEON
# define fr_possible_flags	fr_tlb_flags
# define fr_always_flags	fr_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define fr_possible_flags	0
# define fr_always_flags	(-1UL)
#endif

#define v4wb_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WB
# define v4wb_possible_flags	v4wb_tlb_flags
# define v4wb_always_flags	v4wb_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wb
# endif
#else
# define v4wb_possible_flags	0
# define v4wb_always_flags	(-1UL)
#endif

#define v6wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
			 TLB_V6_I_ASID | TLB_V6_D_ASID)

#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags	v6wbi_tlb_flags
# define v6wbi_always_flags	v6wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags	0
# define v6wbi_always_flags	(-1UL)
#endif

#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
#define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
				 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)

#ifdef CONFIG_CPU_TLB_V7

# ifdef CONFIG_SMP_ON_UP
#  define v7wbi_possible_flags	(v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
#  define v7wbi_always_flags	(v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
# elif defined(CONFIG_SMP)
#  define v7wbi_possible_flags	v7wbi_tlb_flags_smp
#  define v7wbi_always_flags	v7wbi_tlb_flags_smp
# else
#  define v7wbi_possible_flags	v7wbi_tlb_flags_up
#  define v7wbi_always_flags	v7wbi_tlb_flags_up
# endif
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v7wbi
# endif
#else
# define v7wbi_possible_flags	0
# define v7wbi_always_flags	(-1UL)
#endif

#ifndef _TLB
#error Unknown TLB model
#endif

#ifndef __ASSEMBLY__

#include <linux/sched.h>

struct cpu_tlb_fns {
	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
	void (*flush_kern_range)(unsigned long, unsigned long);
	unsigned long tlb_flags;
};

/*
 * Select the calling method
 */
#ifdef MULTI_TLB

#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range	cpu_tlb.flush_kern_range

#else

#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range	__glue(_TLB,_flush_kern_tlb_range)

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

#endif

extern struct cpu_tlb_fns cpu_tlb;

#define __cpu_tlb_flags		cpu_tlb.tlb_flags
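/*
 * A sketch of how the dispatch above resolves, assuming a kernel
 * configured for exactly one TLB model (here v7):
 *
 *	local_flush_tlb_kernel_range(s, e)
 *	  -> __cpu_flush_kern_tlb_range(s, e)
 *	  -> __glue(v7wbi, _flush_kern_tlb_range)(s, e)
 *	  -> v7wbi_flush_kern_tlb_range(s, e)	(arch/arm/mm/tlb-v7.S)
 *
 * On MULTI_TLB builds the same call instead goes indirectly through
 * cpu_tlb.flush_kern_range, which the processor setup code points at
 * the implementation matching the CPU detected at boot.
 */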
/*
 *	TLB Management
 *	==============
 *
 *	The arch/arm/mm/tlb-*.S files implement these methods.
 *
 *	The TLB specific code is expected to perform whatever tests it
 *	needs to determine if it should invalidate the TLB for each
 *	call.  Start addresses are inclusive and end addresses are
 *	exclusive; it is safe to round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address
 *		space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified
 *		address space.
 *		- vma	- vm_area_struct describing address space
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,vaddr)
 *
 *		Invalidate the specified page in the specified address range.
 *		- vma	- vm_area_struct describing address range
 *		- vaddr	- virtual address (may not be aligned)
 *
 *	flush_tlb_kernel_page(kaddr)
 *
 *		Invalidate the TLB entry for the specified page.  The address
 *		will be in the kernel's virtual memory space.  Current uses
 *		only require the D-TLB to be invalidated.
 *		- kaddr	- Kernel virtual memory address
 */

/*
 * We optimise the code below by:
 *  - building a set of TLB flags that might be set in __cpu_tlb_flags
 *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
 *  - if we're going to need __cpu_tlb_flags, access it once and only once
 *
 * This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal as the compiler constraints allow for the
 * multi-CPU case.  We could do better for the multi-CPU case if the
 * compiler implemented the "%?" method, but this has been discontinued
 * due to too many people getting it wrong.
 */
#define possible_tlb_flags	(v3_possible_flags | \
				 v4_possible_flags | \
				 v4wbi_possible_flags | \
				 fr_possible_flags | \
				 v4wb_possible_flags | \
				 fa_possible_flags | \
				 v6wbi_possible_flags | \
				 v7wbi_possible_flags)

#define always_tlb_flags	(v3_always_flags & \
				 v4_always_flags & \
				 v4wbi_always_flags & \
				 fr_always_flags & \
				 v4wb_always_flags & \
				 fa_always_flags & \
				 v6wbi_always_flags & \
				 v7wbi_always_flags)

#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
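/*
 * Worked example (illustrative): on a v7-only SMP build,
 * possible_tlb_flags == always_tlb_flags == v7wbi_tlb_flags_smp.
 * tlb_flag(TLB_V7_UIS_FULL) then folds to a non-zero constant and the
 * test vanishes, while tlb_flag(TLB_V4_U_FULL) folds to zero and its
 * branch (and MCR) is discarded; the read of __cpu_tlb_flags is only
 * emitted when some flag is possible but not always set.
 */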
static inline void local_flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb();

	if (tlb_flag(TLB_V3_FULL))
		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
		asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
		asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V7_UIS_FULL))
		asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");

	if (tlb_flag(TLB_BARRIER)) {
		dsb();
		isb();
	}
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	const int zero = 0;
	const int asid = ASID(mm);
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb();

	if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
		if (tlb_flag(TLB_V3_FULL))
			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_U_FULL))
			asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_D_FULL))
			asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}
	put_cpu();

	if (tlb_flag(TLB_V6_U_ASID))
		asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V6_D_ASID))
		asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V6_I_ASID))
		asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V7_UIS_ASID))
#ifdef CONFIG_ARM_ERRATA_720789
		asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
#else
		asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc");
#endif

	if (tlb_flag(TLB_BARRIER))
		dsb();
}

static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (tlb_flag(TLB_WB))
		dsb();

	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		if (tlb_flag(TLB_V3_PAGE))
			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_U_PAGE))
			asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_D_PAGE))
			asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_I_PAGE))
			asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V7_UIS_PAGE))
#ifdef CONFIG_ARM_ERRATA_720789
		asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc");
#else
		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc");
#endif

	if (tlb_flag(TLB_BARRIER))
		dsb();
}

static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	kaddr &= PAGE_MASK;

	if (tlb_flag(TLB_WB))
		dsb();

	if (tlb_flag(TLB_V3_PAGE))
		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V7_UIS_PAGE))
		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc");

	if (tlb_flag(TLB_BARRIER)) {
		dsb();
		isb();
	}
}
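/*
 * Usage sketch (illustrative, mirroring the pattern used by the ARM
 * highmem code): after rewriting a kernel pte, the stale TLB entry
 * must be dropped before the new mapping is used:
 *
 *	set_pte_ext(ptep, mk_pte(page, kmap_prot), 0);
 *	local_flush_tlb_kernel_page(vaddr);
 *
 * where ptep maps the fixed kernel address vaddr.
 */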
/*
 *	flush_pmd_entry
 *
 *	Flush a PMD entry (word aligned, or double-word aligned) to
 *	RAM if the TLB for the CPU we are running on requires this.
 *	This is typically used when we are creating PMD entries.
 *
 *	clean_pmd_entry
 *
 *	Clean (but don't drain the write buffer) if the CPU requires
 *	these operations.  This is typically used when we are removing
 *	PMD entries.
 */
static inline void flush_pmd_entry(void *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_L2CLEAN_FR))
		asm("mcr p15, 1, %0, c15, c9, 1  @ L2 flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_WB))
		dsb();
}

static inline void clean_pmd_entry(void *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_L2CLEAN_FR))
		asm("mcr p15, 1, %0, c15, c9, 1  @ L2 flush_pmd"
			: : "r" (pmd) : "cc");
}

#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags

/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
#define local_flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)

#ifndef CONFIG_SMP
#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_kernel_page	local_flush_tlb_kernel_page
#define flush_tlb_range		local_flush_tlb_range
#define flush_tlb_kernel_range	local_flush_tlb_kernel_range
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#endif

/*
 * If PG_dcache_clean is not set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page.  On ARMv6 and later, the cache coherency is handled via
 * the set_pte_at() function.
 */
#if __LINUX_ARM_ARCH__ < 6
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep);
#else
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
}
#endif

#endif

#endif /* CONFIG_MMU */

#endif