// SPDX-License-Identifier: GPL-2.0-only
/*
 * TLB Management (flush/create/diagnostics) for ARC700
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: Aug 2011
 *  -Reintroduce duplicate PD fixup - some customer chips still have the issue
 *
 * vineetg: May 2011
 *  -No need to flush_cache_page( ) for each call to update_mmu_cache()
 *   some of the LMBench tests improved amazingly
 *      = page-fault thrice as fast (75 usec to 28 usec)
 *      = mmap twice as fast (9.6 msec to 4.6 msec),
 *      = fork (5.3 msec to 3.7 msec)
 *
 * vineetg: April 2011 :
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *   helps avoid a shift when preparing PD0 from PTE
 *
 * vineetg: April 2011 : Preparing for MMU V3
 *  -MMU v2/v3 BCRs decoded differently
 *  -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
 *  -tlb_entry_erase( ) can be void
 *  -local_flush_tlb_range( ):
 *      = need not "ceil" @end
 *      = walks MMU only if range spans < 32 entries, as opposed to 256
 *
 * Vineetg: Sept 10th 2008
 *  -Changes related to MMU v2 (Rel 4.8)
 *
 * Vineetg: Aug 29th 2008
 *  -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
 *   flush the Micro-TLBs. If the TLB Index Reg is invalid prior to the
 *   TLBIVUTLB cmd, it fails. Thus we need to load it with ANY valid value
 *   before invoking the TLBIVUTLB cmd
 *
 * Vineetg: Aug 21st 2008:
 *  -Reduced the duration of IRQ lockouts in TLB Flush routines
 *  -Multiple copies of TLB erase code separated into a "single" function
 *  -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
 *   in interrupt-safe region.
 *
 * Vineetg: April 23rd Bug #93131
 *    Problem: tlb_flush_kernel_range() doesn't do anything if the range to
 *             flush is more than the size of TLB itself.
 *
 * Rahul Trivedi : Codito Technologies 2004
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* Need for ARC MMU v2
 *
 * ARC700 MMU-v1 had a Joint-TLB for Code and Data and it is 2 way set-assoc.
 * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
 * map into the same set, there would be contention for the 2 ways, causing
 * severe Thrashing.
 *
 * Although the J-TLB is 2 way set assoc, ARC700 caches J-TLB entries into
 * uTLBs which have much higher associativity: u-D-TLB is 8 ways, u-I-TLB is
 * 4 ways. Given this, the thrashing problem should never happen, because once
 * the 3 J-TLB entries are created (even though the 3rd will knock out one of
 * the prev two), the u-D-TLB and u-I-TLB will have what is required to
 * accomplish the memcpy.
 *
 * Yet we still see the Thrashing because a J-TLB Write causes a flush of the
 * u-TLBs. This is a simple design to keep them in sync. So what do we do?
 * The solution which James came up with was pretty neat. It utilised the
 * associativity of the uTLBs by not invalidating always, but only when
 * absolutely necessary.
 *
 * - Existing TLB commands work as before
 * - New command (TLBWriteNI) for TLB write without clearing uTLBs
 * - New command (TLBIVUTLB) to invalidate uTLBs.
 *
 * The uTLBs need only be invalidated when pages are being removed from the
 * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
 * as a result of a miss, the removed entry is still allowed to exist in the
 * uTLBs as it is still valid and present in the OS page table. This allows
 * the full associativity of the uTLBs to hide the limited associativity of
 * the main TLB.
 *
 * During a miss handler, the new "TLBWriteNI" command is used to load
 * entries without clearing the uTLBs.
 *
 * When the OS page table is updated, TLB entries that may be associated with a
 * removed page are removed (flushed) from the TLB using TLBWrite. In this
 * circumstance, the uTLBs must also be cleared. This is done by using the
 * existing TLBWrite command. An explicit IVUTLB is also required for those
 * corner cases when TLBWrite was not executed at all because the
 * corresponding J-TLB entry got evicted/replaced.
 */
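
/*
 * Illustration (added for clarity; rough numbers, assuming 8K pages and a
 * 256-entry, 2-way J-TLB i.e. 128 sets, with the set picked from vaddr bits
 * just above the page offset, roughly (vaddr >> 13) & 0x7f): for a memcpy()
 * where src = 0x20000000, dst = 0x40000000 and the code page sits at
 * 0x60000000, all three map to set 0 and keep evicting each other from the
 * two ways - exactly the thrashing scenario the uTLB commands above mitigate.
 */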

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

static int __read_mostly pae_exists;

/*
 * Utility Routine to erase a J-TLB entry
 * Caller needs to setup Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

static void utlb_invalidate(void)
{
#if (CONFIG_ARC_MMU_VER >= 2)

#if (CONFIG_ARC_MMU_VER == 2)
	/* MMU v2 introduced the uTLB Flush command.
	 * There was however an obscure hardware bug, where a uTLB flush would
	 * fail when a prior (totally unrelated) probe for a J-TLB entry had
	 * returned lkup err - because the entry didn't exist in the MMU.
	 * The workaround was to set the Index reg to some valid value prior
	 * to the flush. This was fixed in MMU v3.
	 */
	unsigned int idx;

	/* make sure INDEX Reg is valid */
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/* If not, write some dummy val */
	if (unlikely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBINDEX, 0xa);
#endif

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
#endif

}

#if (CONFIG_ARC_MMU_VER < 4)

static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry error */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
					 vaddr_n_asid);
	}
}

static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
{
	unsigned int idx;

	/*
	 * First verify if an entry for this vaddr+ASID already exists
	 * This also sets up PD0 (vaddr, ASID..) for the final commit
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If not already present, get a free slot from MMU.
	 * Otherwise, Probe would have located the entry and set INDEX Reg
	 * to the existing location. This will cause the Write CMD to
	 * overwrite the existing entry with the new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#else	/* CONFIG_ARC_MMU_VER >= 4 */

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}

static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
{
	write_aux_reg(ARC_REG_TLBPD0, pd0);
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}

#endif
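
/*
 * Note (added for clarity, not in the original): the pd0/pd1 values handed
 * to these helpers are the two hardware descriptor halves that create_tlb()
 * below assembles from a Linux PTE - roughly, pd0 is the page-aligned vaddr
 * ORed with the 8-bit hardware ASID (plus the PTE_BITS_IN_PD0 flags), while
 * pd1 carries the physical page info plus the Kr/Kw/Kx/Ur/Uw/Ux permission
 * bits.
 */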

/*
 * Unconditionally (without lookup) erase the entire MMU contents
 */

noinline void local_flush_tlb_all(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned long flags;
	unsigned int entry;
	int num_tlb = mmu->sets * mmu->ways;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		const int stlb_idx = 0x800;

		/* Blank sTLB entry */
		write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);

		for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
			write_aux_reg(ARC_REG_TLBINDEX, entry);
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64:
	 * flush_mm is called during fork, exit, munmap etc, multiple times as
	 * well. Only for fork( ) do we need to move the parent to a new MMU
	 * ctxt; all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->vm_mm != tsk->mm,
	 *   causing h/w - s/w ASID to get out of sync)
	 * - Also the new implementation of get_new_mmu_context() allocates a
	 *   new ASID only if it is not allocated already - so unallocate first
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}
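
/*
 * Added note (not in the original): the "move to next ASID" trick works
 * because every user TLB entry is tagged with the mm's 8-bit hardware ASID;
 * once the mm is handed a fresh ASID (see get_new_mmu_context() in
 * asm/mmu_context.h), none of the stale entries can match anymore and they
 * can simply be left to age out of the TLB instead of being shot down one
 * by one.
 */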

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *   without doing any explicit Shootdown
 *  -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 *
	 * The calc above is rough: it doesn't account for unaligned parts,
	 * since this is heuristic based anyway
	 */
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	local_irq_restore(flags);
}

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except the TLB entry doesn't take an ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	local_irq_restore(flags);
}
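
/*
 * Added note (rough numbers, assuming the typical 8K ARC page): the "32
 * entries" cut-off in the two range-flush routines above means any range of
 * 32 * 8K = 256K or more skips the per-page probe/erase loop entirely - user
 * ranges roll the mm over to a new ASID, kernel ranges fall back to
 * local_flush_tlb_all().
 */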

/*
 * Delete the TLB entry in MMU for a given user page (virtual address)
 * NOTE: one TLB entry contains the translation for a single PAGE
 */

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_SMP

struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ipi_flush_pmd_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
#endif

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
}
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta = {
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif
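
/*
 * Added note (not in the original): on SMP the generic flush_tlb_*() entry
 * points above bundle their arguments into a struct tlb_args and IPI the
 * CPUs in mm_cpumask(mm) (or all CPUs for the kernel/all variants), so each
 * CPU runs the corresponding local_flush_tlb_*() routine on its own MMU.
 */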

/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0;
	pte_t pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->vm_mm, since
	 * -the ASID for the TLB entry is fetched from the MMU ASID reg
	 *  (valid for curr)
	 * -it completes the lazy write to the SASID reg (again valid for
	 *  curr tsk)
	 *
	 * Removing the assumption involves
	 * -Using vma->vm_mm->context{ASID,SASID}, as opposed to the MMU reg.
	 * -Fixing the TLB paranoid debug code to not trigger false negatives.
	 * -More importantly it makes this handler inconsistent with the
	 *  fast-path TLB Refill handler which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->vm_mm and we land
	 * here:
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here VM wants to pre-install a TLB entry for the user stack
	 *     while current->mm still points to the pre-execve mm (hence the
	 *     condition). However the stack vaddr is soon relocated
	 *     (randomization) and move_page_tables() tries to undo that TLB
	 *     entry. Thus not creating the TLB entry is not any worse.
	 *
	 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
	 *     breakpoint in the debugged task. Not creating a TLB now is not
	 *     performance critical.
	 *
	 * Both the cases above are not worth the code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);

	vaddr &= PAGE_MASK;

	/* update this PTE's credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * -Kernel only entries have Kr Kw Kx 0 0 0
	 * -User entries have mirrored K and U bits
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */

	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}
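
/*
 * Added illustration (mirrors the conversion in create_tlb() above, no new
 * behaviour): for a user page with read/write/execute all set, rwx = 0b111
 * and the PD1 permission field becomes Kr Kw Kx Ur Uw Ux = 0b111111; for a
 * _PAGE_GLOBAL (kernel) page the same PTE bits become 0b111000, i.e.
 * kernel-only access.
 */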

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *   flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK (so the physical page is
 * in sync) as well as INV (so any non-congruent aliases don't remain)
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *  (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	     addr_not_cache_congruent(paddr, vaddr)) {

		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines (K-mapping) */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines (U-mapping) */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * MMUv4 in HS38x cores supports Super Pages which are the basis for Linux
 * THP support.
 *
 * Normal and Super pages can co-exist (of course not overlap) in the TLB,
 * with a new bit "SZ" in the TLB page descriptor to distinguish between them.
 * Super Page size is configurable in hardware (4K to 16M), but fixed once
 * the RTL is built.
 *
 * The exact THP size a Linux configuration will support is a function of:
 *  - MMU page size (typical 8K, RTL fixed)
 *  - software page walker address split between PGD:PTE:PFN (typical
 *    11:8:13, but can be changed with 1 line)
 * So for the above default, the THP size supported is 8K * (2^8) = 2M
 *
 * Default Page Walker is 2 levels, PGD:PTE:PFN, which in the THP regime
 * reduces to 1 level (as PTE is folded into PGD and canonically referred
 * to as PMD).
 * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
 */

void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	pte_t pte = __pte(pmd_val(*pmd));
	update_mmu_cache(vma, addr, &pte);
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}

	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}

void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			       unsigned long end)
{
	unsigned int cpu;
	unsigned long flags;

	local_irq_save(flags);

	cpu = smp_processor_id();

	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
		unsigned int asid = hw_pid(vma->vm_mm, cpu);

		/* No need to loop here: this will always be for 1 Huge Page */
		tlb_entry_erase(start | _PAGE_HW_SZ | asid);
	}

	local_irq_restore(flags);
}
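
/*
 * Added note (not in the original): _PAGE_HW_SZ is the "SZ" page-descriptor
 * bit described in the comment above; it is set in PD0 when probing/erasing
 * a Super Page entry here, and likewise when blanking the sTLB slots in
 * local_flush_tlb_all(), so huge-page entries are matched separately from
 * normal ones.
 */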

#endif

/*
 * Read the MMU Build Configuration Register, decode it and save into
 * the cpuinfo structure for later use.
 * No validation is done here, simply read/convert the BCR
 */
void read_decode_mmu_bcr(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int tmp;
	struct bcr_mmu_1_2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
#else
		unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
#endif
	} *mmu2;

	struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
			     u_itlb:4, u_dtlb:4;
#else
		unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
			     ways:4, ver:8;
#endif
	} *mmu3;

	struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
			     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
	/*	     DTLB      ITLB      JES        JE         JA	*/
		unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
			     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif
	} *mmu4;

	tmp = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (tmp >> 24);

	if (is_isa_arcompact()) {
		if (mmu->ver <= 2) {
			mmu2 = (struct bcr_mmu_1_2 *)&tmp;
			mmu->pg_sz_k = TO_KB(0x2000);
			mmu->sets = 1 << mmu2->sets;
			mmu->ways = 1 << mmu2->ways;
			mmu->u_dtlb = mmu2->u_dtlb;
			mmu->u_itlb = mmu2->u_itlb;
		} else {
			mmu3 = (struct bcr_mmu_3 *)&tmp;
			mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
			mmu->sets = 1 << mmu3->sets;
			mmu->ways = 1 << mmu3->ways;
			mmu->u_dtlb = mmu3->u_dtlb;
			mmu->u_itlb = mmu3->u_itlb;
			mmu->sasid = mmu3->sasid;
		}
	} else {
		mmu4 = (struct bcr_mmu_4 *)&tmp;
		mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
		mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
		mmu->sets = 64 << mmu4->n_entry;
		mmu->ways = mmu4->n_ways * 2;
		mmu->u_dtlb = mmu4->u_dtlb * 4;
		mmu->u_itlb = mmu4->u_itlb * 4;
		mmu->sasid = mmu4->sasid;
		pae_exists = mmu->pae = mmu4->pae;
	}
}
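
/*
 * Added example (illustrative field values only): an ARCv2 BCR with
 * n_entry = 2, n_ways = 2, sz0 = 4 and sz1 = 12 decodes above to a
 * 64 << 2 = 256 set, 2 * 2 = 4 way JTLB (1024 entries), 1 << (4 - 1) = 8K
 * normal pages and 1 << (12 - 11) = 2M super pages.
 */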

char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
	char super_pg[64] = "";

	if (p_mmu->s_pg_sz_m)
		scnprintf(super_pg, 64, "%dM Super Page %s",
			  p_mmu->s_pg_sz_m,
			  IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));

	n += scnprintf(buf + n, len - n,
		      "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
		       p_mmu->ver, p_mmu->pg_sz_k, super_pg,
		       p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
		       p_mmu->u_dtlb, p_mmu->u_itlb,
		       IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));

	return buf;
}

int pae40_exist_but_not_enab(void)
{
	return pae_exists && !is_pae40_enabled();
}

void arc_mmu_init(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	char str[256];
	int compat = 0;

	pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));

	/*
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));

	/*
	 * stack top size sanity check,
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));

	/*
	 * Ensure that MMU features assumed by the kernel exist in hardware.
	 * For older ARC700 cpus, it has to be an exact match, since the MMU
	 * revisions were not backwards compatible (the MMUv3 TLB layout
	 * changed, so even if a kernel built for v2 didn't use any new cmds
	 * of v3, it would still not work).
	 * For HS cpus, MMUv4 was the baseline and v5 is backwards compatible
	 * (will run older software).
	 */
	if (is_isa_arcompact() && mmu->ver == CONFIG_ARC_MMU_VER)
		compat = 1;
	else if (is_isa_arcv2() && mmu->ver >= CONFIG_ARC_MMU_VER)
		compat = 1;

	if (!compat) {
		panic("MMU ver %d doesn't match kernel built for %d...\n",
		      mmu->ver, CONFIG_ARC_MMU_VER);
	}

	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
		panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
		      (unsigned long)TO_MB(HPAGE_PMD_SIZE));

	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
		panic("Hardware doesn't support PAE40\n");

	/* Enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE);

	/* In smp we use this reg for interrupt 1 scratch */
#ifdef ARC_USE_SCRATCH_REG
	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif

	if (pae40_exist_but_not_enab())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 *		~		    ~	~	  ~
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
 * For normal operations we don't (must not) care how the above works since
 * the MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking the WAYS of a SET, we need to know this
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
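
/*
 * Added example (follows directly from the macro and the table above): on a
 * 4-way MMU, set 1 / way 2 is linear index 1 * 4 + 2 = 6, matching the "6"
 * in row [set1] of the left-hand table.
 */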

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *  time of a lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by deleting
 *  the duplicate one.
 * -Knob to be verbose about it (TODO: hook it up to debugfs)
 */
volatile int dup_pd_silent; /* Be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned long flags;
	int set, n_ways = mmu->ways;

	n_ways = min(n_ways, 4);
	BUG_ON(mmu->ways > 4);

	local_irq_save(flags);

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		int is_valid, way;
		unsigned int pd0[4];

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < n_ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
					  SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			is_valid |= pd0[way] & _PAGE_PRESENT;
			pd0[way] &= PAGE_MASK;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < n_ways - 1; way++) {

			int n;

			if (!pd0[way])
				continue;

			for (n = way + 1; n < n_ways; n++) {
				if (pd0[way] != pd0[n])
					continue;

				if (!dup_pd_silent)
					pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
						pd0[way], set, way, n);

				/*
				 * clear entry @way and not @n.
				 * This is critical to our optimised loop
				 */
				pd0[way] = 0;
				write_aux_reg(ARC_REG_TLBINDEX,
						SET_WAY_TO_IDX(mmu, set, way));
				__tlb_entry_erase();
			}
		}
	}

	local_irq_restore(flags);
}

/***********************************************************************
 * Diagnostic Routines
 *  -Called from Low Level TLB Handlers if things don't look good
 **********************************************************************/

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

/*
 * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDs
 * don't match
 */
void print_asid_mismatch(int mm_asid, int mmu_asid, int is_fast_path)
{
	pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
	       is_fast_path ? "Fast" : "Slow", mm_asid, mmu_asid);

	__asm__ __volatile__("flag 1");
}

void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
{
	unsigned int mmu_asid;

	mmu_asid = read_aux_reg(ARC_REG_PID) & 0xff;

	/*
	 * At the time of a TLB miss/installation
	 *   - HW version needs to match SW version
	 *   - SW needs to have a valid ASID
	 */
	if (addr < 0x70000000 &&
	    ((mm_asid == MM_CTXT_NO_ASID) ||
	      (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
		print_asid_mismatch(mm_asid, mmu_asid, 0);
}
#endif