/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */

/*
 * Special variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
				   int wait)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, wait);
#endif
	func(info);
	preempt_enable();
}
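
/*
 * cpu_has_safe_index_cacheops (below) is checked by the DMA cache
 * maintenance routines: when it is 0, large ranges are handled with
 * address-range ("hit") cacheops rather than an index-based whole-cache
 * blast.  On CONFIG_MIPS_CMP systems the index variants are treated as
 * unsafe, presumably because an indexed cacheop only acts on the local
 * core's caches.
 */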

#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static void __cpuinit r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (* r4k_blast_dcache)(void);

static void __cpuinit r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
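
/*
 * As in blast_r4600_v1_icache32() above, the page-indexed blast below is
 * run with interrupts disabled on R4600 V1.x parts
 * (R4600_V1_INDEX_ICACHEOP_WAR).
 */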

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (* r4k_blast_icache)(void);

static void __cpuinit r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void __cpuinit r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
}

static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}

static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}

static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
	 * only flush the primary caches but R10000 and R12000 behave sanely ...
	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
	 * caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the owner has no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
				page_mapped(page) && !Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page, KM_USER0);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page(addr);
	}

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr, KM_USER0);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
				1);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static inline void local_r4k_flush_icache_range_ipi(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	local_r4k_flush_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
	instruction_hazard();
}

#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}
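
/*
 * Invalidate, without writeback, the caches covering a buffer that a
 * device is about to write (DMA from device); any data the CPU still
 * holds for that range is simply discarded.
 */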

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_inv_scache_range(addr, addr + size);
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_inv_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __cpuinit probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;
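
	/*
	 * For the older cores handled explicitly below the primary cache
	 * geometry is encoded in c0_config: the IC/DC fields give
	 * log2(size) - 12 (or - 10 on the VR41xx parts) and the IB/DB bits
	 * select a 16 or 32 byte line, which is what the
	 * "1 << (12 + ...)" and "16 << ..." expressions decode.
	 */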

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * We seem to be a MIPS32 or MIPS64 CPU, so let's probe
		 * the I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
	 * 2-way virtually indexed, so they would normally suffer from aliases,
	 * but magic in the hardware deals with that for us so we don't need
	 * to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
	case CPU_AU1210:
	case CPU_AU1250:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

#ifdef CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has 4 way icache, but when using indexed cache op,
	 * one op will act on all 4 ways
	 */
	c->icache.ways = 1;
#endif

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __cpuinit probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);
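
	/*
	 * The fill loop above left a valid tag in every line it touched at
	 * power-of-two offsets from "begin", and the Index_Store_Tag ops
	 * just planted a zero tag in the line that "begin" indexes to.  The
	 * first power-of-two offset whose tag now reads back as zero indexes
	 * to that same line, i.e. the cache index has wrapped around, so
	 * that offset is the S-cache size.
	 */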

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;	/* does not matter */

	return 1;
}

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __cpuinit setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif

	default:
		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
		    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0) on the
	 * early revisions of Alchemy SOCs.  It disables the bus transaction
	 * overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata documentation is actually silent about this bit,
	 * so we set it just in case for those revisions that require it to
	 * be set according to arch/mips/au1000/common/cputable.c
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

/* CP0 hazard avoidance. */
#define NXP_BARRIER()							\
	 __asm__ __volatile__(						\
	".set noreorder\n\t"						\
	"nop; nop; nop; nop; nop; nop;\n\t"				\
	".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}

static int __cpuinitdata cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 1;
}

__setup("cca=", cca_setup);

static void __cpuinit coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit and some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one...
	 */
	case CPU_AU1000: /* rev. DA, HA, HB */
	case CPU_AU1100: /* rev. AB, BA, BC ?? */
	case CPU_AU1500: /* rev. AB */
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}

#if defined(CONFIG_DMA_NONCOHERENT)

static int __cpuinitdata coherentio;

static int __init setcoherentio(char *str)
{
	coherentio = 1;

	return 1;
}

__setup("coherentio", setcoherentio);
#endif

void __cpuinit r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;
	struct cpuinfo_mips *c = &current_cpu_data;

	switch (c->cputype) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;

	__flush_cache_vmap	= r4k__flush_cache_vmap;
	__flush_cache_vunmap	= r4k__flush_cache_vunmap;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;
	local_flush_icache_range	= local_r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT)
	if (coherentio) {
		_dma_cache_wback_inv	= (void *)cache_noop;
		_dma_cache_wback	= (void *)cache_noop;
		_dma_cache_inv		= (void *)cache_noop;
	} else {
		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
		_dma_cache_wback	= r4k_dma_cache_wback_inv;
		_dma_cache_inv		= r4k_dma_cache_inv;
	}
#endif

	build_clear_page();
	build_copy_page();
#if !defined(CONFIG_MIPS_CMP)
	local_r4k___flush_cache_all(NULL);
#endif
	coherency_setup();
}