/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */


/*
 * Special variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
				   int retry, int wait)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, retry, wait);
#endif
	func(info);
	preempt_enable();
}

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches.
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);

static inline void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}
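
/*
 * Note (not from the original source): the blast_dcache16_page,
 * blast_dcache32_page and *_page_indexed helpers selected above are assumed
 * to be the line-size-specific routines generated in <asm/r4kcache.h>.
 * Resolving the function pointer once at init time avoids a per-call branch
 * on the probed line size; the 32-byte D-cache case goes through
 * r4k_blast_dcache_page_dc32() so the R4600 V2 hit-cacheop workaround is
 * applied on every page flush.
 */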

static void (*r4k_blast_dcache)(void);

static inline void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}
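
/*
 * Note (an assumption, not from the original source): the idea behind
 * TX49XX_ICACHE_INDEX_INV_WAR appears to be that index-invalidating the
 * I-cache must not hit the lines holding the code doing the invalidation.
 * cache32_unroll32() touches 32 lines of 32 bytes, i.e. a 1 kB chunk (hence
 * JUMP_TO_ALIGN(10) above), so the TX49 blast routines align themselves to
 * a 2 kB boundary and invalidate only the "other" 1 kB chunk from each half,
 * as the even/odd chunk comments indicate.
 */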

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (*r4k_blast_icache_page)(unsigned long addr);

static inline void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}


static void (*r4k_blast_icache_page_indexed)(unsigned long addr);

static inline void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (*r4k_blast_icache)(void);

static inline void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (*r4k_blast_scache_page)(unsigned long addr);

static inline void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (*r4k_blast_scache_page_indexed)(unsigned long addr);

static inline void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}
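
/*
 * Note (not from the original source): the *_page() flavours selected above
 * operate on virtual addresses with Hit-type cache ops, which on these cores
 * require a valid TLB mapping for the address being flushed, while the
 * *_page_indexed() flavours address the cache by index and need no
 * translation.  This is presumably why local_r4k_flush_cache_page() below
 * falls back to indexed flushes when the flushed mm is not the current one.
 */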

static void (*r4k_blast_scache)(void);

static inline void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

/*
 * This is former mm's flush_cache_all() which really should be
 * flush_cache_vunmap these days ...
 */
static inline void local_r4k_flush_cache_all(void *args)
{
	r4k_blast_dcache();
	r4k_blast_icache();
}

static void r4k_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k___flush_cache_all(void *args)
{
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_data.cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k_flush_cache_range(void *args)
{
	struct vm_area_struct *vma = args;
	int exec;

	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	exec = vma->vm_flags & VM_EXEC;
	if (cpu_has_dc_aliases || exec)
		r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void *args)
{
	struct mm_struct *mm = args;

	if (!cpu_context(smp_processor_id(), mm))
		return;

	r4k_blast_dcache();
	r4k_blast_icache();

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, but R10000 and R12000 behave
	 * sanely ...
	 */
	if (current_cpu_data.cputype == CPU_R4000SC ||
	    current_cpu_data.cputype == CPU_R4000MC ||
	    current_cpu_data.cputype == CPU_R4400SC ||
	    current_cpu_data.cputype == CPU_R4400MC)
		r4k_blast_scache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};
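
/*
 * Note (not from the original source): r4k_on_each_cpu() hands a single
 * void * to the per-CPU handler, so flush operations that need more than one
 * argument bundle them in a small on-stack struct like the one above (and
 * flush_icache_range_args / flush_icache_page_args further down).  The
 * struct only has to stay live until r4k_on_each_cpu() returns, which is
 * safe here because the callers pass wait == 1.
 */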

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	unsigned long paddr = fcp_args->pfn << PAGE_SHIFT;
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
			r4k_blast_dcache_page(addr);
			if (exec && !cpu_icache_snoops_remote_store)
				r4k_blast_scache_page(addr);
		}
		if (exec)
			r4k_blast_icache_page(addr);

		return;
	}

	/*
	 * Do indexed flush, too much work to get the (possible) TLB refills
	 * to work correctly.
	 */
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page_indexed(cpu_has_pindexed_dcache ?
					      paddr : addr);
		if (exec && !cpu_icache_snoops_remote_store) {
			r4k_blast_scache_page_indexed(paddr);
		}
	}
	if (exec) {
		if (cpu_has_vtag_icache) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page_indexed(addr);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void *addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}

		if (!cpu_icache_snoops_remote_store && scache_size) {
			if (end - start > scache_size)
				r4k_blast_scache();
			else
				protected_blast_scache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
	instruction_hazard();
}
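
/*
 * Note (not from the original source): the ordering in
 * local_r4k_flush_icache_range() matters on cores whose I-cache does not
 * fill from the D-cache: the newly written instructions are first pushed out
 * of the D-cache (and the S-cache, if the I-cache does not snoop remote
 * stores) so they are visible to the I-cache refill, and only then are the
 * stale I-cache lines invalidated.  Ranges at least as large as a cache are
 * handled with a full indexed blast, which is cheaper than hitting every
 * line of the range individually.
 */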

/*
 * Ok, this seriously sucks.  We use them to flush a user page but don't
 * know the virtual address, so we have to blast away the whole icache
 * which is significantly more expensive than the real thing.  On the other
 * hand we at least know the kernel address of the page, so we can flush it
 * selectively.
 */

struct flush_icache_page_args {
	struct vm_area_struct *vma;
	struct page *page;
};

static inline void local_r4k_flush_icache_page(void *args)
{
	struct flush_icache_page_args *fip_args = args;
	struct vm_area_struct *vma = fip_args->vma;
	struct page *page = fip_args->page;

	/*
	 * Tricky ...  Because we don't know the virtual address we've got
	 * the choice of either invalidating the entire primary and secondary
	 * caches or invalidating only the secondary cache.  With the subset
	 * enforcement on R4000SC, R4400SC, R10000 and R12000 invalidating
	 * the secondary cache will result in any entries in the primary
	 * caches also getting invalidated, which hopefully is a bit more
	 * economical.
	 */
	if (cpu_has_subset_pcaches) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);

		return;
	}

	if (!cpu_has_ic_fills_f_dc) {
		unsigned long addr = (unsigned long) page_address(page);
		r4k_blast_dcache_page(addr);
		if (!cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);
	}

	/*
	 * We're not sure of the virtual address(es) involved here, so
	 * we have to flush the entire I-cache.
	 */
	if (cpu_has_vtag_icache) {
		int cpu = smp_processor_id();

		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		r4k_blast_icache();
}

static void r4k_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	struct flush_icache_page_args args;

	/*
	 * If there's no context yet, or the page isn't executable, no
	 * I-cache flush is needed.
	 */
	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.page = page;

	r4k_on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
}


#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly.
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */
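
/*
 * Note (not from the original source): the two routines above are wired up
 * in r4k_cache_init() below as _dma_cache_wback_inv / _dma_cache_wback and
 * _dma_cache_inv respectively, i.e. the writeback-invalidate path also
 * serves plain writebacks.  They are presumably invoked by the noncoherent
 * DMA mapping code according to the direction of the DMA transfer.
 */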

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void *arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la $at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla $at,1f\n\t"
#endif
			"cache %0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31.  The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
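
/*
 * Note (not from the original source): the probing below decodes the classic
 * R4000-style Config register, where (as assumed here) the IC/DC fields give
 * the primary cache sizes as powers of two and the IB/DB bits select the
 * line size.  For example, an IC field of 2 yields
 * icache_size = 1 << (12 + 2) = 16 kB, and linesz = 16 << IB is either 16 or
 * 32 bytes.  The VR41xx cases use a 1 kB base (1 << 10) instead.
 */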

static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~CONF_EB);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config &= ~0x00000030U;
			config |= 0x00410000U;
			write_c0_config(config);
		}
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU; let's probe the
		 * I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets * c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets * c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
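
	/*
	 * Note (not from the original source): waysize is what the aliasing
	 * check below cares about.  A virtually indexed, physically tagged
	 * D-cache can alias when one way spans more than a page, e.g. a
	 * 16 kB 4-way cache has 4 kB per way and cannot alias with 4 kB
	 * pages, while a 32 kB 2-way cache has 16 kB per way and can.
	 */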

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed, so normally they'd suffer from
	 * aliases, but magic in the hardware deals with that for us so we
	 * don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_SB1:
		break;
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
}
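
/*
 * Note (not from the original source, my reading of the code): probe_scache()
 * sizes a board-level direct-mapped S-cache by looking for the index
 * wrap-around point.  It first touches addresses at 64 kB, 128 kB, ...
 * offsets so the corresponding lines hold valid tags, then stores a zero
 * (invalid) tag at the index of 'begin', and finally reads tags back at
 * doubling offsets; the first offset whose Index_Load_Tag_SD returns a zero
 * tag maps to the same index as 'begin' again, so that offset is the
 * S-cache size.
 */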

/*
 * If you even _breathe_ on this function, look at the gcc output and make
 * sure it does not pop things on and off the stack for the cache sizing
 * loop that executes in KSEG1 space or else you will crash and burn badly.
 * You have been warned.
 */
static int __init probe_scache(void)
{
	extern unsigned long stext;
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;	/* does not matter */

	return 1;
}

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __init setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	default:
		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
		    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets *
					      c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_SUBSET_CACHES;
}
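
/*
 * Note (not from the original source): MIPS_CPU_SUBSET_CACHES set at the end
 * of setup_scache() is presumably what the cpu_has_subset_pcaches tests used
 * by the flush and DMA routines above evaluate, so the shortcuts that only
 * touch the S-cache are taken only when an inclusive unified secondary
 * cache was actually found.
 */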

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0) on the
	 * early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various
	 * errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are actually silent about this bit, so we set it
	 * just in case for those revisions that require it to be set
	 * according to arch/mips/au1000/common/cputable.c
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

static inline void coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cacheable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one ...
	 */
	case CPU_AU1000: /* rev. DA, HA, HB */
	case CPU_AU1100: /* rev. AB, BA, BC ?? */
	case CPU_AU1500: /* rev. AB */
		au1x00_fixup_config_od();
		break;
	}
}
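
/*
 * Note (not from the original source): the ordering in r4k_cache_init()
 * below matters.  probe_pcache()/setup_scache() must run before the
 * r4k_blast_*_setup() calls, since those pick their implementations based
 * on the line sizes the probes just recorded, and the final
 * local_r4k___flush_cache_all() presumably leaves the caches clean before
 * coherency_setup() changes the cacheability defaults.
 */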

void __init r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char except_vec2_generic;
	struct cpuinfo_mips *c = &current_cpu_data;

	/* Default cache error handler for R4000 and R5000 family */
	set_uncached_handler(0x100, &except_vec2_generic, 0x80);

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;
	flush_cache_all		= r4k_flush_cache_all;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_icache_page	= r4k_flush_icache_page;
	flush_cache_range	= r4k_flush_cache_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
	_dma_cache_wback	= r4k_dma_cache_wback_inv;
	_dma_cache_inv		= r4k_dma_cache_inv;
#endif

	build_clear_page();
	build_copy_page();
	local_r4k___flush_cache_all(NULL);
	coherency_setup();
}