cache.c: diff of d6da35e0c6d50f76eaf11a0496d3d2ec1a1fea3f (old) against 2de8b4cc2051ee1d40eedbcf94de0e7d04507c37 (new); lines prefixed with '-' appear only in the old version, '+' only in the new.
 /*
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
  * Copyright (C) 1999 SuSE GmbH Nuernberg
  * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)

--- 13 unchanged lines hidden ---

 #include <asm/pdc.h>
 #include <asm/cache.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
 #include <asm/shmparam.h>
+#include <asm/mmu_context.h>

 int split_tlb __ro_after_init;
 int dcache_stride __ro_after_init;
 int icache_stride __ro_after_init;
 EXPORT_SYMBOL(dcache_stride);

 void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 EXPORT_SYMBOL(flush_dcache_page_asm);

--- 48 unchanged lines hidden ---

 static inline void flush_data_cache(void)
 {
 	if (static_branch_likely(&parisc_has_dcache))
 		on_each_cpu(flush_data_cache_local, NULL, 1);
 }

-/* Virtual address of pfn. */
+/* Kernel virtual address of pfn. */
 #define pfn_va(pfn)	__va(PFN_PHYS(pfn))

 void
 __update_cache(pte_t pte)
 {
 	unsigned long pfn = pte_pfn(pte);
 	struct page *page;

--- 16 unchanged lines hidden ---

 show_cache_info(struct seq_file *m)
 {
 	char buf[32];

 	seq_printf(m, "I-cache\t\t: %ld KB\n",
 		cache_info.ic_size/1024 );
 	if (cache_info.dc_loop != 1)
 		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
-	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
+	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
 		cache_info.dc_size/1024,
 		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
 		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
-		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
+		((cache_info.dc_loop == 1) ? "direct mapped" : buf),
+		cache_info.dc_conf.cc_alias
+	);
 	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
 		cache_info.it_size,
 		cache_info.dt_size,
 		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
 	);

 #ifndef CONFIG_PA20
 	/* BTLB - Block TLB */

--- 179 unchanged lines hidden ---

 		return;
 	preempt_disable();
 	flush_dcache_page_asm(physaddr, vmaddr);
 	if (vma->vm_flags & VM_EXEC)
 		flush_icache_page_asm(physaddr, vmaddr);
 	preempt_enable();
 }

-static inline void
-__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
-	unsigned long physaddr)
+static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
-	if (!static_branch_likely(&parisc_has_cache))
-		return;
+	unsigned long flags, space, pgd, prot;
+#ifdef CONFIG_TLB_PTLOCK
+	unsigned long pgd_lock;
+#endif
+
+	vmaddr &= PAGE_MASK;
+
 	preempt_disable();
-	purge_dcache_page_asm(physaddr, vmaddr);
+
+	/* Set context for flush */
+	local_irq_save(flags);
+	prot = mfctl(8);
+	space = mfsp(SR_USER);
+	pgd = mfctl(25);
+#ifdef CONFIG_TLB_PTLOCK
+	pgd_lock = mfctl(28);
+#endif
+	switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
+	local_irq_restore(flags);
+
+	flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
 	if (vma->vm_flags & VM_EXEC)
-		flush_icache_page_asm(physaddr, vmaddr);
+		flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
+	flush_tlb_page(vma, vmaddr);
+
+	/* Restore previous context */
+	local_irq_save(flags);
+#ifdef CONFIG_TLB_PTLOCK
+	mtctl(pgd_lock, 28);
+#endif
+	mtctl(pgd, 25);
+	mtsp(space, SR_USER);
+	mtctl(prot, 8);
+	local_irq_restore(flags);
+
 	preempt_enable();
 }

+static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
+{
+	pte_t *ptep = NULL;
+	pgd_t *pgd = mm->pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	if (!pgd_none(*pgd)) {
+		p4d = p4d_offset(pgd, addr);
+		if (!p4d_none(*p4d)) {
+			pud = pud_offset(p4d, addr);
+			if (!pud_none(*pud)) {
+				pmd = pmd_offset(pud, addr);
+				if (!pmd_none(*pmd))
+					ptep = pte_offset_map(pmd, addr);
+			}
+		}
+	}
+	return ptep;
+}
+
+static inline bool pte_needs_flush(pte_t pte)
+{
+	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
+		== (_PAGE_PRESENT | _PAGE_ACCESSED);
+}
+
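
Aside (not part of cache.c): the pte_needs_flush() helper added above flushes a mapping only when it is present, has actually been referenced, and is cacheable. A minimal standalone sketch of the same single-mask test, using placeholder bit values rather than the real parisc _PAGE_* definitions:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_PAGE_PRESENT  (1u << 0)	/* placeholder value, not the parisc bit */
#define DEMO_PAGE_ACCESSED (1u << 1)	/* placeholder value */
#define DEMO_PAGE_NO_CACHE (1u << 2)	/* placeholder value */

/* True only for a present, accessed, cacheable mapping. */
static bool demo_pte_needs_flush(unsigned int pteval)
{
	return (pteval & (DEMO_PAGE_PRESENT | DEMO_PAGE_ACCESSED | DEMO_PAGE_NO_CACHE))
		== (DEMO_PAGE_PRESENT | DEMO_PAGE_ACCESSED);
}

int main(void)
{
	/* prints 1, 0, 0 */
	printf("%d\n", demo_pte_needs_flush(DEMO_PAGE_PRESENT | DEMO_PAGE_ACCESSED));
	printf("%d\n", demo_pte_needs_flush(DEMO_PAGE_PRESENT));
	printf("%d\n", demo_pte_needs_flush(DEMO_PAGE_PRESENT | DEMO_PAGE_ACCESSED | DEMO_PAGE_NO_CACHE));
	return 0;
}

Masking with all three bits and comparing against the two that must be set checks "present and accessed" and "not uncached" in one expression.
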
 void flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping_file(page);
 	struct vm_area_struct *mpnt;
 	unsigned long offset;
 	unsigned long addr, old_addr = 0;
+	unsigned long count = 0;
 	pgoff_t pgoff;

 	if (mapping && !mapping_mapped(mapping)) {
 		set_bit(PG_dcache_dirty, &page->flags);
 		return;
 	}

 	flush_kernel_dcache_page_addr(page_address(page));

 	if (!mapping)
 		return;

 	pgoff = page->index;

-	/* We have carefully arranged in arch_get_unmapped_area() that
+	/*
+	 * We have carefully arranged in arch_get_unmapped_area() that
 	 * *any* mappings of a file are always congruently mapped (whether
 	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
-	 * to flush one address here for them all to become coherent */
-
+	 * to flush one address here for them all to become coherent
+	 * on machines that support equivalent aliasing
+	 */
 	flush_dcache_mmap_lock(mapping);
 	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		addr = mpnt->vm_start + offset;
+		if (parisc_requires_coherency()) {
+			pte_t *ptep;

-		/* The TLB is the engine of coherence on parisc: The
-		 * CPU is entitled to speculate any page with a TLB
-		 * mapping, so here we kill the mapping then flush the
-		 * page along a special flush only alias mapping.
-		 * This guarantees that the page is no-longer in the
-		 * cache for any process and nor may it be
-		 * speculatively read in (until the user or kernel
-		 * specifically accesses it, of course) */
-
-		flush_tlb_page(mpnt, addr);
-		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
-				  != (addr & (SHM_COLOUR - 1))) {
-			__flush_cache_page(mpnt, addr, page_to_phys(page));
-			if (parisc_requires_coherency() && old_addr)
-				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
-			old_addr = addr;
+			ptep = get_ptep(mpnt->vm_mm, addr);
+			if (ptep && pte_needs_flush(*ptep))
+				flush_user_cache_page(mpnt, addr);
+		} else {
+			/*
+			 * The TLB is the engine of coherence on parisc:
+			 * The CPU is entitled to speculate any page
+			 * with a TLB mapping, so here we kill the
+			 * mapping then flush the page along a special
+			 * flush only alias mapping. This guarantees that
+			 * the page is no-longer in the cache for any
+			 * process and nor may it be speculatively read
+			 * in (until the user or kernel specifically
+			 * accesses it, of course)
+			 */
+			flush_tlb_page(mpnt, addr);
+			if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+				!= (addr & (SHM_COLOUR - 1))) {
+				__flush_cache_page(mpnt, addr, page_to_phys(page));
+				/*
+				 * Software is allowed to have any number
+				 * of private mappings to a page.
+				 */
+				if (!(mpnt->vm_flags & VM_SHARED))
+					continue;
+				if (old_addr)
+					pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
+						old_addr, addr, mpnt->vm_file);
+				old_addr = addr;
+			}
 		}
+		WARN_ON(++count == 4096);
 	}
 	flush_dcache_mmap_unlock(mapping);
 }
 EXPORT_SYMBOL(flush_dcache_page);

 /* Defined in arch/parisc/kernel/pacache.S */
 EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
 EXPORT_SYMBOL(flush_kernel_icache_range_asm);

 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
 static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

 #define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
 static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;

 void __init parisc_setup_cache_timing(void)
 {
 	unsigned long rangetime, alltime;
 	unsigned long size;
-	unsigned long threshold;
+	unsigned long threshold, threshold2;

 	alltime = mfctl(16);
 	flush_data_cache();
 	alltime = mfctl(16) - alltime;

 	size = (unsigned long)(_end - _text);
 	rangetime = mfctl(16);
 	flush_kernel_dcache_range((unsigned long)_text, size);
 	rangetime = mfctl(16) - rangetime;

 	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
 		alltime, size, rangetime);

-	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
-	if (threshold > cache_info.dc_size)
-		threshold = cache_info.dc_size;
-	if (threshold)
-		parisc_cache_flush_threshold = threshold;
+	threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
+	pr_info("Calculated flush threshold is %lu KiB\n",
+		threshold/1024);
+
+	/*
+	 * The threshold computed above isn't very reliable. The following
+	 * heuristic works reasonably well on c8000/rp3440.
+	 */
+	threshold2 = cache_info.dc_size * num_online_cpus();
+	parisc_cache_flush_threshold = threshold2;
 	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
 		parisc_cache_flush_threshold/1024);

 	/* calculate TLB flush threshold */

 	/* On SMP machines, skip the TLB measure of kernel text which
 	 * has been mapped as huge pages. */
 	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {

--- 39 unchanged lines hidden ---

 	flush_kernel_dcache_page_asm(addr);
 	purge_tlb_start(flags);
 	pdtlb(SR_KERNEL, addr);
 	purge_tlb_end(flags);
 }
 EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

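Aside (not part of cache.c): parisc_setup_cache_timing() above now produces two numbers. The timing-derived estimate (bytes covered by the range flush, scaled by the whole-flush/range-flush cycle ratio and aligned to a cache line) is only printed, while the heuristic dc_size * num_online_cpus() is what actually becomes parisc_cache_flush_threshold. A standalone sketch of just the arithmetic, with made-up inputs; the 64-byte alignment stands in for L1_CACHE_BYTES:

#include <stdint.h>
#include <stdio.h>

#define DEMO_CACHE_BYTES 64ULL	/* placeholder for L1_CACHE_BYTES */
#define DEMO_CACHE_ALIGN(x) (((x) + DEMO_CACHE_BYTES - 1) & ~(DEMO_CACHE_BYTES - 1))

int main(void)
{
	uint64_t size      = 8 * 1024 * 1024;	/* bytes covered by the range flush (example) */
	uint64_t alltime   = 120000;		/* cycles for flush_data_cache() (example) */
	uint64_t rangetime = 380000;		/* cycles for the range flush (example) */
	uint64_t dc_size   = 64 * 1024 * 1024;	/* PDC-reported D-cache size (example) */
	uint64_t ncpus     = 4;			/* num_online_cpus() (example) */

	/* Timing-derived estimate: roughly the range size at which a range
	 * flush costs as much as flushing the whole data cache. */
	uint64_t threshold = DEMO_CACHE_ALIGN(size * alltime / rangetime);

	/* Heuristic that is actually stored in parisc_cache_flush_threshold. */
	uint64_t threshold2 = dc_size * ncpus;

	printf("calculated flush threshold: %llu KiB\n", (unsigned long long)(threshold / 1024));
	printf("threshold actually used   : %llu KiB\n", (unsigned long long)(threshold2 / 1024));
	return 0;
}

The sketch only mirrors the arithmetic; in the kernel the inputs come from mfctl(16) cycle counts and the PDC cache information.
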
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
-	struct page *pg)
+static void flush_cache_page_if_present(struct vm_area_struct *vma,
+	unsigned long vmaddr, unsigned long pfn)
 {
-	/* Copy using kernel mapping. No coherency is needed (all in
-	   kunmap) for the `to' page. However, the `from' page needs to
-	   be flushed through a mapping equivalent to the user mapping
-	   before it can be accessed through the kernel mapping. */
-	preempt_disable();
-	flush_dcache_page_asm(__pa(vfrom), vaddr);
-	copy_page_asm(vto, vfrom);
-	preempt_enable();
+	pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);
+
+	/*
+	 * The pte check is racy and sometimes the flush will trigger
+	 * a non-access TLB miss. Hopefully, the page has already been
+	 * flushed.
+	 */
+	if (ptep && pte_needs_flush(*ptep))
+		flush_cache_page(vma, vmaddr, pfn);
 }
-EXPORT_SYMBOL(copy_user_page);

+void copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr, struct vm_area_struct *vma)
+{
+	void *kto, *kfrom;
+
+	kfrom = kmap_local_page(from);
+	kto = kmap_local_page(to);
+	flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
+	copy_page_asm(kto, kfrom);
+	kunmap_local(kto);
+	kunmap_local(kfrom);
+}
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+	unsigned long user_vaddr, void *dst, void *src, int len)
+{
+	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+	memcpy(dst, src, len);
+	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+	unsigned long user_vaddr, void *dst, void *src, int len)
+{
+	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+	memcpy(dst, src, len);
+}
+
 /* __flush_tlb_range()
  *
  * returns 1 if all TLBs were flushed.
  */
 int __flush_tlb_range(unsigned long sid, unsigned long start,
 		unsigned long end)
 {
 	unsigned long flags;

--- 13 unchanged lines hidden ---

 		pdtlb(SR_TEMP1, start);
 		pitlb(SR_TEMP1, start);
 		purge_tlb_end(flags);
 		start += PAGE_SIZE;
 	}
 	return 0;
 }

-static inline unsigned long mm_total_size(struct mm_struct *mm)
+static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	struct vm_area_struct *vma;
-	unsigned long usize = 0;
+	unsigned long addr, pfn;
+	pte_t *ptep;

-	for (vma = mm->mmap; vma; vma = vma->vm_next)
-		usize += vma->vm_end - vma->vm_start;
-	return usize;
-}
-
-static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
-{
-	pte_t *ptep = NULL;
-
-	if (!pgd_none(*pgd)) {
-		p4d_t *p4d = p4d_offset(pgd, addr);
-		if (!p4d_none(*p4d)) {
-			pud_t *pud = pud_offset(p4d, addr);
-			if (!pud_none(*pud)) {
-				pmd_t *pmd = pmd_offset(pud, addr);
-				if (!pmd_none(*pmd))
-					ptep = pte_offset_map(pmd, addr);
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		/*
+		 * The vma can contain pages that aren't present. Although
+		 * the pte search is expensive, we need the pte to find the
+		 * page pfn and to check whether the page should be flushed.
+		 */
+		ptep = get_ptep(vma->vm_mm, addr);
+		if (ptep && pte_needs_flush(*ptep)) {
+			if (parisc_requires_coherency()) {
+				flush_user_cache_page(vma, addr);
+			} else {
+				pfn = pte_pfn(*ptep);
+				if (WARN_ON(!pfn_valid(pfn)))
+					return;
+				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
 			}
 		}
 	}
-	return ptep;
 }

-static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm,
-	unsigned long start, unsigned long end)
+static inline unsigned long mm_total_size(struct mm_struct *mm)
 {
-	unsigned long addr, pfn;
-	pte_t *ptep;
+	struct vm_area_struct *vma;
+	unsigned long usize = 0;

-	for (addr = start; addr < end; addr += PAGE_SIZE) {
-		ptep = get_ptep(mm->pgd, addr);
-		if (ptep) {
-			pfn = pte_pfn(*ptep);
-			flush_cache_page(vma, addr, pfn);
-		}
-	}
+	for (vma = mm->mmap; vma && usize < parisc_cache_flush_threshold; vma = vma->vm_next)
+		usize += vma->vm_end - vma->vm_start;
+	return usize;
 }

 void flush_cache_mm(struct mm_struct *mm)
 {
 	struct vm_area_struct *vma;

-	/* Flushing the whole cache on each cpu takes forever on
-	   rp3440, etc. So, avoid it if the mm isn't too big. */
-	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
-	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
-		if (mm->context.space_id)
-			flush_tlb_all();
+	/*
+	 * Flushing the whole cache on each cpu takes forever on
+	 * rp3440, etc. So, avoid it if the mm isn't too big.
+	 *
+	 * Note that we must flush the entire cache on machines
+	 * with aliasing caches to prevent random segmentation
+	 * faults.
+	 */
+	if (!parisc_requires_coherency()
+	    ||  mm_total_size(mm) >= parisc_cache_flush_threshold) {
+		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
+			return;
+		flush_tlb_all();
 		flush_cache_all();
 		return;
 	}

+	/* Flush mm */
 	for (vma = mm->mmap; vma; vma = vma->vm_next)
-		flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end);
+		flush_cache_pages(vma, vma->vm_start, vma->vm_end);
 }

-void flush_cache_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
-	    end - start >= parisc_cache_flush_threshold) {
-		if (vma->vm_mm->context.space_id)
-			flush_tlb_range(vma, start, end);
+	if (!parisc_requires_coherency()
+	    || end - start >= parisc_cache_flush_threshold) {
+		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
+			return;
+		flush_tlb_range(vma, start, end);
 		flush_cache_all();
 		return;
 	}

-	flush_cache_pages(vma, vma->vm_mm, start, end);
+	flush_cache_pages(vma, start, end);
 }

-void
-flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
+void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
-	if (pfn_valid(pfn)) {
-		if (likely(vma->vm_mm->context.space_id)) {
-			flush_tlb_page(vma, vmaddr);
-			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
-		} else {
-			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
-		}
+	if (WARN_ON(!pfn_valid(pfn)))
+		return;
+	if (parisc_requires_coherency())
+		flush_user_cache_page(vma, vmaddr);
+	else
+		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+}
+
+void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+{
+	if (!PageAnon(page))
+		return;
+
+	if (parisc_requires_coherency()) {
+		flush_user_cache_page(vma, vmaddr);
+		return;
 	}
+
+	flush_tlb_page(vma, vmaddr);
+	preempt_disable();
+	flush_dcache_page_asm(page_to_phys(page), vmaddr);
+	preempt_enable();
 }
611void flush_kernel_vmap_range(void *vaddr, int size)
612{
613 unsigned long start = (unsigned long)vaddr;
614 unsigned long end = start + size;
615
616 if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&

--- 27 unchanged lines hidden ---
733}
734
735void flush_kernel_vmap_range(void *vaddr, int size)
736{
737 unsigned long start = (unsigned long)vaddr;
738 unsigned long end = start + size;
739
740 if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&

--- 27 unchanged lines hidden ---