cache.c: diff of 06d07429858317ded2db7986113a9e0129cd599b (old) vs. 72d95924ee35c8cd16ef52f912483ee938a34d49 (new)
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
7 * Copyright (C) 1999 SuSE GmbH Nuernberg
8 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)

--- 6 unchanged lines hidden (view full) ---

15#include <linux/kernel.h>
16#include <linux/mm.h>
17#include <linux/module.h>
18#include <linux/seq_file.h>
19#include <linux/pagemap.h>
20#include <linux/sched.h>
21#include <linux/sched/mm.h>
22#include <linux/syscalls.h>
23#include <linux/vmalloc.h>
24#include <asm/pdc.h>
25#include <asm/cache.h>
26#include <asm/cacheflush.h>
27#include <asm/tlbflush.h>
28#include <asm/page.h>
29#include <asm/processor.h>
30#include <asm/sections.h>
31#include <asm/shmparam.h>
32#include <asm/mmu_context.h>
33#include <asm/cachectl.h>
34
35#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)
36
37/*
38 * When nonzero, use _PAGE_ACCESSED bit to try to reduce the number
39 * of page flushes done by flush_cache_page_if_present. There are some
40 * pros and cons in using this option. It may increase the risk of
41 * random segmentation faults.
42 */
43#define CONFIG_FLUSH_PAGE_ACCESSED 0
44
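Illustrative aside (not part of either revision): PTR_PAGE_ALIGN_DOWN() rounds a pointer down to the start of its page; the new copy_to_user_page()/copy_from_user_page() below use it to flush the whole kernel page containing dst/src. A minimal user-space sketch of the same rounding, assuming a 4 KiB page size:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL	/* assumed page size, demo only */
#define PTR_PAGE_ALIGN_DOWN(p) ((void *)((uintptr_t)(p) & ~(PAGE_SIZE - 1)))

int main(void)
{
	static char buf[2 * 4096];
	void *p = buf + 5000;	/* arbitrary address inside buf */

	/* prints p and p rounded down to its page boundary */
	printf("%p -> %p\n", p, PTR_PAGE_ALIGN_DOWN(p));
	return 0;
}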
45int split_tlb __ro_after_init;
46int dcache_stride __ro_after_init;
47int icache_stride __ro_after_init;
48EXPORT_SYMBOL(dcache_stride);
49
50/* Internal implementation in arch/parisc/kernel/pacache.S */
51void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
52EXPORT_SYMBOL(flush_dcache_page_asm);
53void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
54void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
55void flush_data_cache_local(void *); /* flushes local data-cache only */
56void flush_instruction_cache_local(void); /* flushes local code-cache only */
57
58static void flush_kernel_dcache_page_addr(const void *addr);
59
60/* On some machines (i.e., ones with the Merced bus), there can be
61 * only a single PxTLB broadcast at a time; this must be guaranteed
62 * by software. We need a spinlock around all TLB flushes to ensure
63 * this.
64 */
65DEFINE_SPINLOCK(pa_tlb_flush_lock);
66
67#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)

--- 260 unchanged lines hidden (view full) ---

328}
329
330static inline void
331__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
332 unsigned long physaddr)
333{
334 if (!static_branch_likely(&parisc_has_cache))
335 return;
336
337 /*
338 * The TLB is the engine of coherence on parisc. The CPU is
339 * entitled to speculate any page with a TLB mapping, so here
340 * we kill the mapping then flush the page along a special flush
341 * only alias mapping. This guarantees that the page is no-longer
342 * in the cache for any process and nor may it be speculatively
343 * read in (until the user or kernel specifically accesses it,
344 * of course).
345 */
346 flush_tlb_page(vma, vmaddr);
347
348 preempt_disable();
349 flush_dcache_page_asm(physaddr, vmaddr);
350 if (vma->vm_flags & VM_EXEC)
351 flush_icache_page_asm(physaddr, vmaddr);
352 preempt_enable();
353}
354
331static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
332{
333 unsigned long flags, space, pgd, prot;
334#ifdef CONFIG_TLB_PTLOCK
335 unsigned long pgd_lock;
336#endif
337
338 vmaddr &= PAGE_MASK;
339
340 preempt_disable();
341
342 /* Set context for flush */
343 local_irq_save(flags);
344 prot = mfctl(8);
345 space = mfsp(SR_USER);
346 pgd = mfctl(25);
347#ifdef CONFIG_TLB_PTLOCK
348 pgd_lock = mfctl(28);
349#endif
350 switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
351 local_irq_restore(flags);
352
353 flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
354 if (vma->vm_flags & VM_EXEC)
355 flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
356 flush_tlb_page(vma, vmaddr);
357
358 /* Restore previous context */
359 local_irq_save(flags);
360#ifdef CONFIG_TLB_PTLOCK
361 mtctl(pgd_lock, 28);
362#endif
363 mtctl(pgd, 25);
364 mtsp(space, SR_USER);
365 mtctl(prot, 8);
366 local_irq_restore(flags);
367
368 preempt_enable();
369}
370
355static void flush_kernel_dcache_page_addr(const void *addr)
356{
357 unsigned long vaddr = (unsigned long)addr;
358 unsigned long flags;
359
360 /* Purge TLB entry to remove translation on all CPUs */
361 purge_tlb_start(flags);
362 pdtlb(SR_KERNEL, addr);
363 purge_tlb_end(flags);
364
365 /* Use tmpalias flush to prevent data cache move-in */
366 preempt_disable();
367 flush_dcache_page_asm(__pa(vaddr), vaddr);
368 preempt_enable();
369}
370
371static void flush_kernel_icache_page_addr(const void *addr)
372{
373 unsigned long vaddr = (unsigned long)addr;
374 unsigned long flags;
375
376 /* Purge TLB entry to remove translation on all CPUs */
377 purge_tlb_start(flags);
378 pdtlb(SR_KERNEL, addr);
379 purge_tlb_end(flags);
380
381 /* Use tmpalias flush to prevent instruction cache move-in */
382 preempt_disable();
383 flush_icache_page_asm(__pa(vaddr), vaddr);
384 preempt_enable();
385}
386
387void kunmap_flush_on_unmap(const void *addr)
388{
389 flush_kernel_dcache_page_addr(addr);
390}
391EXPORT_SYMBOL(kunmap_flush_on_unmap);
392
393void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
394 unsigned int nr)
395{
396 void *kaddr = page_address(page);
397
398 for (;;) {
399 flush_kernel_dcache_page_addr(kaddr);
378 flush_kernel_icache_page(kaddr);
400 flush_kernel_icache_page_addr(kaddr);
401 if (--nr == 0)
402 break;
403 kaddr += PAGE_SIZE;
404 }
405}
406
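Illustrative aside (not part of the patch): flush_icache_pages() pairs a d-cache writeback with an i-cache flush because instruction fetch does not snoop the data cache on this hardware. A hedged user-space analogue of the same requirement, using the compiler's portable cache-sync builtin:

#include <string.h>

/* Copy freshly generated instructions into an executable buffer and make the
 * i-cache coherent with the d-cache before running them; the builtin plays the
 * role of the dcache/icache page flush pair above.
 */
static void publish_code(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);	/* new code sits in the d-cache */
	__builtin___clear_cache((char *)dst, (char *)dst + len);
}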
407/*
408 * Walk page directory for MM to find PTEP pointer for address ADDR.
409 */
410static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
411{
412 pte_t *ptep = NULL;
413 pgd_t *pgd = mm->pgd;
414 p4d_t *p4d;
415 pud_t *pud;
416 pmd_t *pmd;
417

--- 12 unchanged lines hidden (view full) ---

430}
431
432static inline bool pte_needs_flush(pte_t pte)
433{
434 return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
435 == (_PAGE_PRESENT | _PAGE_ACCESSED);
436}
437
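Illustrative aside (not part of the patch): pte_needs_flush() only reports pages that are present, have been accessed, and are cacheable (_PAGE_NO_CACHE clear). A small user-space model of the same mask test; the bit positions are hypothetical stand-ins, not the real parisc PTE layout:

#include <stdbool.h>
#include <stdio.h>

#define _PAGE_PRESENT  (1u << 0)	/* assumed bit positions, demo only */
#define _PAGE_ACCESSED (1u << 1)
#define _PAGE_NO_CACHE (1u << 2)

static bool pte_needs_flush(unsigned int pte)
{
	return (pte & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
		== (_PAGE_PRESENT | _PAGE_ACCESSED);
}

int main(void)
{
	printf("%d\n", pte_needs_flush(_PAGE_PRESENT | _PAGE_ACCESSED));			/* 1 */
	printf("%d\n", pte_needs_flush(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE));	/* 0 */
	return 0;
}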
438/*
439 * Return user physical address. Returns 0 if page is not present.
440 */
441static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
442{
443 unsigned long flags, space, pgd, prot, pa;
444#ifdef CONFIG_TLB_PTLOCK
445 unsigned long pgd_lock;
446#endif
447
448 /* Save context */
449 local_irq_save(flags);
450 prot = mfctl(8);
451 space = mfsp(SR_USER);
452 pgd = mfctl(25);
453#ifdef CONFIG_TLB_PTLOCK
454 pgd_lock = mfctl(28);
455#endif
456
457 /* Set context for lpa_user */
458 switch_mm_irqs_off(NULL, mm, NULL);
459 pa = lpa_user(addr);
460
461 /* Restore previous context */
462#ifdef CONFIG_TLB_PTLOCK
463 mtctl(pgd_lock, 28);
464#endif
465 mtctl(pgd, 25);
466 mtsp(space, SR_USER);
467 mtctl(prot, 8);
468 local_irq_restore(flags);
469
470 return pa;
471}
472
473void flush_dcache_folio(struct folio *folio)
474{
475 struct address_space *mapping = folio_flush_mapping(folio);
476 struct vm_area_struct *vma;
477 unsigned long addr, old_addr = 0;
478 void *kaddr;
479 unsigned long count = 0;
480 unsigned long i, nr, flags;

--- 32 unchanged lines hidden (view full) ---

513 pfn -= offset;
514 nr += offset;
515 } else {
516 addr += offset * PAGE_SIZE;
517 }
518 if (addr + nr * PAGE_SIZE > vma->vm_end)
519 nr = (vma->vm_end - addr) / PAGE_SIZE;
520
461 if (parisc_requires_coherency()) {
462 for (i = 0; i < nr; i++) {
463 pte_t *ptep = get_ptep(vma->vm_mm,
464 addr + i * PAGE_SIZE);
465 if (!ptep)
466 continue;
467 if (pte_needs_flush(*ptep))
468 flush_user_cache_page(vma,
469 addr + i * PAGE_SIZE);
470 /* Optimise accesses to the same table? */
471 pte_unmap(ptep);
472 }
473 } else {
474 /*
475 * The TLB is the engine of coherence on parisc:
476 * The CPU is entitled to speculate any page
477 * with a TLB mapping, so here we kill the
478 * mapping then flush the page along a special
479 * flush only alias mapping. This guarantees that
480 * the page is no-longer in the cache for any
481 * process and nor may it be speculatively read
482 * in (until the user or kernel specifically
483 * accesses it, of course)
484 */
485 for (i = 0; i < nr; i++)
486 flush_tlb_page(vma, addr + i * PAGE_SIZE);
521 if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
522 != (addr & (SHM_COLOUR - 1))) {
523 for (i = 0; i < nr; i++)
524 __flush_cache_page(vma,
525 addr + i * PAGE_SIZE,
526 (pfn + i) * PAGE_SIZE);
527 /*
528 * Software is allowed to have any number
529 * of private mappings to a page.
530 */
531 if (!(vma->vm_flags & VM_SHARED))
532 continue;
533 if (old_addr)
534 pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
535 old_addr, addr, vma->vm_file);
536 if (nr == folio_nr_pages(folio))
537 old_addr = addr;
505 }
538 }
539 WARN_ON(++count == 4096);
540 }
541 flush_dcache_mmap_unlock_irqrestore(mapping, flags);
542}
543EXPORT_SYMBOL(flush_dcache_folio);
544
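Illustrative aside (not part of the patch): the INEQUIVALENT ALIASES message above fires when one page of a file is mapped shared at two user addresses whose cache colour (the address modulo SHM_COLOUR) differs, since the virtually indexed caches would then index one physical page in two different places. A hypothetical user-space probe of that condition, with SHM_COLOUR assumed to be 4 MiB:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#define SHM_COLOUR 0x00400000UL	/* assumption for the demo */

int main(void)
{
	int fd = open("/tmp/alias-demo", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	void *a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	void *b = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (a == MAP_FAILED || b == MAP_FAILED)
		return 1;

	unsigned long ca = (unsigned long)a & (SHM_COLOUR - 1);
	unsigned long cb = (unsigned long)b & (SHM_COLOUR - 1);

	/* same colour: the two mappings are equivalent aliases; different
	 * colour: the kernel above would have to warn and flush aggressively */
	printf("colour a=%#lx b=%#lx -> %s\n", ca, cb,
	       ca == cb ? "equivalent" : "inequivalent aliases");
	munmap(a, 4096);
	munmap(b, 4096);
	close(fd);
	return 0;
}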
545/* Defined in arch/parisc/kernel/pacache.S */

--- 73 unchanged lines hidden (view full) ---

619 printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
620 parisc_tlb_flush_threshold/1024);
621}
622
623extern void purge_kernel_dcache_page_asm(unsigned long);
624extern void clear_user_page_asm(void *, unsigned long);
625extern void copy_user_page_asm(void *, void *, unsigned long);
626
594void flush_kernel_dcache_page_addr(const void *addr)
595{
596 unsigned long flags;
597
598 flush_kernel_dcache_page_asm(addr);
599 purge_tlb_start(flags);
600 pdtlb(SR_KERNEL, addr);
601 purge_tlb_end(flags);
602}
603EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
604
627static void flush_cache_page_if_present(struct vm_area_struct *vma,
606 unsigned long vmaddr, unsigned long pfn)
628 unsigned long vmaddr)
629{
630#if CONFIG_FLUSH_PAGE_ACCESSED
631 bool needs_flush = false;
609 pte_t *ptep;
632 pte_t *ptep, pte;
633
611 /*
612 * The pte check is racy and sometimes the flush will trigger
613 * a non-access TLB miss. Hopefully, the page has already been
614 * flushed.
615 */
634 ptep = get_ptep(vma->vm_mm, vmaddr);
635 if (ptep) {
618 needs_flush = pte_needs_flush(*ptep);
636 pte = ptep_get(ptep);
637 needs_flush = pte_needs_flush(pte);
638 pte_unmap(ptep);
639 }
640 if (needs_flush)
622 flush_cache_page(vma, vmaddr, pfn);
641 __flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
642#else
643 struct mm_struct *mm = vma->vm_mm;
644 unsigned long physaddr = get_upa(mm, vmaddr);
645
646 if (physaddr)
647 __flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
648#endif
649}
650
651void copy_user_highpage(struct page *to, struct page *from,
652 unsigned long vaddr, struct vm_area_struct *vma)
653{
654 void *kto, *kfrom;
655
656 kfrom = kmap_local_page(from);
657 kto = kmap_local_page(to);
632 flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
658 __flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
659 copy_page_asm(kto, kfrom);
660 kunmap_local(kto);
661 kunmap_local(kfrom);
662}
663
664void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
665 unsigned long user_vaddr, void *dst, void *src, int len)
666{
641 flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
667 __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
668 memcpy(dst, src, len);
643 flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
669 flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
670}
671
672void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
673 unsigned long user_vaddr, void *dst, void *src, int len)
674{
649 flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
675 __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
676 memcpy(dst, src, len);
677 flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
678}
679
680/* __flush_tlb_range()
681 *
682 * returns 1 if all TLBs were flushed.
683 */
684int __flush_tlb_range(unsigned long sid, unsigned long start,
685 unsigned long end)

--- 17 unchanged lines hidden (view full) ---

703 purge_tlb_end(flags);
704 start += PAGE_SIZE;
705 }
706 return 0;
707}
708
709static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
710{
684 unsigned long addr, pfn;
685 pte_t *ptep;
711 unsigned long addr;
712
687 for (addr = start; addr < end; addr += PAGE_SIZE) {
688 bool needs_flush = false;
689 /*
690 * The vma can contain pages that aren't present. Although
691 * the pte search is expensive, we need the pte to find the
692 * page pfn and to check whether the page should be flushed.
693 */
694 ptep = get_ptep(vma->vm_mm, addr);
695 if (ptep) {
696 needs_flush = pte_needs_flush(*ptep);
697 pfn = pte_pfn(*ptep);
698 pte_unmap(ptep);
699 }
700 if (needs_flush) {
701 if (parisc_requires_coherency()) {
702 flush_user_cache_page(vma, addr);
703 } else {
704 if (WARN_ON(!pfn_valid(pfn)))
705 return;
706 __flush_cache_page(vma, addr, PFN_PHYS(pfn));
707 }
708 }
709 }
713 for (addr = start; addr < end; addr += PAGE_SIZE)
714 flush_cache_page_if_present(vma, addr);
710}
711
715}
716
717static inline unsigned long mm_total_size(struct mm_struct *mm)
718{
719 struct vm_area_struct *vma;
720 unsigned long usize = 0;
721 VMA_ITERATOR(vmi, mm, 0);
722

--- 34 unchanged lines hidden (view full) ---

757
758void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
759{
760 if (!parisc_requires_coherency()
761 || end - start >= parisc_cache_flush_threshold) {
762 if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
763 return;
764 flush_tlb_range(vma, start, end);
760 flush_cache_all();
765 if (vma->vm_flags & VM_EXEC)
766 flush_cache_all();
767 else
768 flush_data_cache();
769 return;
770 }
771
764 flush_cache_pages(vma, start, end);
772 flush_cache_pages(vma, start & PAGE_MASK, end);
773}
774
775void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
776{
769 if (WARN_ON(!pfn_valid(pfn)))
770 return;
771 if (parisc_requires_coherency())
772 flush_user_cache_page(vma, vmaddr);
773 else
777 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
778}
779
780void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
781{
782 if (!PageAnon(page))
783 return;
784
782 if (parisc_requires_coherency()) {
783 if (vma->vm_flags & VM_SHARED)
784 flush_data_cache();
785 else
786 flush_user_cache_page(vma, vmaddr);
787 return;
788 }
789
790 flush_tlb_page(vma, vmaddr);
791 preempt_disable();
792 flush_dcache_page_asm(page_to_phys(page), vmaddr);
793 preempt_enable();
794}
795
785 __flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
786}
787
788int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
789 pte_t *ptep)
790{
791 pte_t pte = ptep_get(ptep);
792
793 if (!pte_young(pte))
794 return 0;
795 set_pte(ptep, pte_mkold(pte));
796#if CONFIG_FLUSH_PAGE_ACCESSED
797 __flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
798#endif
799 return 1;
800}
801
802/*
803 * After a PTE is cleared, we have no way to flush the cache for
804 * the physical page. On PA8800 and PA8900 processors, these lines
805 * can cause random cache corruption. Thus, we must flush the cache
806 * as well as the TLB when clearing a PTE that's valid.
807 */
808pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
809 pte_t *ptep)
810{
811 struct mm_struct *mm = (vma)->vm_mm;
812 pte_t pte = ptep_get_and_clear(mm, addr, ptep);
813 unsigned long pfn = pte_pfn(pte);
814
815 if (pfn_valid(pfn))
816 __flush_cache_page(vma, addr, PFN_PHYS(pfn));
817 else if (pte_accessible(mm, pte))
818 flush_tlb_page(vma, addr);
819
820 return pte;
821}
822
823/*
824 * The physical address for pages in the ioremap case can be obtained
825 * from the vm_struct struct. I wasn't able to successfully handle the
826 * vmalloc and vmap cases. We have an array of struct page pointers in
827 * the uninitialized vmalloc case but the flush failed using page_to_pfn.
828 */
829void flush_cache_vmap(unsigned long start, unsigned long end)
830{
831 unsigned long addr, physaddr;
832 struct vm_struct *vm;
833
834 /* Prevent cache move-in */
835 flush_tlb_kernel_range(start, end);
836
837 if (end - start >= parisc_cache_flush_threshold) {
838 flush_cache_all();
839 return;
840 }
841
842 if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
843 flush_cache_all();
844 return;
845 }
846
847 vm = find_vm_area((void *)start);
848 if (WARN_ON_ONCE(!vm)) {
849 flush_cache_all();
850 return;
851 }
852
853 /* The physical addresses of IOREMAP regions are contiguous */
854 if (vm->flags & VM_IOREMAP) {
855 physaddr = vm->phys_addr;
856 for (addr = start; addr < end; addr += PAGE_SIZE) {
857 preempt_disable();
858 flush_dcache_page_asm(physaddr, start);
859 flush_icache_page_asm(physaddr, start);
860 preempt_enable();
861 physaddr += PAGE_SIZE;
862 }
863 return;
864 }
865
866 flush_cache_all();
867}
868EXPORT_SYMBOL(flush_cache_vmap);
869
870/*
871 * The vm_struct has been retired and the page table is set up. The
872 * last page in the range is a guard page. Its physical address can't
873 * be determined using lpa, so there is no way to flush the range
874 * using flush_dcache_page_asm.
875 */
876void flush_cache_vunmap(unsigned long start, unsigned long end)
877{
878 /* Prevent cache move-in */
879 flush_tlb_kernel_range(start, end);
880 flush_data_cache();
881}
882EXPORT_SYMBOL(flush_cache_vunmap);
883
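Illustrative aside (not part of the patch): flush_cache_vmap() and flush_cache_vunmap() above are reached from the generic vmalloc code when a mapping is created or torn down. A hypothetical, minimal kernel-module-style sketch of a caller that exercises both paths through vmap()/vunmap():

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

static int __init vmap_demo_init(void)
{
	struct page *pages[2];
	void *va;

	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);
	if (!pages[0] || !pages[1])
		goto out;

	/* establishing the mapping ends up in flush_cache_vmap() */
	va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
	if (va) {
		memset(va, 0, 2 * PAGE_SIZE);
		vunmap(va);	/* tear-down goes through flush_cache_vunmap() */
	}
out:
	if (pages[0])
		__free_page(pages[0]);
	if (pages[1])
		__free_page(pages[1]);
	return 0;
}

static void __exit vmap_demo_exit(void)
{
}

module_init(vmap_demo_init);
module_exit(vmap_demo_exit);
MODULE_LICENSE("GPL");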
884/*
885 * On systems with PA8800/PA8900 processors, there is no way to flush
886 * a vmap range other than using the architected loop to flush the
887 * entire cache. The page directory is not set up, so we can't use
888 * fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
889 * L2 is physically indexed but FDCE/FICE instructions in virtual
890 * mode output their virtual address on the core bus, not their
891 * real address. As a result, the L2 cache index formed from the
892 * virtual address will most likely not be the same as the L2 index
893 * formed from the real address.
894 */
895void flush_kernel_vmap_range(void *vaddr, int size)
896{
897 unsigned long start = (unsigned long)vaddr;
898 unsigned long end = start + size;
899
801 if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
802 (unsigned long)size >= parisc_cache_flush_threshold) {
803 flush_tlb_kernel_range(start, end);
804 flush_data_cache();
805 return;
806 }
807
808 flush_kernel_dcache_range_asm(start, end);
809 flush_tlb_kernel_range(start, end);
810}
811EXPORT_SYMBOL(flush_kernel_vmap_range);
812
900 flush_tlb_kernel_range(start, end);
901
902 if (!static_branch_likely(&parisc_has_dcache))
903 return;
904
905 /* If interrupts are disabled, we can only do local flush */
906 if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
907 flush_data_cache_local(NULL);
908 return;
909 }
910
911 flush_data_cache();
912}
913EXPORT_SYMBOL(flush_kernel_vmap_range);
914
915void invalidate_kernel_vmap_range(void *vaddr, int size)
916{
917 unsigned long start = (unsigned long)vaddr;
918 unsigned long end = start + size;
919
920 /* Ensure DMA is complete */
921 asm_syncdma();
922
821 if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
822 (unsigned long)size >= parisc_cache_flush_threshold) {
823 flush_tlb_kernel_range(start, end);
824 flush_data_cache();
825 return;
826 }
827
828 purge_kernel_dcache_range_asm(start, end);
829 flush_tlb_kernel_range(start, end);
830}
831EXPORT_SYMBOL(invalidate_kernel_vmap_range);
832
923 flush_tlb_kernel_range(start, end);
924
925 if (!static_branch_likely(&parisc_has_dcache))
926 return;
927
928 /* If interrupts are disabled, we can only do local flush */
929 if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
930 flush_data_cache_local(NULL);
931 return;
932 }
933
934 flush_data_cache();
935}
936EXPORT_SYMBOL(invalidate_kernel_vmap_range);
937
938
939SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
940 unsigned int, cache)
941{
942 unsigned long start, end;

--- 41 unchanged lines hidden ---