--- cache.c (2612e3bbc0386368a850140a6c9b990cd496a5ec)
+++ cache.c (e70bbca607424dbb236cc641adba39c2cc0d65c5)
 /*
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
  * Copyright (C) 1999 SuSE GmbH Nuernberg
  * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)

--- 80 unchanged lines hidden ---

         if (static_branch_likely(&parisc_has_dcache))
                 on_each_cpu(flush_data_cache_local, NULL, 1);
 }


 /* Kernel virtual address of pfn. */
 #define pfn_va(pfn)     __va(PFN_PHYS(pfn))

-void
-__update_cache(pte_t pte)
+void __update_cache(pte_t pte)
 {
         unsigned long pfn = pte_pfn(pte);
-        struct page *page;
+        struct folio *folio;
+        unsigned int nr;

         /* We don't have pte special.  As a result, we can be called with
            an invalid pfn and we don't need to flush the kernel dcache page.
            This occurs with FireGL card in C8000.  */
         if (!pfn_valid(pfn))
                 return;

-        page = pfn_to_page(pfn);
-        if (page_mapping_file(page) &&
-            test_bit(PG_dcache_dirty, &page->flags)) {
-                flush_kernel_dcache_page_addr(pfn_va(pfn));
-                clear_bit(PG_dcache_dirty, &page->flags);
+        folio = page_folio(pfn_to_page(pfn));
+        pfn = folio_pfn(folio);
+        nr = folio_nr_pages(folio);
+        if (folio_flush_mapping(folio) &&
+            test_bit(PG_dcache_dirty, &folio->flags)) {
+                while (nr--)
+                        flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
+                clear_bit(PG_dcache_dirty, &folio->flags);
         } else if (parisc_requires_coherency())
-                flush_kernel_dcache_page_addr(pfn_va(pfn));
+                while (nr--)
+                        flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
 }
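The rewritten __update_cache() above flushes every page of the folio backing the PTE, where the old code only ever handled a single page. A minimal userspace model of that while (nr--) walk, with pfn_va() and flush_kernel_dcache_page_addr() reduced to hypothetical stubs (the base address and pfn values are made up):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Hypothetical stand-ins for pfn_va() and flush_kernel_dcache_page_addr(). */
static unsigned long pfn_va(unsigned long pfn)
{
        return 0xc0000000UL + pfn * PAGE_SIZE;
}

static void flush_page(unsigned long va)
{
        printf("flush va 0x%08lx\n", va);
}

int main(void)
{
        unsigned long pfn = 100;        /* folio_pfn(folio): first pfn of the folio */
        unsigned int nr = 4;            /* folio_nr_pages(folio) */

        /* Same back-to-front walk as the patch; flushes pfns 103..100. */
        while (nr--)
                flush_page(pfn_va(pfn + nr));
        return 0;
}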

 void
 show_cache_info(struct seq_file *m)
 {
         char buf[32];

         seq_printf(m, "I-cache\t\t: %ld KB\n",

--- 237 unchanged lines hidden ---

         mtctl(pgd, 25);
         mtsp(space, SR_USER);
         mtctl(prot, 8);
         local_irq_restore(flags);

         preempt_enable();
 }

+void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+                unsigned int nr)
+{
+        void *kaddr = page_address(page);
+
+        for (;;) {
+                flush_kernel_dcache_page_addr(kaddr);
+                flush_kernel_icache_page(kaddr);
+                if (--nr == 0)
+                        break;
+                kaddr += PAGE_SIZE;
+        }
+}
+
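flush_icache_pages() is new in this revision. For each of the nr pages it writes the D-cache back before touching the I-cache, since parisc's instruction cache is not coherent with the data cache and could otherwise serve stale bytes the data side has already overwritten. A standalone sketch of the same loop shape, with the two flush primitives stubbed out as hypothetical helpers; like the kernel code, it assumes nr >= 1:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Hypothetical stubs for the parisc flush primitives. */
static void flush_dcache(char *kaddr) { printf("dcache writeback %p\n", (void *)kaddr); }
static void flush_icache(char *kaddr) { printf("icache invalidate %p\n", (void *)kaddr); }

/* Same shape as flush_icache_pages(); nr must be >= 1 or --nr wraps. */
static void flush_icache_pages_model(char *kaddr, unsigned int nr)
{
        for (;;) {
                flush_dcache(kaddr);    /* push data out first... */
                flush_icache(kaddr);    /* ...then drop stale instructions */
                if (--nr == 0)
                        break;
                kaddr += PAGE_SIZE;
        }
}

int main(void)
{
        static char buf[4 * 4096];

        flush_icache_pages_model(buf, 4);
        return 0;
}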
 static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
 {
         pte_t *ptep = NULL;
         pgd_t *pgd = mm->pgd;
         p4d_t *p4d;
         pud_t *pud;
         pmd_t *pmd;

--- 12 unchanged lines hidden ---

 }

 static inline bool pte_needs_flush(pte_t pte)
 {
         return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
                 == (_PAGE_PRESENT | _PAGE_ACCESSED);
 }

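pte_needs_flush() is a single masked compare: a PTE needs a flush only if it is present and has been accessed, and is not mapped uncacheable. A standalone model with hypothetical bit assignments (the real _PAGE_* constants are parisc-specific and live elsewhere in the tree):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical bit values; the real ones are defined by the architecture. */
#define _PAGE_PRESENT   0x001UL
#define _PAGE_ACCESSED  0x002UL
#define _PAGE_NO_CACHE  0x004UL

static bool pte_needs_flush(unsigned long pte_val)
{
        /* True only if PRESENT and ACCESSED are set and NO_CACHE is clear. */
        return (pte_val & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
                == (_PAGE_PRESENT | _PAGE_ACCESSED);
}

int main(void)
{
        printf("%d\n", pte_needs_flush(_PAGE_PRESENT | _PAGE_ACCESSED));  /* 1 */
        printf("%d\n", pte_needs_flush(_PAGE_PRESENT));                   /* 0: never accessed */
        printf("%d\n", pte_needs_flush(_PAGE_PRESENT | _PAGE_ACCESSED |
                                       _PAGE_NO_CACHE));                  /* 0: uncached mapping */
        return 0;
}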
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
 {
-        struct address_space *mapping = page_mapping_file(page);
-        struct vm_area_struct *mpnt;
-        unsigned long offset;
+        struct address_space *mapping = folio_flush_mapping(folio);
+        struct vm_area_struct *vma;
         unsigned long addr, old_addr = 0;
+        void *kaddr;
         unsigned long count = 0;
-        unsigned long flags;
+        unsigned long i, nr, flags;
         pgoff_t pgoff;

         if (mapping && !mapping_mapped(mapping)) {
-                set_bit(PG_dcache_dirty, &page->flags);
+                set_bit(PG_dcache_dirty, &folio->flags);
                 return;
         }

-        flush_kernel_dcache_page_addr(page_address(page));
+        nr = folio_nr_pages(folio);
+        kaddr = folio_address(folio);
+        for (i = 0; i < nr; i++)
+                flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE);

         if (!mapping)
                 return;

-        pgoff = page->index;
+        pgoff = folio->index;

         /*
          * We have carefully arranged in arch_get_unmapped_area() that
          * *any* mappings of a file are always congruently mapped (whether
          * declared as MAP_PRIVATE or MAP_SHARED), so we only need
          * to flush one address here for them all to become coherent
          * on machines that support equivalent aliasing
          */
         flush_dcache_mmap_lock_irqsave(mapping, flags);
-        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
-                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-                addr = mpnt->vm_start + offset;
-                if (parisc_requires_coherency()) {
-                        bool needs_flush = false;
-                        pte_t *ptep;
+        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
+                unsigned long offset = pgoff - vma->vm_pgoff;
+                unsigned long pfn = folio_pfn(folio);

-                        ptep = get_ptep(mpnt->vm_mm, addr);
-                        if (ptep) {
-                                needs_flush = pte_needs_flush(*ptep);
+                addr = vma->vm_start;
+                nr = folio_nr_pages(folio);
+                if (offset > -nr) {
+                        pfn -= offset;
+                        nr += offset;
+                } else {
+                        addr += offset * PAGE_SIZE;
+                }
+                if (addr + nr * PAGE_SIZE > vma->vm_end)
+                        nr = (vma->vm_end - addr) / PAGE_SIZE;
+
+                if (parisc_requires_coherency()) {
+                        for (i = 0; i < nr; i++) {
+                                pte_t *ptep = get_ptep(vma->vm_mm,
+                                                        addr + i * PAGE_SIZE);
+                                if (!ptep)
+                                        continue;
+                                if (pte_needs_flush(*ptep))
+                                        flush_user_cache_page(vma,
+                                                addr + i * PAGE_SIZE);
+                                /* Optimise accesses to the same table? */
                                 pte_unmap(ptep);
                         }
-                        if (needs_flush)
-                                flush_user_cache_page(mpnt, addr);
                 } else {
                         /*
                          * The TLB is the engine of coherence on parisc:
                          * The CPU is entitled to speculate any page
                          * with a TLB mapping, so here we kill the
                          * mapping then flush the page along a special
                          * flush only alias mapping. This guarantees that
                          * the page is no-longer in the cache for any
                          * process and nor may it be speculatively read
                          * in (until the user or kernel specifically
                          * accesses it, of course)
                          */
-                        flush_tlb_page(mpnt, addr);
+                        for (i = 0; i < nr; i++)
+                                flush_tlb_page(vma, addr + i * PAGE_SIZE);
                         if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
                                         != (addr & (SHM_COLOUR - 1))) {
-                                __flush_cache_page(mpnt, addr, page_to_phys(page));
+                                for (i = 0; i < nr; i++)
+                                        __flush_cache_page(vma,
+                                                addr + i * PAGE_SIZE,
+                                                (pfn + i) * PAGE_SIZE);
                                 /*
                                  * Software is allowed to have any number
                                  * of private mappings to a page.
                                  */
-                                if (!(mpnt->vm_flags & VM_SHARED))
+                                if (!(vma->vm_flags & VM_SHARED))
                                         continue;
                                 if (old_addr)
                                         pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
-                                                old_addr, addr, mpnt->vm_file);
-                                old_addr = addr;
+                                                old_addr, addr, vma->vm_file);
+                                if (nr == folio_nr_pages(folio))
+                                        old_addr = addr;
                         }
                 }
                 WARN_ON(++count == 4096);
         }
         flush_dcache_mmap_unlock_irqrestore(mapping, flags);
 }
-EXPORT_SYMBOL(flush_dcache_page);
+EXPORT_SYMBOL(flush_dcache_folio);
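The subtlest addition in flush_dcache_folio() above is the addr/pfn/nr clamp at the top of the VMA loop: offset is unsigned, so a folio that starts before the VMA shows up as a huge wrapped value, and the unsigned comparison offset > -nr catches exactly that case. Below is a self-contained model of the clamp with made-up addresses, pfns, and file offsets (none of these sample values come from the patch):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/*
 * Model of the addr/pfn/nr clamping in flush_dcache_folio(); the VMA and
 * folio fields are plain parameters here, not real kernel structures.
 */
static void clamp(unsigned long vm_start, unsigned long vm_end,
                  unsigned long vm_pgoff,       /* file page of vm_start */
                  unsigned long pgoff,          /* file page of folio start */
                  unsigned long pfn, unsigned long nr)
{
        unsigned long offset = pgoff - vm_pgoff;        /* may wrap "negative" */
        unsigned long addr = vm_start;

        if (offset > -nr) {             /* folio starts before the VMA */
                pfn -= offset;          /* skip the pages below vm_start */
                nr += offset;
        } else {                        /* folio starts inside the VMA */
                addr += offset * PAGE_SIZE;
        }
        if (addr + nr * PAGE_SIZE > vm_end)     /* folio runs past vm_end */
                nr = (vm_end - addr) / PAGE_SIZE;

        printf("flush %lu page(s) at addr 0x%lx, first pfn %lu\n",
               nr, addr, pfn);
}

int main(void)
{
        /* 4-page folio at file pages 8..11, pfns 100..103. */

        /* VMA maps file pages 0..15: folio fully inside the VMA. */
        clamp(0x10000000UL, 0x10010000UL, 0, 8, 100, 4);
        /* VMA maps file pages 10..15: folio overlaps the VMA start. */
        clamp(0x10000000UL, 0x10006000UL, 10, 8, 100, 4);
        /* VMA maps file pages 0..9: folio runs past the VMA end. */
        clamp(0x10000000UL, 0x1000a000UL, 0, 8, 100, 4);
        return 0;
}

In each case the flush is restricted to the intersection of the folio and the VMA, which is what lets one folio-sized flush serve mappings that only partially overlap it.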

 /* Defined in arch/parisc/kernel/pacache.S */
 EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
 EXPORT_SYMBOL(flush_kernel_icache_range_asm);

 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
 static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

--- 360 unchanged lines hidden ---