--- cacheflush.c (ac94be498f84f7327533b62faca4c3da64434904)
+++ cacheflush.c (6bd33e1ece528f67646db33bf97406b747dafda0)
@@ -1,75 +1,89 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2017 SiFive
  */
 
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 
 #ifdef CONFIG_SMP
 
 #include <asm/sbi.h>
 
+static void ipi_remote_fence_i(void *info)
+{
+	return local_flush_icache_all();
+}
+
 void flush_icache_all(void)
 {
-	sbi_remote_fence_i(NULL);
+	if (IS_ENABLED(CONFIG_RISCV_SBI))
+		sbi_remote_fence_i(NULL);
+	else
+		on_each_cpu(ipi_remote_fence_i, NULL, 1);
 }
 
 /*
  * Performs an icache flush for the given MM context. RISC-V has no direct
  * mechanism for instruction cache shoot downs, so instead we send an IPI that
  * informs the remote harts they need to flush their local instruction caches.
  * To avoid pathologically slow behavior in a common case (a bunch of
  * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
  * IPIs for harts that are not currently executing a MM context and instead
  * schedule a deferred local instruction cache flush to be performed before
  * execution resumes on each hart.
  */
 void flush_icache_mm(struct mm_struct *mm, bool local)
 {
 	unsigned int cpu;
-	cpumask_t others, hmask, *mask;
+	cpumask_t others, *mask;
 
 	preempt_disable();
 
 	/* Mark every hart's icache as needing a flush for this MM. */
 	mask = &mm->context.icache_stale_mask;
 	cpumask_setall(mask);
 	/* Flush this hart's I$ now, and mark it as flushed. */
 	cpu = smp_processor_id();
 	cpumask_clear_cpu(cpu, mask);
 	local_flush_icache_all();
 
 	/*
 	 * Flush the I$ of other harts concurrently executing, and mark them as
 	 * flushed.
 	 */
 	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
 	local |= cpumask_empty(&others);
-	if (mm != current->active_mm || !local) {
-		riscv_cpuid_to_hartid_mask(&others, &hmask);
-		sbi_remote_fence_i(hmask.bits);
-	} else {
+	if (mm == current->active_mm && local) {
 		/*
 		 * It's assumed that at least one strongly ordered operation is
 		 * performed on this hart between setting a hart's cpumask bit
 		 * and scheduling this MM context on that hart. Sending an SBI
 		 * remote message will do this, but in the case where no
 		 * messages are sent we still need to order this hart's writes
 		 * with flush_icache_deferred().
 		 */
 		smp_mb();
+	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
+		cpumask_t hartid_mask;
+
+		riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
+		sbi_remote_fence_i(cpumask_bits(&hartid_mask));
+	} else {
+		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
 	}
 
 	preempt_enable();
 }
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_MMU
 void flush_icache_pte(pte_t pte)
 {
 	struct page *page = pte_page(pte);
 
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		flush_icache_all();
 }
+#endif /* CONFIG_MMU */
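
Note: the flush_icache_deferred() referenced in the ordering comment is the consumer of icache_stale_mask and lives outside this file. For context, a minimal sketch of that consumer, modeled on the kernel's arch/riscv/mm/context.c (where switch_mm() invokes it); it is not part of this diff:

/*
 * Sketch of the deferred-flush consumer, modeled on arch/riscv/mm/context.c.
 * Runs on the hart that is about to resume execution of @mm: if
 * flush_icache_mm() marked this hart's icache stale, flush it locally
 * before any user instructions run.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes to the instruction
		 * stream are visible here; this pairs with the smp_mb()
		 * (or the SBI/IPI round trip) in flush_icache_mm().
		 */
		smp_mb();
		local_flush_icache_all();
	}
#endif
}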
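Similarly, flush_icache_pte() is reached via the arch's set_pte_at() when an executable PTE is installed. A sketch of that caller, modeled on arch/riscv/include/asm/pgtable.h, again shown for context rather than as part of this diff:

/*
 * Sketch of the caller, modeled on arch/riscv/include/asm/pgtable.h:
 * only present and executable PTEs can expose stale instructions to the
 * icache, so flush_icache_pte() is invoked for exactly those, and
 * PG_dcache_clean then limits the flush to once per page.
 */
static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}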
|