/*
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <asm/pgtable.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>

void flush_icache_all(void)
{
	sbi_remote_fence_i(NULL);
}

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shoot downs, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
 * IPIs for harts that are not currently executing a MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, hmask, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm != current->active_mm || !local) {
		cpumask_clear(&hmask);
		riscv_cpuid_to_hartid_mask(&others, &hmask);
		sbi_remote_fence_i(hmask.bits);
	} else {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	}

	preempt_enable();
}

#endif /* CONFIG_SMP */

void flush_icache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		flush_icache_all();
}
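
/*
 * A sketch of the deferred-flush half of the scheme described in
 * flush_icache_mm() above.  In the kernel this logic lives with the
 * mmu_context code on the switch_mm() path rather than in this file, so
 * treat the placement here as illustrative; the names
 * flush_icache_deferred() and icache_stale_mask come from the code and
 * comments above.  When an MM is scheduled onto a hart whose stale bit is
 * still set, the flush that flush_icache_mm() skipped is performed locally.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible before any
		 * instructions are fetched from them; this is the ordering
		 * that pairs with the smp_mb() in flush_icache_mm().
		 */
		smp_mb();
		local_flush_icache_all();
	}
#endif
}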
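
/*
 * For reference, local_flush_icache_all() is provided by <asm/cacheflush.h>
 * rather than this file.  On RISC-V it is expected to reduce to a single
 * FENCE.I instruction, which synchronizes this hart's instruction fetches
 * with prior stores.  A sketch of the usual inline-asm definition (kept in a
 * comment so as not to duplicate the header's copy):
 *
 *	static inline void local_flush_icache_all(void)
 *	{
 *		asm volatile ("fence.i" : : : "memory");
 *	}
 */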