xref: /linux/arch/riscv/mm/cacheflush.c (revision d6dcdabafcd7c612b164079d00da6d9775863a0b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2017 SiFive
4  */
5 
6 #include <linux/acpi.h>
7 #include <linux/of.h>
8 #include <asm/acpi.h>
9 #include <asm/cacheflush.h>
10 
11 #ifdef CONFIG_SMP
12 
13 #include <asm/sbi.h>
14 
/*
 * IPI callback: flush this hart's instruction cache.
 *
 * Note: a void function must not return an expression (C11 6.8.6.4);
 * the old "return local_flush_icache_all();" form also trips checkpatch.
 */
static void ipi_remote_fence_i(void *info)
{
	local_flush_icache_all();
}
19 
20 void flush_icache_all(void)
21 {
22 	local_flush_icache_all();
23 
24 	if (num_online_cpus() < 2)
25 		return;
26 	else if (riscv_use_sbi_for_rfence())
27 		sbi_remote_fence_i(NULL);
28 	else
29 		on_each_cpu(ipi_remote_fence_i, NULL, 1);
30 }
31 EXPORT_SYMBOL(flush_icache_all);
32 
33 /*
34  * Performs an icache flush for the given MM context.  RISC-V has no direct
35  * mechanism for instruction cache shoot downs, so instead we send an IPI that
36  * informs the remote harts they need to flush their local instruction caches.
37  * To avoid pathologically slow behavior in a common case (a bunch of
38  * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
39  * IPIs for harts that are not currently executing a MM context and instead
40  * schedule a deferred local instruction cache flush to be performed before
41  * execution resumes on each hart.
42  */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	/*
	 * Pin this task to the current hart: the smp_processor_id() and
	 * cpumask bookkeeping below must not race with migration.
	 */
	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	/* If no other hart currently runs this MM, a local flush is enough. */
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (riscv_use_sbi_for_rfence()) {
		/* SBI firmware executes the remote fence.i on our behalf. */
		sbi_remote_fence_i(&others);
	} else {
		/* No SBI rfence: IPI each concurrently-executing hart. */
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}
82 
83 #endif /* CONFIG_SMP */
84 
85 #ifdef CONFIG_MMU
86 void flush_icache_pte(struct mm_struct *mm, pte_t pte)
87 {
88 	struct folio *folio = page_folio(pte_page(pte));
89 
90 	if (!test_bit(PG_dcache_clean, &folio->flags)) {
91 		flush_icache_mm(mm, false);
92 		set_bit(PG_dcache_clean, &folio->flags);
93 	}
94 }
95 #endif /* CONFIG_MMU */
96 
/* Cache block size for cbom (cache-block management) operations; 0 until probed. */
unsigned int riscv_cbom_block_size;
EXPORT_SYMBOL_GPL(riscv_cbom_block_size);

/* Cache block size for cboz (cache-block zero) operations; 0 until probed. */
unsigned int riscv_cboz_block_size;
EXPORT_SYMBOL_GPL(riscv_cboz_block_size);
102 
103 static void __init cbo_get_block_size(struct device_node *node,
104 				      const char *name, u32 *block_size,
105 				      unsigned long *first_hartid)
106 {
107 	unsigned long hartid;
108 	u32 val;
109 
110 	if (riscv_of_processor_hartid(node, &hartid))
111 		return;
112 
113 	if (of_property_read_u32(node, name, &val))
114 		return;
115 
116 	if (!*block_size) {
117 		*block_size = val;
118 		*first_hartid = hartid;
119 	} else if (*block_size != val) {
120 		pr_warn("%s mismatched between harts %lu and %lu\n",
121 			name, *first_hartid, hartid);
122 	}
123 }
124 
125 void __init riscv_init_cbo_blocksizes(void)
126 {
127 	unsigned long cbom_hartid, cboz_hartid;
128 	u32 cbom_block_size = 0, cboz_block_size = 0;
129 	struct device_node *node;
130 	struct acpi_table_header *rhct;
131 	acpi_status status;
132 
133 	if (acpi_disabled) {
134 		for_each_of_cpu_node(node) {
135 			/* set block-size for cbom and/or cboz extension if available */
136 			cbo_get_block_size(node, "riscv,cbom-block-size",
137 					   &cbom_block_size, &cbom_hartid);
138 			cbo_get_block_size(node, "riscv,cboz-block-size",
139 					   &cboz_block_size, &cboz_hartid);
140 		}
141 	} else {
142 		status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
143 		if (ACPI_FAILURE(status))
144 			return;
145 
146 		acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, NULL);
147 		acpi_put_table((struct acpi_table_header *)rhct);
148 	}
149 
150 	if (cbom_block_size)
151 		riscv_cbom_block_size = cbom_block_size;
152 
153 	if (cboz_block_size)
154 		riscv_cboz_block_size = cboz_block_size;
155 }
156