// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
#include <asm/cpufeature.h>

#define has_svinval()	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)

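/*
 * Helpers for the Svinval extension: sinval.vma invalidates individual
 * translations and is bracketed by sfence.w.inval before and
 * sfence.inval.ir after the batch (see local_flush_tlb_range_threshold_asid()).
 */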
static inline void local_sfence_inval_ir(void)
{
	asm volatile(SFENCE_INVAL_IR() ::: "memory");
}

static inline void local_sfence_w_inval(void)
{
	asm volatile(SFENCE_W_INVAL() ::: "memory");
}

static inline void local_sinval_vma(unsigned long vma, unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		asm volatile(SINVAL_VMA(%0, %1) : : "r" (vma), "r" (asid) : "memory");
	else
		asm volatile(SINVAL_VMA(%0, zero) : : "r" (vma) : "memory");
}

/*
 * Flush the entire TLB if the number of entries to be flushed is greater
 * than the threshold below.
 */
unsigned long tlb_flush_all_threshold __read_mostly = 64;

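/*
 * Flush @size bytes starting at @start in @stride-sized steps.  If the
 * number of entries exceeds tlb_flush_all_threshold, fall back to a full
 * flush of the given ASID.  With Svinval, the per-page invalidations are
 * batched between sfence.w.inval and sfence.inval.ir.
 */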
static void local_flush_tlb_range_threshold_asid(unsigned long start,
						 unsigned long size,
						 unsigned long stride,
						 unsigned long asid)
{
	unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
	int i;

	if (nr_ptes_in_range > tlb_flush_all_threshold) {
		local_flush_tlb_all_asid(asid);
		return;
	}

	if (has_svinval()) {
		local_sfence_w_inval();
		for (i = 0; i < nr_ptes_in_range; ++i) {
			local_sinval_vma(start, asid);
			start += stride;
		}
		local_sfence_inval_ir();
		return;
	}

	for (i = 0; i < nr_ptes_in_range; ++i) {
		local_flush_tlb_page_asid(start, asid);
		start += stride;
	}
}

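/*
 * Pick the cheapest local flush: a single page when the range fits in one
 * stride, a full flush of the ASID for FLUSH_TLB_MAX_SIZE, or a ranged
 * flush otherwise.
 */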
static inline void local_flush_tlb_range_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	if (size <= stride)
		local_flush_tlb_page_asid(start, asid);
	else if (size == FLUSH_TLB_MAX_SIZE)
		local_flush_tlb_all_asid(asid);
	else
		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}

/* Flush a range of kernel pages without broadcasting */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
}

static void __ipi_flush_tlb_all(void *info)
{
	local_flush_tlb_all();
}

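/*
 * Flush everything on every online CPU: locally when this is the only
 * online CPU, otherwise via the SBI rfence extension or an IPI broadcast.
 */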
void flush_tlb_all(void)
{
	if (num_online_cpus() < 2)
		local_flush_tlb_all();
	else if (riscv_use_sbi_for_rfence())
		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
	else
		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
}

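/* Arguments passed to the IPI handler when SBI-based remote fences are not used. */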
struct flush_tlb_range_data {
	unsigned long asid;
	unsigned long start;
	unsigned long size;
	unsigned long stride;
};

static void __ipi_flush_tlb_range_asid(void *info)
{
	struct flush_tlb_range_data *d = info;

	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}

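/* Kernel mappings (mm == NULL) have no ASID, so flush them with FLUSH_TLB_NO_ASID. */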
static inline unsigned long get_mm_asid(struct mm_struct *mm)
{
	return mm ? cntx2asid(atomic_long_read(&mm->context.id)) : FLUSH_TLB_NO_ASID;
}

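/*
 * Common helper for all ranged flushes: flush locally when this CPU is the
 * only one in @cmask, otherwise broadcast through the SBI rfence extension
 * or by IPI, then notify secondary TLBs via the MMU notifier.
 */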
static void __flush_tlb_range(struct mm_struct *mm,
			      const struct cpumask *cmask,
			      unsigned long start, unsigned long size,
			      unsigned long stride)
{
	unsigned long asid = get_mm_asid(mm);
	unsigned int cpu;

	if (cpumask_empty(cmask))
		return;

	cpu = get_cpu();

	/* Check if the TLB flush needs to be sent to other CPUs. */
	if (cpumask_any_but(cmask, cpu) >= nr_cpu_ids) {
		local_flush_tlb_range_asid(start, size, stride, asid);
	} else if (riscv_use_sbi_for_rfence()) {
		sbi_remote_sfence_vma_asid(cmask, start, size, asid);
	} else {
		struct flush_tlb_range_data ftd;

		ftd.asid = asid;
		ftd.start = start;
		ftd.size = size;
		ftd.stride = stride;
		on_each_cpu_mask(cmask, __ipi_flush_tlb_range_asid, &ftd, 1);
	}

	put_cpu();

	if (mm)
		mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + size);
}

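/* mm-wide and ranged flush entry points called by the core MM code. */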
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_range(mm, mm_cpumask(mm), 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}

void flush_tlb_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned int page_size)
{
	__flush_tlb_range(mm, mm_cpumask(mm), start, end - start, page_size);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
			  addr, PAGE_SIZE, PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long stride_size;

	if (!is_vm_hugetlb_page(vma)) {
		stride_size = PAGE_SIZE;
	} else {
		stride_size = huge_page_size(hstate_vma(vma));

		/*
		 * As stated in the privileged specification, every PTE in a
		 * NAPOT region must be invalidated, so reset the stride in that
		 * case.
		 */
		if (has_svnapot()) {
			if (stride_size >= PGDIR_SIZE)
				stride_size = PGDIR_SIZE;
			else if (stride_size >= P4D_SIZE)
				stride_size = P4D_SIZE;
			else if (stride_size >= PUD_SIZE)
				stride_size = PUD_SIZE;
			else if (stride_size >= PMD_SIZE)
				stride_size = PMD_SIZE;
			else
				stride_size = PAGE_SIZE;
		}
	}

	__flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
			  start, end - start, stride_size);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	__flush_tlb_range(NULL, cpu_online_mask,
			  start, end - start, PAGE_SIZE);
}

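/*
 * Transparent huge page helpers: flush with a PMD- or PUD-sized stride so
 * one entry is invalidated per huge mapping in the range.
 */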
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
			  start, end - start, PMD_SIZE);
}

void flush_pud_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	__flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
			  start, end - start, PUD_SIZE);
}
#endif

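/*
 * Batched unmap TLB flush support: page reclaim accumulates the CPUs that
 * need flushing in @batch->cpumask and defers the actual flush until
 * arch_tlbbatch_flush() is called.
 */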
bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
	return true;
}

void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
		struct mm_struct *mm, unsigned long start, unsigned long end)
{
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}

void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	__flush_tlb_range(NULL, &batch->cpumask,
			  0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
	cpumask_clear(&batch->cpumask);
}