xref: /linux/arch/riscv/mm/tlbflush.c (revision 9e56ff53b4115875667760445b028357848b4748)
// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>

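/*
 * Flush all local TLB entries that match @asid with a single
 * "sfence.vma x0, asid"; when no ASID is in use, fall back to a full
 * local flush.
 */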
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma x0, %0"
				:
				: "r" (asid)
				: "memory");
	else
		local_flush_tlb_all();
}

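/*
 * Flush the local TLB entry covering a single virtual address in the
 * given address space; without an ASID, the address is flushed for all
 * address spaces.
 */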
static inline void local_flush_tlb_page_asid(unsigned long addr,
		unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma %0, %1"
				:
				: "r" (addr), "r" (asid)
				: "memory");
	else
		local_flush_tlb_page(addr);
}

/*
 * Flush the entire TLB if the number of entries to be flushed exceeds
 * the threshold below.
 */
static unsigned long tlb_flush_all_threshold __read_mostly = 64;

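/*
 * Flush a range one stride at a time, unless it covers more PTEs than
 * tlb_flush_all_threshold, in which case a single ASID-wide flush is
 * assumed to be cheaper than issuing one sfence.vma per stride.
 */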
static void local_flush_tlb_range_threshold_asid(unsigned long start,
						 unsigned long size,
						 unsigned long stride,
						 unsigned long asid)
{
	unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
	int i;

	if (nr_ptes_in_range > tlb_flush_all_threshold) {
		local_flush_tlb_all_asid(asid);
		return;
	}

	for (i = 0; i < nr_ptes_in_range; ++i) {
		local_flush_tlb_page_asid(start, asid);
		start += stride;
	}
}

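/*
 * Local range-flush dispatcher: a range that fits in one stride gets a
 * single page flush, FLUSH_TLB_MAX_SIZE means "flush everything", and
 * anything in between goes through the threshold logic above.
 */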
static inline void local_flush_tlb_range_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	if (size <= stride)
		local_flush_tlb_page_asid(start, asid);
	else if (size == FLUSH_TLB_MAX_SIZE)
		local_flush_tlb_all_asid(asid);
	else
		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}

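/* Flush a range of kernel mappings on the local hart only. */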
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
}

static void __ipi_flush_tlb_all(void *info)
{
	local_flush_tlb_all();
}

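/*
 * Flush the TLB of every online hart, either by sending IPIs that run
 * local_flush_tlb_all() or by asking the SBI firmware to broadcast the
 * fence on our behalf.
 */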
void flush_tlb_all(void)
{
	if (riscv_use_ipi_for_rfence())
		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
	else
		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
}

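/* Arguments handed to remote harts when a range flush is done by IPI. */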
struct flush_tlb_range_data {
	unsigned long asid;
	unsigned long start;
	unsigned long size;
	unsigned long stride;
};

static void __ipi_flush_tlb_range_asid(void *info)
{
	struct flush_tlb_range_data *d = info;

	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}

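/*
 * Common helper behind the flush_tlb_*() entry points below. Decide
 * whether the flush can stay local (the current CPU is the only one in
 * @cmask) or has to be broadcast, and in the broadcast case pick between
 * IPIs and an SBI remote fence based on riscv_use_ipi_for_rfence().
 */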
static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
			      unsigned long start, unsigned long size,
			      unsigned long stride)
{
	struct flush_tlb_range_data ftd;
	bool broadcast;

	if (cpumask_empty(cmask))
		return;

	if (cmask != cpu_online_mask) {
		unsigned int cpuid;

		cpuid = get_cpu();
		/* Check whether the TLB flush needs to be sent to other CPUs. */
		broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
	} else {
		broadcast = true;
	}

	if (broadcast) {
		if (riscv_use_ipi_for_rfence()) {
			ftd.asid = asid;
			ftd.start = start;
			ftd.size = size;
			ftd.stride = stride;
			on_each_cpu_mask(cmask,
					 __ipi_flush_tlb_range_asid,
					 &ftd, 1);
		} else
			sbi_remote_sfence_vma_asid(cmask,
						   start, size, asid);
	} else {
		local_flush_tlb_range_asid(start, size, stride, asid);
	}

	if (cmask != cpu_online_mask)
		put_cpu();
}

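/*
 * Return the ASID currently assigned to @mm, or FLUSH_TLB_NO_ASID when
 * the ASID allocator is not in use.
 */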
static inline unsigned long get_mm_asid(struct mm_struct *mm)
{
	return static_branch_unlikely(&use_asid_allocator) ?
			atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
}

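/* Flush every user mapping of @mm on all harts recorded in its cpumask. */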
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
			  0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}

void flush_tlb_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned int page_size)
{
	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
			  start, end - start, page_size);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  addr, PAGE_SIZE, PAGE_SIZE);
}

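/*
 * Flush a range of user mappings. Hugetlb VMAs are flushed with a stride
 * of the huge page size, except when Svnapot is available, where the
 * stride is rounded down to a regular page-table level size (see the
 * comment in the function body).
 */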
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long stride_size;

	if (!is_vm_hugetlb_page(vma)) {
		stride_size = PAGE_SIZE;
	} else {
		stride_size = huge_page_size(hstate_vma(vma));

		/*
		 * As stated in the privileged specification, every PTE in a
		 * NAPOT region must be invalidated, so reset the stride in that
		 * case.
		 */
		if (has_svnapot()) {
			if (stride_size >= PGDIR_SIZE)
				stride_size = PGDIR_SIZE;
			else if (stride_size >= P4D_SIZE)
				stride_size = P4D_SIZE;
			else if (stride_size >= PUD_SIZE)
				stride_size = PUD_SIZE;
			else if (stride_size >= PMD_SIZE)
				stride_size = PMD_SIZE;
			else
				stride_size = PAGE_SIZE;
		}
	}

	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  start, end - start, stride_size);
}

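/* Flush a range of kernel mappings on every online hart. */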
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	__flush_tlb_range((struct cpumask *)cpu_online_mask, FLUSH_TLB_NO_ASID,
			  start, end - start, PAGE_SIZE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
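/* Flush a range backed by PMD-level (huge) mappings. */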
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  start, end - start, PMD_SIZE);
}
#endif

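/*
 * Deferred (batched) TLB flushing, used by the generic unmap batching
 * code: CPUs that may hold stale entries are accumulated in the batch
 * cpumask and flushed in one go by arch_tlbbatch_flush().
 */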
bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
	return true;
}

void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
			       struct mm_struct *mm,
			       unsigned long uaddr)
{
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

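/*
 * A batched flush for @mm may still be pending; flush the whole mm so
 * that no stale entries survive.
 */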
void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	__flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
			  FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}
237