xref: /linux/arch/riscv/mm/tlbflush.c (revision 266aa3b4812e97942a8ce5c7aafa7da059f7b5b8)
// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>

/*
 * Flush entire TLB if number of entries to be flushed is greater
 * than the threshold below.
 */
unsigned long tlb_flush_all_threshold __read_mostly = 64;

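/*
 * Flush a range on the local hart one stride at a time, unless the number
 * of entries exceeds tlb_flush_all_threshold, in which case the whole TLB
 * is flushed for the given ASID instead.
 */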
static void local_flush_tlb_range_threshold_asid(unsigned long start,
						 unsigned long size,
						 unsigned long stride,
						 unsigned long asid)
{
	unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
	int i;

	if (nr_ptes_in_range > tlb_flush_all_threshold) {
		local_flush_tlb_all_asid(asid);
		return;
	}

	for (i = 0; i < nr_ptes_in_range; ++i) {
		local_flush_tlb_page_asid(start, asid);
		start += stride;
	}
}

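/*
 * Pick the cheapest local flush for the range: a single page, the entire
 * TLB when size is FLUSH_TLB_MAX_SIZE, or a per-page loop bounded by the
 * threshold above.
 */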
static inline void local_flush_tlb_range_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	if (size <= stride)
		local_flush_tlb_page_asid(start, asid);
	else if (size == FLUSH_TLB_MAX_SIZE)
		local_flush_tlb_all_asid(asid);
	else
		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}

/* Flush a range of kernel pages without broadcasting */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
}

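/* IPI callback: flush the entire TLB on the receiving CPU. */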
static void __ipi_flush_tlb_all(void *info)
{
	local_flush_tlb_all();
}

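/*
 * Flush the entire TLB on all online CPUs: locally when this is the only
 * online CPU, via SBI remote fences when available, otherwise via IPIs.
 */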
void flush_tlb_all(void)
{
	if (num_online_cpus() < 2)
		local_flush_tlb_all();
	else if (riscv_use_sbi_for_rfence())
		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
	else
		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
}

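/* Arguments passed to the range-flush IPI callback below. */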
struct flush_tlb_range_data {
	unsigned long asid;
	unsigned long start;
	unsigned long size;
	unsigned long stride;
};

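/* IPI callback: flush the requested range locally on the receiving CPU. */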
static void __ipi_flush_tlb_range_asid(void *info)
{
	struct flush_tlb_range_data *d = info;

	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}

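/*
 * Flush a range for the given ASID on every CPU in @cmask: locally when
 * this CPU is the only one in the mask, via SBI remote fences when
 * available, otherwise via IPIs.
 */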
static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
			      unsigned long start, unsigned long size,
			      unsigned long stride)
{
	unsigned int cpu;

	if (cpumask_empty(cmask))
		return;

	cpu = get_cpu();

	/* Check if the TLB flush needs to be sent to other CPUs. */
	if (cpumask_any_but(cmask, cpu) >= nr_cpu_ids) {
		local_flush_tlb_range_asid(start, size, stride, asid);
	} else if (riscv_use_sbi_for_rfence()) {
		sbi_remote_sfence_vma_asid(cmask, start, size, asid);
	} else {
		struct flush_tlb_range_data ftd;

		ftd.asid = asid;
		ftd.start = start;
		ftd.size = size;
		ftd.stride = stride;
		on_each_cpu_mask(cmask, __ipi_flush_tlb_range_asid, &ftd, 1);
	}

	put_cpu();
}

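/* Extract the hardware ASID from the mm's context id. */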
static inline unsigned long get_mm_asid(struct mm_struct *mm)
{
	return cntx2asid(atomic_long_read(&mm->context.id));
}

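/* Flush all of @mm's user mappings on the CPUs in its cpumask. */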
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
			  0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}

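/* Flush a user address range of @mm, using @page_size as the stride. */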
void flush_tlb_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned int page_size)
{
	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
			  start, end - start, page_size);
}

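/* Flush a single user page mapping. */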
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  addr, PAGE_SIZE, PAGE_SIZE);
}

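/*
 * Flush a user address range, using the huge page size as the stride for
 * hugetlb VMAs.
 */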
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long stride_size;

	if (!is_vm_hugetlb_page(vma)) {
		stride_size = PAGE_SIZE;
	} else {
		stride_size = huge_page_size(hstate_vma(vma));

		/*
		 * As stated in the privileged specification, every PTE in a
		 * NAPOT region must be invalidated, so reset the stride in that
		 * case.
		 */
		if (has_svnapot()) {
			if (stride_size >= PGDIR_SIZE)
				stride_size = PGDIR_SIZE;
			else if (stride_size >= P4D_SIZE)
				stride_size = P4D_SIZE;
			else if (stride_size >= PUD_SIZE)
				stride_size = PUD_SIZE;
			else if (stride_size >= PMD_SIZE)
				stride_size = PMD_SIZE;
			else
				stride_size = PAGE_SIZE;
		}
	}

	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  start, end - start, stride_size);
}

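/* Flush a range of kernel pages on all online CPUs. */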
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	__flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID,
			  start, end - start, PAGE_SIZE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
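/* Flush a PMD-mapped huge page range. */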
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  start, end - start, PMD_SIZE);
}
#endif

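/* Batched/deferred TLB flushing of unmaps is always allowed. */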
bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
	return true;
}

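/*
 * Record the CPUs that will need flushing for this batch; the address
 * itself is not tracked.
 */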
void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
			       struct mm_struct *mm,
			       unsigned long uaddr)
{
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

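/* A batched flush for @mm may still be pending: flush all of @mm now. */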
void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

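/* Flush everything (no ASID) on every CPU recorded in the batch, then reset it. */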
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	__flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
			  FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
	cpumask_clear(&batch->cpumask);
}
206