/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/bitfield.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/mmu_notifier.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg)	asm (ARM64_ASM_PREAMBLE			       \
				     "tlbi " #op "\n"			       \
			ALTERNATIVE("nop\n nop",			       \
				    "dsb ish\n tlbi " #op,		       \
				    ARM64_WORKAROUND_REPEAT_TLBI,	       \
				    CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)       \
			    : : )

#define __TLBI_1(op, arg)	asm (ARM64_ASM_PREAMBLE			       \
				     "tlbi " #op ", %0\n"		       \
			ALTERNATIVE("nop\n nop",			       \
				    "dsb ish\n tlbi " #op ", %0",	       \
				    ARM64_WORKAROUND_REPEAT_TLBI,	       \
				    CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)       \
			    : : "r" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
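
/*
 * Usage of both forms (illustrative only; these exact invocations appear in
 * the helpers further down in this file):
 *
 *	__tlbi(vmalle1is);		// operation without a register argument
 *	__tlbi(vale1is, addr);		// operation taking a VA/ASID operand
 */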

#define __tlbi_user(op, arg) do {					\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi(op, (arg) | USER_ASID_FLAG);			\
} while (0)

/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid)				\
	({							\
		unsigned long __ta = (addr) >> 12;		\
		__ta &= GENMASK_ULL(43, 0);			\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})
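
/*
 * Worked example (illustrative values): for uaddr = 0x0000aaaabbbbc000 and
 * asid = 0x42, __TLBI_VADDR(uaddr, asid) evaluates to
 *
 *	__ta  = 0x0000aaaabbbbc000 >> 12;	// 0xaaaabbbbc, already within
 *						// GENMASK_ULL(43, 0)
 *	__ta |= 0x42UL << 48;			// __ta == 0x0042000aaaabbbbc
 *
 * i.e. the page number (addr >> 12) in bits [43:0] and the ASID in
 * bits [63:48].
 */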

/*
 * Get translation granule of the system, which is decided by
 * PAGE_SIZE. Used by TTL.
 *  - 4KB	: 1
 *  - 16KB	: 2
 *  - 64KB	: 3
 */
#define TLBI_TTL_TG_4K		1
#define TLBI_TTL_TG_16K		2
#define TLBI_TTL_TG_64K		3

static inline unsigned long get_trans_granule(void)
{
	switch (PAGE_SIZE) {
	case SZ_4K:
		return TLBI_TTL_TG_4K;
	case SZ_16K:
		return TLBI_TTL_TG_16K;
	case SZ_64K:
		return TLBI_TTL_TG_64K;
	default:
		return 0;
	}
}

/*
 * Level-based TLBI operations.
 *
 * When ARMv8.4-TTL exists, TLBI operations take an additional hint for
 * the level at which the invalidation must take place. If the level is
 * wrong, no invalidation may take place. In the case where the level
 * cannot be easily determined, the value TLBI_TTL_UNKNOWN will perform
 * a non-hinted invalidation. Any provided level outside the hint range
 * will also cause fall-back to non-hinted invalidation.
 *
 * For Stage-2 invalidation, use the level values provided to that effect
 * in asm/stage2_pgtable.h.
 */
#define TLBI_TTL_MASK		GENMASK_ULL(47, 44)

#define TLBI_TTL_UNKNOWN	INT_MAX

#define __tlbi_level(op, addr, level) do {				\
	u64 arg = addr;							\
									\
	if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) &&	\
	    level >= 0 && level <= 3) {					\
		u64 ttl = level & 3;					\
		ttl |= get_trans_granule() << 2;			\
		arg &= ~TLBI_TTL_MASK;					\
		arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);			\
	}								\
									\
	__tlbi(op, arg);						\
} while (0)

#define __tlbi_user_level(op, arg, level) do {				\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
} while (0)
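
/*
 * Example (illustrative): a last-level invalidation for 'addr' with a
 * level-3 (PTE) hint, as issued by __flush_tlb_range_op() further down:
 *
 *	__tlbi_level(vale1is, __TLBI_VADDR(addr, asid), 3);
 *	__tlbi_user_level(vale1is, __TLBI_VADDR(addr, asid), 3);
 *
 * Passing TLBI_TTL_UNKNOWN (or any level outside 0..3) falls back to a
 * non-hinted invalidation.
 */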

/*
 * This macro creates a properly formatted VA operand for the TLB RANGE. The
 * value bit assignments are:
 *
 *	+----------+------+-------+-------+-------+----------------------+
 *	|   ASID   |  TG  | SCALE |  NUM  |  TTL  |        BADDR         |
 *	+----------+------+-------+-------+-------+----------------------+
 *	|63      48|47  46|45   44|43   39|38   37|36                   0|
 *
 * The address range is determined by the formula:
 * [BADDR, BADDR + (NUM + 1) * 2^(5*SCALE + 1) * PAGESIZE)
 *
 * Note that the first argument, baddr, is pre-shifted; if LPA2 is in use,
 * BADDR holds addr[52:16], else BADDR holds the page number. See for example
 * ARM DDI 0487J.a section C5.5.60 "TLBI VAE1IS, TLBI VAE1ISNXS, TLB Invalidate
 * by VA, EL1, Inner Shareable".
 *
 */
#define TLBIR_ASID_MASK		GENMASK_ULL(63, 48)
#define TLBIR_TG_MASK		GENMASK_ULL(47, 46)
#define TLBIR_SCALE_MASK	GENMASK_ULL(45, 44)
#define TLBIR_NUM_MASK		GENMASK_ULL(43, 39)
#define TLBIR_TTL_MASK		GENMASK_ULL(38, 37)
#define TLBIR_BADDR_MASK	GENMASK_ULL(36, 0)

#define __TLBI_VADDR_RANGE(baddr, asid, scale, num, ttl)		\
	({								\
		unsigned long __ta = 0;					\
		unsigned long __ttl = (ttl >= 1 && ttl <= 3) ? ttl : 0;	\
		__ta |= FIELD_PREP(TLBIR_BADDR_MASK, baddr);		\
		__ta |= FIELD_PREP(TLBIR_TTL_MASK, __ttl);		\
		__ta |= FIELD_PREP(TLBIR_NUM_MASK, num);		\
		__ta |= FIELD_PREP(TLBIR_SCALE_MASK, scale);		\
		__ta |= FIELD_PREP(TLBIR_TG_MASK, get_trans_granule());	\
		__ta |= FIELD_PREP(TLBIR_ASID_MASK, asid);		\
		__ta;							\
	})
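
/*
 * Worked example (illustrative values): with 4K pages (TG = 1), asid = 0x42,
 * scale = 2, num = 0, ttl = 3 and a pre-shifted baddr of 0x12345, the fields
 * combine to
 *
 *	(0x42UL << 48) | (1UL << 46) | (2UL << 44) | (0UL << 39) |
 *	(3UL << 37) | 0x12345		== 0x0042606000012345
 */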

/* These macros are used by the TLBI RANGE feature. */
#define __TLBI_RANGE_PAGES(num, scale)	\
	((unsigned long)((num) + 1) << (5 * (scale) + 1))
#define MAX_TLBI_RANGE_PAGES		__TLBI_RANGE_PAGES(31, 3)

/*
 * Generate 'num' values from -1 to 31 with -1 rejected by the
 * __flush_tlb_range() loop below. Its return value is only
 * significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
 * 'pages' is more than that, you must iterate over the overall
 * range.
 */
#define __TLBI_RANGE_NUM(pages, scale)					\
	({								\
		int __pages = min((pages),				\
				  __TLBI_RANGE_PAGES(31, (scale)));	\
		(__pages >> (5 * (scale) + 1)) - 1;			\
	})
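
/*
 * Worked example (illustrative): for pages = 512 and scale = 1,
 *
 *	__TLBI_RANGE_PAGES(31, 1)	= 32 << 6 = 2048
 *	__pages				= min(512, 2048) = 512
 *	num				= (512 >> 6) - 1 = 7
 *
 * so a single range operation with {scale = 1, num = 7} covers
 * __TLBI_RANGE_PAGES(7, 1) = 8 << 6 = 512 pages. At the limit,
 * MAX_TLBI_RANGE_PAGES = 32 << 16 = 2M pages (8GB with 4K pages).
 */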

/*
 *	TLB Invalidation
 *	================
 *
 *	This header file implements the low-level TLB invalidation routines
 *	(sometimes referred to as "flushing" in the kernel) for arm64.
 *
 *	Every invalidation operation uses the following template:
 *
 *	DSB ISHST	// Ensure prior page-table updates have completed
 *	TLBI ...	// Invalidate the TLB
 *	DSB ISH		// Ensure the TLB invalidation has completed
 *	if (invalidated kernel mappings)
 *		ISB	// Discard any instructions fetched from the old mapping
 *
 *
 *	The following functions form part of the "core" TLB invalidation API,
 *	as documented in Documentation/core-api/cachetlb.rst:
 *
 *	flush_tlb_all()
 *		Invalidate the entire TLB (kernel + user) on all CPUs
 *
 *	flush_tlb_mm(mm)
 *		Invalidate an entire user address space on all CPUs.
 *		The 'mm' argument identifies the ASID to invalidate.
 *
 *	flush_tlb_range(vma, start, end)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->mm'.
 *		Note that this operation also invalidates any walk-cache
 *		entries associated with translations for the specified address
 *		range.
 *
 *	flush_tlb_kernel_range(start, end)
 *		Same as flush_tlb_range(..., start, end), but applies to
 *		kernel mappings rather than a particular user address space.
 *		Whilst not explicitly documented, this function is used when
 *		unmapping pages from vmalloc/io space.
 *
 *	flush_tlb_page(vma, addr)
 *		Invalidate a single user mapping for address 'addr' in the
 *		address space corresponding to 'vma->mm'. Note that this
 *		operation only invalidates a single, last-level page-table
 *		entry and therefore does not affect any walk-caches.
 *
 *
 *	Next, we have some undocumented invalidation routines that you probably
 *	don't want to call unless you know what you're doing:
 *
 *	local_flush_tlb_all()
 *		Same as flush_tlb_all(), but only applies to the calling CPU.
 *
 *	__flush_tlb_kernel_pgtable(addr)
 *		Invalidate a single kernel mapping for address 'addr' on all
 *		CPUs, ensuring that any walk-cache entries associated with the
 *		translation are also invalidated.
 *
 *	__flush_tlb_range(vma, start, end, stride, last_level, tlb_level)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->mm'.
 *		The invalidation operations are issued at a granularity
 *		determined by 'stride' and only affect any walk-cache entries
 *		if 'last_level' is false. tlb_level is the level at which the
 *		invalidation must take place. If the level is wrong, no
 *		invalidation may take place. In the case where the level
 *		cannot be easily determined, the value TLBI_TTL_UNKNOWN will
 *		perform a non-hinted invalidation.
 *
 *
 *	Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
 *	on top of these routines, since that is our interface to the mmu_gather
 *	API as used by munmap() and friends.
 */
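
/*
 * Typical (illustrative) call sites for the core API from generic code:
 *
 *	flush_tlb_page(vma, addr);		// after updating a single PTE
 *	flush_tlb_range(vma, start, end);	// after changing a user VA range
 *	flush_tlb_kernel_range(start, end);	// after unmapping vmalloc/io space
 */
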
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid;

	dsb(ishst);
	asid = __TLBI_VADDR(0, ASID(mm));
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}

static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
					   unsigned long uaddr)
{
	unsigned long addr;

	dsb(ishst);
	addr = __TLBI_VADDR(uaddr, ASID(mm));
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, uaddr & PAGE_MASK,
						    (uaddr & PAGE_MASK) + PAGE_SIZE);
}

static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
					 unsigned long uaddr)
{
	return __flush_tlb_page_nosync(vma->vm_mm, uaddr);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	flush_tlb_page_nosync(vma, uaddr);
	dsb(ish);
}

static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
	/*
	 * TLB flush deferral is not required on systems which are affected by
	 * ARM64_WORKAROUND_REPEAT_TLBI, as the __tlbi()/__tlbi_user()
	 * implementation will issue two consecutive TLBI instructions with a
	 * dsb(ish) in between, defeating the purpose (i.e. saving the overall
	 * 'dsb ish' cost).
	 */
	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI))
		return false;

	return true;
}

static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
					     struct mm_struct *mm,
					     unsigned long uaddr)
{
	__flush_tlb_page_nosync(mm, uaddr);
}

/*
 * If mprotect/munmap/etc occurs during TLB batched flushing, we need to
 * synchronise all the TLBI issued with a DSB to avoid the race mentioned in
 * flush_tlb_batched_pending().
 */
static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
	dsb(ish);
}

/*
 * To support TLB batched flush for multiple pages unmapping, we only send
 * the TLBI for each page in arch_tlbbatch_add_pending() and wait for the
 * completion at the end in arch_tlbbatch_flush(). Since we've already issued
 * a TLBI for each page, only a DSB is needed to synchronise its effect on
 * the other CPUs.
 *
 * This saves the time spent waiting on the DSB, compared with issuing a
 * TLBI;DSB sequence for each page.
 */
static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	dsb(ish);
}
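
/*
 * Sketch of the batched-unmap sequence as driven by the core mm code
 * (illustrative, not a literal call chain):
 *
 *	for each page being unmapped:
 *		arch_tlbbatch_add_pending(&batch->arch, mm, uaddr);	// TLBI only
 *	...
 *	arch_tlbbatch_flush(&batch->arch);				// single DSB
 */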

/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_DVM_OPS	PTRS_PER_PTE

/*
 * __flush_tlb_range_op - Perform TLBI operation upon a range
 *
 * @op:	TLBI instruction that operates on a range (has 'r' prefix)
 * @start:	The start address of the range
 * @pages:	Range as the number of pages from 'start'
 * @stride:	Flush granularity
 * @asid:	The ASID of the task (0 for IPA instructions)
 * @tlb_level:	Translation Table level hint, if known
 * @tlbi_user:	If 'true', call an additional __tlbi_user()
 *              (typically for user ASIDs). 'false' for IPA instructions
 * @lpa2:	If 'true', the lpa2 scheme is used as set out below
 *
 * When the CPU does not support TLB range operations, flush the TLB
 * entries one by one at the granularity of 'stride'. If the TLB
 * range ops are supported, then:
 *
 * 1. If FEAT_LPA2 is in use, the start address of a range operation must be
 *    64KB aligned, so flush pages one by one until the alignment is reached
 *    using the non-range operations. This step is skipped if LPA2 is not in
 *    use.
 *
 * 2. The minimum range granularity is decided by 'scale', so multiple range
 *    TLBI operations may be required. Start from scale = 3, flush the largest
 *    possible number of pages ((num+1)*2^(5*scale+1)) that fit into the
 *    requested range, then decrement scale and continue until one or zero pages
 *    are left. We must start from highest scale to ensure 64KB start alignment
 *    is maintained in the LPA2 case.
 *
 * 3. If there is 1 page remaining, flush it through non-range operations. Range
 *    operations can only span an even number of pages. We save this for last to
 *    ensure 64KB start alignment is maintained for the LPA2 case.
 */
#define __flush_tlb_range_op(op, start, pages, stride,			\
				asid, tlb_level, tlbi_user, lpa2)	\
do {									\
	int num = 0;							\
	int scale = 3;							\
	int shift = lpa2 ? 16 : PAGE_SHIFT;				\
	unsigned long addr;						\
									\
	while (pages > 0) {						\
		if (!system_supports_tlb_range() ||			\
		    pages == 1 ||					\
		    (lpa2 && start != ALIGN(start, SZ_64K))) {		\
			addr = __TLBI_VADDR(start, asid);		\
			__tlbi_level(op, addr, tlb_level);		\
			if (tlbi_user)					\
				__tlbi_user_level(op, addr, tlb_level);	\
			start += stride;				\
			pages -= stride >> PAGE_SHIFT;			\
			continue;					\
		}							\
									\
		num = __TLBI_RANGE_NUM(pages, scale);			\
		if (num >= 0) {						\
			addr = __TLBI_VADDR_RANGE(start >> shift, asid,	\
						scale, num, tlb_level);	\
			__tlbi(r##op, addr);				\
			if (tlbi_user)					\
				__tlbi_user(r##op, addr);		\
			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
			pages -= __TLBI_RANGE_PAGES(num, scale);	\
		}							\
		scale--;						\
	}								\
} while (0)
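
/*
 * Worked example (illustrative): with 4K pages, LPA2 disabled and TLB range
 * operations supported, flushing pages = 2053 decomposes as:
 *
 *	scale = 3: num = (2053 >> 16) - 1 < 0	-> no range op, scale--
 *	scale = 2: num = (2053 >> 11) - 1 = 0	-> range op for 2048 pages, 5 left
 *	scale = 1: num = (5 >> 6) - 1 < 0	-> no range op, scale--
 *	scale = 0: num = (5 >> 1) - 1 = 1	-> range op for 4 pages, 1 left
 *	pages == 1				-> one non-range TLBI for the last page
 */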

#define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());

static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level,
				     int tlb_level)
{
	unsigned long asid, pages;

	start = round_down(start, stride);
	end = round_up(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

	/*
	 * When not using TLB range operations, we can handle up to
	 * (MAX_DVM_OPS - 1) pages;
	 * when using TLB range operations, we can handle up to
	 * MAX_TLBI_RANGE_PAGES pages.
	 */
	if ((!system_supports_tlb_range() &&
	     (end - start) >= (MAX_DVM_OPS * stride)) ||
	    pages > MAX_TLBI_RANGE_PAGES) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	dsb(ishst);
	asid = ASID(vma->vm_mm);

	if (last_level)
		__flush_tlb_range_op(vale1is, start, pages, stride, asid,
				     tlb_level, true, lpa2_is_enabled());
	else
		__flush_tlb_range_op(vae1is, start, pages, stride, asid,
				     tlb_level, true, lpa2_is_enabled());

	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
}

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level,
				     int tlb_level)
{
	__flush_tlb_range_nosync(vma, start, end, stride,
				 last_level, tlb_level);
	dsb(ish);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	/*
	 * We cannot use leaf-only invalidation here, since we may be
	 * invalidating table entries as part of collapsing hugepages or
	 * moving page tables. Set the tlb_level to TLBI_TTL_UNKNOWN because
	 * we cannot get enough information here.
	 */
	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > (MAX_DVM_OPS * PAGE_SIZE)) {
		flush_tlb_all();
		return;
	}

	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
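	/*
	 * __TLBI_VADDR() shifted the addresses right by 12 above, so stepping
	 * the encoded operand by 1 << (PAGE_SHIFT - 12) advances by one page.
	 */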
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaale1is, addr);
	dsb(ish);
	isb();
}

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	dsb(ishst);
	__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}
#endif

#endif
