/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#ifndef __UM_TLBFLUSH_H
#define __UM_TLBFLUSH_H

#include <linux/mm.h>

/*
 * In UML, we need to sync the TLB over by using mmap/munmap/mprotect syscalls
 * from the process handling the MM (which can be the kernel itself).
 *
 * To track updates, we hook into both set_ptes and flush_tlb_*. With set_ptes
 * we catch all PTE transitions where memory that was unusable becomes usable.
 * With flush_tlb_* we can track any memory that becomes unusable, even when
 * only a higher layer of the page table was modified.
 *
 * So, we simply track updates using both methods and mark the memory area to
 * be synced later on. The only special case is that flush_tlb_kernel_* needs
 * to be executed immediately, as there is no good synchronization point in
 * that case. In contrast, in the set_ptes case we can wait for the next
 * kernel segfault before we do the synchronization.
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 */
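
/*
 * A minimal usage sketch of the deferred path (illustrative only; it
 * assumes um_tlb_mark_sync() is declared elsewhere in the UML mm code,
 * as this header only uses it):
 *
 *	flush_tlb_page(vma, addr);	// record [addr, addr + PAGE_SIZE)
 *	...				// host mappings are untouched so far
 *	um_tlb_sync(vma->vm_mm);	// replay the pending ranges via
 *					// mmap/munmap/mprotect
 *
 * um_tlb_sync() is normally driven from the next kernel segfault; the
 * explicit call above is only for illustration.
 */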

extern int um_tlb_sync(struct mm_struct *mm);

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);

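/* Deferred: only marks the page; the host is updated at the next sync. */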
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long address)
{
	um_tlb_mark_sync(vma->vm_mm, address, address + PAGE_SIZE);
}

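/* Deferred as well: the range is only marked for a later um_tlb_sync(). */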
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	um_tlb_mark_sync(vma->vm_mm, start, end);
}

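/*
 * Kernel mappings have no good later synchronization point, so the
 * pending range must be applied immediately (see the comment above).
 */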
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	um_tlb_mark_sync(&init_mm, start, end);

	/* Kernel needs to be synced immediately */
	um_tlb_sync(&init_mm);
}
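
/*
 * A typical caller (for illustration; not UML-specific) is the vmalloc
 * code, which invokes flush_tlb_kernel_range() after tearing down kernel
 * page tables, so the host mappings for init_mm are updated before the
 * range is reused.
 */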

#endif