xref: /linux/include/asm-generic/tlb.h (revision 54a8a2220c936a47840c9a3d74910c5a56fae2ed)
/* asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/config.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
/*
 * For UP we don't need to worry about TLB flush
 * and page free ordering so much.
 */
#ifdef CONFIG_SMP
  #ifdef ARCH_FREE_PTR_NR	/* arch-specific batch size override */
    #define FREE_PTE_NR	ARCH_FREE_PTR_NR
  #else
    #define FREE_PTE_NR	506
  #endif
  #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
  #define FREE_PTE_NR	1
  #define tlb_fast_mode(tlb) 1
#endif
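
/*
 * Example (illustrative only, not part of the original header): an
 * architecture that wants a different batch size can define
 * ARCH_FREE_PTR_NR before this file is included, typically from its own
 * <asm/tlb.h>; the value 128 below is purely hypothetical:
 *
 *	#define ARCH_FREE_PTR_NR	128
 *	#include <asm-generic/tlb.h>
 *
 * On UP kernels the batch is a single page and tlb_fast_mode() is always
 * true, so tlb_remove_page() frees pages immediately instead of batching.
 */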

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.  This structure
 * can be per-CPU or per-MM as the page table lock is held for the duration of
 * TLB shootdown.
 */
struct mmu_gather {
	struct mm_struct	*mm;		/* mm whose pages are being unmapped */
	unsigned int		nr;		/* set to ~0U means fast mode */
	unsigned int		need_flush;	/* Really unmapped some ptes? */
	unsigned int		fullmm;		/* non-zero means full mm flush */
	unsigned long		freed;		/* pages unmapped, for RSS accounting */
	struct page *		pages[FREE_PTE_NR];	/* batch of pages to free */
};

/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
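
/*
 * Example (illustrative sketch, not part of this header): the matching
 * definition normally lives in the architecture's mm initialization code,
 * along the lines of:
 *
 *	DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 */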

/* tlb_gather_mmu
 *	Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &per_cpu(mmu_gathers, smp_processor_id());

	tlb->mm = mm;

	/* Use fast mode if only one CPU is online */
	tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;

	tlb->fullmm = full_mm_flush;
	tlb->freed = 0;

	return tlb;
}

static inline void
tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;
	tlb_flush(tlb);
	if (!tlb_fast_mode(tlb)) {
		free_pages_and_swap_cache(tlb->pages, tlb->nr);
		tlb->nr = 0;
	}
}

/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.  The page table lock is still held at this point.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	int freed = tlb->freed;
	struct mm_struct *mm = tlb->mm;
	int rss = get_mm_counter(mm, rss);

	if (rss < freed)
		freed = rss;
	add_mm_counter(mm, rss, -freed);
	tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}
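
/*
 * Usage sketch (illustrative only, simplified from how mm/memory.c drives
 * this code): a caller batches page removals between tlb_gather_mmu() and
 * tlb_finish_mmu(), holding the page table lock for the whole operation:
 *
 *	struct mmu_gather *tlb;
 *
 *	spin_lock(&mm->page_table_lock);
 *	tlb = tlb_gather_mmu(mm, 0);
 *	...
 *	for each pte being torn down:
 *		tlb_remove_tlb_entry(tlb, ptep, address);
 *		tlb_remove_page(tlb, page);
 *	...
 *	tlb_finish_mmu(tlb, start, end);
 *	spin_unlock(&mm->page_table_lock);
 */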

static inline unsigned int
tlb_is_full_mm(struct mmu_gather *tlb)
{
	return tlb->fullmm;
}

/* tlb_remove_page
 *	Must perform the equivalent of __free_pte(pte_get_and_clear(ptep)), while
 *	handling the additional races in SMP caused by other CPUs caching valid
 *	mappings in their TLBs.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= FREE_PTE_NR)
		tlb_flush_mmu(tlb, 0, 0);
}
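
/*
 * Example (illustrative sketch, roughly what mm/memory.c does when zapping
 * a range; the exact ptep_get_and_clear() arguments vary by kernel
 * version): clear the pte, record it for TLB invalidation, then hand the
 * underlying page to the gather batch:
 *
 *	pte_t ptent = ptep_get_and_clear(mm, address, ptep);
 *	struct page *page = pfn_to_page(pte_pfn(ptent));
 *
 *	tlb_remove_tlb_entry(tlb, ptep, address);
 *	tlb_remove_page(tlb, page);
 */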

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
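
/*
 * Example (illustrative, not taken verbatim from any architecture): an
 * architecture whose tlb_flush() invalidates the whole mm anyway can make
 * the per-pte hook a no-op in its <asm/tlb.h>, roughly:
 *
 *	#define tlb_flush(tlb)		flush_tlb_mm((tlb)->mm)
 *	#define __tlb_remove_tlb_entry(tlb, ptep, address)	do { } while (0)
 *
 *	#include <asm-generic/tlb.h>
 */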

#define pte_free_tlb(tlb, ptep)					\
	do {							\
		tlb->need_flush = 1;				\
		__pte_free_tlb(tlb, ptep);			\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp)					\
	do {							\
		tlb->need_flush = 1;				\
		__pud_free_tlb(tlb, pudp);			\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp)					\
	do {							\
		tlb->need_flush = 1;				\
		__pmd_free_tlb(tlb, pmdp);			\
	} while (0)
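
/*
 * Example (illustrative, simplified): an architecture typically maps the
 * __*_free_tlb() hooks onto its normal page table freeing helpers in its
 * <asm/tlb.h>, along the lines of:
 *
 *	#define __pte_free_tlb(tlb, pte)	pte_free(pte)
 *	#define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)
 *	#define __pud_free_tlb(tlb, pud)	pud_free(pud)
 */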

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */