#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the Principles of Operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * Modifying a pte of an active mm_struct is therefore a two-step
 * process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */
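
/*
 * Illustrative sketch of the two-step update described above. The
 * helper names are pseudocode, not real kernel primitives (the real
 * ones live in asm/pgtable.h and use IPTE/IDTE/CSP under the hood):
 *
 *	invalidate_pte(ptep);	 i)  invalidate the entry, purge the TLB
 *	set_pte(ptep, new_pte);	 ii) store the new pte value
 */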

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

struct mmu_gather {
	struct mm_struct *mm;
	struct mmu_table_batch *batch;
	unsigned int fullmm;
	unsigned long start, end;
};

struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
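
/*
 * Worked example (assuming 4KB pages and 8-byte pointers): the batch
 * header is 24 bytes (16 for the rcu_head, 4 for nr, 4 padding), so
 * one page holds (4096 - 24) / 8 = 509 table pointers per batch.
 */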

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

static inline void tlb_gather_mmu(struct mmu_gather *tlb,
				  struct mm_struct *mm,
				  unsigned long start,
				  unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
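	/* fullmm: start == 0 and end == -1 cover the whole address space */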
	tlb->fullmm = !(start | (end + 1));
	tlb->batch = NULL;
}
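
/*
 * The flush is split in two phases: tlb_flush_mmu_tlbonly() purges
 * the TLB, tlb_flush_mmu_free() then frees the batched page tables
 * (via RCU, see tlb_remove_table) once no CPU can still walk them.
 */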

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	__tlb_flush_mm_lazy(tlb->mm);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb,
				  unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);
}
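
/*
 * Typical mmu_gather cycle, sketched for illustration only (the real
 * callers are the unmap paths in mm/memory.c; this is a simplification):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	... clear ptes, then tlb_remove_page() / pte_free_tlb() ...
 *	tlb_finish_mmu(&tlb, start, end);
 */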

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the TLB entry for a page
 * cache page has already been flushed, so just do
 * free_page_and_swap_cache.
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	tlb_remove_page(tlb, page);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	page_table_free_rcu(tlb, (unsigned long *) pte, address);
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
	if (tlb->mm->context.asce_limit <= (1UL << 31))
		return;
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	tlb_remove_table(tlb, pmd);
}

/*
 * p4d_free_tlb frees a p4d table and clears the CRSTE for the
 * region second table entry from the tlb.
 * If the mm uses a four level page table the single p4d is freed
 * as the pgd. p4d_free_tlb checks the asce_limit against 8PB
 * to avoid the double free of the p4d in this case.
 */
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long address)
{
	if (tlb->mm->context.asce_limit <= (1UL << 53))
		return;
	tlb_remove_table(tlb, p4d);
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
	if (tlb->mm->context.asce_limit <= (1UL << 42))
		return;
	tlb_remove_table(tlb, pud);
}
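
/*
 * Summary of the folding checks above (standard s390 table layout):
 *
 *	entry	table level		folded into pgd when asce_limit <=
 *	pmd	segment table		2GB (1UL << 31)
 *	pud	region third table	4TB (1UL << 42)
 *	p4d	region second table	8PB (1UL << 53)
 */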
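/*
 * The hooks below need no work on s390: flushing is handled entirely
 * by the functions above, so the generic per-vma and per-entry
 * bookkeeping compresses to no-ops.
 */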
#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)	do { } while (0)
#define tlb_migrate_finish(mm)			do { } while (0)
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#endif /* _S390_TLB_H */