xref: /linux/include/linux/swapops.h (revision 7203ca412fc8e8a0588e9adc0f777d3163f8dff3)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

#ifdef CONFIG_SWAP
#include <linux/swapfile.h>
#endif	/* CONFIG_SWAP */

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the six
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further one bit: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)

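/*
 * Illustrative sketch (not part of this header): on a typical 64-bit build,
 * BITS_PER_XA_VALUE is 63 and MAX_SWAPFILES_SHIFT is 5, so SWP_TYPE_SHIFT
 * is 58 and an entry packs as:
 *
 *	val = (type << 58) | (offset & ((1UL << 58) - 1));
 *
 * i.e. the type sits in the high bits and the offset fills the rest, keeping
 * the low-order bits dense as the comment above requires.
 */
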
/*
 * Definitions only for PFN swap entries (see is_pfn_swap_entry()).  To
 * store PFN, we only need SWP_PFN_BITS bits.  Each of the pfn swap entries
 * can use the extra bits to store other information besides PFN.
 */
#ifdef MAX_PHYSMEM_BITS
#define SWP_PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#else  /* MAX_PHYSMEM_BITS */
#define SWP_PFN_BITS		min_t(int, \
				      sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
				      SWP_TYPE_SHIFT)
#endif	/* MAX_PHYSMEM_BITS */
#define SWP_PFN_MASK		(BIT(SWP_PFN_BITS) - 1)

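/*
 * Illustrative sketch (not part of this header): a PFN swap entry keeps the
 * PFN in the low SWP_PFN_BITS of the swp offset, so the PFN is recovered
 * with:
 *
 *	unsigned long pfn = swp_offset(entry) & SWP_PFN_MASK;
 *
 * (elsewhere in this header this is wrapped as swp_offset_pfn()).
 */
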
/**
 * Migration swap entry specific bitfield definitions.  Layout:
 *
 *   |----------+--------------------|
 *   | swp_type | swp_offset         |
 *   |----------+--------+-+-+-------|
 *   |          | resv   |D|A|  PFN  |
 *   |----------+--------+-+-+-------|
 *
 * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
 * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
 *
 * Note: A/D bits will be stored in migration entries iff there are enough
 * free bits in the arch-specific swp offset.  By default we'll ignore A/D
 * bits when migrating a page.  Please refer to migration_entry_supports_ad()
 * for more information.  If there are more bits besides PFN and A/D bits,
 * they should be reserved and always be zeros.
 */
#define SWP_MIG_YOUNG_BIT		(SWP_PFN_BITS)
#define SWP_MIG_DIRTY_BIT		(SWP_PFN_BITS + 1)
#define SWP_MIG_TOTAL_BITS		(SWP_PFN_BITS + 2)

#define SWP_MIG_YOUNG			BIT(SWP_MIG_YOUNG_BIT)
#define SWP_MIG_DIRTY			BIT(SWP_MIG_DIRTY_BIT)

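/*
 * Illustrative sketch (not part of this header): whether a migration entry
 * recorded the old pte's accessed/dirty state amounts to testing these bits
 * in the offset field:
 *
 *	bool young = swp_offset(entry) & SWP_MIG_YOUNG;
 *	bool dirty = swp_offset(entry) & SWP_MIG_DIRTY;
 *
 * (elsewhere in this header these are wrapped as is_migration_entry_young()
 * and is_migration_entry_dirty(), which also check
 * migration_entry_supports_ad() first).
 */
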
/* Clear all flags but keep the swp_entry_t-related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
	if (pte_swp_exclusive(pte))
		pte = pte_swp_clear_exclusive(pte);
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	if (pte_swp_uffd_wp(pte))
		pte = pte_swp_clear_uffd_wp(pte);
	return pte;
}

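/*
 * Illustrative sketch (not part of this header): this lets callers compare
 * two swap ptes while ignoring the software bits, e.g.
 *
 *	bool same_entry = pte_same(pte_swp_clear_flags(a),
 *				   pte_swp_clear_flags(b));
 *
 * and decoding clears these flags first so that the exclusive, soft-dirty
 * and uffd-wp bits never leak into the swp_entry_t value.
 */
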
/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}

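/*
 * Illustrative sketch (not part of this header): swp_type() and swp_offset()
 * exactly invert swp_entry() for in-range values:
 *
 *	swp_entry_t ent = swp_entry(type, offset);
 *	WARN_ON(swp_type(ent) != type || swp_offset(ent) != offset);
 */
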
/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}

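/*
 * Illustrative sketch (not part of this header), simplified from the
 * migration path: installing a special entry into a page table goes through
 * this helper, e.g.
 *
 *	swp_entry_t ent = make_writable_migration_entry(page_to_pfn(page));
 *	set_pte_at(mm, addr, ptep, swp_entry_to_pte(ent));
 *
 * The inverse direction, pte_to_swp_entry(), is defined elsewhere in this
 * header.
 */
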
static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}

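/*
 * Illustrative sketch (not part of this header): shmem stashes swap entries
 * in the file's mapping this way, e.g.
 *
 *	void *stashed = swp_to_radix_entry(ent);
 *	...
 *	swp_entry_t back = radix_to_swp_entry(stashed);
 *
 * xa_is_value(stashed) is true for such entries, which is how lookups tell
 * a swapped-out slot apart from a folio pointer.
 */
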
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_READ, offset);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_WRITE, offset);
}

static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE, offset);
}

#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

#endif /* CONFIG_DEVICE_PRIVATE */

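/*
 * Illustrative sketch (not part of this header): a driver migrating a page
 * into device-private memory ends up replacing the CPU pte with one of these
 * entries, roughly (dpage being the device-private destination page):
 *
 *	swp_entry_t ent = make_writable_device_private_entry(page_to_pfn(dpage));
 *	set_pte_at(mm, addr, ptep, swp_entry_to_pte(ent));
 *
 * A later CPU access faults and the core MM hands the fault back to the
 * owning driver.
 */
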
#ifdef CONFIG_MIGRATION

static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ, offset);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_WRITE, offset);
}

/*
 * Returns whether the host has a large enough swap offset field to support
 * carrying over the pgtable A/D bits for page migrations.  The result is
 * pretty much arch-specific.
 */
static inline bool migration_entry_supports_ad(void)
{
#ifdef CONFIG_SWAP
	return swap_migration_ad_supported;
#else  /* CONFIG_SWAP */
	return false;
#endif	/* CONFIG_SWAP */
}

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_YOUNG);
	return entry;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_DIRTY);
	return entry;
}

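/*
 * Illustrative sketch (not part of this header): the rmap migration code
 * preserves the old pte's accessed/dirty state along these lines:
 *
 *	swp_entry_t ent = make_readable_migration_entry(page_to_pfn(page));
 *	if (pte_young(oldpte))
 *		ent = make_migration_entry_young(ent);
 *	if (pte_dirty(oldpte))
 *		ent = make_migration_entry_dirty(ent);
 *
 * On architectures without spare offset bits both helpers return the entry
 * unchanged and the A/D state is simply dropped.
 */
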
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *pte);
#else  /* CONFIG_MIGRATION */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *pte) { }

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	return entry;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	return entry;
}

#endif	/* CONFIG_MIGRATION */

#ifdef CONFIG_MEMORY_FAILURE

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}
#endif

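/*
 * Illustrative sketch (not part of this header): after memory_failure()
 * isolates a bad page, user mappings of it are replaced with a poison entry
 * so that any later access faults:
 *
 *	pte_t pte = swp_entry_to_pte(make_hwpoison_entry(page));
 *
 * The fault path then recognises is_hwpoison_entry() and delivers SIGBUS
 * instead of mapping the page back in.
 */
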
typedef unsigned long pte_marker;

#define  PTE_MARKER_UFFD_WP			BIT(0)
/*
 * "Poisoned" here is meant in the very general sense of "future accesses are
 * invalid", instead of referring very specifically to hardware memory errors.
 * This marker is meant to represent any of the various causes of this.
 *
 * Note that, when encountered by the faulting logic, PTEs with this marker
 * will result in VM_FAULT_HWPOISON and thus regardless trigger hardware memory
 * error logic.
 */
#define  PTE_MARKER_POISONED			BIT(1)
/*
 * Indicates that, on fault, this PTE will cause a SIGSEGV signal to be
 * sent. This means guard markers behave in effect as if the region were
 * mapped PROT_NONE, rather than as if it were a memory hole or equivalent.
 */
#define  PTE_MARKER_GUARD			BIT(2)
#define  PTE_MARKER_MASK			(BIT(3) - 1)

static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	return swp_entry(SWP_PTE_MARKER, marker);
}

static inline pte_t make_pte_marker(pte_marker marker)
{
	return swp_entry_to_pte(make_pte_marker_entry(marker));
}

static inline swp_entry_t make_poisoned_swp_entry(void)
{
	return make_pte_marker_entry(PTE_MARKER_POISONED);
}

static inline swp_entry_t make_guard_swp_entry(void)
{
	return make_pte_marker_entry(PTE_MARKER_GUARD);
}

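/*
 * Illustrative sketch (not part of this header): installing a guard marker
 * (as done for MADV_GUARD_INSTALL) amounts to:
 *
 *	set_pte_at(mm, addr, ptep, make_pte_marker(PTE_MARKER_GUARD));
 *
 * A later fault on that address sees the marker and raises SIGSEGV rather
 * than populating a page.
 */
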
struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

#else  /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

#endif  /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

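/*
 * Illustrative sketch (not part of this header): THP migration mirrors the
 * pte case at pmd granularity, roughly:
 *
 *	swp_entry_t ent = make_writable_migration_entry(page_to_pfn(page));
 *	set_pmd_at(mm, haddr, pmdp, swp_entry_to_pmd(ent));
 *
 * set_pmd_migration_entry() performs this (plus the unmap bookkeeping) and
 * remove_migration_pmd() undoes it once migration completes.
 */
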
#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */