xref: /linux/mm/swap_table.h (revision 07adc4cf1ecd316e7b6f4a142e5f5e96ce697e65)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _MM_SWAP_TABLE_H
3 #define _MM_SWAP_TABLE_H
4 
5 #include <linux/rcupdate.h>
6 #include <linux/atomic.h>
7 #include "swap.h"
8 
/*
 * A flat array serving as the swap table of one cluster: entries[i]
 * describes swap slot i of that cluster. Entries are atomic_long_t so
 * they can be read locklessly (under RCU) and updated under the
 * cluster lock.
 */
struct swap_table {
	atomic_long_t entries[SWAPFILE_CLUSTER];
};
13 
14 /*
15  * A swap table entry represents the status of a swap slot on a swap
16  * (physical or virtual) device. The swap table in each cluster is a
17  * 1:1 map of the swap slots in this cluster.
18  *
 * Each swap table entry may be a folio pointer, an XA_VALUE
 * (workingset shadow), or NULL.
21  */
22 
23 /*
24  * Helpers for casting one type of info into a swap table entry.
25  */
26 static inline unsigned long null_to_swp_tb(void)
27 {
28 	BUILD_BUG_ON(sizeof(unsigned long) != sizeof(atomic_long_t));
29 	return 0;
30 }
31 
/* Encode a folio pointer as a swap table entry. */
static inline unsigned long folio_to_swp_tb(struct folio *folio)
{
	/* A folio pointer must fit in a table entry unmodified. */
	BUILD_BUG_ON(sizeof(void *) != sizeof(unsigned long));
	return (unsigned long)folio;
}
37 
38 static inline unsigned long shadow_swp_to_tb(void *shadow)
39 {
40 	BUILD_BUG_ON((BITS_PER_XA_VALUE + 1) !=
41 		     BITS_PER_BYTE * sizeof(unsigned long));
42 	VM_WARN_ON_ONCE(shadow && !xa_is_value(shadow));
43 	return (unsigned long)shadow;
44 }
45 
46 /*
47  * Helpers for swap table entry type checking.
48  */
/* True if the table entry represents an empty swap slot. */
static inline bool swp_tb_is_null(unsigned long swp_tb)
{
	return swp_tb == 0;
}
53 
54 static inline bool swp_tb_is_folio(unsigned long swp_tb)
55 {
56 	return !xa_is_value((void *)swp_tb) && !swp_tb_is_null(swp_tb);
57 }
58 
59 static inline bool swp_tb_is_shadow(unsigned long swp_tb)
60 {
61 	return xa_is_value((void *)swp_tb);
62 }
63 
64 /*
65  * Helpers for retrieving info from swap table.
66  */
/* Decode a table entry back into its folio pointer; entry must be a folio. */
static inline struct folio *swp_tb_to_folio(unsigned long swp_tb)
{
	VM_WARN_ON(!swp_tb_is_folio(swp_tb));
	return (struct folio *)swp_tb;
}
72 
/* Decode a table entry back into its shadow value; entry must be a shadow. */
static inline void *swp_tb_to_shadow(unsigned long swp_tb)
{
	void *shadow = (void *)swp_tb;

	VM_WARN_ON(!swp_tb_is_shadow(swp_tb));
	return shadow;
}
78 
79 /*
80  * Helpers for accessing or modifying the swap table of a cluster,
81  * the swap cluster must be locked.
82  */
83 static inline void __swap_table_set(struct swap_cluster_info *ci,
84 				    unsigned int off, unsigned long swp_tb)
85 {
86 	atomic_long_t *table = rcu_dereference_protected(ci->table, true);
87 
88 	lockdep_assert_held(&ci->lock);
89 	VM_WARN_ON_ONCE(off >= SWAPFILE_CLUSTER);
90 	atomic_long_set(&table[off], swp_tb);
91 }
92 
93 static inline unsigned long __swap_table_xchg(struct swap_cluster_info *ci,
94 					      unsigned int off, unsigned long swp_tb)
95 {
96 	atomic_long_t *table = rcu_dereference_protected(ci->table, true);
97 
98 	lockdep_assert_held(&ci->lock);
99 	VM_WARN_ON_ONCE(off >= SWAPFILE_CLUSTER);
100 	/* Ordering is guaranteed by cluster lock, relax */
101 	return atomic_long_xchg_relaxed(&table[off], swp_tb);
102 }
103 
104 static inline unsigned long __swap_table_get(struct swap_cluster_info *ci,
105 					     unsigned int off)
106 {
107 	atomic_long_t *table;
108 
109 	VM_WARN_ON_ONCE(off >= SWAPFILE_CLUSTER);
110 	table = rcu_dereference_check(ci->table, lockdep_is_held(&ci->lock));
111 
112 	return atomic_long_read(&table[off]);
113 }
114 
115 static inline unsigned long swap_table_get(struct swap_cluster_info *ci,
116 					unsigned int off)
117 {
118 	atomic_long_t *table;
119 	unsigned long swp_tb;
120 
121 	rcu_read_lock();
122 	table = rcu_dereference(ci->table);
123 	swp_tb = table ? atomic_long_read(&table[off]) : null_to_swp_tb();
124 	rcu_read_unlock();
125 
126 	return swp_tb;
127 }
128 #endif
129