/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MM_SWAP_TABLE_H
#define _MM_SWAP_TABLE_H

#include "swap.h"

/*
 * A swap table entry represents the status of a swap slot on a swap
 * (physical or virtual) device. The swap table in each cluster is a
 * 1:1 map of the swap slots in this cluster.
 *
 * Each swap table entry could be a pointer (folio), an XA_VALUE
 * (shadow), or NULL.
 */

/*
 * Helpers for casting one type of info into a swap table entry.
 */
static inline unsigned long null_to_swp_tb(void)
{
	BUILD_BUG_ON(sizeof(unsigned long) != sizeof(atomic_long_t));
	return 0;
}

static inline unsigned long folio_to_swp_tb(struct folio *folio)
{
	BUILD_BUG_ON(sizeof(unsigned long) != sizeof(void *));
	return (unsigned long)folio;
}

static inline unsigned long shadow_swp_to_tb(void *shadow)
{
	BUILD_BUG_ON((BITS_PER_XA_VALUE + 1) !=
		     BITS_PER_BYTE * sizeof(unsigned long));
	VM_WARN_ON_ONCE(shadow && !xa_is_value(shadow));
	return (unsigned long)shadow;
}

/*
 * Helpers for swap table entry type checking.
 */
static inline bool swp_tb_is_null(unsigned long swp_tb)
{
	return !swp_tb;
}

static inline bool swp_tb_is_folio(unsigned long swp_tb)
{
	return !xa_is_value((void *)swp_tb) && !swp_tb_is_null(swp_tb);
}

static inline bool swp_tb_is_shadow(unsigned long swp_tb)
{
	return xa_is_value((void *)swp_tb);
}

/*
 * Helpers for retrieving info from the swap table.
 */
static inline struct folio *swp_tb_to_folio(unsigned long swp_tb)
{
	VM_WARN_ON(!swp_tb_is_folio(swp_tb));
	return (void *)swp_tb;
}

static inline void *swp_tb_to_shadow(unsigned long swp_tb)
{
	VM_WARN_ON(!swp_tb_is_shadow(swp_tb));
	return (void *)swp_tb;
}
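
/*
 * Illustrative sketch only, not part of the original header: a
 * hypothetical helper showing how the type-check and retrieval
 * helpers above compose when decoding an entry that was read from a
 * locked cluster's table (e.g. via __swap_table_get() below).
 * Storing goes the other way, e.g.
 * __swap_table_set(ci, off, folio_to_swp_tb(folio)).
 */
static inline struct folio *swp_tb_example_decode(unsigned long swp_tb,
						  void **shadowp)
{
	if (swp_tb_is_folio(swp_tb))
		return swp_tb_to_folio(swp_tb);		/* slot caches a folio */
	if (swp_tb_is_shadow(swp_tb))
		*shadowp = swp_tb_to_shadow(swp_tb);	/* workingset shadow */
	return NULL;					/* shadow or empty (NULL) slot */
}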

/*
 * Helpers for accessing or modifying the swap table of a cluster;
 * the swap cluster must be locked.
 */
static inline void __swap_table_set(struct swap_cluster_info *ci,
				    unsigned int off, unsigned long swp_tb)
{
	VM_WARN_ON_ONCE(off >= SWAPFILE_CLUSTER);
	atomic_long_set(&ci->table[off], swp_tb);
}

static inline unsigned long __swap_table_xchg(struct swap_cluster_info *ci,
					      unsigned int off, unsigned long swp_tb)
{
	VM_WARN_ON_ONCE(off >= SWAPFILE_CLUSTER);
	/* Ordering is guaranteed by cluster lock, relax */
	return atomic_long_xchg_relaxed(&ci->table[off], swp_tb);
}

static inline unsigned long __swap_table_get(struct swap_cluster_info *ci,
					     unsigned int off)
{
	VM_WARN_ON_ONCE(off >= SWAPFILE_CLUSTER);
	return atomic_long_read(&ci->table[off]);
}
#endif