/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018 Red Hat, Inc.
 */
#ifndef __LIBXFS_GROUP_H
#define __LIBXFS_GROUP_H 1

struct xfs_group {
	struct xfs_mount	*xg_mount;
	uint32_t		xg_gno;
	enum xfs_group_type	xg_type;
	atomic_t		xg_ref;		/* passive reference count */
	atomic_t		xg_active_ref;	/* active reference count */

	/* Precalculated geometry info */
	uint32_t		xg_block_count;	/* max usable gbno */
	uint32_t		xg_min_gbno;	/* min usable gbno */

#ifdef __KERNEL__
	/* -- kernel only structures below this line -- */

	union {
		/*
		 * For perags and non-zoned RT groups:
		 * Track freed but not yet committed extents.
		 */
		struct xfs_extent_busy_tree	*xg_busy_extents;

		/*
		 * For zoned RT groups:
		 * List of groups that need a zone reset.
		 *
		 * The zonegc code forces a log flush of the rtrmap inode before
		 * resetting the write pointer, so there is no need for
		 * individual busy extent tracking.
		 */
		struct xfs_group	*xg_next_reset;
	};

	/*
	 * Bitsets of per-ag metadata that have been checked and/or are sick.
	 * Callers should hold xg_state_lock before accessing these fields.
	 */
	uint16_t		xg_checked;
	uint16_t		xg_sick;
	spinlock_t		xg_state_lock;

	/*
	 * We use xfs_drain to track the number of deferred log intent items
	 * that have been queued (but not yet processed) so that waiters (e.g.
	 * scrub) will not lock resources when other threads are in the middle
	 * of processing a chain of intent items only to find momentary
	 * inconsistencies.
	 */
	struct xfs_defer_drain	xg_intents_drain;

	/*
	 * Hook to feed rmapbt updates to an active online repair.
	 */
	struct xfs_hooks	xg_rmap_update_hooks;
#endif /* __KERNEL__ */
};

struct xfs_group *xfs_group_get(struct xfs_mount *mp, uint32_t index,
		enum xfs_group_type type);
struct xfs_group *xfs_group_get_by_fsb(struct xfs_mount *mp,
		xfs_fsblock_t fsbno, enum xfs_group_type type);
struct xfs_group *xfs_group_hold(struct xfs_group *xg);
void xfs_group_put(struct xfs_group *xg);

struct xfs_group *xfs_group_grab(struct xfs_mount *mp, uint32_t index,
		enum xfs_group_type type);
struct xfs_group *xfs_group_next_range(struct xfs_mount *mp,
		struct xfs_group *xg, uint32_t start_index, uint32_t end_index,
		enum xfs_group_type type);
struct xfs_group *xfs_group_grab_next_mark(struct xfs_mount *mp,
		struct xfs_group *xg, xa_mark_t mark, enum xfs_group_type type);
void xfs_group_rele(struct xfs_group *xg);

void xfs_group_free(struct xfs_mount *mp, uint32_t index,
		enum xfs_group_type type, void (*uninit)(struct xfs_group *xg));
int xfs_group_insert(struct xfs_mount *mp, struct xfs_group *xg,
		uint32_t index, enum xfs_group_type type);

#define xfs_group_set_mark(_xg, _mark) \
	xa_set_mark(&(_xg)->xg_mount->m_groups[(_xg)->xg_type].xa, \
			(_xg)->xg_gno, (_mark))
#define xfs_group_clear_mark(_xg, _mark) \
	xa_clear_mark(&(_xg)->xg_mount->m_groups[(_xg)->xg_type].xa, \
			(_xg)->xg_gno, (_mark))
#define xfs_group_marked(_mp, _type, _mark) \
	xa_marked(&(_mp)->m_groups[(_type)].xa, (_mark))
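
/*
 * Illustrative usage sketch, not part of the upstream header: one way a
 * caller might pair the lookup and reference helpers above.  xfs_group_get()
 * and xfs_group_put() manage the passive count (xg_ref), while
 * xfs_group_grab() and xfs_group_rele() manage the active count
 * (xg_active_ref).  XG_TYPE_AG is the per-AG group type from xfs_types.h;
 * example_work() is a hypothetical stand-in for whatever the caller does
 * while holding the reference.
 *
 *	struct xfs_group	*xg;
 *	int			error;
 *
 *	xg = xfs_group_grab(mp, agno, XG_TYPE_AG);
 *	if (!xg)
 *		return -ENOENT;
 *	error = example_work(xg);
 *	xfs_group_rele(xg);
 *	return error;
 *
 * Groups of interest can also be tagged in the per-type xarray, e.g. with
 * xfs_group_set_mark(xg, XA_MARK_0), later revisited via
 * xfs_group_grab_next_mark() or tested in bulk with xfs_group_marked().
 */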

static inline xfs_agblock_t
xfs_group_max_blocks(
	struct xfs_group	*xg)
{
	return xg->xg_mount->m_groups[xg->xg_type].blocks;
}

static inline xfs_rfsblock_t
xfs_groups_to_rfsbs(
	struct xfs_mount	*mp,
	uint32_t		nr_groups,
	enum xfs_group_type	type)
{
	return (xfs_rfsblock_t)mp->m_groups[type].blocks * nr_groups;
}

static inline xfs_fsblock_t
xfs_group_start_fsb(
	struct xfs_group	*xg)
{
	return ((xfs_fsblock_t)xg->xg_gno) <<
		xg->xg_mount->m_groups[xg->xg_type].blklog;
}

static inline xfs_fsblock_t
xfs_gbno_to_fsb(
	struct xfs_group	*xg,
	xfs_agblock_t		gbno)
{
	return xfs_group_start_fsb(xg) | gbno;
}

static inline xfs_daddr_t
xfs_gbno_to_daddr(
	struct xfs_group	*xg,
	xfs_agblock_t		gbno)
{
	struct xfs_mount	*mp = xg->xg_mount;
	struct xfs_groups	*g = &mp->m_groups[xg->xg_type];
	xfs_fsblock_t		fsbno;

	/*
	 * Group types with gaps in their daddr space use the segmented
	 * fsblock number directly; otherwise groups are packed back to
	 * back on the device.
	 */
	if (g->has_daddr_gaps)
		fsbno = xfs_gbno_to_fsb(xg, gbno);
	else
		fsbno = (xfs_fsblock_t)xg->xg_gno * g->blocks + gbno;

	return XFS_FSB_TO_BB(mp, g->start_fsb + fsbno);
}

static inline uint32_t
xfs_fsb_to_gno(
	struct xfs_mount	*mp,
	xfs_fsblock_t		fsbno,
	enum xfs_group_type	type)
{
	/*
	 * A blklog of zero means the fsblock address space for this group
	 * type is not segmented, so everything lives in group 0.
	 */
	if (!mp->m_groups[type].blklog)
		return 0;
	return fsbno >> mp->m_groups[type].blklog;
}

static inline xfs_agblock_t
xfs_fsb_to_gbno(
	struct xfs_mount	*mp,
	xfs_fsblock_t		fsbno,
	enum xfs_group_type	type)
{
	return fsbno & mp->m_groups[type].blkmask;
}

static inline bool
xfs_verify_gbno(
	struct xfs_group	*xg,
	uint32_t		gbno)
{
	if (gbno >= xg->xg_block_count)
		return false;
	if (gbno < xg->xg_min_gbno)
		return false;
	return true;
}

static inline bool
xfs_verify_gbext(
	struct xfs_group	*xg,
	uint32_t		gbno,
	uint32_t		glen)
{
	uint32_t		end;	/* last block of the extent */

	if (!xfs_verify_gbno(xg, gbno))
		return false;
	if (glen == 0 || check_add_overflow(gbno, glen - 1, &end))
		return false;
	if (!xfs_verify_gbno(xg, end))
		return false;
	return true;
}

#endif /* __LIBXFS_GROUP_H */
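
/*
 * Illustrative usage sketch, not part of the upstream header: validating a
 * group-relative extent and converting its start with the geometry helpers
 * above.  The inputs (xg, gbno, glen) are assumed to be supplied by the
 * caller; no particular on-disk structure is implied.
 *
 *	xfs_fsblock_t	fsbno;
 *	xfs_daddr_t	daddr;
 *
 *	if (!xfs_verify_gbext(xg, gbno, glen))
 *		return -EFSCORRUPTED;
 *	fsbno = xfs_gbno_to_fsb(xg, gbno);
 *	daddr = xfs_gbno_to_daddr(xg, gbno);
 *
 * Here fsbno is a segmented filesystem block number and daddr is in
 * 512-byte basic blocks; xfs_fsb_to_gno() and xfs_fsb_to_gbno() perform the
 * reverse decomposition back into a group number and group offset.
 */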