// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2022-2024 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_defer.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_trace.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_rtgroup.h"
#include "xfs_rtbitmap.h"
#include "xfs_metafile.h"
#include "xfs_metadir.h"

/*
 * Allocate one incore rtgroup object for realtime group @rgno and insert it
 * into the mount's group lookup structure.  The allocation is freed again if
 * insertion fails.  Returns 0 or a negative errno.
 *
 * NOTE(review): @rgcount and @rextents are not used in this body — presumably
 * reserved for sizing done elsewhere; confirm against callers before removing.
 */
int
xfs_rtgroup_alloc(
	struct xfs_mount	*mp,
	xfs_rgnumber_t		rgno,
	xfs_rgnumber_t		rgcount,
	xfs_rtbxlen_t		rextents)
{
	struct xfs_rtgroup	*rtg;
	int			error;

	rtg = kzalloc(sizeof(struct xfs_rtgroup), GFP_KERNEL);
	if (!rtg)
		return -ENOMEM;

	error = xfs_group_insert(mp, rtg_group(rtg), rgno, XG_TYPE_RTG);
	if (error)
		goto out_free_rtg;
	return 0;

out_free_rtg:
	/* Insertion failed; the group was never published, so just free it. */
	kfree(rtg);
	return error;
}

/* Remove and release the incore rtgroup object for group @rgno. */
void
xfs_rtgroup_free(
	struct xfs_mount	*mp,
	xfs_rgnumber_t		rgno)
{
	xfs_group_free(mp, rgno, XG_TYPE_RTG, NULL);
}

/* Free a range of incore rtgroup objects.  The range is [@first_rgno, @end_rgno). */
void
xfs_free_rtgroups(
	struct xfs_mount	*mp,
	xfs_rgnumber_t		first_rgno,
	xfs_rgnumber_t		end_rgno)
{
	xfs_rgnumber_t		rgno;

	for (rgno = first_rgno; rgno < end_rgno; rgno++)
		xfs_rtgroup_free(mp, rgno);
}

/* Initialize some range of incore rtgroup objects.
 */
int
xfs_initialize_rtgroups(
	struct xfs_mount	*mp,
	xfs_rgnumber_t		first_rgno,
	xfs_rgnumber_t		end_rgno,
	xfs_rtbxlen_t		rextents)
{
	xfs_rgnumber_t		index;
	int			error;

	/* Empty (or inverted) range is a no-op, not an error. */
	if (first_rgno >= end_rgno)
		return 0;

	for (index = first_rgno; index < end_rgno; index++) {
		error = xfs_rtgroup_alloc(mp, index, end_rgno, rextents);
		if (error)
			goto out_unwind_new_rtgs;
	}

	return 0;

out_unwind_new_rtgs:
	/*
	 * Tear down only the groups created by this call, i.e.
	 * [first_rgno, index); group @index itself failed to allocate.
	 */
	xfs_free_rtgroups(mp, first_rgno, index);
	return error;
}

/*
 * Compute the number of rt extents in this realtime group.  All groups hold
 * sb_rgextents extents, except the last one, which holds whatever is left
 * over and may therefore be shorter.
 */
xfs_rtxnum_t
__xfs_rtgroup_extents(
	struct xfs_mount	*mp,
	xfs_rgnumber_t		rgno,
	xfs_rgnumber_t		rgcount,
	xfs_rtbxlen_t		rextents)
{
	ASSERT(rgno < rgcount);
	if (rgno == rgcount - 1)
		return rextents - ((xfs_rtxnum_t)rgno * mp->m_sb.sb_rgextents);

	ASSERT(xfs_has_rtgroups(mp));
	return mp->m_sb.sb_rgextents;
}

/* Compute the extent count of group @rgno from the current superblock geometry. */
xfs_rtxnum_t
xfs_rtgroup_extents(
	struct xfs_mount	*mp,
	xfs_rgnumber_t		rgno)
{
	return __xfs_rtgroup_extents(mp, rgno, mp->m_sb.sb_rgcount,
			mp->m_sb.sb_rextents);
}

/*
 * Update the rt extent count of the previous tail rtgroup if it changed during
 * recovery (i.e. recovery of a growfs).
 */
int
xfs_update_last_rtgroup_size(
	struct xfs_mount	*mp,
	xfs_rgnumber_t		prev_rgcount)
{
	struct xfs_rtgroup	*rtg;

	ASSERT(prev_rgcount > 0);

	/* The old tail group must still exist after the grow. */
	rtg = xfs_rtgroup_grab(mp, prev_rgcount - 1);
	if (!rtg)
		return -EFSCORRUPTED;
	/* Recompute against the *new* rgcount/rextents from the superblock. */
	rtg->rtg_extents = __xfs_rtgroup_extents(mp, prev_rgcount - 1,
			mp->m_sb.sb_rgcount, mp->m_sb.sb_rextents);
	xfs_rtgroup_rele(rtg);
	return 0;
}

/* Lock metadata inodes associated with this rt group.
 */
void
xfs_rtgroup_lock(
	struct xfs_rtgroup	*rtg,
	unsigned int		rtglock_flags)
{
	ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
	/* Exclusive and shared bitmap locking are mutually exclusive. */
	ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
	       !(rtglock_flags & XFS_RTGLOCK_BITMAP));

	if (rtglock_flags & XFS_RTGLOCK_BITMAP)
		xfs_rtbitmap_lock(rtg_mount(rtg));
	else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED)
		xfs_rtbitmap_lock_shared(rtg_mount(rtg), XFS_RBMLOCK_BITMAP);
}

/*
 * Unlock metadata inodes associated with this rt group.  @rtglock_flags must
 * match the flags passed to the corresponding xfs_rtgroup_lock call.
 */
void
xfs_rtgroup_unlock(
	struct xfs_rtgroup	*rtg,
	unsigned int		rtglock_flags)
{
	ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
	ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
	       !(rtglock_flags & XFS_RTGLOCK_BITMAP));

	if (rtglock_flags & XFS_RTGLOCK_BITMAP)
		xfs_rtbitmap_unlock(rtg_mount(rtg));
	else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED)
		xfs_rtbitmap_unlock_shared(rtg_mount(rtg), XFS_RBMLOCK_BITMAP);
}

/*
 * Join realtime group metadata inodes to the transaction.  The ILOCKs will be
 * released on transaction commit.
 */
void
xfs_rtgroup_trans_join(
	struct xfs_trans	*tp,
	struct xfs_rtgroup	*rtg,
	unsigned int		rtglock_flags)
{
	ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
	/* Only exclusively-held locks may be handed over to a transaction. */
	ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED));

	if (rtglock_flags & XFS_RTGLOCK_BITMAP)
		xfs_rtbitmap_trans_join(tp);
}

#ifdef CONFIG_PROVE_LOCKING
/* Single lockdep class shared by all rtgroup metadata inode ILOCKs. */
static struct lock_class_key xfs_rtginode_lock_class;

/*
 * Order rtgroup inode ILOCKs by their owning group number.  Rtgroup metadata
 * inodes store the rgno in i_projid (see the check in xfs_rtginode_load), so
 * that field serves as the comparison key here.
 */
static int
xfs_rtginode_ilock_cmp_fn(
	const struct lockdep_map	*m1,
	const struct lockdep_map	*m2)
{
	const struct xfs_inode	*ip1 =
		container_of(m1, struct xfs_inode, i_lock.dep_map);
	const struct xfs_inode	*ip2 =
		container_of(m2, struct xfs_inode, i_lock.dep_map);

	if (ip1->i_projid < ip2->i_projid)
		return -1;
	if (ip1->i_projid > ip2->i_projid)
		return 1;
	return 0;
}

/* Print the group number of an rtgroup inode in lockdep splat output. */
static inline void
xfs_rtginode_ilock_print_fn(
	const struct lockdep_map	*m)
{
	const struct xfs_inode	*ip =
		container_of(m, struct xfs_inode, i_lock.dep_map);

	printk(KERN_CONT " rgno=%u", ip->i_projid);
}

/*
 * Most of the time each of the RTG inode locks are only taken one at a time.
 * But when committing deferred ops, more than one of a kind can be taken.
 * However, deferred rt ops will be committed in rgno order so there is no
 * potential for deadlocks.  The code here is needed to tell lockdep about this
 * order.
 */
static inline void
xfs_rtginode_lockdep_setup(
	struct xfs_inode	*ip,
	xfs_rgnumber_t		rgno,
	enum xfs_rtg_inodes	type)
{
	/* One subclass per inode type; the cmp_fn orders within a subclass. */
	lockdep_set_class_and_subclass(&ip->i_lock, &xfs_rtginode_lock_class,
			type);
	lock_set_cmp_fn(&ip->i_lock, xfs_rtginode_ilock_cmp_fn,
			xfs_rtginode_ilock_print_fn);
}
#else
#define xfs_rtginode_lockdep_setup(ip, rgno, type)	do { } while (0)
#endif /* CONFIG_PROVE_LOCKING */

/* Static description of one kind of rtgroup metadata inode. */
struct xfs_rtginode_ops {
	const char		*name;	/* short name */

	enum xfs_metafile_type	metafile_type;

	/* Does the fs have this feature? */
	bool			(*enabled)(struct xfs_mount *mp);
};

/*
 * Per-type descriptor table, indexed by enum xfs_rtg_inodes.
 * NOTE(review): no entries are populated in this view — confirm whether the
 * initializers live elsewhere or are genuinely missing.
 */
static const struct xfs_rtginode_ops xfs_rtginode_ops[XFS_RTGI_MAX] = {
};

/* Return the shortname of this rtgroup inode. */
const char *
xfs_rtginode_name(
	enum xfs_rtg_inodes	type)
{
	return xfs_rtginode_ops[type].name;
}

/* Return the metafile type of this rtgroup inode. */
enum xfs_metafile_type
xfs_rtginode_metafile_type(
	enum xfs_rtg_inodes	type)
{
	return xfs_rtginode_ops[type].metafile_type;
}

/* Should this rtgroup inode be present? */
bool
xfs_rtginode_enabled(
	struct xfs_rtgroup	*rtg,
	enum xfs_rtg_inodes	type)
{
	const struct xfs_rtginode_ops *ops = &xfs_rtginode_ops[type];

	/* No predicate means the inode type is unconditionally present. */
	if (!ops->enabled)
		return true;
	return ops->enabled(rtg_mount(rtg));
}

/* Load an existing rtgroup inode into the rtgroup structure.
 */
int
xfs_rtginode_load(
	struct xfs_rtgroup	*rtg,
	enum xfs_rtg_inodes	type,
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	const char		*path;
	struct xfs_inode	*ip;
	const struct xfs_rtginode_ops *ops = &xfs_rtginode_ops[type];
	int			error;

	/* Nothing to load if this fs doesn't have this inode type. */
	if (!xfs_rtginode_enabled(rtg, type))
		return 0;

	/* The rtgroups parent directory must already have been loaded. */
	if (!mp->m_rtdirip)
		return -EFSCORRUPTED;

	path = xfs_rtginode_path(rtg_rgno(rtg), type);
	if (!path)
		return -ENOMEM;
	error = xfs_metadir_load(tp, mp->m_rtdirip, path, ops->metafile_type,
			&ip);
	/* The path string is only needed for the lookup; free it either way. */
	kfree(path);

	if (error)
		return error;

	/* Metadata inodes must use a real extent mapping format. */
	if (XFS_IS_CORRUPT(mp, ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
			   ip->i_df.if_format != XFS_DINODE_FMT_BTREE)) {
		xfs_irele(ip);
		return -EFSCORRUPTED;
	}

	/* rtgroup inodes record their owning group number in i_projid. */
	if (XFS_IS_CORRUPT(mp, ip->i_projid != rtg_rgno(rtg))) {
		xfs_irele(ip);
		return -EFSCORRUPTED;
	}

	xfs_rtginode_lockdep_setup(ip, rtg_rgno(rtg), type);
	rtg->rtg_inodes[type] = ip;
	return 0;
}

/* Release an rtgroup metadata inode and clear the caller's pointer. */
void
xfs_rtginode_irele(
	struct xfs_inode	**ipp)
{
	if (*ipp)
		xfs_irele(*ipp);
	*ipp = NULL;
}

/* Create the parent directory for all rtgroup inodes and load it. */
int
xfs_rtginode_mkdir_parent(
	struct xfs_mount	*mp)
{
	/* Can't create "rtgroups" without the metadata directory root. */
	if (!mp->m_metadirip)
		return -EFSCORRUPTED;

	return xfs_metadir_mkdir(mp->m_metadirip, "rtgroups", &mp->m_rtdirip);
}

/* Load the parent directory of all rtgroup inodes. */
int
xfs_rtginode_load_parent(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!mp->m_metadirip)
		return -EFSCORRUPTED;

	return xfs_metadir_load(tp, mp->m_metadirip, "rtgroups",
			XFS_METAFILE_DIR, &mp->m_rtdirip);
}