xref: /linux/fs/xfs/libxfs/xfs_rtgroup.c (revision c148bc7535650fbfa95a1f571b9ffa2ab478ea33)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (c) 2022-2024 Oracle.  All Rights Reserved.
4  * Author: Darrick J. Wong <djwong@kernel.org>
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_trans_resv.h"
11 #include "xfs_bit.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_btree.h"
15 #include "xfs_alloc_btree.h"
16 #include "xfs_rmap_btree.h"
17 #include "xfs_alloc.h"
18 #include "xfs_ialloc.h"
19 #include "xfs_rmap.h"
20 #include "xfs_ag.h"
21 #include "xfs_ag_resv.h"
22 #include "xfs_health.h"
23 #include "xfs_error.h"
24 #include "xfs_bmap.h"
25 #include "xfs_defer.h"
26 #include "xfs_log_format.h"
27 #include "xfs_trans.h"
28 #include "xfs_trace.h"
29 #include "xfs_inode.h"
30 #include "xfs_icache.h"
31 #include "xfs_buf_item.h"
32 #include "xfs_rtgroup.h"
33 #include "xfs_rtbitmap.h"
34 #include "xfs_metafile.h"
35 #include "xfs_metadir.h"
36 #include "xfs_rtrmap_btree.h"
37 #include "xfs_rtrefcount_btree.h"
38 
39 /* Find the first usable fsblock in this rtgroup. */
40 static inline uint32_t
41 xfs_rtgroup_min_block(
42 	struct xfs_mount	*mp,
43 	xfs_rgnumber_t		rgno)
44 {
45 	if (xfs_has_rtsb(mp) && rgno == 0)
46 		return mp->m_sb.sb_rextsize;
47 
48 	return 0;
49 }
50 
51 /* Precompute this group's geometry */
52 void
53 xfs_rtgroup_calc_geometry(
54 	struct xfs_mount	*mp,
55 	struct xfs_rtgroup	*rtg,
56 	xfs_rgnumber_t		rgno,
57 	xfs_rgnumber_t		rgcount,
58 	xfs_rtbxlen_t		rextents)
59 {
60 	rtg->rtg_extents = __xfs_rtgroup_extents(mp, rgno, rgcount, rextents);
61 	rtg_group(rtg)->xg_block_count = rtg->rtg_extents * mp->m_sb.sb_rextsize;
62 	rtg_group(rtg)->xg_min_gbno = xfs_rtgroup_min_block(mp, rgno);
63 }
64 
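/* Allocate an incore rtgroup object and add it to the group lookup structure. */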
65 int
66 xfs_rtgroup_alloc(
67 	struct xfs_mount	*mp,
68 	xfs_rgnumber_t		rgno,
69 	xfs_rgnumber_t		rgcount,
70 	xfs_rtbxlen_t		rextents)
71 {
72 	struct xfs_rtgroup	*rtg;
73 	int			error;
74 
75 	rtg = kzalloc(sizeof(struct xfs_rtgroup), GFP_KERNEL);
76 	if (!rtg)
77 		return -ENOMEM;
78 
79 	xfs_rtgroup_calc_geometry(mp, rtg, rgno, rgcount, rextents);
80 
81 	error = xfs_group_insert(mp, rtg_group(rtg), rgno, XG_TYPE_RTG);
82 	if (error)
83 		goto out_free_rtg;
84 	return 0;
85 
86 out_free_rtg:
87 	kfree(rtg);
88 	return error;
89 }
90 
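/* Free the incore rtgroup object for this group number. */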
91 void
92 xfs_rtgroup_free(
93 	struct xfs_mount	*mp,
94 	xfs_rgnumber_t		rgno)
95 {
96 	xfs_group_free(mp, rgno, XG_TYPE_RTG, NULL);
97 }
98 
99 /* Free a range of incore rtgroup objects. */
100 void
101 xfs_free_rtgroups(
102 	struct xfs_mount	*mp,
103 	xfs_rgnumber_t		first_rgno,
104 	xfs_rgnumber_t		end_rgno)
105 {
106 	xfs_rgnumber_t		rgno;
107 
108 	for (rgno = first_rgno; rgno < end_rgno; rgno++)
109 		xfs_rtgroup_free(mp, rgno);
110 }
111 
112 /* Initialize a range of incore rtgroup objects. */
113 int
114 xfs_initialize_rtgroups(
115 	struct xfs_mount	*mp,
116 	xfs_rgnumber_t		first_rgno,
117 	xfs_rgnumber_t		end_rgno,
118 	xfs_rtbxlen_t		rextents)
119 {
120 	xfs_rgnumber_t		index;
121 	int			error;
122 
123 	if (first_rgno >= end_rgno)
124 		return 0;
125 
126 	for (index = first_rgno; index < end_rgno; index++) {
127 		error = xfs_rtgroup_alloc(mp, index, end_rgno, rextents);
128 		if (error)
129 			goto out_unwind_new_rtgs;
130 	}
131 
132 	return 0;
133 
134 out_unwind_new_rtgs:
135 	xfs_free_rtgroups(mp, first_rgno, index);
136 	return error;
137 }
138 
139 /* Compute the number of rt extents in this realtime group. */
140 xfs_rtxnum_t
141 __xfs_rtgroup_extents(
142 	struct xfs_mount	*mp,
143 	xfs_rgnumber_t		rgno,
144 	xfs_rgnumber_t		rgcount,
145 	xfs_rtbxlen_t		rextents)
146 {
147 	ASSERT(rgno < rgcount);
148 	if (rgno == rgcount - 1)
149 		return rextents - ((xfs_rtxnum_t)rgno * mp->m_sb.sb_rgextents);
150 
151 	ASSERT(xfs_has_rtgroups(mp));
152 	return mp->m_sb.sb_rgextents;
153 }
154 
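/* Compute the number of rt extents in this group from the mount's geometry. */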
155 xfs_rtxnum_t
156 xfs_rtgroup_extents(
157 	struct xfs_mount	*mp,
158 	xfs_rgnumber_t		rgno)
159 {
160 	return __xfs_rtgroup_extents(mp, rgno, mp->m_sb.sb_rgcount,
161 			mp->m_sb.sb_rextents);
162 }
163 
164 /*
165  * Update the rt extent count of the previous tail rtgroup if it changed during
166  * recovery (i.e. recovery of a growfs).
167  */
168 int
169 xfs_update_last_rtgroup_size(
170 	struct xfs_mount	*mp,
171 	xfs_rgnumber_t		prev_rgcount)
172 {
173 	struct xfs_rtgroup	*rtg;
174 
175 	ASSERT(prev_rgcount > 0);
176 
177 	rtg = xfs_rtgroup_grab(mp, prev_rgcount - 1);
178 	if (!rtg)
179 		return -EFSCORRUPTED;
180 	rtg->rtg_extents = __xfs_rtgroup_extents(mp, prev_rgcount - 1,
181 			mp->m_sb.sb_rgcount, mp->m_sb.sb_rextents);
182 	rtg_group(rtg)->xg_block_count = rtg->rtg_extents * mp->m_sb.sb_rextsize;
183 	xfs_rtgroup_rele(rtg);
184 	return 0;
185 }
186 
187 /* Lock metadata inodes associated with this rt group. */
188 void
189 xfs_rtgroup_lock(
190 	struct xfs_rtgroup	*rtg,
191 	unsigned int		rtglock_flags)
192 {
193 	ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
194 	ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
195 	       !(rtglock_flags & XFS_RTGLOCK_BITMAP));
196 
197 	if (!xfs_has_zoned(rtg_mount(rtg))) {
198 		if (rtglock_flags & XFS_RTGLOCK_BITMAP) {
199 			/*
200 			 * Lock both realtime free space metadata inodes for a
201 			 * freespace update.
202 			 */
203 			xfs_ilock(rtg_bitmap(rtg), XFS_ILOCK_EXCL);
204 			xfs_ilock(rtg_summary(rtg), XFS_ILOCK_EXCL);
205 		} else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) {
206 			xfs_ilock(rtg_bitmap(rtg), XFS_ILOCK_SHARED);
207 		}
208 	}
209 
210 	if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg))
211 		xfs_ilock(rtg_rmap(rtg), XFS_ILOCK_EXCL);
212 
213 	if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg_refcount(rtg))
214 		xfs_ilock(rtg_refcount(rtg), XFS_ILOCK_EXCL);
215 }
216 
217 /* Unlock metadata inodes associated with this rt group. */
218 void
219 xfs_rtgroup_unlock(
220 	struct xfs_rtgroup	*rtg,
221 	unsigned int		rtglock_flags)
222 {
223 	ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
224 	ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
225 	       !(rtglock_flags & XFS_RTGLOCK_BITMAP));
226 
227 	if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg_refcount(rtg))
228 		xfs_iunlock(rtg_refcount(rtg), XFS_ILOCK_EXCL);
229 
230 	if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg))
231 		xfs_iunlock(rtg_rmap(rtg), XFS_ILOCK_EXCL);
232 
233 	if (!xfs_has_zoned(rtg_mount(rtg))) {
234 		if (rtglock_flags & XFS_RTGLOCK_BITMAP) {
235 			xfs_iunlock(rtg_summary(rtg), XFS_ILOCK_EXCL);
236 			xfs_iunlock(rtg_bitmap(rtg), XFS_ILOCK_EXCL);
237 		} else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) {
238 			xfs_iunlock(rtg_bitmap(rtg), XFS_ILOCK_SHARED);
239 		}
240 	}
241 }
242 
243 /*
244  * Join realtime group metadata inodes to the transaction.  The ILOCKs will be
245  * released on transaction commit.
246  */
247 void
248 xfs_rtgroup_trans_join(
249 	struct xfs_trans	*tp,
250 	struct xfs_rtgroup	*rtg,
251 	unsigned int		rtglock_flags)
252 {
253 	ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
254 	ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED));
255 
256 	if (!xfs_has_zoned(rtg_mount(rtg)) &&
257 	    (rtglock_flags & XFS_RTGLOCK_BITMAP)) {
258 		xfs_trans_ijoin(tp, rtg_bitmap(rtg), XFS_ILOCK_EXCL);
259 		xfs_trans_ijoin(tp, rtg_summary(rtg), XFS_ILOCK_EXCL);
260 	}
261 
262 	if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg))
263 		xfs_trans_ijoin(tp, rtg_rmap(rtg), XFS_ILOCK_EXCL);
264 
265 	if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg_refcount(rtg))
266 		xfs_trans_ijoin(tp, rtg_refcount(rtg), XFS_ILOCK_EXCL);
267 }
268 
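/*
 * Illustrative caller sequence (a sketch, not taken verbatim from any single
 * caller): a realtime free space update typically grabs the rtgroup, locks
 * the bitmap and summary inodes, joins them to the transaction so that the
 * ILOCKs are released at commit time, and then drops the group reference:
 *
 *	rtg = xfs_rtgroup_grab(mp, rgno);
 *	xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP);
 *	xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_BITMAP);
 *	... modify rt free space metadata ...
 *	error = xfs_trans_commit(tp);
 *	xfs_rtgroup_rele(rtg);
 */
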
269 /* Retrieve rt group geometry. */
270 int
271 xfs_rtgroup_get_geometry(
272 	struct xfs_rtgroup	*rtg,
273 	struct xfs_rtgroup_geometry *rgeo)
274 {
275 	/* Fill out form. */
276 	memset(rgeo, 0, sizeof(*rgeo));
277 	rgeo->rg_number = rtg_rgno(rtg);
278 	rgeo->rg_length = rtg_blocks(rtg);
279 	xfs_rtgroup_geom_health(rtg, rgeo);
280 	return 0;
281 }
282 
283 #ifdef CONFIG_PROVE_LOCKING
284 static struct lock_class_key xfs_rtginode_lock_class;
285 
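/*
 * Compare two rtgroup inode ILOCKs for lockdep.  Rtgroup metadata inodes
 * store their owning group number in i_projid, so this orders locks by rgno.
 */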
286 static int
287 xfs_rtginode_ilock_cmp_fn(
288 	const struct lockdep_map	*m1,
289 	const struct lockdep_map	*m2)
290 {
291 	const struct xfs_inode *ip1 =
292 		container_of(m1, struct xfs_inode, i_lock.dep_map);
293 	const struct xfs_inode *ip2 =
294 		container_of(m2, struct xfs_inode, i_lock.dep_map);
295 
296 	if (ip1->i_projid < ip2->i_projid)
297 		return -1;
298 	if (ip1->i_projid > ip2->i_projid)
299 		return 1;
300 	return 0;
301 }
302 
303 static inline void
304 xfs_rtginode_ilock_print_fn(
305 	const struct lockdep_map	*m)
306 {
307 	const struct xfs_inode *ip =
308 		container_of(m, struct xfs_inode, i_lock.dep_map);
309 
310 	printk(KERN_CONT " rgno=%u metatype=%s", ip->i_projid,
311 			xfs_metafile_type_str(ip->i_metatype));
312 }
313 
314 /*
315  * Most of the time each of the RTG inode locks is only taken one at a time.
316  * But when committing deferred ops, more than one of a kind can be taken.
317  * However, deferred rt ops will be committed in rgno order so there is no
318  * potential for deadlocks.  The code here is needed to tell lockdep about this
319  * order.
320  */
321 static inline void
322 xfs_rtginode_lockdep_setup(
323 	struct xfs_inode	*ip,
324 	xfs_rgnumber_t		rgno,
325 	enum xfs_rtg_inodes	type)
326 {
327 	lockdep_set_class_and_subclass(&ip->i_lock, &xfs_rtginode_lock_class,
328 			type);
329 	lock_set_cmp_fn(&ip->i_lock, xfs_rtginode_ilock_cmp_fn,
330 			xfs_rtginode_ilock_print_fn);
331 }
332 #else
333 #define xfs_rtginode_lockdep_setup(ip, rgno, type)	do { } while (0)
334 #endif /* CONFIG_PROVE_LOCKING */
335 
336 struct xfs_rtginode_ops {
337 	const char		*name;	/* short name */
338 
339 	enum xfs_metafile_type	metafile_type;
340 
341 	unsigned int		sick;	/* rtgroup sickness flag */
342 
343 	unsigned int		fmt_mask; /* all valid data fork formats */
344 
345 	/* Does the fs have this feature? */
346 	bool			(*enabled)(const struct xfs_mount *mp);
347 
348 	/* Create this rtgroup metadata inode and initialize it. */
349 	int			(*create)(struct xfs_rtgroup *rtg,
350 					  struct xfs_inode *ip,
351 					  struct xfs_trans *tp,
352 					  bool init);
353 };
354 
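/* Static information and creation callbacks for each rtgroup metadata inode. */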
355 static const struct xfs_rtginode_ops xfs_rtginode_ops[XFS_RTGI_MAX] = {
356 	[XFS_RTGI_BITMAP] = {
357 		.name		= "bitmap",
358 		.metafile_type	= XFS_METAFILE_RTBITMAP,
359 		.sick		= XFS_SICK_RG_BITMAP,
360 		.fmt_mask	= (1U << XFS_DINODE_FMT_EXTENTS) |
361 				  (1U << XFS_DINODE_FMT_BTREE),
362 		.enabled	= xfs_has_nonzoned,
363 		.create		= xfs_rtbitmap_create,
364 	},
365 	[XFS_RTGI_SUMMARY] = {
366 		.name		= "summary",
367 		.metafile_type	= XFS_METAFILE_RTSUMMARY,
368 		.sick		= XFS_SICK_RG_SUMMARY,
369 		.fmt_mask	= (1U << XFS_DINODE_FMT_EXTENTS) |
370 				  (1U << XFS_DINODE_FMT_BTREE),
371 		.enabled	= xfs_has_nonzoned,
372 		.create		= xfs_rtsummary_create,
373 	},
374 	[XFS_RTGI_RMAP] = {
375 		.name		= "rmap",
376 		.metafile_type	= XFS_METAFILE_RTRMAP,
377 		.sick		= XFS_SICK_RG_RMAPBT,
378 		.fmt_mask	= 1U << XFS_DINODE_FMT_META_BTREE,
379 		/*
380 		 * growfs must create the rtrmap inodes before adding a
381 		 * realtime volume to the filesystem, so we cannot use the
382 		 * rtrmapbt predicate here.
383 		 */
384 		.enabled	= xfs_has_rmapbt,
385 		.create		= xfs_rtrmapbt_create,
386 	},
387 	[XFS_RTGI_REFCOUNT] = {
388 		.name		= "refcount",
389 		.metafile_type	= XFS_METAFILE_RTREFCOUNT,
390 		.sick		= XFS_SICK_RG_REFCNTBT,
391 		.fmt_mask	= 1U << XFS_DINODE_FMT_META_BTREE,
392 		/* same comment about growfs and rmap inodes applies here */
393 		.enabled	= xfs_has_reflink,
394 		.create		= xfs_rtrefcountbt_create,
395 	},
396 };
397 
398 /* Return the shortname of this rtgroup inode. */
399 const char *
400 xfs_rtginode_name(
401 	enum xfs_rtg_inodes	type)
402 {
403 	return xfs_rtginode_ops[type].name;
404 }
405 
406 /* Return the metafile type of this rtgroup inode. */
407 enum xfs_metafile_type
408 xfs_rtginode_metafile_type(
409 	enum xfs_rtg_inodes	type)
410 {
411 	return xfs_rtginode_ops[type].metafile_type;
412 }
413 
414 /* Should this rtgroup inode be present? */
415 bool
416 xfs_rtginode_enabled(
417 	struct xfs_rtgroup	*rtg,
418 	enum xfs_rtg_inodes	type)
419 {
420 	const struct xfs_rtginode_ops *ops = &xfs_rtginode_ops[type];
421 
422 	if (!ops->enabled)
423 		return true;
424 	return ops->enabled(rtg_mount(rtg));
425 }
426 
427 /* Mark an rtgroup inode sick */
428 void
429 xfs_rtginode_mark_sick(
430 	struct xfs_rtgroup	*rtg,
431 	enum xfs_rtg_inodes	type)
432 {
433 	const struct xfs_rtginode_ops *ops = &xfs_rtginode_ops[type];
434 
435 	xfs_group_mark_sick(rtg_group(rtg), ops->sick);
436 }
437 
438 /* Load an existing rtgroup inode into the rtgroup structure. */
439 int
440 xfs_rtginode_load(
441 	struct xfs_rtgroup	*rtg,
442 	enum xfs_rtg_inodes	type,
443 	struct xfs_trans	*tp)
444 {
445 	struct xfs_mount	*mp = tp->t_mountp;
446 	struct xfs_inode	*ip;
447 	const struct xfs_rtginode_ops *ops = &xfs_rtginode_ops[type];
448 	int			error;
449 
450 	if (!xfs_rtginode_enabled(rtg, type))
451 		return 0;
452 
453 	if (!xfs_has_rtgroups(mp)) {
454 		xfs_ino_t	ino;
455 
456 		switch (type) {
457 		case XFS_RTGI_BITMAP:
458 			ino = mp->m_sb.sb_rbmino;
459 			break;
460 		case XFS_RTGI_SUMMARY:
461 			ino = mp->m_sb.sb_rsumino;
462 			break;
463 		default:
464 			/* None of the other types exist on !rtgroups */
465 			return 0;
466 		}
467 
468 		error = xfs_trans_metafile_iget(tp, ino, ops->metafile_type,
469 				&ip);
470 	} else {
471 		const char	*path;
472 
473 		if (!mp->m_rtdirip) {
474 			xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
475 			return -EFSCORRUPTED;
476 		}
477 
478 		path = xfs_rtginode_path(rtg_rgno(rtg), type);
479 		if (!path)
480 			return -ENOMEM;
481 		error = xfs_metadir_load(tp, mp->m_rtdirip, path,
482 				ops->metafile_type, &ip);
483 		kfree(path);
484 	}
485 
486 	if (error) {
487 		if (xfs_metadata_is_sick(error))
488 			xfs_rtginode_mark_sick(rtg, type);
489 		return error;
490 	}
491 
492 	if (XFS_IS_CORRUPT(mp, !((1U << ip->i_df.if_format) & ops->fmt_mask))) {
493 		xfs_irele(ip);
494 		xfs_rtginode_mark_sick(rtg, type);
495 		return -EFSCORRUPTED;
496 	}
497 
498 	if (XFS_IS_CORRUPT(mp, ip->i_projid != rtg_rgno(rtg))) {
499 		xfs_irele(ip);
500 		xfs_rtginode_mark_sick(rtg, type);
501 		return -EFSCORRUPTED;
502 	}
503 
504 	xfs_rtginode_lockdep_setup(ip, rtg_rgno(rtg), type);
505 	rtg->rtg_inodes[type] = ip;
506 	return 0;
507 }
508 
509 /* Release an rtgroup metadata inode. */
510 void
511 xfs_rtginode_irele(
512 	struct xfs_inode	**ipp)
513 {
514 	if (*ipp)
515 		xfs_irele(*ipp);
516 	*ipp = NULL;
517 }
518 
519 /* Create and initialize the given type of rtgroup metadata inode. */
520 int
521 xfs_rtginode_create(
522 	struct xfs_rtgroup		*rtg,
523 	enum xfs_rtg_inodes		type,
524 	bool				init)
525 {
526 	const struct xfs_rtginode_ops	*ops = &xfs_rtginode_ops[type];
527 	struct xfs_mount		*mp = rtg_mount(rtg);
528 	struct xfs_metadir_update	upd = {
529 		.dp			= mp->m_rtdirip,
530 		.metafile_type		= ops->metafile_type,
531 	};
532 	int				error;
533 
534 	if (!xfs_rtginode_enabled(rtg, type))
535 		return 0;
536 
537 	if (!mp->m_rtdirip) {
538 		xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
539 		return -EFSCORRUPTED;
540 	}
541 
542 	upd.path = xfs_rtginode_path(rtg_rgno(rtg), type);
543 	if (!upd.path)
544 		return -ENOMEM;
545 
546 	error = xfs_metadir_start_create(&upd);
547 	if (error)
548 		goto out_path;
549 
550 	error = xfs_metadir_create(&upd, S_IFREG);
551 	if (error)
552 		goto out_cancel;
553 
554 	xfs_rtginode_lockdep_setup(upd.ip, rtg_rgno(rtg), type);
555 
556 	upd.ip->i_projid = rtg_rgno(rtg);
557 	error = ops->create(rtg, upd.ip, upd.tp, init);
558 	if (error)
559 		goto out_cancel;
560 
561 	error = xfs_metadir_commit(&upd);
562 	if (error)
563 		goto out_path;
564 
565 	kfree(upd.path);
566 	xfs_finish_inode_setup(upd.ip);
567 	rtg->rtg_inodes[type] = upd.ip;
568 	return 0;
569 
570 out_cancel:
571 	xfs_metadir_cancel(&upd, error);
572 	/* Have to finish setting up the inode to ensure it's deleted. */
573 	if (upd.ip) {
574 		xfs_finish_inode_setup(upd.ip);
575 		xfs_irele(upd.ip);
576 	}
577 out_path:
578 	kfree(upd.path);
579 	return error;
580 }
581 
582 /* Create the parent directory for all rtgroup inodes and load it. */
583 int
584 xfs_rtginode_mkdir_parent(
585 	struct xfs_mount	*mp)
586 {
587 	if (!mp->m_metadirip) {
588 		xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
589 		return -EFSCORRUPTED;
590 	}
591 
592 	return xfs_metadir_mkdir(mp->m_metadirip, "rtgroups", &mp->m_rtdirip);
593 }
594 
595 /* Load the parent directory of all rtgroup inodes. */
596 int
597 xfs_rtginode_load_parent(
598 	struct xfs_trans	*tp)
599 {
600 	struct xfs_mount	*mp = tp->t_mountp;
601 
602 	if (!mp->m_metadirip) {
603 		xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
604 		return -EFSCORRUPTED;
605 	}
606 
607 	return xfs_metadir_load(tp, mp->m_metadirip, "rtgroups",
608 			XFS_METAFILE_DIR, &mp->m_rtdirip);
609 }
610 
611 /* Check superblock fields for a read or a write. */
612 static xfs_failaddr_t
613 xfs_rtsb_verify_common(
614 	struct xfs_buf		*bp)
615 {
616 	struct xfs_rtsb		*rsb = bp->b_addr;
617 
618 	if (!xfs_verify_magic(bp, rsb->rsb_magicnum))
619 		return __this_address;
620 	if (rsb->rsb_pad)
621 		return __this_address;
622 
623 	/* Everything to the end of the fs block must be zero */
624 	if (memchr_inv(rsb + 1, 0, BBTOB(bp->b_length) - sizeof(*rsb)))
625 		return __this_address;
626 
627 	return NULL;
628 }
629 
630 /* Check superblock fields for a read or revalidation. */
631 static inline xfs_failaddr_t
632 xfs_rtsb_verify_all(
633 	struct xfs_buf		*bp)
634 {
635 	struct xfs_rtsb		*rsb = bp->b_addr;
636 	struct xfs_mount	*mp = bp->b_mount;
637 	xfs_failaddr_t		fa;
638 
639 	fa = xfs_rtsb_verify_common(bp);
640 	if (fa)
641 		return fa;
642 
643 	if (memcmp(&rsb->rsb_fname, &mp->m_sb.sb_fname, XFSLABEL_MAX))
644 		return __this_address;
645 	if (!uuid_equal(&rsb->rsb_uuid, &mp->m_sb.sb_uuid))
646 		return __this_address;
647 	if (!uuid_equal(&rsb->rsb_meta_uuid, &mp->m_sb.sb_meta_uuid))
648 		return __this_address;
649 
650 	return NULL;
651 }
652 
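/* Verify the checksum and contents of an rt superblock that was just read. */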
653 static void
654 xfs_rtsb_read_verify(
655 	struct xfs_buf		*bp)
656 {
657 	xfs_failaddr_t		fa;
658 
659 	if (!xfs_buf_verify_cksum(bp, XFS_RTSB_CRC_OFF)) {
660 		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
661 		return;
662 	}
663 
664 	fa = xfs_rtsb_verify_all(bp);
665 	if (fa)
666 		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
667 }
668 
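/* Re-check the rt superblock and recompute its checksum before writeout. */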
669 static void
670 xfs_rtsb_write_verify(
671 	struct xfs_buf		*bp)
672 {
673 	xfs_failaddr_t		fa;
674 
675 	fa = xfs_rtsb_verify_common(bp);
676 	if (fa) {
677 		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
678 		return;
679 	}
680 
681 	xfs_buf_update_cksum(bp, XFS_RTSB_CRC_OFF);
682 }
683 
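/* Buffer ops for the realtime superblock. */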
684 const struct xfs_buf_ops xfs_rtsb_buf_ops = {
685 	.name		= "xfs_rtsb",
686 	.magic		= { 0, cpu_to_be32(XFS_RTSB_MAGIC) },
687 	.verify_read	= xfs_rtsb_read_verify,
688 	.verify_write	= xfs_rtsb_write_verify,
689 	.verify_struct	= xfs_rtsb_verify_all,
690 };
691 
692 /* Update a realtime superblock from the primary fs superblock. */
693 void
694 xfs_update_rtsb(
695 	struct xfs_buf		*rtsb_bp,
696 	const struct xfs_buf	*sb_bp)
697 {
698 	const struct xfs_dsb	*dsb = sb_bp->b_addr;
699 	struct xfs_rtsb		*rsb = rtsb_bp->b_addr;
700 	const uuid_t		*meta_uuid;
701 
702 	rsb->rsb_magicnum = cpu_to_be32(XFS_RTSB_MAGIC);
703 
704 	rsb->rsb_pad = 0;
705 	memcpy(&rsb->rsb_fname, &dsb->sb_fname, XFSLABEL_MAX);
706 
707 	memcpy(&rsb->rsb_uuid, &dsb->sb_uuid, sizeof(rsb->rsb_uuid));
708 
709 	/*
710 	 * The metadata uuid is the fs uuid if the metauuid feature is not
711 	 * enabled.
712 	 */
713 	if (dsb->sb_features_incompat &
714 				cpu_to_be32(XFS_SB_FEAT_INCOMPAT_META_UUID))
715 		meta_uuid = &dsb->sb_meta_uuid;
716 	else
717 		meta_uuid = &dsb->sb_uuid;
718 	memcpy(&rsb->rsb_meta_uuid, meta_uuid, sizeof(rsb->rsb_meta_uuid));
719 }
720 
721 /*
722  * Update the realtime superblock from a filesystem superblock and log it to
723  * the given transaction.
724  */
725 struct xfs_buf *
726 xfs_log_rtsb(
727 	struct xfs_trans	*tp,
728 	const struct xfs_buf	*sb_bp)
729 {
730 	struct xfs_buf		*rtsb_bp;
731 
732 	if (!xfs_has_rtsb(tp->t_mountp))
733 		return NULL;
734 
735 	rtsb_bp = xfs_trans_getrtsb(tp);
736 	if (!rtsb_bp) {
737 		/*
738 		 * It's possible for the rtgroups feature to be enabled but
739 		 * there is no incore rt superblock buffer if the rt geometry
740 		 * was specified at mkfs time but the rt section has not yet
741 		 * been attached.  In this case, rblocks must be zero.
742 		 */
743 		ASSERT(tp->t_mountp->m_sb.sb_rblocks == 0);
744 		return NULL;
745 	}
746 
747 	xfs_update_rtsb(rtsb_bp, sb_bp);
748 	xfs_trans_ordered_buf(tp, rtsb_bp);
749 	return rtsb_bp;
750 }
751