1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (c) 2022-2024 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <djwong@kernel.org>
5 */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_trans_resv.h"
11 #include "xfs_bit.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_btree.h"
15 #include "xfs_alloc_btree.h"
16 #include "xfs_rmap_btree.h"
17 #include "xfs_alloc.h"
18 #include "xfs_ialloc.h"
19 #include "xfs_rmap.h"
20 #include "xfs_ag.h"
21 #include "xfs_ag_resv.h"
22 #include "xfs_health.h"
23 #include "xfs_error.h"
24 #include "xfs_bmap.h"
25 #include "xfs_defer.h"
26 #include "xfs_log_format.h"
27 #include "xfs_trans.h"
28 #include "xfs_trace.h"
29 #include "xfs_inode.h"
30 #include "xfs_icache.h"
31 #include "xfs_buf_item.h"
32 #include "xfs_rtgroup.h"
33 #include "xfs_rtbitmap.h"
34 #include "xfs_metafile.h"
35 #include "xfs_metadir.h"
36
37 /* Find the first usable fsblock in this rtgroup. */
38 static inline uint32_t
xfs_rtgroup_min_block(struct xfs_mount * mp,xfs_rgnumber_t rgno)39 xfs_rtgroup_min_block(
40 struct xfs_mount *mp,
41 xfs_rgnumber_t rgno)
42 {
43 if (xfs_has_rtsb(mp) && rgno == 0)
44 return mp->m_sb.sb_rextsize;
45
46 return 0;
47 }
48
/*
 * Precompute this group's geometry: the rt extent count, the fsblock count
 * derived from it, and the first usable group block number.
 */
void
xfs_rtgroup_calc_geometry(
	struct xfs_mount	*mp,
	struct xfs_rtgroup	*rtg,
	xfs_rgnumber_t		rgno,
	xfs_rgnumber_t		rgcount,
	xfs_rtbxlen_t		rextents)
{
	rtg->rtg_extents = __xfs_rtgroup_extents(mp, rgno, rgcount, rextents);
	rtg_group(rtg)->xg_block_count = rtg->rtg_extents * mp->m_sb.sb_rextsize;
	rtg_group(rtg)->xg_min_gbno = xfs_rtgroup_min_block(mp, rgno);
}
62
63 int
xfs_rtgroup_alloc(struct xfs_mount * mp,xfs_rgnumber_t rgno,xfs_rgnumber_t rgcount,xfs_rtbxlen_t rextents)64 xfs_rtgroup_alloc(
65 struct xfs_mount *mp,
66 xfs_rgnumber_t rgno,
67 xfs_rgnumber_t rgcount,
68 xfs_rtbxlen_t rextents)
69 {
70 struct xfs_rtgroup *rtg;
71 int error;
72
73 rtg = kzalloc(sizeof(struct xfs_rtgroup), GFP_KERNEL);
74 if (!rtg)
75 return -ENOMEM;
76
77 xfs_rtgroup_calc_geometry(mp, rtg, rgno, rgcount, rextents);
78
79 error = xfs_group_insert(mp, rtg_group(rtg), rgno, XG_TYPE_RTG);
80 if (error)
81 goto out_free_rtg;
82 return 0;
83
84 out_free_rtg:
85 kfree(rtg);
86 return error;
87 }
88
/* Release the incore rtgroup object for the given group number. */
void
xfs_rtgroup_free(
	struct xfs_mount	*mp,
	xfs_rgnumber_t		rgno)
{
	xfs_group_free(mp, rgno, XG_TYPE_RTG, NULL);
}
96
97 /* Free a range of incore rtgroup objects. */
98 void
xfs_free_rtgroups(struct xfs_mount * mp,xfs_rgnumber_t first_rgno,xfs_rgnumber_t end_rgno)99 xfs_free_rtgroups(
100 struct xfs_mount *mp,
101 xfs_rgnumber_t first_rgno,
102 xfs_rgnumber_t end_rgno)
103 {
104 xfs_rgnumber_t rgno;
105
106 for (rgno = first_rgno; rgno < end_rgno; rgno++)
107 xfs_rtgroup_free(mp, rgno);
108 }
109
110 /* Initialize some range of incore rtgroup objects. */
111 int
xfs_initialize_rtgroups(struct xfs_mount * mp,xfs_rgnumber_t first_rgno,xfs_rgnumber_t end_rgno,xfs_rtbxlen_t rextents)112 xfs_initialize_rtgroups(
113 struct xfs_mount *mp,
114 xfs_rgnumber_t first_rgno,
115 xfs_rgnumber_t end_rgno,
116 xfs_rtbxlen_t rextents)
117 {
118 xfs_rgnumber_t index;
119 int error;
120
121 if (first_rgno >= end_rgno)
122 return 0;
123
124 for (index = first_rgno; index < end_rgno; index++) {
125 error = xfs_rtgroup_alloc(mp, index, end_rgno, rextents);
126 if (error)
127 goto out_unwind_new_rtgs;
128 }
129
130 return 0;
131
132 out_unwind_new_rtgs:
133 xfs_free_rtgroups(mp, first_rgno, index);
134 return error;
135 }
136
137 /* Compute the number of rt extents in this realtime group. */
138 xfs_rtxnum_t
__xfs_rtgroup_extents(struct xfs_mount * mp,xfs_rgnumber_t rgno,xfs_rgnumber_t rgcount,xfs_rtbxlen_t rextents)139 __xfs_rtgroup_extents(
140 struct xfs_mount *mp,
141 xfs_rgnumber_t rgno,
142 xfs_rgnumber_t rgcount,
143 xfs_rtbxlen_t rextents)
144 {
145 ASSERT(rgno < rgcount);
146 if (rgno == rgcount - 1)
147 return rextents - ((xfs_rtxnum_t)rgno * mp->m_sb.sb_rgextents);
148
149 ASSERT(xfs_has_rtgroups(mp));
150 return mp->m_sb.sb_rgextents;
151 }
152
/* Compute the extent count of an rtgroup from the mount's own geometry. */
xfs_rtxnum_t
xfs_rtgroup_extents(
	struct xfs_mount	*mp,
	xfs_rgnumber_t		rgno)
{
	return __xfs_rtgroup_extents(mp, rgno, mp->m_sb.sb_rgcount,
			mp->m_sb.sb_rextents);
}
161
162 /*
163 * Update the rt extent count of the previous tail rtgroup if it changed during
164 * recovery (i.e. recovery of a growfs).
165 */
166 int
xfs_update_last_rtgroup_size(struct xfs_mount * mp,xfs_rgnumber_t prev_rgcount)167 xfs_update_last_rtgroup_size(
168 struct xfs_mount *mp,
169 xfs_rgnumber_t prev_rgcount)
170 {
171 struct xfs_rtgroup *rtg;
172
173 ASSERT(prev_rgcount > 0);
174
175 rtg = xfs_rtgroup_grab(mp, prev_rgcount - 1);
176 if (!rtg)
177 return -EFSCORRUPTED;
178 rtg->rtg_extents = __xfs_rtgroup_extents(mp, prev_rgcount - 1,
179 mp->m_sb.sb_rgcount, mp->m_sb.sb_rextents);
180 rtg_group(rtg)->xg_block_count = rtg->rtg_extents * mp->m_sb.sb_rextsize;
181 xfs_rtgroup_rele(rtg);
182 return 0;
183 }
184
/* Lock metadata inodes associated with this rt group. */
void
xfs_rtgroup_lock(
	struct xfs_rtgroup	*rtg,
	unsigned int		rtglock_flags)
{
	ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
	/* Exclusive and shared bitmap locking are mutually exclusive. */
	ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
	       !(rtglock_flags & XFS_RTGLOCK_BITMAP));

	if (rtglock_flags & XFS_RTGLOCK_BITMAP) {
		/*
		 * Lock both realtime free space metadata inodes for a freespace
		 * update.  Bitmap first, then summary; the unlock path reverses
		 * this order.
		 */
		xfs_ilock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_EXCL);
		xfs_ilock(rtg->rtg_inodes[XFS_RTGI_SUMMARY], XFS_ILOCK_EXCL);
	} else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) {
		/* Readers only take the bitmap inode, shared. */
		xfs_ilock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_SHARED);
	}
}
206
/* Unlock metadata inodes associated with this rt group. */
void
xfs_rtgroup_unlock(
	struct xfs_rtgroup	*rtg,
	unsigned int		rtglock_flags)
{
	ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
	ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
	       !(rtglock_flags & XFS_RTGLOCK_BITMAP));

	if (rtglock_flags & XFS_RTGLOCK_BITMAP) {
		/* Drop in the reverse order of xfs_rtgroup_lock. */
		xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_SUMMARY], XFS_ILOCK_EXCL);
		xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_EXCL);
	} else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) {
		xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_SHARED);
	}
}
224
/*
 * Join realtime group metadata inodes to the transaction. The ILOCKs will be
 * released on transaction commit.
 */
void
xfs_rtgroup_trans_join(
	struct xfs_trans	*tp,
	struct xfs_rtgroup	*rtg,
	unsigned int		rtglock_flags)
{
	ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
	/* Only exclusively-held locks may be joined to a transaction. */
	ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED));

	if (rtglock_flags & XFS_RTGLOCK_BITMAP) {
		xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTGI_BITMAP],
				XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTGI_SUMMARY],
				XFS_ILOCK_EXCL);
	}
}
245
/* Retrieve rt group geometry. */
int
xfs_rtgroup_get_geometry(
	struct xfs_rtgroup		*rtg,
	struct xfs_rtgroup_geometry	*rgeo)
{
	/* Fill out form.  Zero everything first so unset fields read as 0. */
	memset(rgeo, 0, sizeof(*rgeo));
	rgeo->rg_number = rtg_rgno(rtg);
	rgeo->rg_length = rtg_group(rtg)->xg_block_count;
	/* Let the health tracking code fill in the sickness flags. */
	xfs_rtgroup_geom_health(rtg, rgeo);
	return 0;
}
259
260 #ifdef CONFIG_PROVE_LOCKING
261 static struct lock_class_key xfs_rtginode_lock_class;
262
263 static int
xfs_rtginode_ilock_cmp_fn(const struct lockdep_map * m1,const struct lockdep_map * m2)264 xfs_rtginode_ilock_cmp_fn(
265 const struct lockdep_map *m1,
266 const struct lockdep_map *m2)
267 {
268 const struct xfs_inode *ip1 =
269 container_of(m1, struct xfs_inode, i_lock.dep_map);
270 const struct xfs_inode *ip2 =
271 container_of(m2, struct xfs_inode, i_lock.dep_map);
272
273 if (ip1->i_projid < ip2->i_projid)
274 return -1;
275 if (ip1->i_projid > ip2->i_projid)
276 return 1;
277 return 0;
278 }
279
/* Print the lockdep ordering key (the rtgroup number) for this ILOCK. */
static inline void
xfs_rtginode_ilock_print_fn(
	const struct lockdep_map	*m)
{
	const struct xfs_inode		*ip =
		container_of(m, struct xfs_inode, i_lock.dep_map);

	/* rtgroup metadata inodes carry their group number in i_projid */
	printk(KERN_CONT " rgno=%u", ip->i_projid);
}
289
/*
 * Most of the time each of the RTG inode locks are only taken one at a time.
 * But when committing deferred ops, more than one of a kind can be taken.
 * However, deferred rt ops will be committed in rgno order so there is no
 * potential for deadlocks. The code here is needed to tell lockdep about this
 * order.
 */
static inline void
xfs_rtginode_lockdep_setup(
	struct xfs_inode	*ip,
	xfs_rgnumber_t		rgno,
	enum xfs_rtg_inodes	type)
{
	/* One lockdep subclass per metadata inode type. */
	lockdep_set_class_and_subclass(&ip->i_lock, &xfs_rtginode_lock_class,
			type);
	/* Same-type locks compare by rgno so they may nest in group order. */
	lock_set_cmp_fn(&ip->i_lock, xfs_rtginode_ilock_cmp_fn,
			xfs_rtginode_ilock_print_fn);
}
308 #else
309 #define xfs_rtginode_lockdep_setup(ip, rgno, type) do { } while (0)
310 #endif /* CONFIG_PROVE_LOCKING */
311
/* Per-type operations and attributes of an rtgroup metadata inode. */
struct xfs_rtginode_ops {
	const char		*name;	/* short name */

	enum xfs_metafile_type	metafile_type;

	unsigned int		sick;	/* rtgroup sickness flag */

	/* Does the fs have this feature?  NULL means always enabled. */
	bool			(*enabled)(struct xfs_mount *mp);

	/* Create this rtgroup metadata inode and initialize it. */
	int			(*create)(struct xfs_rtgroup *rtg,
					  struct xfs_inode *ip,
					  struct xfs_trans *tp,
					  bool init);
};
328
/* Dispatch table, one entry per rtgroup metadata inode type. */
static const struct xfs_rtginode_ops xfs_rtginode_ops[XFS_RTGI_MAX] = {
	[XFS_RTGI_BITMAP] = {
		.name		= "bitmap",
		.metafile_type	= XFS_METAFILE_RTBITMAP,
		.sick		= XFS_SICK_RG_BITMAP,
		.create		= xfs_rtbitmap_create,
	},
	[XFS_RTGI_SUMMARY] = {
		.name		= "summary",
		.metafile_type	= XFS_METAFILE_RTSUMMARY,
		.sick		= XFS_SICK_RG_SUMMARY,
		.create		= xfs_rtsummary_create,
	},
};
343
/* Return the shortname of this rtgroup inode. */
const char *
xfs_rtginode_name(
	enum xfs_rtg_inodes	type)
{
	return xfs_rtginode_ops[type].name;
}
351
/* Return the metafile type of this rtgroup inode. */
enum xfs_metafile_type
xfs_rtginode_metafile_type(
	enum xfs_rtg_inodes	type)
{
	return xfs_rtginode_ops[type].metafile_type;
}
359
360 /* Should this rtgroup inode be present? */
361 bool
xfs_rtginode_enabled(struct xfs_rtgroup * rtg,enum xfs_rtg_inodes type)362 xfs_rtginode_enabled(
363 struct xfs_rtgroup *rtg,
364 enum xfs_rtg_inodes type)
365 {
366 const struct xfs_rtginode_ops *ops = &xfs_rtginode_ops[type];
367
368 if (!ops->enabled)
369 return true;
370 return ops->enabled(rtg_mount(rtg));
371 }
372
373 /* Mark an rtgroup inode sick */
374 void
xfs_rtginode_mark_sick(struct xfs_rtgroup * rtg,enum xfs_rtg_inodes type)375 xfs_rtginode_mark_sick(
376 struct xfs_rtgroup *rtg,
377 enum xfs_rtg_inodes type)
378 {
379 const struct xfs_rtginode_ops *ops = &xfs_rtginode_ops[type];
380
381 xfs_group_mark_sick(rtg_group(rtg), ops->sick);
382 }
383
/* Load an existing rtgroup inode into the rtgroup structure. */
int
xfs_rtginode_load(
	struct xfs_rtgroup	*rtg,
	enum xfs_rtg_inodes	type,
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip;
	const struct xfs_rtginode_ops *ops = &xfs_rtginode_ops[type];
	int			error;

	/* Nothing to load if this type isn't enabled on this filesystem. */
	if (!xfs_rtginode_enabled(rtg, type))
		return 0;

	if (!xfs_has_rtgroups(mp)) {
		/*
		 * Pre-rtgroups filesystems keep the single bitmap and summary
		 * inode numbers directly in the superblock.
		 */
		xfs_ino_t	ino;

		switch (type) {
		case XFS_RTGI_BITMAP:
			ino = mp->m_sb.sb_rbmino;
			break;
		case XFS_RTGI_SUMMARY:
			ino = mp->m_sb.sb_rsumino;
			break;
		default:
			/* None of the other types exist on !rtgroups */
			return 0;
		}

		error = xfs_trans_metafile_iget(tp, ino, ops->metafile_type,
				&ip);
	} else {
		/* rtgroups filesystems look the inode up by metadir path. */
		const char	*path;

		if (!mp->m_rtdirip) {
			xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
			return -EFSCORRUPTED;
		}

		path = xfs_rtginode_path(rtg_rgno(rtg), type);
		if (!path)
			return -ENOMEM;
		error = xfs_metadir_load(tp, mp->m_rtdirip, path,
				ops->metafile_type, &ip);
		kfree(path);
	}

	if (error) {
		/* Propagate corruption into the per-type health state. */
		if (xfs_metadata_is_sick(error))
			xfs_rtginode_mark_sick(rtg, type);
		return error;
	}

	/* The data fork must be in extents or btree format. */
	if (XFS_IS_CORRUPT(mp, ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
			   ip->i_df.if_format != XFS_DINODE_FMT_BTREE)) {
		xfs_irele(ip);
		xfs_rtginode_mark_sick(rtg, type);
		return -EFSCORRUPTED;
	}

	/* The project id encodes the owning rtgroup; mismatch is corruption. */
	if (XFS_IS_CORRUPT(mp, ip->i_projid != rtg_rgno(rtg))) {
		xfs_irele(ip);
		xfs_rtginode_mark_sick(rtg, type);
		return -EFSCORRUPTED;
	}

	xfs_rtginode_lockdep_setup(ip, rtg_rgno(rtg), type);
	rtg->rtg_inodes[type] = ip;
	return 0;
}
455
456 /* Release an rtgroup metadata inode. */
457 void
xfs_rtginode_irele(struct xfs_inode ** ipp)458 xfs_rtginode_irele(
459 struct xfs_inode **ipp)
460 {
461 if (*ipp)
462 xfs_irele(*ipp);
463 *ipp = NULL;
464 }
465
/* Create an rtgroup metadata inode and link it into the metadata directory. */
int
xfs_rtginode_create(
	struct xfs_rtgroup	*rtg,
	enum xfs_rtg_inodes	type,
	bool			init)
{
	const struct xfs_rtginode_ops *ops = &xfs_rtginode_ops[type];
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_metadir_update upd = {
		.dp		= mp->m_rtdirip,
		.metafile_type	= ops->metafile_type,
	};
	int			error;

	/* Nothing to create if this type isn't enabled on this filesystem. */
	if (!xfs_rtginode_enabled(rtg, type))
		return 0;

	/* The rtgroups metadir parent must already have been loaded. */
	if (!mp->m_rtdirip) {
		xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
		return -EFSCORRUPTED;
	}

	upd.path = xfs_rtginode_path(rtg_rgno(rtg), type);
	if (!upd.path)
		return -ENOMEM;

	error = xfs_metadir_start_create(&upd);
	if (error)
		goto out_path;

	error = xfs_metadir_create(&upd, S_IFREG);
	if (error)
		goto out_cancel;

	xfs_rtginode_lockdep_setup(upd.ip, rtg_rgno(rtg), type);

	/* Stash the owning rtgroup number in the project id. */
	upd.ip->i_projid = rtg_rgno(rtg);
	/* Type-specific initialization of the new inode's contents. */
	error = ops->create(rtg, upd.ip, upd.tp, init);
	if (error)
		goto out_cancel;

	error = xfs_metadir_commit(&upd);
	if (error)
		goto out_path;

	kfree(upd.path);
	xfs_finish_inode_setup(upd.ip);
	rtg->rtg_inodes[type] = upd.ip;
	return 0;

out_cancel:
	xfs_metadir_cancel(&upd, error);
	/* Have to finish setting up the inode to ensure it's deleted. */
	if (upd.ip) {
		xfs_finish_inode_setup(upd.ip);
		xfs_irele(upd.ip);
	}
out_path:
	kfree(upd.path);
	return error;
}
528
/* Create the parent directory for all rtgroup inodes and load it. */
int
xfs_rtginode_mkdir_parent(
	struct xfs_mount	*mp)
{
	/* The metadata directory root must already exist. */
	if (!mp->m_metadirip) {
		xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
		return -EFSCORRUPTED;
	}

	return xfs_metadir_mkdir(mp->m_metadirip, "rtgroups", &mp->m_rtdirip);
}
541
/* Load the parent directory of all rtgroup inodes. */
int
xfs_rtginode_load_parent(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	/* The metadata directory root must already exist. */
	if (!mp->m_metadirip) {
		xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
		return -EFSCORRUPTED;
	}

	return xfs_metadir_load(tp, mp->m_metadirip, "rtgroups",
			XFS_METAFILE_DIR, &mp->m_rtdirip);
}
557
/* Check superblock fields for a read or a write. */
static xfs_failaddr_t
xfs_rtsb_verify_common(
	struct xfs_buf		*bp)
{
	struct xfs_rtsb		*rsb = bp->b_addr;

	if (!xfs_verify_magic(bp, rsb->rsb_magicnum))
		return __this_address;
	/* The pad field must be zero on disk. */
	if (rsb->rsb_pad)
		return __this_address;

	/* Everything to the end of the fs block must be zero */
	if (memchr_inv(rsb + 1, 0, BBTOB(bp->b_length) - sizeof(*rsb)))
		return __this_address;

	return NULL;
}
576
/* Check superblock fields for a read or revalidation. */
static inline xfs_failaddr_t
xfs_rtsb_verify_all(
	struct xfs_buf		*bp)
{
	struct xfs_rtsb		*rsb = bp->b_addr;
	struct xfs_mount	*mp = bp->b_mount;
	xfs_failaddr_t		fa;

	fa = xfs_rtsb_verify_common(bp);
	if (fa)
		return fa;

	/* Label and uuids must match the primary superblock. */
	if (memcmp(&rsb->rsb_fname, &mp->m_sb.sb_fname, XFSLABEL_MAX))
		return __this_address;
	if (!uuid_equal(&rsb->rsb_uuid, &mp->m_sb.sb_uuid))
		return __this_address;
	if (!uuid_equal(&rsb->rsb_meta_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;

	return NULL;
}
599
/* Read verifier for the realtime superblock buffer. */
static void
xfs_rtsb_read_verify(
	struct xfs_buf		*bp)
{
	xfs_failaddr_t		fa;

	/* A checksum mismatch is reported as -EFSBADCRC, not -EFSCORRUPTED. */
	if (!xfs_buf_verify_cksum(bp, XFS_RTSB_CRC_OFF)) {
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
		return;
	}

	fa = xfs_rtsb_verify_all(bp);
	if (fa)
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
}
615
/* Write verifier for the realtime superblock buffer. */
static void
xfs_rtsb_write_verify(
	struct xfs_buf		*bp)
{
	xfs_failaddr_t		fa;

	fa = xfs_rtsb_verify_common(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	/* Recompute the checksum now that the contents are final. */
	xfs_buf_update_cksum(bp, XFS_RTSB_CRC_OFF);
}
630
/* Buffer ops for the realtime superblock. */
const struct xfs_buf_ops xfs_rtsb_buf_ops = {
	.name = "xfs_rtsb",
	.magic = { 0, cpu_to_be32(XFS_RTSB_MAGIC) },
	.verify_read = xfs_rtsb_read_verify,
	.verify_write = xfs_rtsb_write_verify,
	.verify_struct = xfs_rtsb_verify_all,
};
638
/* Update a realtime superblock from the primary fs super */
void
xfs_update_rtsb(
	struct xfs_buf		*rtsb_bp,
	const struct xfs_buf	*sb_bp)
{
	const struct xfs_dsb	*dsb = sb_bp->b_addr;
	struct xfs_rtsb		*rsb = rtsb_bp->b_addr;
	const uuid_t		*meta_uuid;

	rsb->rsb_magicnum = cpu_to_be32(XFS_RTSB_MAGIC);

	/* Padding must be zero so the verifiers accept this block. */
	rsb->rsb_pad = 0;
	memcpy(&rsb->rsb_fname, &dsb->sb_fname, XFSLABEL_MAX);

	memcpy(&rsb->rsb_uuid, &dsb->sb_uuid, sizeof(rsb->rsb_uuid));

	/*
	 * The metadata uuid is the fs uuid if the metauuid feature is not
	 * enabled.
	 */
	if (dsb->sb_features_incompat &
	    cpu_to_be32(XFS_SB_FEAT_INCOMPAT_META_UUID))
		meta_uuid = &dsb->sb_meta_uuid;
	else
		meta_uuid = &dsb->sb_uuid;
	memcpy(&rsb->rsb_meta_uuid, meta_uuid, sizeof(rsb->rsb_meta_uuid));
}
667
/*
 * Update the realtime superblock from a filesystem superblock and log it to
 * the given transaction.  Returns the rt superblock buffer, or NULL if the
 * filesystem has no rt superblock to update.
 */
struct xfs_buf *
xfs_log_rtsb(
	struct xfs_trans	*tp,
	const struct xfs_buf	*sb_bp)
{
	struct xfs_buf		*rtsb_bp;

	if (!xfs_has_rtsb(tp->t_mountp))
		return NULL;

	rtsb_bp = xfs_trans_getrtsb(tp);
	if (!rtsb_bp) {
		/*
		 * It's possible for the rtgroups feature to be enabled but
		 * there is no incore rt superblock buffer if the rt geometry
		 * was specified at mkfs time but the rt section has not yet
		 * been attached. In this case, rblocks must be zero.
		 */
		ASSERT(tp->t_mountp->m_sb.sb_rblocks == 0);
		return NULL;
	}

	xfs_update_rtsb(rtsb_bp, sb_bp);
	xfs_trans_ordered_buf(tp, rtsb_bp);
	return rtsb_bp;
}
698