1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (c) 2022-2024 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <djwong@kernel.org>
5 */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_trans_resv.h"
11 #include "xfs_bit.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_btree.h"
15 #include "xfs_alloc_btree.h"
16 #include "xfs_rmap_btree.h"
17 #include "xfs_alloc.h"
18 #include "xfs_ialloc.h"
19 #include "xfs_rmap.h"
20 #include "xfs_ag.h"
21 #include "xfs_ag_resv.h"
22 #include "xfs_health.h"
23 #include "xfs_error.h"
24 #include "xfs_bmap.h"
25 #include "xfs_defer.h"
26 #include "xfs_log_format.h"
27 #include "xfs_trans.h"
28 #include "xfs_trace.h"
29 #include "xfs_inode.h"
30 #include "xfs_icache.h"
31 #include "xfs_buf_item.h"
32 #include "xfs_rtgroup.h"
33 #include "xfs_rtbitmap.h"
34 #include "xfs_metafile.h"
35 #include "xfs_metadir.h"
36 #include "xfs_rtrmap_btree.h"
37 #include "xfs_rtrefcount_btree.h"
38
39 /* Find the first usable fsblock in this rtgroup. */
40 static inline uint32_t
xfs_rtgroup_min_block(struct xfs_mount * mp,xfs_rgnumber_t rgno)41 xfs_rtgroup_min_block(
42 struct xfs_mount *mp,
43 xfs_rgnumber_t rgno)
44 {
45 if (xfs_has_rtsb(mp) && rgno == 0)
46 return mp->m_sb.sb_rextsize;
47
48 return 0;
49 }
50
51 /* Compute the number of rt extents in this realtime group. */
52 static xfs_rtxnum_t
__xfs_rtgroup_extents(struct xfs_mount * mp,xfs_rgnumber_t rgno,xfs_rgnumber_t rgcount,xfs_rtbxlen_t rextents)53 __xfs_rtgroup_extents(
54 struct xfs_mount *mp,
55 xfs_rgnumber_t rgno,
56 xfs_rgnumber_t rgcount,
57 xfs_rtbxlen_t rextents)
58 {
59 ASSERT(rgno < rgcount);
60 if (rgno == rgcount - 1)
61 return rextents - ((xfs_rtxnum_t)rgno * mp->m_sb.sb_rgextents);
62
63 ASSERT(xfs_has_rtgroups(mp));
64 return mp->m_sb.sb_rgextents;
65 }
66
/*
 * Return the number of rt extents in rtgroup @rgno, computed from the
 * mounted filesystem's current geometry.
 */
xfs_rtxnum_t
xfs_rtgroup_extents(
	struct xfs_mount	*mp,
	xfs_rgnumber_t		rgno)
{
	return __xfs_rtgroup_extents(mp, rgno, mp->m_sb.sb_rgcount,
			mp->m_sb.sb_rextents);
}
75
/*
 * Precompute this group's geometry: its extent count, its length in
 * fs blocks, and the first usable block number within the group.
 */
void
xfs_rtgroup_calc_geometry(
	struct xfs_mount	*mp,
	struct xfs_rtgroup	*rtg,
	xfs_rgnumber_t		rgno,
	xfs_rgnumber_t		rgcount,
	xfs_rtbxlen_t		rextents)
{
	rtg->rtg_extents = __xfs_rtgroup_extents(mp, rgno, rgcount, rextents);
	/* Block count is extents scaled by the rt extent size. */
	rtg_group(rtg)->xg_block_count =
		rtg->rtg_extents * mp->m_sb.sb_rextsize;
	rtg_group(rtg)->xg_min_gbno = xfs_rtgroup_min_block(mp, rgno);
}
90
91 int
xfs_rtgroup_alloc(struct xfs_mount * mp,xfs_rgnumber_t rgno,xfs_rgnumber_t rgcount,xfs_rtbxlen_t rextents)92 xfs_rtgroup_alloc(
93 struct xfs_mount *mp,
94 xfs_rgnumber_t rgno,
95 xfs_rgnumber_t rgcount,
96 xfs_rtbxlen_t rextents)
97 {
98 struct xfs_rtgroup *rtg;
99 int error;
100
101 rtg = kzalloc(sizeof(struct xfs_rtgroup), GFP_KERNEL);
102 if (!rtg)
103 return -ENOMEM;
104
105 xfs_rtgroup_calc_geometry(mp, rtg, rgno, rgcount, rextents);
106
107 error = xfs_group_insert(mp, rtg_group(rtg), rgno, XG_TYPE_RTG);
108 if (error)
109 goto out_free_rtg;
110 return 0;
111
112 out_free_rtg:
113 kfree(rtg);
114 return error;
115 }
116
/* Remove one incore rtgroup object from the group radix tree. */
void
xfs_rtgroup_free(
	struct xfs_mount	*mp,
	xfs_rgnumber_t		rgno)
{
	xfs_group_free(mp, rgno, XG_TYPE_RTG, NULL);
}
124
125 /* Free a range of incore rtgroup objects. */
126 void
xfs_free_rtgroups(struct xfs_mount * mp,xfs_rgnumber_t first_rgno,xfs_rgnumber_t end_rgno)127 xfs_free_rtgroups(
128 struct xfs_mount *mp,
129 xfs_rgnumber_t first_rgno,
130 xfs_rgnumber_t end_rgno)
131 {
132 xfs_rgnumber_t rgno;
133
134 for (rgno = first_rgno; rgno < end_rgno; rgno++)
135 xfs_rtgroup_free(mp, rgno);
136 }
137
138 /* Initialize some range of incore rtgroup objects. */
139 int
xfs_initialize_rtgroups(struct xfs_mount * mp,xfs_rgnumber_t first_rgno,xfs_rgnumber_t end_rgno,xfs_rtbxlen_t rextents)140 xfs_initialize_rtgroups(
141 struct xfs_mount *mp,
142 xfs_rgnumber_t first_rgno,
143 xfs_rgnumber_t end_rgno,
144 xfs_rtbxlen_t rextents)
145 {
146 xfs_rgnumber_t index;
147 int error;
148
149 if (first_rgno >= end_rgno)
150 return 0;
151
152 for (index = first_rgno; index < end_rgno; index++) {
153 error = xfs_rtgroup_alloc(mp, index, end_rgno, rextents);
154 if (error)
155 goto out_unwind_new_rtgs;
156 }
157
158 return 0;
159
160 out_unwind_new_rtgs:
161 xfs_free_rtgroups(mp, first_rgno, index);
162 return error;
163 }
164
/*
 * Update the rt extent count of the previous tail rtgroup if it changed during
 * recovery (i.e. recovery of a growfs).
 *
 * Returns -EFSCORRUPTED if the old tail group cannot be grabbed.
 */
int
xfs_update_last_rtgroup_size(
	struct xfs_mount	*mp,
	xfs_rgnumber_t		prev_rgcount)
{
	struct xfs_rtgroup	*rtg;

	ASSERT(prev_rgcount > 0);

	/* The old tail group must still exist after growfs. */
	rtg = xfs_rtgroup_grab(mp, prev_rgcount - 1);
	if (!rtg)
		return -EFSCORRUPTED;
	/* Recompute the extent count against the new global geometry. */
	rtg->rtg_extents = __xfs_rtgroup_extents(mp, prev_rgcount - 1,
			mp->m_sb.sb_rgcount, mp->m_sb.sb_rextents);
	rtg_group(rtg)->xg_block_count = rtg->rtg_extents * mp->m_sb.sb_rextsize;
	xfs_rtgroup_rele(rtg);
	return 0;
}
187
/*
 * Lock metadata inodes associated with this rt group.  The locking order is
 * bitmap, summary, rmap, refcount; xfs_rtgroup_unlock() must release in the
 * reverse order.
 */
void
xfs_rtgroup_lock(
	struct xfs_rtgroup	*rtg,
	unsigned int		rtglock_flags)
{
	ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
	/* Exclusive and shared bitmap locking are mutually exclusive. */
	ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
	       !(rtglock_flags & XFS_RTGLOCK_BITMAP));

	/* Zoned filesystems have no rtbitmap/rtsummary inodes to lock. */
	if (!xfs_has_zoned(rtg_mount(rtg))) {
		if (rtglock_flags & XFS_RTGLOCK_BITMAP) {
			/*
			 * Lock both realtime free space metadata inodes for a
			 * freespace update.
			 */
			xfs_ilock(rtg_bitmap(rtg), XFS_ILOCK_EXCL);
			xfs_ilock(rtg_summary(rtg), XFS_ILOCK_EXCL);
		} else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) {
			xfs_ilock(rtg_bitmap(rtg), XFS_ILOCK_SHARED);
		}
	}

	/* rmap and refcount inodes only exist on some configurations. */
	if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg))
		xfs_ilock(rtg_rmap(rtg), XFS_ILOCK_EXCL);

	if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg_refcount(rtg))
		xfs_ilock(rtg_refcount(rtg), XFS_ILOCK_EXCL);
}
217
/*
 * Unlock metadata inodes associated with this rt group, in the reverse of
 * the order taken by xfs_rtgroup_lock().
 */
void
xfs_rtgroup_unlock(
	struct xfs_rtgroup	*rtg,
	unsigned int		rtglock_flags)
{
	ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
	/* Exclusive and shared bitmap locking are mutually exclusive. */
	ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) ||
	       !(rtglock_flags & XFS_RTGLOCK_BITMAP));

	if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg_refcount(rtg))
		xfs_iunlock(rtg_refcount(rtg), XFS_ILOCK_EXCL);

	if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg))
		xfs_iunlock(rtg_rmap(rtg), XFS_ILOCK_EXCL);

	/* Zoned filesystems have no rtbitmap/rtsummary inodes to unlock. */
	if (!xfs_has_zoned(rtg_mount(rtg))) {
		if (rtglock_flags & XFS_RTGLOCK_BITMAP) {
			xfs_iunlock(rtg_summary(rtg), XFS_ILOCK_EXCL);
			xfs_iunlock(rtg_bitmap(rtg), XFS_ILOCK_EXCL);
		} else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) {
			xfs_iunlock(rtg_bitmap(rtg), XFS_ILOCK_SHARED);
		}
	}
}
243
/*
 * Join realtime group metadata inodes to the transaction.  The ILOCKs will be
 * released on transaction commit.  The caller must already hold the locks
 * exclusively; shared bitmap locking cannot be joined to a transaction.
 */
void
xfs_rtgroup_trans_join(
	struct xfs_trans	*tp,
	struct xfs_rtgroup	*rtg,
	unsigned int		rtglock_flags)
{
	ASSERT(!(rtglock_flags & ~XFS_RTGLOCK_ALL_FLAGS));
	ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED));

	/* Zoned filesystems have no rtbitmap/rtsummary inodes to join. */
	if (!xfs_has_zoned(rtg_mount(rtg)) &&
	    (rtglock_flags & XFS_RTGLOCK_BITMAP)) {
		xfs_trans_ijoin(tp, rtg_bitmap(rtg), XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, rtg_summary(rtg), XFS_ILOCK_EXCL);
	}

	/* rmap and refcount inodes only exist on some configurations. */
	if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg))
		xfs_trans_ijoin(tp, rtg_rmap(rtg), XFS_ILOCK_EXCL);

	if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg_refcount(rtg))
		xfs_trans_ijoin(tp, rtg_refcount(rtg), XFS_ILOCK_EXCL);
}
269
270 /* Retrieve rt group geometry. */
271 int
xfs_rtgroup_get_geometry(struct xfs_rtgroup * rtg,struct xfs_rtgroup_geometry * rgeo)272 xfs_rtgroup_get_geometry(
273 struct xfs_rtgroup *rtg,
274 struct xfs_rtgroup_geometry *rgeo)
275 {
276 /* Fill out form. */
277 memset(rgeo, 0, sizeof(*rgeo));
278 rgeo->rg_number = rtg_rgno(rtg);
279 rgeo->rg_length = rtg_blocks(rtg);
280 xfs_rtgroup_geom_health(rtg, rgeo);
281 return 0;
282 }
283
284 #ifdef CONFIG_PROVE_LOCKING
285 static struct lock_class_key xfs_rtginode_lock_class;
286
287 static int
xfs_rtginode_ilock_cmp_fn(const struct lockdep_map * m1,const struct lockdep_map * m2)288 xfs_rtginode_ilock_cmp_fn(
289 const struct lockdep_map *m1,
290 const struct lockdep_map *m2)
291 {
292 const struct xfs_inode *ip1 =
293 container_of(m1, struct xfs_inode, i_lock.dep_map);
294 const struct xfs_inode *ip2 =
295 container_of(m2, struct xfs_inode, i_lock.dep_map);
296
297 if (ip1->i_projid < ip2->i_projid)
298 return -1;
299 if (ip1->i_projid > ip2->i_projid)
300 return 1;
301 return 0;
302 }
303
/* Print the rtgroup number and metafile type of a held ILOCK for lockdep. */
static inline void
xfs_rtginode_ilock_print_fn(
	const struct lockdep_map	*m)
{
	const struct xfs_inode	*ip =
		container_of(m, struct xfs_inode, i_lock.dep_map);

	/* rtgroup inodes store their group number in i_projid. */
	printk(KERN_CONT " rgno=%u metatype=%s", ip->i_projid,
			xfs_metafile_type_str(ip->i_metatype));
}
314
/*
 * Most of the time each of the RTG inode locks are only taken one at a time.
 * But when committing deferred ops, more than one of a kind can be taken.
 * However, deferred rt ops will be committed in rgno order so there is no
 * potential for deadlocks.  The code here is needed to tell lockdep about this
 * order.
 */
static inline void
xfs_rtginode_lockdep_setup(
	struct xfs_inode	*ip,
	xfs_rgnumber_t		rgno,
	enum xfs_rtg_inodes	type)
{
	/* One lockdep subclass per inode type so types never false-positive. */
	lockdep_set_class_and_subclass(&ip->i_lock, &xfs_rtginode_lock_class,
			type);
	lock_set_cmp_fn(&ip->i_lock, xfs_rtginode_ilock_cmp_fn,
			xfs_rtginode_ilock_print_fn);
}
333 #else
334 #define xfs_rtginode_lockdep_setup(ip, rgno, type) do { } while (0)
335 #endif /* CONFIG_PROVE_LOCKING */
336
/* Per-type dispatch information for an rtgroup metadata inode. */
struct xfs_rtginode_ops {
	const char		*name;	/* short name */

	enum xfs_metafile_type	metafile_type;

	unsigned int		sick;	/* rtgroup sickness flag */

	unsigned int		fmt_mask; /* all valid data fork formats */

	/* Does the fs have this feature?  NULL means always enabled. */
	bool			(*enabled)(const struct xfs_mount *mp);

	/* Create this rtgroup metadata inode and initialize it. */
	int			(*create)(struct xfs_rtgroup *rtg,
					  struct xfs_inode *ip,
					  struct xfs_trans *tp,
					  bool init);
};
355
/* Dispatch table for each rtgroup inode type, indexed by enum xfs_rtg_inodes. */
static const struct xfs_rtginode_ops xfs_rtginode_ops[XFS_RTGI_MAX] = {
	[XFS_RTGI_BITMAP] = {
		.name		= "bitmap",
		.metafile_type	= XFS_METAFILE_RTBITMAP,
		.sick		= XFS_SICK_RG_BITMAP,
		.fmt_mask	= (1U << XFS_DINODE_FMT_EXTENTS) |
				  (1U << XFS_DINODE_FMT_BTREE),
		.enabled	= xfs_has_nonzoned,
		.create		= xfs_rtbitmap_create,
	},
	[XFS_RTGI_SUMMARY] = {
		.name		= "summary",
		.metafile_type	= XFS_METAFILE_RTSUMMARY,
		.sick		= XFS_SICK_RG_SUMMARY,
		.fmt_mask	= (1U << XFS_DINODE_FMT_EXTENTS) |
				  (1U << XFS_DINODE_FMT_BTREE),
		.enabled	= xfs_has_nonzoned,
		.create		= xfs_rtsummary_create,
	},
	[XFS_RTGI_RMAP] = {
		.name		= "rmap",
		.metafile_type	= XFS_METAFILE_RTRMAP,
		.sick		= XFS_SICK_RG_RMAPBT,
		.fmt_mask	= 1U << XFS_DINODE_FMT_META_BTREE,
		/*
		 * growfs must create the rtrmap inodes before adding a
		 * realtime volume to the filesystem, so we cannot use the
		 * rtrmapbt predicate here.
		 */
		.enabled	= xfs_has_rmapbt,
		.create		= xfs_rtrmapbt_create,
	},
	[XFS_RTGI_REFCOUNT] = {
		.name		= "refcount",
		.metafile_type	= XFS_METAFILE_RTREFCOUNT,
		.sick		= XFS_SICK_RG_REFCNTBT,
		.fmt_mask	= 1U << XFS_DINODE_FMT_META_BTREE,
		/* same comment about growfs and rmap inodes applies here */
		.enabled	= xfs_has_reflink,
		.create		= xfs_rtrefcountbt_create,
	},
};
398
/* Return the shortname of this rtgroup inode. */
const char *
xfs_rtginode_name(
	enum xfs_rtg_inodes	type)
{
	return xfs_rtginode_ops[type].name;
}
406
/* Return the metafile type of this rtgroup inode. */
enum xfs_metafile_type
xfs_rtginode_metafile_type(
	enum xfs_rtg_inodes	type)
{
	return xfs_rtginode_ops[type].metafile_type;
}
414
415 /* Should this rtgroup inode be present? */
416 bool
xfs_rtginode_enabled(struct xfs_rtgroup * rtg,enum xfs_rtg_inodes type)417 xfs_rtginode_enabled(
418 struct xfs_rtgroup *rtg,
419 enum xfs_rtg_inodes type)
420 {
421 const struct xfs_rtginode_ops *ops = &xfs_rtginode_ops[type];
422
423 if (!ops->enabled)
424 return true;
425 return ops->enabled(rtg_mount(rtg));
426 }
427
428 /* Mark an rtgroup inode sick */
429 void
xfs_rtginode_mark_sick(struct xfs_rtgroup * rtg,enum xfs_rtg_inodes type)430 xfs_rtginode_mark_sick(
431 struct xfs_rtgroup *rtg,
432 enum xfs_rtg_inodes type)
433 {
434 const struct xfs_rtginode_ops *ops = &xfs_rtginode_ops[type];
435
436 xfs_group_mark_sick(rtg_group(rtg), ops->sick);
437 }
438
/* Load an existing rtgroup inode into the rtgroup structure. */
int
xfs_rtginode_load(
	struct xfs_rtgroup	*rtg,
	enum xfs_rtg_inodes	type,
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip;
	const struct xfs_rtginode_ops *ops = &xfs_rtginode_ops[type];
	int			error;

	/* Nothing to load for types the filesystem doesn't have. */
	if (!xfs_rtginode_enabled(rtg, type))
		return 0;

	if (!xfs_has_rtgroups(mp)) {
		xfs_ino_t	ino;

		/*
		 * Pre-rtgroups filesystems have a single bitmap and summary
		 * inode whose numbers live in the superblock.
		 */
		switch (type) {
		case XFS_RTGI_BITMAP:
			ino = mp->m_sb.sb_rbmino;
			break;
		case XFS_RTGI_SUMMARY:
			ino = mp->m_sb.sb_rsumino;
			break;
		default:
			/* None of the other types exist on !rtgroups */
			return 0;
		}

		error = xfs_trans_metafile_iget(tp, ino, ops->metafile_type,
				&ip);
	} else {
		const char	*path;

		/* rtgroup inodes live under the metadata directory tree. */
		if (!mp->m_rtdirip) {
			xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
			return -EFSCORRUPTED;
		}

		path = xfs_rtginode_path(rtg_rgno(rtg), type);
		if (!path)
			return -ENOMEM;
		error = xfs_metadir_load(tp, mp->m_rtdirip, path,
				ops->metafile_type, &ip);
		kfree(path);
	}

	if (error) {
		if (xfs_metadata_is_sick(error))
			xfs_rtginode_mark_sick(rtg, type);
		return error;
	}

	/* The data fork format must be valid for this inode type. */
	if (XFS_IS_CORRUPT(mp, !((1U << ip->i_df.if_format) & ops->fmt_mask))) {
		xfs_irele(ip);
		xfs_rtginode_mark_sick(rtg, type);
		return -EFSCORRUPTED;
	}

	/* rtgroup inodes record their owning group number in i_projid. */
	if (XFS_IS_CORRUPT(mp, ip->i_projid != rtg_rgno(rtg))) {
		xfs_irele(ip);
		xfs_rtginode_mark_sick(rtg, type);
		return -EFSCORRUPTED;
	}

	xfs_rtginode_lockdep_setup(ip, rtg_rgno(rtg), type);
	rtg->rtg_inodes[type] = ip;
	return 0;
}
509
510 /* Release an rtgroup metadata inode. */
511 void
xfs_rtginode_irele(struct xfs_inode ** ipp)512 xfs_rtginode_irele(
513 struct xfs_inode **ipp)
514 {
515 if (*ipp)
516 xfs_irele(*ipp);
517 *ipp = NULL;
518 }
519
/* Create a metadata inode of the given type for a realtime group. */
int
xfs_rtginode_create(
	struct xfs_rtgroup	*rtg,
	enum xfs_rtg_inodes	type,
	bool			init)
{
	const struct xfs_rtginode_ops *ops = &xfs_rtginode_ops[type];
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_metadir_update upd = {
		.dp		= mp->m_rtdirip,
		.metafile_type	= ops->metafile_type,
	};
	int			error;

	/* Nothing to create for types the filesystem doesn't have. */
	if (!xfs_rtginode_enabled(rtg, type))
		return 0;

	if (!mp->m_rtdirip) {
		xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
		return -EFSCORRUPTED;
	}

	upd.path = xfs_rtginode_path(rtg_rgno(rtg), type);
	if (!upd.path)
		return -ENOMEM;

	error = xfs_metadir_start_create(&upd);
	if (error)
		goto out_path;

	error = xfs_metadir_create(&upd, S_IFREG);
	if (error)
		goto out_cancel;

	xfs_rtginode_lockdep_setup(upd.ip, rtg_rgno(rtg), type);

	/* rtgroup inodes record their owning group number in i_projid. */
	upd.ip->i_projid = rtg_rgno(rtg);
	error = ops->create(rtg, upd.ip, upd.tp, init);
	if (error)
		goto out_cancel;

	error = xfs_metadir_commit(&upd);
	if (error)
		goto out_path;

	kfree(upd.path);
	xfs_finish_inode_setup(upd.ip);
	rtg->rtg_inodes[type] = upd.ip;
	return 0;

out_cancel:
	xfs_metadir_cancel(&upd, error);
	/* Have to finish setting up the inode to ensure it's deleted. */
	if (upd.ip) {
		xfs_finish_inode_setup(upd.ip);
		xfs_irele(upd.ip);
	}
out_path:
	kfree(upd.path);
	return error;
}
582
583 /* Create the parent directory for all rtgroup inodes and load it. */
584 int
xfs_rtginode_mkdir_parent(struct xfs_mount * mp)585 xfs_rtginode_mkdir_parent(
586 struct xfs_mount *mp)
587 {
588 if (!mp->m_metadirip) {
589 xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
590 return -EFSCORRUPTED;
591 }
592
593 return xfs_metadir_mkdir(mp->m_metadirip, "rtgroups", &mp->m_rtdirip);
594 }
595
/* Load the parent directory of all rtgroup inodes. */
int
xfs_rtginode_load_parent(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	/* The metadir root must already be loaded. */
	if (!mp->m_metadirip) {
		xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
		return -EFSCORRUPTED;
	}

	return xfs_metadir_load(tp, mp->m_metadirip, "rtgroups",
			XFS_METAFILE_DIR, &mp->m_rtdirip);
}
611
/* Check superblock fields for a read or a write. */
static xfs_failaddr_t
xfs_rtsb_verify_common(
	struct xfs_buf		*bp)
{
	struct xfs_rtsb		*rsb = bp->b_addr;

	if (!xfs_verify_magic(bp, rsb->rsb_magicnum))
		return __this_address;
	/* Padding must always be zero. */
	if (rsb->rsb_pad)
		return __this_address;

	/* Everything to the end of the fs block must be zero */
	if (memchr_inv(rsb + 1, 0, BBTOB(bp->b_length) - sizeof(*rsb)))
		return __this_address;

	return NULL;
}
630
/*
 * Check superblock fields for a read or revalidation.  In addition to the
 * common checks, the label and uuids must match the primary superblock.
 */
static inline xfs_failaddr_t
xfs_rtsb_verify_all(
	struct xfs_buf		*bp)
{
	struct xfs_rtsb		*rsb = bp->b_addr;
	struct xfs_mount	*mp = bp->b_mount;
	xfs_failaddr_t		fa;

	fa = xfs_rtsb_verify_common(bp);
	if (fa)
		return fa;

	if (memcmp(&rsb->rsb_fname, &mp->m_sb.sb_fname, XFSLABEL_MAX))
		return __this_address;
	if (!uuid_equal(&rsb->rsb_uuid, &mp->m_sb.sb_uuid))
		return __this_address;
	if (!uuid_equal(&rsb->rsb_meta_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;

	return NULL;
}
653
/* Read verifier: CRC first, then full structure checks. */
static void
xfs_rtsb_read_verify(
	struct xfs_buf		*bp)
{
	xfs_failaddr_t		fa;

	if (!xfs_buf_verify_cksum(bp, XFS_RTSB_CRC_OFF)) {
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
		return;
	}

	fa = xfs_rtsb_verify_all(bp);
	if (fa)
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
}
669
/* Write verifier: structure checks, then recompute the CRC before I/O. */
static void
xfs_rtsb_write_verify(
	struct xfs_buf		*bp)
{
	xfs_failaddr_t		fa;

	fa = xfs_rtsb_verify_common(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	xfs_buf_update_cksum(bp, XFS_RTSB_CRC_OFF);
}
684
/* Buffer ops for the realtime superblock. */
const struct xfs_buf_ops xfs_rtsb_buf_ops = {
	.name		= "xfs_rtsb",
	.magic		= { 0, cpu_to_be32(XFS_RTSB_MAGIC) },
	.verify_read	= xfs_rtsb_read_verify,
	.verify_write	= xfs_rtsb_write_verify,
	.verify_struct	= xfs_rtsb_verify_all,
};
692
/* Update a realtime superblock from the primary fs super */
void
xfs_update_rtsb(
	struct xfs_buf		*rtsb_bp,
	const struct xfs_buf	*sb_bp)
{
	const struct xfs_dsb	*dsb = sb_bp->b_addr;
	struct xfs_rtsb		*rsb = rtsb_bp->b_addr;
	const uuid_t		*meta_uuid;

	rsb->rsb_magicnum = cpu_to_be32(XFS_RTSB_MAGIC);

	/* The verifiers require the pad field to be zero. */
	rsb->rsb_pad = 0;
	memcpy(&rsb->rsb_fname, &dsb->sb_fname, XFSLABEL_MAX);

	memcpy(&rsb->rsb_uuid, &dsb->sb_uuid, sizeof(rsb->rsb_uuid));

	/*
	 * The metadata uuid is the fs uuid if the metauuid feature is not
	 * enabled.
	 */
	if (dsb->sb_features_incompat &
	    cpu_to_be32(XFS_SB_FEAT_INCOMPAT_META_UUID))
		meta_uuid = &dsb->sb_meta_uuid;
	else
		meta_uuid = &dsb->sb_uuid;
	memcpy(&rsb->rsb_meta_uuid, meta_uuid, sizeof(rsb->rsb_meta_uuid));
}
721
/*
 * Update the realtime superblock from a filesystem superblock and log it to
 * the given transaction.
 *
 * Returns the rt superblock buffer (joined to @tp as an ordered buffer), or
 * NULL if there is no rt superblock to log.
 */
struct xfs_buf *
xfs_log_rtsb(
	struct xfs_trans	*tp,
	const struct xfs_buf	*sb_bp)
{
	struct xfs_buf		*rtsb_bp;

	if (!xfs_has_rtsb(tp->t_mountp))
		return NULL;

	rtsb_bp = xfs_trans_getrtsb(tp);
	if (!rtsb_bp) {
		/*
		 * It's possible for the rtgroups feature to be enabled but
		 * there is no incore rt superblock buffer if the rt geometry
		 * was specified at mkfs time but the rt section has not yet
		 * been attached. In this case, rblocks must be zero.
		 */
		ASSERT(tp->t_mountp->m_sb.sb_rblocks == 0);
		return NULL;
	}

	xfs_update_rtsb(rtsb_bp, sb_bp);
	/* Ordered buffers are written at commit without logging contents. */
	xfs_trans_ordered_buf(tp, rtsb_bp);
	return rtsb_bp;
}
752