// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023-2025 Christoph Hellwig.
 * Copyright (c) 2024-2025, Western Digital Corporation or its affiliates.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_error.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_iomap.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_refcount.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_zone_alloc.h"
#include "xfs_zone_priv.h"
#include "xfs_zones.h"
#include "xfs_trace.h"

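/*
 * Drop a reference to an open zone.  When the last reference goes away the
 * rtgroup reference held by the open zone is dropped and the structure is
 * freed.
 */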
void
xfs_open_zone_put(
	struct xfs_open_zone	*oz)
{
	if (atomic_dec_and_test(&oz->oz_ref)) {
		xfs_rtgroup_rele(oz->oz_rtg);
		kfree(oz);
	}
}

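/*
 * Map the used block count of a zone to one of the XFS_ZONE_USED_BUCKETS
 * reclaim buckets.  The buckets group reclaimable zones by how full they
 * still are so that reclaim can cheaply find zones with few remaining used
 * blocks.
 */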
static inline uint32_t
xfs_zone_bucket(
	struct xfs_mount	*mp,
	uint32_t		used_blocks)
{
	return XFS_ZONE_USED_BUCKETS * used_blocks /
		mp->m_groups[XG_TYPE_RTG].blocks;
}

static inline void
xfs_zone_add_to_bucket(
	struct xfs_zone_info	*zi,
	xfs_rgnumber_t		rgno,
	uint32_t		to_bucket)
{
	__set_bit(rgno, zi->zi_used_bucket_bitmap[to_bucket]);
	zi->zi_used_bucket_entries[to_bucket]++;
}

static inline void
xfs_zone_remove_from_bucket(
	struct xfs_zone_info	*zi,
	xfs_rgnumber_t		rgno,
	uint32_t		from_bucket)
{
	__clear_bit(rgno, zi->zi_used_bucket_bitmap[from_bucket]);
	zi->zi_used_bucket_entries[from_bucket]--;
}

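/*
 * Update the reclaim accounting for @rtg after @freed blocks were freed:
 * queue a now empty zone for a reset, start tracking a previously full zone
 * as reclaimable, or move the zone to a different used bucket.
 */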
static void
xfs_zone_account_reclaimable(
	struct xfs_rtgroup	*rtg,
	uint32_t		freed)
{
	struct xfs_group	*xg = &rtg->rtg_group;
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_zone_info	*zi = mp->m_zone_info;
	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;
	xfs_rgnumber_t		rgno = rtg_rgno(rtg);
	uint32_t		from_bucket = xfs_zone_bucket(mp, used + freed);
	uint32_t		to_bucket = xfs_zone_bucket(mp, used);
	bool			was_full = (used + freed == rtg_blocks(rtg));

	/*
	 * This can be called from log recovery, where the zone_info structure
	 * hasn't been allocated yet.  Skip all work as xfs_mount_zones will
	 * add the zones to the right buckets before the file system becomes
	 * active.
	 */
	if (!zi)
		return;

	if (!used) {
		/*
		 * The zone is now empty, remove it from the bottom bucket and
		 * trigger a reset.
		 */
		trace_xfs_zone_emptied(rtg);

		if (!was_full)
			xfs_group_clear_mark(xg, XFS_RTG_RECLAIMABLE);

		spin_lock(&zi->zi_used_buckets_lock);
		if (!was_full)
			xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
		spin_unlock(&zi->zi_used_buckets_lock);

		spin_lock(&zi->zi_reset_list_lock);
		xg->xg_next_reset = zi->zi_reset_list;
		zi->zi_reset_list = xg;
		spin_unlock(&zi->zi_reset_list_lock);

		if (zi->zi_gc_thread)
			wake_up_process(zi->zi_gc_thread);
	} else if (was_full) {
		/*
		 * The zone transitioned from full, mark it as reclaimable and
		 * wake up GC, which might be waiting for zones to reclaim.
		 */
		spin_lock(&zi->zi_used_buckets_lock);
		xfs_zone_add_to_bucket(zi, rgno, to_bucket);
		spin_unlock(&zi->zi_used_buckets_lock);

		xfs_group_set_mark(xg, XFS_RTG_RECLAIMABLE);
		if (zi->zi_gc_thread && xfs_zoned_need_gc(mp))
			wake_up_process(zi->zi_gc_thread);
	} else if (to_bucket != from_bucket) {
		/*
		 * Move the zone to a new bucket if it dropped below the
		 * threshold.
		 */
		spin_lock(&zi->zi_used_buckets_lock);
		xfs_zone_add_to_bucket(zi, rgno, to_bucket);
		xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
		spin_unlock(&zi->zi_used_buckets_lock);
	}
}

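/*
 * Called when the write pointer of an open zone has reached the end of the
 * zone: drop the open zone tracking for the rtgroup and account any blocks
 * that were already freed again as reclaimable.
 */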
static void
xfs_open_zone_mark_full(
	struct xfs_open_zone	*oz)
{
	struct xfs_rtgroup	*rtg = oz->oz_rtg;
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_zone_info	*zi = mp->m_zone_info;
	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;

	trace_xfs_zone_full(rtg);

	WRITE_ONCE(rtg->rtg_open_zone, NULL);

	spin_lock(&zi->zi_open_zones_lock);
	if (oz->oz_is_gc) {
		ASSERT(current == zi->zi_gc_thread);
		zi->zi_open_gc_zone = NULL;
	} else {
		zi->zi_nr_open_zones--;
		list_del_init(&oz->oz_entry);
	}
	spin_unlock(&zi->zi_open_zones_lock);
	xfs_open_zone_put(oz);

	wake_up_all(&zi->zi_zone_wait);
	if (used < rtg_blocks(rtg))
		xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
}

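/*
 * Account blocks written into a zone in the rmap inode of its rtgroup.
 * Blocks that still back file data bump the used counter, while blocks that
 * do not end up backing file data are returned to the free extent counter
 * right away.  Once every block of the zone has been written the zone is
 * marked full.
 */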
static void
xfs_zone_record_blocks(
	struct xfs_trans	*tp,
	xfs_fsblock_t		fsbno,
	xfs_filblks_t		len,
	struct xfs_open_zone	*oz,
	bool			used)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_rtgroup	*rtg = oz->oz_rtg;
	struct xfs_inode	*rmapip = rtg_rmap(rtg);

	trace_xfs_zone_record_blocks(oz, xfs_rtb_to_rgbno(mp, fsbno), len);

	xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
	xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);
	if (used) {
		rmapip->i_used_blocks += len;
		ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg));
	} else {
		xfs_add_frextents(mp, len);
	}
	oz->oz_written += len;
	if (oz->oz_written == rtg_blocks(rtg))
		xfs_open_zone_mark_full(oz);
	xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
}

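/*
 * Remap the newly written blocks in @new into the data fork of @ip,
 * unmapping whatever currently backs that file range.  For GC writes that
 * raced with a regular write to the same range, the old data is kept and
 * the newly written blocks are recorded as unused instead.
 */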
static int
xfs_zoned_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*new,
	struct xfs_open_zone	*oz,
	xfs_fsblock_t		old_startblock)
{
	struct xfs_bmbt_irec	data;
	int			nmaps = 1;
	int			error;

	/* Grab the corresponding mapping in the data fork. */
	error = xfs_bmapi_read(ip, new->br_startoff, new->br_blockcount, &data,
			&nmaps, 0);
	if (error)
		return error;

	/*
	 * Cap the update to the existing extent in the data fork because we
	 * can only overwrite one extent at a time.
	 */
	ASSERT(new->br_blockcount >= data.br_blockcount);
	new->br_blockcount = data.br_blockcount;

	/*
	 * If a data write raced with this GC write, keep the existing data in
	 * the data fork, mark our newly written GC extent as reclaimable, then
	 * move on to the next extent.
	 */
	if (old_startblock != NULLFSBLOCK &&
	    old_startblock != data.br_startblock)
		goto skip;

	trace_xfs_reflink_cow_remap_from(ip, new);
	trace_xfs_reflink_cow_remap_to(ip, &data);

	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
			XFS_IEXT_REFLINK_END_COW_CNT);
	if (error)
		return error;

	if (data.br_startblock != HOLESTARTBLOCK) {
		ASSERT(data.br_startblock != DELAYSTARTBLOCK);
		ASSERT(!isnullstartblock(data.br_startblock));

		xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &data);
		if (xfs_is_reflink_inode(ip)) {
			xfs_refcount_decrease_extent(tp, true, &data);
		} else {
			error = xfs_free_extent_later(tp, data.br_startblock,
					data.br_blockcount, NULL,
					XFS_AG_RESV_NONE,
					XFS_FREE_EXTENT_REALTIME);
			if (error)
				return error;
		}
	}

	xfs_zone_record_blocks(tp, new->br_startblock, new->br_blockcount, oz,
			true);

	/* Map the new blocks into the data fork. */
	xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, new);
	return 0;

skip:
	trace_xfs_reflink_cow_remap_skip(ip, new);
	xfs_zone_record_blocks(tp, new->br_startblock, new->br_blockcount, oz,
			false);
	return 0;
}

int
xfs_zoned_end_io(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count,
	xfs_daddr_t		daddr,
	struct xfs_open_zone	*oz,
	xfs_fsblock_t		old_startblock)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	struct xfs_bmbt_irec	new = {
		.br_startoff	= XFS_B_TO_FSBT(mp, offset),
		.br_startblock	= xfs_daddr_to_rtb(mp, daddr),
		.br_state	= XFS_EXT_NORM,
	};
	unsigned int		resblks =
		XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
	struct xfs_trans	*tp;
	int			error;

	if (xfs_is_shutdown(mp))
		return -EIO;

	while (new.br_startoff < end_fsb) {
		new.br_blockcount = end_fsb - new.br_startoff;

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_RES_FDBLKS, &tp);
		if (error)
			return error;
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		error = xfs_zoned_map_extent(tp, ip, &new, oz, old_startblock);
		if (error)
			xfs_trans_cancel(tp);
		else
			error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		new.br_startoff += new.br_blockcount;
		new.br_startblock += new.br_blockcount;
		if (old_startblock != NULLFSBLOCK)
			old_startblock += new.br_blockcount;
	}

	return 0;
}

/*
 * "Free" blocks allocated in a zone.
 *
 * Just decrement the used blocks counter and report the space as freed.
 */
int
xfs_zone_free_blocks(
	struct xfs_trans	*tp,
	struct xfs_rtgroup	*rtg,
	xfs_fsblock_t		fsbno,
	xfs_filblks_t		len)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*rmapip = rtg_rmap(rtg);

	xfs_assert_ilocked(rmapip, XFS_ILOCK_EXCL);

	if (len > rmapip->i_used_blocks) {
		xfs_err(mp,
"trying to free more blocks (%lld) than used counter (%u).",
			len, rmapip->i_used_blocks);
		ASSERT(len <= rmapip->i_used_blocks);
		xfs_rtginode_mark_sick(rtg, XFS_RTGI_RMAP);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return -EFSCORRUPTED;
	}

	trace_xfs_zone_free_blocks(rtg, xfs_rtb_to_rgbno(mp, fsbno), len);

	rmapip->i_used_blocks -= len;
	/*
	 * Don't add open zones to the reclaimable buckets.  The I/O completion
	 * for writing the last block will take care of accounting for already
	 * unused blocks instead.
	 */
	if (!READ_ONCE(rtg->rtg_open_zone))
		xfs_zone_account_reclaimable(rtg, len);
	xfs_add_frextents(mp, len);
	xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
	return 0;
}

/*
 * Check if the zone containing the data just before the offset we are
 * writing to is still open and has space.
 */
static struct xfs_open_zone *
xfs_last_used_zone(
	struct iomap_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSB(mp, ioend->io_offset);
	struct xfs_rtgroup	*rtg = NULL;
	struct xfs_open_zone	*oz = NULL;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (!xfs_iext_lookup_extent_before(ip, &ip->i_df, &offset_fsb,
			&icur, &got)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return NULL;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	rtg = xfs_rtgroup_grab(mp, xfs_rtb_to_rgno(mp, got.br_startblock));
	if (!rtg)
		return NULL;

	xfs_ilock(rtg_rmap(rtg), XFS_ILOCK_SHARED);
	oz = READ_ONCE(rtg->rtg_open_zone);
	if (oz && (oz->oz_is_gc || !atomic_inc_not_zero(&oz->oz_ref)))
		oz = NULL;
	xfs_iunlock(rtg_rmap(rtg), XFS_ILOCK_SHARED);

	xfs_rtgroup_rele(rtg);
	return oz;
}

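/*
 * Find a free zone in the rtgroup xarray in the [start, end] range, claim an
 * active reference on it and clear its free mark.
 */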
static struct xfs_group *
xfs_find_free_zone(
	struct xfs_mount	*mp,
	unsigned long		start,
	unsigned long		end)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	XA_STATE		(xas, &mp->m_groups[XG_TYPE_RTG].xa, start);
	struct xfs_group	*xg;

	xas_lock(&xas);
	xas_for_each_marked(&xas, xg, end, XFS_RTG_FREE)
		if (atomic_inc_not_zero(&xg->xg_active_ref))
			goto found;
	xas_unlock(&xas);
	return NULL;

found:
	xas_clear_mark(&xas, XFS_RTG_FREE);
	atomic_dec(&zi->zi_nr_free_zones);
	zi->zi_free_zone_cursor = xg->xg_gno;
	xas_unlock(&xas);
	return xg;
}

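/*
 * Allocate and initialize a new open zone structure for @rtg and publish it
 * in rtg->rtg_open_zone.  The caller passes in the current write pointer, or
 * 0 for a freshly opened zone.
 */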
static struct xfs_open_zone *
xfs_init_open_zone(
	struct xfs_rtgroup	*rtg,
	xfs_rgblock_t		write_pointer,
	enum rw_hint		write_hint,
	bool			is_gc)
{
	struct xfs_open_zone	*oz;

	oz = kzalloc(sizeof(*oz), GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&oz->oz_alloc_lock);
	atomic_set(&oz->oz_ref, 1);
	oz->oz_rtg = rtg;
	oz->oz_write_pointer = write_pointer;
	oz->oz_written = write_pointer;
	oz->oz_write_hint = write_hint;
	oz->oz_is_gc = is_gc;

	/*
	 * All dereferences of rtg->rtg_open_zone hold the ILOCK for the rmap
	 * inode, but we don't really want to take that here because we are
	 * under the zone_list_lock.  Ensure the pointer is only set for a fully
	 * initialized open zone structure so that a racy lookup finding it is
	 * fine.
	 */
	WRITE_ONCE(rtg->rtg_open_zone, oz);
	return oz;
}

/*
 * Find a completely free zone, open it, and return a reference.
 */
struct xfs_open_zone *
xfs_open_zone(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint,
	bool			is_gc)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	struct xfs_group	*xg;

	xg = xfs_find_free_zone(mp, zi->zi_free_zone_cursor, ULONG_MAX);
	if (!xg)
		xg = xfs_find_free_zone(mp, 0, zi->zi_free_zone_cursor);
	if (!xg)
		return NULL;

	set_current_state(TASK_RUNNING);
	return xfs_init_open_zone(to_rtg(xg), 0, write_hint, is_gc);
}

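/*
 * Try to open a new zone for writing while respecting the configured maximum
 * open zone limit and keeping enough free zones in reserve for GC.  Called
 * with zi_open_zones_lock held, which is dropped and reacquired around the
 * actual zone allocation.
 */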
static struct xfs_open_zone *
xfs_try_open_zone(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	struct xfs_open_zone	*oz;

	if (zi->zi_nr_open_zones >= mp->m_max_open_zones - XFS_OPEN_GC_ZONES)
		return NULL;
	if (atomic_read(&zi->zi_nr_free_zones) <
	    XFS_GC_ZONES - XFS_OPEN_GC_ZONES)
		return NULL;

	/*
	 * Increment the open zone count to reserve our slot before dropping
	 * zi_open_zones_lock.
	 */
	zi->zi_nr_open_zones++;
	spin_unlock(&zi->zi_open_zones_lock);
	oz = xfs_open_zone(mp, write_hint, false);
	spin_lock(&zi->zi_open_zones_lock);
	if (!oz) {
		zi->zi_nr_open_zones--;
		return NULL;
	}

	atomic_inc(&oz->oz_ref);
	list_add_tail(&oz->oz_entry, &zi->zi_open_zones);

	/*
	 * If this was the last free zone, other waiters might be waiting
	 * on us to write to it as well.
	 */
	wake_up_all(&zi->zi_zone_wait);

	if (xfs_zoned_need_gc(mp))
		wake_up_process(zi->zi_gc_thread);

	trace_xfs_zone_opened(oz->oz_rtg);
	return oz;
}

/*
 * For data with short or medium lifetime, try to colocate it into an
 * already open zone with a matching temperature.
 */
static bool
xfs_colocate_eagerly(
	enum rw_hint		file_hint)
{
	switch (file_hint) {
	case WRITE_LIFE_MEDIUM:
	case WRITE_LIFE_SHORT:
	case WRITE_LIFE_NONE:
		return true;
	default:
		return false;
	}
}

static bool
xfs_good_hint_match(
	struct xfs_open_zone	*oz,
	enum rw_hint		file_hint)
{
	switch (oz->oz_write_hint) {
	case WRITE_LIFE_LONG:
	case WRITE_LIFE_EXTREME:
		/* colocate long and extreme */
		if (file_hint == WRITE_LIFE_LONG ||
		    file_hint == WRITE_LIFE_EXTREME)
			return true;
		break;
	case WRITE_LIFE_MEDIUM:
		/* colocate medium with medium */
		if (file_hint == WRITE_LIFE_MEDIUM)
			return true;
		break;
	case WRITE_LIFE_SHORT:
	case WRITE_LIFE_NONE:
	case WRITE_LIFE_NOT_SET:
		/* colocate short and none */
		if (file_hint <= WRITE_LIFE_SHORT)
			return true;
		break;
	}
	return false;
}

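/*
 * Try to grab a reference to @oz for a new write.  This fails if the zone is
 * already fully written or, unless we are low on space, if the life time
 * hint of the data doesn't match the zone.
 */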
static bool
xfs_try_use_zone(
	struct xfs_zone_info	*zi,
	enum rw_hint		file_hint,
	struct xfs_open_zone	*oz,
	bool			lowspace)
{
	if (oz->oz_write_pointer == rtg_blocks(oz->oz_rtg))
		return false;
	if (!lowspace && !xfs_good_hint_match(oz, file_hint))
		return false;
	if (!atomic_inc_not_zero(&oz->oz_ref))
		return false;

	/*
	 * If we have a hint set for the data, use that for the zone even if
	 * some data was written already without any hint set, but don't change
	 * the temperature after that as that would make little sense without
	 * tracking per-temperature class written block counts, which is
	 * probably overkill anyway.
	 */
	if (file_hint != WRITE_LIFE_NOT_SET &&
	    oz->oz_write_hint == WRITE_LIFE_NOT_SET)
		oz->oz_write_hint = file_hint;

	/*
	 * If we couldn't match by inode or life time we just pick the first
	 * zone with enough space above.  For that we want the least busy zone
	 * for some definition of "least" busy.  For now this simple LRU
	 * algorithm that rotates every zone to the end of the list will do it,
	 * even if it isn't exactly cache friendly.
	 */
	if (!list_is_last(&oz->oz_entry, &zi->zi_open_zones))
		list_move_tail(&oz->oz_entry, &zi->zi_open_zones);
	return true;
}

static struct xfs_open_zone *
xfs_select_open_zone_lru(
	struct xfs_zone_info	*zi,
	enum rw_hint		file_hint,
	bool			lowspace)
{
	struct xfs_open_zone	*oz;

	lockdep_assert_held(&zi->zi_open_zones_lock);

	list_for_each_entry(oz, &zi->zi_open_zones, oz_entry)
		if (xfs_try_use_zone(zi, file_hint, oz, lowspace))
			return oz;

	cond_resched_lock(&zi->zi_open_zones_lock);
	return NULL;
}

static struct xfs_open_zone *
xfs_select_open_zone_mru(
	struct xfs_zone_info	*zi,
	enum rw_hint		file_hint)
{
	struct xfs_open_zone	*oz;

	lockdep_assert_held(&zi->zi_open_zones_lock);

	list_for_each_entry_reverse(oz, &zi->zi_open_zones, oz_entry)
		if (xfs_try_use_zone(zi, file_hint, oz, false))
			return oz;

	cond_resched_lock(&zi->zi_open_zones_lock);
	return NULL;
}

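/*
 * Return the write life time hint to use for data placement, unless lifetime
 * based placement is disabled for this mount.
 */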
static inline enum rw_hint xfs_inode_write_hint(struct xfs_inode *ip)
{
	if (xfs_has_nolifetime(ip->i_mount))
		return WRITE_LIFE_NOT_SET;
	return VFS_I(ip)->i_write_hint;
}

/*
 * Try to tightly pack inodes that are written back after they were closed
 * instead of opening new zones for them or spreading them to the least
 * recently used zone.  This optimizes the data layout for workloads that
 * untar or copy a lot of small files.  Right now this does not separate
 * multiple such streams.
 */
static inline bool xfs_zoned_pack_tight(struct xfs_inode *ip)
{
	return !inode_is_open_for_write(VFS_I(ip)) &&
		!(ip->i_diflags & XFS_DIFLAG_APPEND);
}

/*
 * Pick a new zone for writes.
 *
 * If we aren't using up our budget of open zones just open a new one from the
 * freelist.  Else try to find one that matches the expected data lifetime.  If
 * we don't find one that is good, pick any zone that is available.
 */
static struct xfs_open_zone *
xfs_select_zone_nowait(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint,
	bool			pack_tight)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	struct xfs_open_zone	*oz = NULL;

	if (xfs_is_shutdown(mp))
		return NULL;

	/*
	 * Try to fill up open zones with matching temperature if available.  It
	 * is better to try to co-locate data when this is favorable, so we can
	 * activate empty zones when it is statistically better to separate
	 * data.
	 */
	spin_lock(&zi->zi_open_zones_lock);
	if (xfs_colocate_eagerly(write_hint))
		oz = xfs_select_open_zone_lru(zi, write_hint, false);
	else if (pack_tight)
		oz = xfs_select_open_zone_mru(zi, write_hint);
	if (oz)
		goto out_unlock;

	/*
	 * See if we can open a new zone and use that.
	 */
	oz = xfs_try_open_zone(mp, write_hint);
	if (oz)
		goto out_unlock;

	/*
	 * Try to colocate cold data with other cold data if we failed to open
	 * a new zone for it.
	 */
	if (write_hint != WRITE_LIFE_NOT_SET &&
	    !xfs_colocate_eagerly(write_hint))
		oz = xfs_select_open_zone_lru(zi, write_hint, false);
	if (!oz)
		oz = xfs_select_open_zone_lru(zi, WRITE_LIFE_NOT_SET, false);
	if (!oz)
		oz = xfs_select_open_zone_lru(zi, WRITE_LIFE_NOT_SET, true);
out_unlock:
	spin_unlock(&zi->zi_open_zones_lock);
	return oz;
}

static struct xfs_open_zone *
xfs_select_zone(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint,
	bool			pack_tight)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	DEFINE_WAIT		(wait);
	struct xfs_open_zone	*oz;

	oz = xfs_select_zone_nowait(mp, write_hint, pack_tight);
	if (oz)
		return oz;

	for (;;) {
		prepare_to_wait(&zi->zi_zone_wait, &wait, TASK_UNINTERRUPTIBLE);
		oz = xfs_select_zone_nowait(mp, write_hint, pack_tight);
		if (oz)
			break;
		schedule();
	}
	finish_wait(&zi->zi_zone_wait, &wait);
	return oz;
}

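/*
 * Allocate up to @count_fsb blocks from the write pointer of @oz and return
 * the allocated byte count.  For conventional zones the returned sector is
 * adjusted to the write pointer position; for sequential write required
 * zones it points to the start of the zone as the final position is only
 * known once the zone append completes.
 */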
static unsigned int
xfs_zone_alloc_blocks(
	struct xfs_open_zone	*oz,
	xfs_filblks_t		count_fsb,
	sector_t		*sector,
	bool			*is_seq)
{
	struct xfs_rtgroup	*rtg = oz->oz_rtg;
	struct xfs_mount	*mp = rtg_mount(rtg);
	xfs_rgblock_t		rgbno;

	spin_lock(&oz->oz_alloc_lock);
	count_fsb = min3(count_fsb, XFS_MAX_BMBT_EXTLEN,
		(xfs_filblks_t)rtg_blocks(rtg) - oz->oz_write_pointer);
	if (!count_fsb) {
		spin_unlock(&oz->oz_alloc_lock);
		return 0;
	}
	rgbno = oz->oz_write_pointer;
	oz->oz_write_pointer += count_fsb;
	spin_unlock(&oz->oz_alloc_lock);

	trace_xfs_zone_alloc_blocks(oz, rgbno, count_fsb);

	*sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0);
	*is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *sector);
	if (!*is_seq)
		*sector += XFS_FSB_TO_BB(mp, rgbno);
	return XFS_FSB_TO_B(mp, count_fsb);
}

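/*
 * Flag ioends that start at the first block of a zone so that they are not
 * merged with I/O for the previous zone.
 */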
void
xfs_mark_rtg_boundary(
	struct iomap_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	sector_t		sector = ioend->io_bio.bi_iter.bi_sector;

	if (xfs_rtb_to_rgbno(mp, xfs_daddr_to_rtb(mp, sector)) == 0)
		ioend->io_flags |= IOMAP_IOEND_BOUNDARY;
}

static void
xfs_submit_zoned_bio(
	struct iomap_ioend	*ioend,
	struct xfs_open_zone	*oz,
	bool			is_seq)
{
	ioend->io_bio.bi_iter.bi_sector = ioend->io_sector;
	ioend->io_private = oz;
	atomic_inc(&oz->oz_ref); /* for xfs_zoned_end_io */

	if (is_seq) {
		ioend->io_bio.bi_opf &= ~REQ_OP_WRITE;
		ioend->io_bio.bi_opf |= REQ_OP_ZONE_APPEND;
	} else {
		xfs_mark_rtg_boundary(ioend);
	}

	submit_bio(&ioend->io_bio);
}

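/*
 * Pick a zone for a zoned write, allocate blocks from it, and submit the
 * bio(s).  If the chosen zone runs out of space part way through, the ioend
 * is split and the remainder is written to another zone.
 */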
void
xfs_zone_alloc_and_submit(
	struct iomap_ioend	*ioend,
	struct xfs_open_zone	**oz)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_mount	*mp = ip->i_mount;
	enum rw_hint		write_hint = xfs_inode_write_hint(ip);
	bool			pack_tight = xfs_zoned_pack_tight(ip);
	unsigned int		alloc_len;
	struct iomap_ioend	*split;
	bool			is_seq;

	if (xfs_is_shutdown(mp))
		goto out_error;

	/*
	 * If we don't have a cached zone in this write context, see if the
	 * last extent before the one we are writing to points to an active
	 * zone.  If so, just continue writing to it.
	 */
	if (!*oz && ioend->io_offset)
		*oz = xfs_last_used_zone(ioend);
	if (!*oz) {
select_zone:
		*oz = xfs_select_zone(mp, write_hint, pack_tight);
		if (!*oz)
			goto out_error;
	}

	alloc_len = xfs_zone_alloc_blocks(*oz, XFS_B_TO_FSB(mp, ioend->io_size),
			&ioend->io_sector, &is_seq);
	if (!alloc_len) {
		xfs_open_zone_put(*oz);
		goto select_zone;
	}

	while ((split = iomap_split_ioend(ioend, alloc_len, is_seq))) {
		if (IS_ERR(split))
			goto out_split_error;
		alloc_len -= split->io_bio.bi_iter.bi_size;
		xfs_submit_zoned_bio(split, *oz, is_seq);
		if (!alloc_len) {
			xfs_open_zone_put(*oz);
			goto select_zone;
		}
	}

	xfs_submit_zoned_bio(ioend, *oz, is_seq);
	return;

out_split_error:
	ioend->io_bio.bi_status = errno_to_blk_status(PTR_ERR(split));
out_error:
	bio_io_error(&ioend->io_bio);
}

/*
 * Wake up all threads waiting for a zoned space allocation when the file
 * system is shut down.
 */
void
xfs_zoned_wake_all(
	struct xfs_mount	*mp)
{
	/*
	 * Don't wake up if there is no m_zone_info.  This is complicated by the
	 * fact that unmount can't atomically clear m_zone_info and thus we need
	 * to check SB_ACTIVE for that, but mount temporarily enables SB_ACTIVE
	 * during log recovery so we can't entirely rely on that either.
	 */
	if ((mp->m_super->s_flags & SB_ACTIVE) && mp->m_zone_info)
		wake_up_all(&mp->m_zone_info->zi_zone_wait);
}

/*
 * Check if @rgbno in @rtg is a potentially valid block.  It might still be
 * unused, but that information is only found in the rmap.
 */
bool
xfs_zone_rgbno_is_valid(
	struct xfs_rtgroup	*rtg,
	xfs_rgnumber_t		rgbno)
{
	lockdep_assert_held(&rtg_rmap(rtg)->i_lock);

	if (rtg->rtg_open_zone)
		return rgbno < rtg->rtg_open_zone->oz_write_pointer;
	return !xa_get_mark(&rtg_mount(rtg)->m_groups[XG_TYPE_RTG].xa,
			rtg_rgno(rtg), XFS_RTG_FREE);
}

static void
xfs_free_open_zones(
	struct xfs_zone_info	*zi)
{
	struct xfs_open_zone	*oz;

	spin_lock(&zi->zi_open_zones_lock);
	while ((oz = list_first_entry_or_null(&zi->zi_open_zones,
			struct xfs_open_zone, oz_entry))) {
		list_del(&oz->oz_entry);
		xfs_open_zone_put(oz);
	}
	spin_unlock(&zi->zi_open_zones_lock);
}

struct xfs_init_zones {
	struct xfs_mount	*mp;
	uint64_t		available;
	uint64_t		reclaimable;
};

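/*
 * Set up the in-memory state for a single zone at mount time: validate the
 * hardware zone state if there is one, recover the write pointer, reset
 * empty zones that weren't reset before a power loss, and account the zone
 * as free, open, or reclaimable.
 */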
static int
xfs_init_zone(
	struct xfs_init_zones	*iz,
	struct xfs_rtgroup	*rtg,
	struct blk_zone		*zone)
{
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_zone_info	*zi = mp->m_zone_info;
	uint64_t		used = rtg_rmap(rtg)->i_used_blocks;
	xfs_rgblock_t		write_pointer, highest_rgbno;
	int			error;

	if (zone && !xfs_zone_validate(zone, rtg, &write_pointer))
		return -EFSCORRUPTED;

	/*
	 * For sequential write required zones we retrieved the hardware write
	 * pointer above.
	 *
	 * For conventional zones or conventional devices we don't have that
	 * luxury.  Instead query the rmap to find the highest recorded block
	 * and set the write pointer to the block after that.  In case of a
	 * power loss this misses blocks where the data I/O has completed but
	 * not recorded in the rmap yet, and it also rewrites blocks if the most
	 * recently written ones got deleted again before unmount, but this is
	 * the best we can do without hardware support.
	 */
	if (!zone || zone->cond == BLK_ZONE_COND_NOT_WP) {
		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
		highest_rgbno = xfs_rtrmap_highest_rgbno(rtg);
		if (highest_rgbno == NULLRGBLOCK)
			write_pointer = 0;
		else
			write_pointer = highest_rgbno + 1;
		xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
	}

	/*
	 * If there are no used blocks, but the zone is not in the empty state
	 * yet, we lost power before the zone reset.  In that case finish the
	 * work here.
	 */
	if (write_pointer == rtg_blocks(rtg) && used == 0) {
		error = xfs_zone_gc_reset_sync(rtg);
		if (error)
			return error;
		write_pointer = 0;
	}

	if (write_pointer == 0) {
		/* zone is empty */
		atomic_inc(&zi->zi_nr_free_zones);
		xfs_group_set_mark(&rtg->rtg_group, XFS_RTG_FREE);
		iz->available += rtg_blocks(rtg);
	} else if (write_pointer < rtg_blocks(rtg)) {
		/* zone is open */
		struct xfs_open_zone *oz;

		atomic_inc(&rtg_group(rtg)->xg_active_ref);
		oz = xfs_init_open_zone(rtg, write_pointer, WRITE_LIFE_NOT_SET,
				false);
		list_add_tail(&oz->oz_entry, &zi->zi_open_zones);
		zi->zi_nr_open_zones++;

		iz->available += (rtg_blocks(rtg) - write_pointer);
		iz->reclaimable += write_pointer - used;
	} else if (used < rtg_blocks(rtg)) {
		/* zone fully written, but has freed blocks */
		xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
		iz->reclaimable += (rtg_blocks(rtg) - used);
	}

	return 0;
}

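/*
 * blkdev_report_zones() callback: look up the rtgroup backing a hardware
 * zone and initialize its in-memory state.
 */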
static int
xfs_get_zone_info_cb(
	struct blk_zone		*zone,
	unsigned int		idx,
	void			*data)
{
	struct xfs_init_zones	*iz = data;
	struct xfs_mount	*mp = iz->mp;
	xfs_fsblock_t		zsbno = xfs_daddr_to_rtb(mp, zone->start);
	xfs_rgnumber_t		rgno;
	struct xfs_rtgroup	*rtg;
	int			error;

	if (xfs_rtb_to_rgbno(mp, zsbno) != 0) {
		xfs_warn(mp, "mismatched zone start 0x%llx.", zsbno);
		return -EFSCORRUPTED;
	}

	rgno = xfs_rtb_to_rgno(mp, zsbno);
	rtg = xfs_rtgroup_grab(mp, rgno);
	if (!rtg) {
		xfs_warn(mp, "realtime group not found for zone %u.", rgno);
		return -EFSCORRUPTED;
	}
	error = xfs_init_zone(iz, rtg, zone);
	xfs_rtgroup_rele(rtg);
	return error;
}

/*
 * Calculate the max open zone limit based on the number of backing zones
 * available.
 */
static inline uint32_t
xfs_max_open_zones(
	struct xfs_mount	*mp)
{
	unsigned int		max_open, max_open_data_zones;

	/*
	 * We need two zones for every open data zone, one in reserve as we
	 * don't reclaim open zones.  One data zone and its spare is included
	 * in XFS_MIN_ZONES.
	 */
	max_open_data_zones = (mp->m_sb.sb_rgcount - XFS_MIN_ZONES) / 2 + 1;
	max_open = max_open_data_zones + XFS_OPEN_GC_ZONES;

	/*
	 * Cap the max open limit to 1/4 of the available zones.
	 */
	max_open = min(max_open, mp->m_sb.sb_rgcount / 4);

	return max(XFS_MIN_OPEN_ZONES, max_open);
}

/*
 * Normally we use the open zone limit that the device reports.  If there is
 * none let the user pick one from the command line.
 *
 * If the device doesn't report an open zone limit and there is no override,
 * allow holding about a quarter of the zones open.  In theory we could allow
 * all to be open, but at that point we run into GC deadlocks because we can't
 * reclaim open zones.
 *
 * When used on conventional SSDs a lower open limit is advisable as we'll
 * otherwise overwhelm the FTL just as much as a conventional block allocator.
 *
 * Note: To debug the open zone management code, force max_open to 1 here.
 */
static int
xfs_calc_open_zones(
	struct xfs_mount	*mp)
{
	struct block_device	*bdev = mp->m_rtdev_targp->bt_bdev;
	unsigned int		bdev_open_zones = bdev_max_open_zones(bdev);

	if (!mp->m_max_open_zones) {
		if (bdev_open_zones)
			mp->m_max_open_zones = bdev_open_zones;
		else
			mp->m_max_open_zones = xfs_max_open_zones(mp);
	}

	if (mp->m_max_open_zones < XFS_MIN_OPEN_ZONES) {
		xfs_notice(mp, "need at least %u open zones.",
			XFS_MIN_OPEN_ZONES);
		return -EIO;
	}

	if (bdev_open_zones && bdev_open_zones < mp->m_max_open_zones) {
		mp->m_max_open_zones = bdev_open_zones;
		xfs_info(mp, "limiting open zones to %u due to hardware limit.\n",
			bdev_open_zones);
	}

	if (mp->m_max_open_zones > xfs_max_open_zones(mp)) {
		mp->m_max_open_zones = xfs_max_open_zones(mp);
		xfs_info(mp,
"limiting open zones to %u due to total zone count (%u)",
			mp->m_max_open_zones, mp->m_sb.sb_rgcount);
	}

	return 0;
}

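/*
 * Allocate a bitmap with one bit per rtgroup, used to track which zones sit
 * in a given used bucket.
 */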
static unsigned long *
xfs_alloc_bucket_bitmap(
	struct xfs_mount	*mp)
{
	return kvmalloc_array(BITS_TO_LONGS(mp->m_sb.sb_rgcount),
			sizeof(unsigned long), GFP_KERNEL | __GFP_ZERO);
}

static struct xfs_zone_info *
xfs_alloc_zone_info(
	struct xfs_mount	*mp)
{
	struct xfs_zone_info	*zi;
	int			i;

	zi = kzalloc(sizeof(*zi), GFP_KERNEL);
	if (!zi)
		return NULL;
	INIT_LIST_HEAD(&zi->zi_open_zones);
	INIT_LIST_HEAD(&zi->zi_reclaim_reservations);
	spin_lock_init(&zi->zi_reset_list_lock);
	spin_lock_init(&zi->zi_open_zones_lock);
	spin_lock_init(&zi->zi_reservation_lock);
	init_waitqueue_head(&zi->zi_zone_wait);
	spin_lock_init(&zi->zi_used_buckets_lock);
	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++) {
		zi->zi_used_bucket_bitmap[i] = xfs_alloc_bucket_bitmap(mp);
		if (!zi->zi_used_bucket_bitmap[i])
			goto out_free_bitmaps;
	}
	return zi;

out_free_bitmaps:
	/* free all bitmaps allocated so far, including index 0 */
	while (--i >= 0)
		kvfree(zi->zi_used_bucket_bitmap[i]);
	kfree(zi);
	return NULL;
}

static void
xfs_free_zone_info(
	struct xfs_zone_info	*zi)
{
	int			i;

	xfs_free_open_zones(zi);
	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++)
		kvfree(zi->zi_used_bucket_bitmap[i]);
	kfree(zi);
}

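/*
 * Set up the zone allocator at mount time: validate the geometry, compute
 * the open zone limit, build the per-zone state from the hardware zone
 * report (or the rmap for conventional devices), initialize the free space
 * counters and start the GC thread.
 */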
int
xfs_mount_zones(
	struct xfs_mount	*mp)
{
	struct xfs_init_zones	iz = {
		.mp		= mp,
	};
	struct xfs_buftarg	*bt = mp->m_rtdev_targp;
	int			error;

	if (!bt) {
		xfs_notice(mp, "RT device missing.");
		return -EINVAL;
	}

	if (!xfs_has_rtgroups(mp) || !xfs_has_rmapbt(mp)) {
		xfs_notice(mp, "invalid flag combination.");
		return -EFSCORRUPTED;
	}
	if (mp->m_sb.sb_rextsize != 1) {
		xfs_notice(mp, "zoned file systems do not support rextsize.");
		return -EFSCORRUPTED;
	}
	if (mp->m_sb.sb_rgcount < XFS_MIN_ZONES) {
		xfs_notice(mp,
"zoned file systems need to have at least %u zones.", XFS_MIN_ZONES);
		return -EFSCORRUPTED;
	}

	error = xfs_calc_open_zones(mp);
	if (error)
		return error;

	mp->m_zone_info = xfs_alloc_zone_info(mp);
	if (!mp->m_zone_info)
		return -ENOMEM;

	xfs_info(mp, "%u zones of %u blocks size (%u max open)",
		 mp->m_sb.sb_rgcount, mp->m_groups[XG_TYPE_RTG].blocks,
		 mp->m_max_open_zones);
	trace_xfs_zones_mount(mp);

	if (bdev_is_zoned(bt->bt_bdev)) {
		error = blkdev_report_zones(bt->bt_bdev,
				XFS_FSB_TO_BB(mp, mp->m_sb.sb_rtstart),
				mp->m_sb.sb_rgcount, xfs_get_zone_info_cb, &iz);
		if (error < 0)
			goto out_free_zone_info;
	} else {
		struct xfs_rtgroup	*rtg = NULL;

		while ((rtg = xfs_rtgroup_next(mp, rtg))) {
			error = xfs_init_zone(&iz, rtg, NULL);
			if (error)
				goto out_free_zone_info;
		}
	}

	xfs_set_freecounter(mp, XC_FREE_RTAVAILABLE, iz.available);
	xfs_set_freecounter(mp, XC_FREE_RTEXTENTS,
			iz.available + iz.reclaimable);

	error = xfs_zone_gc_mount(mp);
	if (error)
		goto out_free_zone_info;
	return 0;

out_free_zone_info:
	xfs_free_zone_info(mp->m_zone_info);
	return error;
}

void
xfs_unmount_zones(
	struct xfs_mount	*mp)
{
	xfs_zone_gc_unmount(mp);
	xfs_free_zone_info(mp->m_zone_info);
}