1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2023-2025 Christoph Hellwig.
4 * Copyright (c) 2024-2025, Western Digital Corporation or its affiliates.
5 */
6 #include "xfs.h"
7 #include "xfs_shared.h"
8 #include "xfs_format.h"
9 #include "xfs_log_format.h"
10 #include "xfs_error.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_iomap.h"
15 #include "xfs_trans.h"
16 #include "xfs_alloc.h"
17 #include "xfs_bmap.h"
18 #include "xfs_bmap_btree.h"
19 #include "xfs_trans_space.h"
20 #include "xfs_refcount.h"
21 #include "xfs_rtbitmap.h"
22 #include "xfs_rtrmap_btree.h"
23 #include "xfs_zone_alloc.h"
24 #include "xfs_zone_priv.h"
25 #include "xfs_zones.h"
26 #include "xfs_trace.h"
27 #include "xfs_mru_cache.h"
28
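/*
 * RCU callback that tears down an open zone once the last reference has been
 * dropped: release the rtgroup reference and free the structure.
 */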
29 static void
30 xfs_open_zone_free_rcu(
31 struct callback_head *cb)
32 {
33 struct xfs_open_zone *oz = container_of(cb, typeof(*oz), oz_rcu);
34
35 xfs_rtgroup_rele(oz->oz_rtg);
36 kfree(oz);
37 }
38
39 void
40 xfs_open_zone_put(
41 struct xfs_open_zone *oz)
42 {
43 if (atomic_dec_and_test(&oz->oz_ref))
44 call_rcu(&oz->oz_rcu, xfs_open_zone_free_rcu);
45 }
46
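/*
 * Map a zone's used block count to one of the XFS_ZONE_USED_BUCKETS buckets
 * that GC uses to find good reclaim candidates.
 */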
47 static inline uint32_t
48 xfs_zone_bucket(
49 struct xfs_mount *mp,
50 uint32_t used_blocks)
51 {
52 return XFS_ZONE_USED_BUCKETS * used_blocks /
53 mp->m_groups[XG_TYPE_RTG].blocks;
54 }
55
56 static inline void
57 xfs_zone_add_to_bucket(
58 struct xfs_zone_info *zi,
59 xfs_rgnumber_t rgno,
60 uint32_t to_bucket)
61 {
62 __set_bit(rgno, zi->zi_used_bucket_bitmap[to_bucket]);
63 zi->zi_used_bucket_entries[to_bucket]++;
64 }
65
66 static inline void
67 xfs_zone_remove_from_bucket(
68 struct xfs_zone_info *zi,
69 xfs_rgnumber_t rgno,
70 uint32_t from_bucket)
71 {
72 __clear_bit(rgno, zi->zi_used_bucket_bitmap[from_bucket]);
73 zi->zi_used_bucket_entries[from_bucket]--;
74 }
75
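/*
 * Update reclaim accounting after @freed blocks were freed in @rtg: move the
 * zone between used-block buckets, queue now-empty zones for a reset, and
 * wake GC if it has work to do.
 */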
76 static void
77 xfs_zone_account_reclaimable(
78 struct xfs_rtgroup *rtg,
79 uint32_t freed)
80 {
81 struct xfs_group *xg = &rtg->rtg_group;
82 struct xfs_mount *mp = rtg_mount(rtg);
83 struct xfs_zone_info *zi = mp->m_zone_info;
84 uint32_t used = rtg_rmap(rtg)->i_used_blocks;
85 xfs_rgnumber_t rgno = rtg_rgno(rtg);
86 uint32_t from_bucket = xfs_zone_bucket(mp, used + freed);
87 uint32_t to_bucket = xfs_zone_bucket(mp, used);
88 bool was_full = (used + freed == rtg_blocks(rtg));
89
90 /*
91 * This can be called from log recovery, where the zone_info structure
92 * hasn't been allocated yet. Skip all work as xfs_mount_zones will
93 * add the zones to the right buckets before the file system becomes
94 * active.
95 */
96 if (!zi)
97 return;
98
99 if (!used) {
100 /*
101 * The zone is now empty, remove it from the bottom bucket and
102 * trigger a reset.
103 */
104 trace_xfs_zone_emptied(rtg);
105
106 if (!was_full)
107 xfs_group_clear_mark(xg, XFS_RTG_RECLAIMABLE);
108
109 spin_lock(&zi->zi_used_buckets_lock);
110 if (!was_full)
111 xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
112 spin_unlock(&zi->zi_used_buckets_lock);
113
114 spin_lock(&zi->zi_reset_list_lock);
115 xg->xg_next_reset = zi->zi_reset_list;
116 zi->zi_reset_list = xg;
117 spin_unlock(&zi->zi_reset_list_lock);
118
119 if (zi->zi_gc_thread)
120 wake_up_process(zi->zi_gc_thread);
121 } else if (was_full) {
122 /*
123 * The zone transitioned from full. Mark it as reclaimable and
124 * wake up GC, which might be waiting for zones to reclaim.
125 */
126 spin_lock(&zi->zi_used_buckets_lock);
127 xfs_zone_add_to_bucket(zi, rgno, to_bucket);
128 spin_unlock(&zi->zi_used_buckets_lock);
129
130 xfs_group_set_mark(xg, XFS_RTG_RECLAIMABLE);
131 if (zi->zi_gc_thread && xfs_zoned_need_gc(mp))
132 wake_up_process(zi->zi_gc_thread);
133 } else if (to_bucket != from_bucket) {
134 /*
135 * Move the zone to a new bucket if it dropped below the
136 * threshold.
137 */
138 spin_lock(&zi->zi_used_buckets_lock);
139 xfs_zone_add_to_bucket(zi, rgno, to_bucket);
140 xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
141 spin_unlock(&zi->zi_used_buckets_lock);
142 }
143 }
144
145 static void
146 xfs_open_zone_mark_full(
147 struct xfs_open_zone *oz)
148 {
149 struct xfs_rtgroup *rtg = oz->oz_rtg;
150 struct xfs_mount *mp = rtg_mount(rtg);
151 struct xfs_zone_info *zi = mp->m_zone_info;
152 uint32_t used = rtg_rmap(rtg)->i_used_blocks;
153
154 trace_xfs_zone_full(rtg);
155
156 WRITE_ONCE(rtg->rtg_open_zone, NULL);
157
158 spin_lock(&zi->zi_open_zones_lock);
159 if (oz->oz_is_gc) {
160 ASSERT(current == zi->zi_gc_thread);
161 zi->zi_open_gc_zone = NULL;
162 } else {
163 zi->zi_nr_open_zones--;
164 list_del_init(&oz->oz_entry);
165 }
166 spin_unlock(&zi->zi_open_zones_lock);
167 xfs_open_zone_put(oz);
168
169 wake_up_all(&zi->zi_zone_wait);
170 if (used < rtg_blocks(rtg))
171 xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
172 }
173
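/*
 * Account blocks that were written and are now linked into an inode: bump the
 * used block counter in the rmap inode as well as the per-zone written
 * counter, and mark the zone full once every block in it has been written.
 */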
174 static void
175 xfs_zone_record_blocks(
176 struct xfs_trans *tp,
177 struct xfs_open_zone *oz,
178 xfs_fsblock_t fsbno,
179 xfs_filblks_t len)
180 {
181 struct xfs_mount *mp = tp->t_mountp;
182 struct xfs_rtgroup *rtg = oz->oz_rtg;
183 struct xfs_inode *rmapip = rtg_rmap(rtg);
184
185 trace_xfs_zone_record_blocks(oz, xfs_rtb_to_rgbno(mp, fsbno), len);
186
187 xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
188 xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);
189 rmapip->i_used_blocks += len;
190 ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg));
191 oz->oz_written += len;
192 if (oz->oz_written == rtg_blocks(rtg))
193 xfs_open_zone_mark_full(oz);
194 xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
195 }
196
197 /*
198 * Called for blocks that have been written to disk, but not actually linked to
199 * an inode, which can happen when garbage collection races with user data
200 * writes to a file.
201 */
202 static void
203 xfs_zone_skip_blocks(
204 struct xfs_open_zone *oz,
205 xfs_filblks_t len)
206 {
207 struct xfs_rtgroup *rtg = oz->oz_rtg;
208
209 trace_xfs_zone_skip_blocks(oz, 0, len);
210
211 xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
212 oz->oz_written += len;
213 if (oz->oz_written == rtg_blocks(rtg))
214 xfs_open_zone_mark_full(oz);
215 xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
216
217 xfs_add_frextents(rtg_mount(rtg), len);
218 }
219
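/*
 * Map a freshly written extent into the data fork, replacing the previous
 * mapping for the range. @old_startblock is used by garbage collection to
 * detect writes that raced with the GC copy, in which case the new blocks
 * are skipped instead of mapped.
 */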
220 static int
221 xfs_zoned_map_extent(
222 struct xfs_trans *tp,
223 struct xfs_inode *ip,
224 struct xfs_bmbt_irec *new,
225 struct xfs_open_zone *oz,
226 xfs_fsblock_t old_startblock)
227 {
228 struct xfs_bmbt_irec data;
229 int nmaps = 1;
230 int error;
231
232 /* Grab the corresponding mapping in the data fork. */
233 error = xfs_bmapi_read(ip, new->br_startoff, new->br_blockcount, &data,
234 &nmaps, 0);
235 if (error)
236 return error;
237
238 /*
239 * Cap the update to the existing extent in the data fork because we can
240 * only overwrite one extent at a time.
241 */
242 ASSERT(new->br_blockcount >= data.br_blockcount);
243 new->br_blockcount = data.br_blockcount;
244
245 /*
246 * If a data write raced with this GC write, keep the existing data in
247 * the data fork, mark our newly written GC extent as reclaimable, then
248 * move on to the next extent.
249 */
250 if (old_startblock != NULLFSBLOCK &&
251 old_startblock != data.br_startblock)
252 goto skip;
253
254 trace_xfs_reflink_cow_remap_from(ip, new);
255 trace_xfs_reflink_cow_remap_to(ip, &data);
256
257 error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
258 XFS_IEXT_REFLINK_END_COW_CNT);
259 if (error)
260 return error;
261
262 if (data.br_startblock != HOLESTARTBLOCK) {
263 ASSERT(data.br_startblock != DELAYSTARTBLOCK);
264 ASSERT(!isnullstartblock(data.br_startblock));
265
266 xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &data);
267 if (xfs_is_reflink_inode(ip)) {
268 xfs_refcount_decrease_extent(tp, true, &data);
269 } else {
270 error = xfs_free_extent_later(tp, data.br_startblock,
271 data.br_blockcount, NULL,
272 XFS_AG_RESV_NONE,
273 XFS_FREE_EXTENT_REALTIME);
274 if (error)
275 return error;
276 }
277 }
278
279 xfs_zone_record_blocks(tp, oz, new->br_startblock, new->br_blockcount);
280
281 /* Map the new blocks into the data fork. */
282 xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, new);
283 return 0;
284
285 skip:
286 trace_xfs_reflink_cow_remap_skip(ip, new);
287 xfs_zone_skip_blocks(oz, new->br_blockcount);
288 return 0;
289 }
290
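/*
 * I/O completion handler for zoned writes: walk the range that was just
 * written and remap it into the data fork, one existing extent at a time,
 * each in its own transaction.
 */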
291 int
292 xfs_zoned_end_io(
293 struct xfs_inode *ip,
294 xfs_off_t offset,
295 xfs_off_t count,
296 xfs_daddr_t daddr,
297 struct xfs_open_zone *oz,
298 xfs_fsblock_t old_startblock)
299 {
300 struct xfs_mount *mp = ip->i_mount;
301 xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count);
302 struct xfs_bmbt_irec new = {
303 .br_startoff = XFS_B_TO_FSBT(mp, offset),
304 .br_startblock = xfs_daddr_to_rtb(mp, daddr),
305 .br_state = XFS_EXT_NORM,
306 };
307 unsigned int resblks =
308 XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
309 struct xfs_trans *tp;
310 int error;
311
312 if (xfs_is_shutdown(mp))
313 return -EIO;
314
315 while (new.br_startoff < end_fsb) {
316 new.br_blockcount = end_fsb - new.br_startoff;
317
318 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
319 XFS_TRANS_RESERVE | XFS_TRANS_RES_FDBLKS, &tp);
320 if (error)
321 return error;
322 xfs_ilock(ip, XFS_ILOCK_EXCL);
323 xfs_trans_ijoin(tp, ip, 0);
324
325 error = xfs_zoned_map_extent(tp, ip, &new, oz, old_startblock);
326 if (error)
327 xfs_trans_cancel(tp);
328 else
329 error = xfs_trans_commit(tp);
330 xfs_iunlock(ip, XFS_ILOCK_EXCL);
331 if (error)
332 return error;
333
334 new.br_startoff += new.br_blockcount;
335 new.br_startblock += new.br_blockcount;
336 if (old_startblock != NULLFSBLOCK)
337 old_startblock += new.br_blockcount;
338 }
339
340 return 0;
341 }
342
343 /*
344 * "Free" blocks allocated in a zone.
345 *
346 * Just decrement the used blocks counter and report the space as freed.
347 */
348 int
349 xfs_zone_free_blocks(
350 struct xfs_trans *tp,
351 struct xfs_rtgroup *rtg,
352 xfs_fsblock_t fsbno,
353 xfs_filblks_t len)
354 {
355 struct xfs_mount *mp = tp->t_mountp;
356 struct xfs_inode *rmapip = rtg_rmap(rtg);
357
358 xfs_assert_ilocked(rmapip, XFS_ILOCK_EXCL);
359
360 if (len > rmapip->i_used_blocks) {
361 xfs_err(mp,
362 "trying to free more blocks (%lld) than used counter (%u).",
363 len, rmapip->i_used_blocks);
364 ASSERT(len <= rmapip->i_used_blocks);
365 xfs_rtginode_mark_sick(rtg, XFS_RTGI_RMAP);
366 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
367 return -EFSCORRUPTED;
368 }
369
370 trace_xfs_zone_free_blocks(rtg, xfs_rtb_to_rgbno(mp, fsbno), len);
371
372 rmapip->i_used_blocks -= len;
373 /*
374 * Don't add open zones to the reclaimable buckets. The I/O completion
375 * for writing the last block will take care of accounting for already
376 * unused blocks instead.
377 */
378 if (!READ_ONCE(rtg->rtg_open_zone))
379 xfs_zone_account_reclaimable(rtg, len);
380 xfs_add_frextents(mp, len);
381 xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
382 return 0;
383 }
384
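/*
 * Look up a zone marked XFS_RTG_FREE in the range [@start, @end], take an
 * active reference to it and clear the free mark. Returns NULL if no free
 * zone was found.
 */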
385 static struct xfs_group *
386 xfs_find_free_zone(
387 struct xfs_mount *mp,
388 unsigned long start,
389 unsigned long end)
390 {
391 struct xfs_zone_info *zi = mp->m_zone_info;
392 XA_STATE(xas, &mp->m_groups[XG_TYPE_RTG].xa, start);
393 struct xfs_group *xg;
394
395 xas_lock(&xas);
396 xas_for_each_marked(&xas, xg, end, XFS_RTG_FREE)
397 if (atomic_inc_not_zero(&xg->xg_active_ref))
398 goto found;
399 xas_unlock(&xas);
400 return NULL;
401
402 found:
403 xas_clear_mark(&xas, XFS_RTG_FREE);
404 atomic_dec(&zi->zi_nr_free_zones);
405 zi->zi_free_zone_cursor = xg->xg_gno;
406 xas_unlock(&xas);
407 return xg;
408 }
409
410 static struct xfs_open_zone *
411 xfs_init_open_zone(
412 struct xfs_rtgroup *rtg,
413 xfs_rgblock_t write_pointer,
414 enum rw_hint write_hint,
415 bool is_gc)
416 {
417 struct xfs_open_zone *oz;
418
419 oz = kzalloc(sizeof(*oz), GFP_NOFS | __GFP_NOFAIL);
420 spin_lock_init(&oz->oz_alloc_lock);
421 atomic_set(&oz->oz_ref, 1);
422 oz->oz_rtg = rtg;
423 oz->oz_allocated = write_pointer;
424 oz->oz_written = write_pointer;
425 oz->oz_write_hint = write_hint;
426 oz->oz_is_gc = is_gc;
427
428 /*
429 * All dereferences of rtg->rtg_open_zone hold the ILOCK for the rmap
430 * inode, but we don't really want to take that here because we are
431 * under the zone_list_lock. Ensure the pointer is only set for a fully
432 * initialized open zone structure so that a racy lookup finding it is
433 * fine.
434 */
435 WRITE_ONCE(rtg->rtg_open_zone, oz);
436 return oz;
437 }
438
439 /*
440 * Find a completely free zone, open it, and return a reference.
441 */
442 struct xfs_open_zone *
443 xfs_open_zone(
444 struct xfs_mount *mp,
445 enum rw_hint write_hint,
446 bool is_gc)
447 {
448 struct xfs_zone_info *zi = mp->m_zone_info;
449 struct xfs_group *xg;
450
451 xg = xfs_find_free_zone(mp, zi->zi_free_zone_cursor, ULONG_MAX);
452 if (!xg)
453 xg = xfs_find_free_zone(mp, 0, zi->zi_free_zone_cursor);
454 if (!xg)
455 return NULL;
456
457 set_current_state(TASK_RUNNING);
458 return xfs_init_open_zone(to_rtg(xg), 0, write_hint, is_gc);
459 }
460
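/*
 * Try to open a brand new zone for user data if the open zone limit and the
 * free zone reserve for GC allow it. Called with zi_open_zones_lock held,
 * which is dropped and reacquired while actually opening the zone.
 */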
461 static struct xfs_open_zone *
462 xfs_try_open_zone(
463 struct xfs_mount *mp,
464 enum rw_hint write_hint)
465 {
466 struct xfs_zone_info *zi = mp->m_zone_info;
467 struct xfs_open_zone *oz;
468
469 if (zi->zi_nr_open_zones >= mp->m_max_open_zones - XFS_OPEN_GC_ZONES)
470 return NULL;
471 if (atomic_read(&zi->zi_nr_free_zones) <
472 XFS_GC_ZONES - XFS_OPEN_GC_ZONES)
473 return NULL;
474
475 /*
476 * Increment the open zone count to reserve our slot before dropping
477 * zi_open_zones_lock.
478 */
479 zi->zi_nr_open_zones++;
480 spin_unlock(&zi->zi_open_zones_lock);
481 oz = xfs_open_zone(mp, write_hint, false);
482 spin_lock(&zi->zi_open_zones_lock);
483 if (!oz) {
484 zi->zi_nr_open_zones--;
485 return NULL;
486 }
487
488 atomic_inc(&oz->oz_ref);
489 list_add_tail(&oz->oz_entry, &zi->zi_open_zones);
490
491 /*
492 * If this was the last free zone, other waiters might be waiting
493 * on us to write to it as well.
494 */
495 wake_up_all(&zi->zi_zone_wait);
496
497 if (xfs_zoned_need_gc(mp))
498 wake_up_process(zi->zi_gc_thread);
499
500 trace_xfs_zone_opened(oz->oz_rtg);
501 return oz;
502 }
503
504 enum xfs_zone_alloc_score {
505 /* Any open zone will do, we're desperate */
506 XFS_ZONE_ALLOC_ANY = 0,
507
508 /* It better fit somehow */
509 XFS_ZONE_ALLOC_OK = 1,
510
511 /* Only reuse a zone if it fits really well. */
512 XFS_ZONE_ALLOC_GOOD = 2,
513 };
514
515 /*
516 * Lifetime hint co-location matrix. Fields not set default to 0
517 * aka XFS_ZONE_ALLOC_ANY.
518 */
519 static const unsigned int
520 xfs_zoned_hint_score[WRITE_LIFE_HINT_NR][WRITE_LIFE_HINT_NR] = {
521 [WRITE_LIFE_NOT_SET] = {
522 [WRITE_LIFE_NOT_SET] = XFS_ZONE_ALLOC_OK,
523 },
524 [WRITE_LIFE_NONE] = {
525 [WRITE_LIFE_NONE] = XFS_ZONE_ALLOC_OK,
526 },
527 [WRITE_LIFE_SHORT] = {
528 [WRITE_LIFE_SHORT] = XFS_ZONE_ALLOC_GOOD,
529 },
530 [WRITE_LIFE_MEDIUM] = {
531 [WRITE_LIFE_MEDIUM] = XFS_ZONE_ALLOC_GOOD,
532 },
533 [WRITE_LIFE_LONG] = {
534 [WRITE_LIFE_LONG] = XFS_ZONE_ALLOC_OK,
535 [WRITE_LIFE_EXTREME] = XFS_ZONE_ALLOC_OK,
536 },
537 [WRITE_LIFE_EXTREME] = {
538 [WRITE_LIFE_LONG] = XFS_ZONE_ALLOC_OK,
539 [WRITE_LIFE_EXTREME] = XFS_ZONE_ALLOC_OK,
540 },
541 };
542
543 static bool
544 xfs_try_use_zone(
545 struct xfs_zone_info *zi,
546 enum rw_hint file_hint,
547 struct xfs_open_zone *oz,
548 unsigned int goodness)
549 {
550 if (oz->oz_allocated == rtg_blocks(oz->oz_rtg))
551 return false;
552
553 if (xfs_zoned_hint_score[oz->oz_write_hint][file_hint] < goodness)
554 return false;
555
556 if (!atomic_inc_not_zero(&oz->oz_ref))
557 return false;
558
559 /*
560 * If we have a hint set for the data, use that for the zone even if
561 * some data was written already without any hint set, but don't change
562 * the temperature after that as that would make little sense without
563 * tracking per-temperature class written block counts, which is
564 * probably overkill anyway.
565 */
566 if (file_hint != WRITE_LIFE_NOT_SET &&
567 oz->oz_write_hint == WRITE_LIFE_NOT_SET)
568 oz->oz_write_hint = file_hint;
569
570 /*
571 * If we couldn't match by inode or lifetime, we just pick the first
572 * zone with enough space above. For that we want the least busy zone
573 * for some definition of "least" busy. For now this simple LRU
574 * algorithm that rotates every zone to the end of the list will do it,
575 * even if it isn't exactly cache friendly.
576 */
577 if (!list_is_last(&oz->oz_entry, &zi->zi_open_zones))
578 list_move_tail(&oz->oz_entry, &zi->zi_open_zones);
579 return true;
580 }
581
582 static struct xfs_open_zone *
583 xfs_select_open_zone_lru(
584 struct xfs_zone_info *zi,
585 enum rw_hint file_hint,
586 unsigned int goodness)
587 {
588 struct xfs_open_zone *oz;
589
590 lockdep_assert_held(&zi->zi_open_zones_lock);
591
592 list_for_each_entry(oz, &zi->zi_open_zones, oz_entry)
593 if (xfs_try_use_zone(zi, file_hint, oz, goodness))
594 return oz;
595
596 cond_resched_lock(&zi->zi_open_zones_lock);
597 return NULL;
598 }
599
600 static struct xfs_open_zone *
601 xfs_select_open_zone_mru(
602 struct xfs_zone_info *zi,
603 enum rw_hint file_hint)
604 {
605 struct xfs_open_zone *oz;
606
607 lockdep_assert_held(&zi->zi_open_zones_lock);
608
609 list_for_each_entry_reverse(oz, &zi->zi_open_zones, oz_entry)
610 if (xfs_try_use_zone(zi, file_hint, oz, false))
611 return oz;
612
613 cond_resched_lock(&zi->zi_open_zones_lock);
614 return NULL;
615 }
616
617 static inline enum rw_hint xfs_inode_write_hint(struct xfs_inode *ip)
618 {
619 if (xfs_has_nolifetime(ip->i_mount))
620 return WRITE_LIFE_NOT_SET;
621 return VFS_I(ip)->i_write_hint;
622 }
623
624 /*
625 * Try to tightly pack small files that are written back after they were closed
626 * instead of trying to open new zones for them or spread them to the least
627 * recently used zone. This optimizes the data layout for workloads that untar
628 * or copy a lot of small files. Right now this does not separate multiple such
629 * streams.
630 */
631 static inline bool xfs_zoned_pack_tight(struct xfs_inode *ip)
632 {
633 struct xfs_mount *mp = ip->i_mount;
634 size_t zone_capacity =
635 XFS_FSB_TO_B(mp, mp->m_groups[XG_TYPE_RTG].blocks);
636
637 /*
638 * Do not pack writes to files that already use a full zone, to avoid
639 * fragmentation.
640 */
641 if (i_size_read(VFS_I(ip)) >= zone_capacity)
642 return false;
643
644 return !inode_is_open_for_write(VFS_I(ip)) &&
645 !(ip->i_diflags & XFS_DIFLAG_APPEND);
646 }
647
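/*
 * Pick an open zone for a new allocation without blocking, preferring zones
 * whose write lifetime hint matches the data being written.
 */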
648 static struct xfs_open_zone *
649 xfs_select_zone_nowait(
650 struct xfs_mount *mp,
651 enum rw_hint write_hint,
652 bool pack_tight)
653 {
654 struct xfs_zone_info *zi = mp->m_zone_info;
655 struct xfs_open_zone *oz = NULL;
656
657 if (xfs_is_shutdown(mp))
658 return NULL;
659
660 /*
661 * Try to fill up open zones with matching temperature if available. It
662 * is better to try to co-locate data when this is favorable, so we can
663 * activate empty zones when it is statistically better to separate
664 * data.
665 */
666 spin_lock(&zi->zi_open_zones_lock);
667 oz = xfs_select_open_zone_lru(zi, write_hint, XFS_ZONE_ALLOC_GOOD);
668 if (oz)
669 goto out_unlock;
670
671 if (pack_tight)
672 oz = xfs_select_open_zone_mru(zi, write_hint);
673 if (oz)
674 goto out_unlock;
675
676 /*
677 * See if we can open a new zone and use that so that data for different
678 * files is mixed as little as possible.
679 */
680 oz = xfs_try_open_zone(mp, write_hint);
681 if (oz)
682 goto out_unlock;
683
684 /*
685 * Try to find a zone that is an OK match to co-locate data with.
686 */
687 oz = xfs_select_open_zone_lru(zi, write_hint, XFS_ZONE_ALLOC_OK);
688 if (oz)
689 goto out_unlock;
690
691 /*
692 * Pick the least recently used zone, regardless of hint match
693 */
694 oz = xfs_select_open_zone_lru(zi, write_hint, XFS_ZONE_ALLOC_ANY);
695 out_unlock:
696 spin_unlock(&zi->zi_open_zones_lock);
697 return oz;
698 }
699
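/*
 * Like xfs_select_zone_nowait(), but wait for an open zone to become
 * available if none can be found right now, unless the file system is
 * shut down.
 */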
700 static struct xfs_open_zone *
701 xfs_select_zone(
702 struct xfs_mount *mp,
703 enum rw_hint write_hint,
704 bool pack_tight)
705 {
706 struct xfs_zone_info *zi = mp->m_zone_info;
707 DEFINE_WAIT(wait);
708 struct xfs_open_zone *oz;
709
710 oz = xfs_select_zone_nowait(mp, write_hint, pack_tight);
711 if (oz)
712 return oz;
713
714 for (;;) {
715 prepare_to_wait(&zi->zi_zone_wait, &wait, TASK_UNINTERRUPTIBLE);
716 oz = xfs_select_zone_nowait(mp, write_hint, pack_tight);
717 if (oz || xfs_is_shutdown(mp))
718 break;
719 schedule();
720 }
721 finish_wait(&zi->zi_zone_wait, &wait);
722 return oz;
723 }
724
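/*
 * Allocate up to @count_fsb blocks from the unallocated space in @oz and
 * return the allocated byte count. For conventional zones @sector points to
 * the exact start of the allocation, while for sequential write required
 * zones it is the zone start and the device picks the actual location when
 * executing the zone append command.
 */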
725 static unsigned int
726 xfs_zone_alloc_blocks(
727 struct xfs_open_zone *oz,
728 xfs_filblks_t count_fsb,
729 sector_t *sector,
730 bool *is_seq)
731 {
732 struct xfs_rtgroup *rtg = oz->oz_rtg;
733 struct xfs_mount *mp = rtg_mount(rtg);
734 xfs_rgblock_t allocated;
735
736 spin_lock(&oz->oz_alloc_lock);
737 count_fsb = min3(count_fsb, XFS_MAX_BMBT_EXTLEN,
738 (xfs_filblks_t)rtg_blocks(rtg) - oz->oz_allocated);
739 if (!count_fsb) {
740 spin_unlock(&oz->oz_alloc_lock);
741 return 0;
742 }
743 allocated = oz->oz_allocated;
744 oz->oz_allocated += count_fsb;
745 spin_unlock(&oz->oz_alloc_lock);
746
747 trace_xfs_zone_alloc_blocks(oz, allocated, count_fsb);
748
749 *sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0);
750 *is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *sector);
751 if (!*is_seq)
752 *sector += XFS_FSB_TO_BB(mp, allocated);
753 return XFS_FSB_TO_B(mp, count_fsb);
754 }
755
756 void
757 xfs_mark_rtg_boundary(
758 struct iomap_ioend *ioend)
759 {
760 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
761 sector_t sector = ioend->io_bio.bi_iter.bi_sector;
762
763 if (xfs_rtb_to_rgbno(mp, xfs_daddr_to_rtb(mp, sector)) == 0)
764 ioend->io_flags |= IOMAP_IOEND_BOUNDARY;
765 }
766
767 /*
768 * Check if we have a cached last open zone available for the inode and
769 * if so, return a reference to it.
770 */
771 static struct xfs_open_zone *
772 xfs_get_cached_zone(
773 struct xfs_inode *ip)
774 {
775 struct xfs_open_zone *oz;
776
777 rcu_read_lock();
778 oz = VFS_I(ip)->i_private;
779 if (oz) {
780 /*
781 * GC only steals open zones at mount time, so no GC zones
782 * should end up in the cache.
783 */
784 ASSERT(!oz->oz_is_gc);
785 if (!atomic_inc_not_zero(&oz->oz_ref))
786 oz = NULL;
787 }
788 rcu_read_unlock();
789
790 return oz;
791 }
792
793 /*
794 * Stash our zone in the inode so that it is reused for future allocations.
795 *
796 * The open_zone structure will be pinned until either the inode is freed or
797 * until the cached open zone is replaced with a different one because the
798 * current one was full when we tried to use it. This means we keep any
799 * open zone around forever as long as any inode that used it for the last
800 * write is cached, which slightly increases the memory use of cached inodes
801 * that were ever written to, but significantly simplifies the cached zone
802 * lookup. Because the open_zone is clearly marked as full when all data
803 * in the underlying RTG was written, the caching is always safe.
804 */
805 static void
806 xfs_set_cached_zone(
807 struct xfs_inode *ip,
808 struct xfs_open_zone *oz)
809 {
810 struct xfs_open_zone *old_oz;
811
812 atomic_inc(&oz->oz_ref);
813 old_oz = xchg(&VFS_I(ip)->i_private, oz);
814 if (old_oz)
815 xfs_open_zone_put(old_oz);
816 }
817
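/*
 * Send a zoned write bio on its way: stash the open zone for the I/O
 * completion handler and switch the operation to a zone append for
 * sequential write required zones.
 */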
818 static void
819 xfs_submit_zoned_bio(
820 struct iomap_ioend *ioend,
821 struct xfs_open_zone *oz,
822 bool is_seq)
823 {
824 ioend->io_bio.bi_iter.bi_sector = ioend->io_sector;
825 ioend->io_private = oz;
826 atomic_inc(&oz->oz_ref); /* for xfs_zoned_end_io */
827
828 if (is_seq) {
829 ioend->io_bio.bi_opf &= ~REQ_OP_WRITE;
830 ioend->io_bio.bi_opf |= REQ_OP_ZONE_APPEND;
831 } else {
832 xfs_mark_rtg_boundary(ioend);
833 }
834
835 submit_bio(&ioend->io_bio);
836 }
837
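/*
 * Allocate zone space for an ioend and submit it, splitting the ioend into
 * multiple bios if the space allocated from a single open zone does not
 * cover the entire ioend.
 */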
838 void
839 xfs_zone_alloc_and_submit(
840 struct iomap_ioend *ioend,
841 struct xfs_open_zone **oz)
842 {
843 struct xfs_inode *ip = XFS_I(ioend->io_inode);
844 struct xfs_mount *mp = ip->i_mount;
845 enum rw_hint write_hint = xfs_inode_write_hint(ip);
846 bool pack_tight = xfs_zoned_pack_tight(ip);
847 unsigned int alloc_len;
848 struct iomap_ioend *split;
849 bool is_seq;
850
851 if (xfs_is_shutdown(mp))
852 goto out_error;
853
854 /*
855 * If we don't have a locally cached zone in this write context, see if
856 * the inode is still associated with a zone and use that if so.
857 */
858 if (!*oz)
859 *oz = xfs_get_cached_zone(ip);
860
861 if (!*oz) {
862 select_zone:
863 *oz = xfs_select_zone(mp, write_hint, pack_tight);
864 if (!*oz)
865 goto out_error;
866 xfs_set_cached_zone(ip, *oz);
867 }
868
869 alloc_len = xfs_zone_alloc_blocks(*oz, XFS_B_TO_FSB(mp, ioend->io_size),
870 &ioend->io_sector, &is_seq);
871 if (!alloc_len) {
872 xfs_open_zone_put(*oz);
873 goto select_zone;
874 }
875
876 while ((split = iomap_split_ioend(ioend, alloc_len, is_seq))) {
877 if (IS_ERR(split))
878 goto out_split_error;
879 alloc_len -= split->io_bio.bi_iter.bi_size;
880 xfs_submit_zoned_bio(split, *oz, is_seq);
881 if (!alloc_len) {
882 xfs_open_zone_put(*oz);
883 goto select_zone;
884 }
885 }
886
887 xfs_submit_zoned_bio(ioend, *oz, is_seq);
888 return;
889
890 out_split_error:
891 ioend->io_bio.bi_status = errno_to_blk_status(PTR_ERR(split));
892 out_error:
893 bio_io_error(&ioend->io_bio);
894 }
895
896 /*
897 * Wake up all threads waiting for a zoned space allocation when the file system
898 * is shut down.
899 */
900 void
901 xfs_zoned_wake_all(
902 struct xfs_mount *mp)
903 {
904 /*
905 * Don't wake up if there is no m_zone_info. This is complicated by the
906 * fact that unmount can't atomically clear m_zone_info and thus we need
907 * to check SB_ACTIVE for that, but mount temporarily enables SB_ACTIVE
908 * during log recovery so we can't entirely rely on that either.
909 */
910 if ((mp->m_super->s_flags & SB_ACTIVE) && mp->m_zone_info)
911 wake_up_all(&mp->m_zone_info->zi_zone_wait);
912 }
913
914 /*
915 * Check if @rgbno in @rtg is a potentially valid block. It might still be
916 * unused, but that information is only found in the rmap.
917 */
918 bool
919 xfs_zone_rgbno_is_valid(
920 struct xfs_rtgroup *rtg,
921 xfs_rgnumber_t rgbno)
922 {
923 lockdep_assert_held(&rtg_rmap(rtg)->i_lock);
924
925 if (rtg->rtg_open_zone)
926 return rgbno < rtg->rtg_open_zone->oz_allocated;
927 return !xa_get_mark(&rtg_mount(rtg)->m_groups[XG_TYPE_RTG].xa,
928 rtg_rgno(rtg), XFS_RTG_FREE);
929 }
930
931 static void
932 xfs_free_open_zones(
933 struct xfs_zone_info *zi)
934 {
935 struct xfs_open_zone *oz;
936
937 spin_lock(&zi->zi_open_zones_lock);
938 while ((oz = list_first_entry_or_null(&zi->zi_open_zones,
939 struct xfs_open_zone, oz_entry))) {
940 list_del(&oz->oz_entry);
941 xfs_open_zone_put(oz);
942 }
943 spin_unlock(&zi->zi_open_zones_lock);
944
945 /*
946 * Wait for all open zones to be freed so that they drop the group
947 * references:
948 */
949 rcu_barrier();
950 }
951
952 struct xfs_init_zones {
953 struct xfs_mount *mp;
954 uint64_t available;
955 uint64_t reclaimable;
956 };
957
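/*
 * Set up the in-memory state for a single zone at mount time, based on the
 * hardware write pointer (if the zone report provided one) and the used
 * block count recorded in the rmap inode.
 */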
958 static int
959 xfs_init_zone(
960 struct xfs_init_zones *iz,
961 struct xfs_rtgroup *rtg,
962 struct blk_zone *zone)
963 {
964 struct xfs_mount *mp = rtg_mount(rtg);
965 struct xfs_zone_info *zi = mp->m_zone_info;
966 uint32_t used = rtg_rmap(rtg)->i_used_blocks;
967 xfs_rgblock_t write_pointer, highest_rgbno;
968 int error;
969
970 if (zone && !xfs_zone_validate(zone, rtg, &write_pointer))
971 return -EFSCORRUPTED;
972
973 /*
974 * For sequential write required zones we retrieved the hardware write
975 * pointer above.
976 *
977 * For conventional zones or conventional devices we don't have that
978 * luxury. Instead query the rmap to find the highest recorded block
979 * and set the write pointer to the block after that. In case of a
980 * power loss this misses blocks where the data I/O has completed but
981 * not recorded in the rmap yet, and it also rewrites blocks if the most
982 * recently written ones got deleted again before unmount, but this is
983 * the best we can do without hardware support.
984 */
985 if (!zone || zone->cond == BLK_ZONE_COND_NOT_WP) {
986 xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
987 highest_rgbno = xfs_rtrmap_highest_rgbno(rtg);
988 if (highest_rgbno == NULLRGBLOCK)
989 write_pointer = 0;
990 else
991 write_pointer = highest_rgbno + 1;
992 xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
993 }
994
995 /*
996 * If there are no used blocks, but the zone is not in the empty state
997 * yet, we lost power before the zone reset. In that case finish the
998 * work here.
999 */
1000 if (write_pointer == rtg_blocks(rtg) && used == 0) {
1001 error = xfs_zone_gc_reset_sync(rtg);
1002 if (error)
1003 return error;
1004 write_pointer = 0;
1005 }
1006
1007 if (write_pointer == 0) {
1008 /* zone is empty */
1009 atomic_inc(&zi->zi_nr_free_zones);
1010 xfs_group_set_mark(&rtg->rtg_group, XFS_RTG_FREE);
1011 iz->available += rtg_blocks(rtg);
1012 } else if (write_pointer < rtg_blocks(rtg)) {
1013 /* zone is open */
1014 struct xfs_open_zone *oz;
1015
1016 atomic_inc(&rtg_group(rtg)->xg_active_ref);
1017 oz = xfs_init_open_zone(rtg, write_pointer, WRITE_LIFE_NOT_SET,
1018 false);
1019 list_add_tail(&oz->oz_entry, &zi->zi_open_zones);
1020 zi->zi_nr_open_zones++;
1021
1022 iz->available += (rtg_blocks(rtg) - write_pointer);
1023 iz->reclaimable += write_pointer - used;
1024 } else if (used < rtg_blocks(rtg)) {
1025 /* zone fully written, but has freed blocks */
1026 xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
1027 iz->reclaimable += (rtg_blocks(rtg) - used);
1028 }
1029
1030 return 0;
1031 }
1032
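/*
 * blkdev_report_zones() callback: map the reported zone back to its
 * realtime group and initialize the zone from it.
 */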
1033 static int
1034 xfs_get_zone_info_cb(
1035 struct blk_zone *zone,
1036 unsigned int idx,
1037 void *data)
1038 {
1039 struct xfs_init_zones *iz = data;
1040 struct xfs_mount *mp = iz->mp;
1041 xfs_fsblock_t zsbno = xfs_daddr_to_rtb(mp, zone->start);
1042 xfs_rgnumber_t rgno;
1043 struct xfs_rtgroup *rtg;
1044 int error;
1045
1046 if (xfs_rtb_to_rgbno(mp, zsbno) != 0) {
1047 xfs_warn(mp, "mismatched zone start 0x%llx.", zsbno);
1048 return -EFSCORRUPTED;
1049 }
1050
1051 rgno = xfs_rtb_to_rgno(mp, zsbno);
1052 rtg = xfs_rtgroup_grab(mp, rgno);
1053 if (!rtg) {
1054 xfs_warn(mp, "realtime group not found for zone %u.", rgno);
1055 return -EFSCORRUPTED;
1056 }
1057 error = xfs_init_zone(iz, rtg, zone);
1058 xfs_rtgroup_rele(rtg);
1059 return error;
1060 }
1061
1062 /*
1063 * Calculate the max open zone limit based on the number of backing zones
1064 * available.
1065 */
1066 static inline uint32_t
1067 xfs_max_open_zones(
1068 struct xfs_mount *mp)
1069 {
1070 unsigned int max_open, max_open_data_zones;
1071
1072 /*
1073 * We need two zones for every open data zone, one in reserve as we
1074 * don't reclaim open zones. One data zone and its spare is included
1075 * in XFS_MIN_ZONES to support at least one user data writer.
1076 */
1077 max_open_data_zones = (mp->m_sb.sb_rgcount - XFS_MIN_ZONES) / 2 + 1;
1078 max_open = max_open_data_zones + XFS_OPEN_GC_ZONES;
1079
1080 /*
1081 * Cap the max open limit to 1/4 of the available zones. Without this we'd
1082 * run out of easy reclaim targets too quickly and storage devices don't
1083 * handle huge numbers of concurrent write streams overly well.
1084 */
1085 max_open = min(max_open, mp->m_sb.sb_rgcount / 4);
1086
1087 return max(XFS_MIN_OPEN_ZONES, max_open);
1088 }
1089
1090 /*
1091 * Normally we use the open zone limit that the device reports. If there is
1092 * none let the user pick one from the command line.
1093 *
1094 * If the device doesn't report an open zone limit and there is no override,
1095 * allow holding about a quarter of the zones open. In theory we could allow
1096 * all to be open, but at that point we run into GC deadlocks because we can't
1097 * reclaim open zones.
1098 *
1099 * When used on conventional SSDs a lower open limit is advisable as we'll
1100 * otherwise overwhelm the FTL just as much as a conventional block allocator.
1101 *
1102 * Note: To debug the open zone management code, force max_open to 1 here.
1103 */
1104 static int
1105 xfs_calc_open_zones(
1106 struct xfs_mount *mp)
1107 {
1108 struct block_device *bdev = mp->m_rtdev_targp->bt_bdev;
1109 unsigned int bdev_open_zones = bdev_max_open_zones(bdev);
1110
1111 if (!mp->m_max_open_zones) {
1112 if (bdev_open_zones)
1113 mp->m_max_open_zones = bdev_open_zones;
1114 else
1115 mp->m_max_open_zones = XFS_DEFAULT_MAX_OPEN_ZONES;
1116 }
1117
1118 if (mp->m_max_open_zones < XFS_MIN_OPEN_ZONES) {
1119 xfs_notice(mp, "need at least %u open zones.",
1120 XFS_MIN_OPEN_ZONES);
1121 return -EIO;
1122 }
1123
1124 if (bdev_open_zones && bdev_open_zones < mp->m_max_open_zones) {
1125 mp->m_max_open_zones = bdev_open_zones;
1126 xfs_info(mp, "limiting open zones to %u due to hardware limit.",
1127 bdev_open_zones);
1128 }
1129
1130 if (mp->m_max_open_zones > xfs_max_open_zones(mp)) {
1131 mp->m_max_open_zones = xfs_max_open_zones(mp);
1132 xfs_info(mp,
1133 "limiting open zones to %u due to total zone count (%u)",
1134 mp->m_max_open_zones, mp->m_sb.sb_rgcount);
1135 }
1136
1137 return 0;
1138 }
1139
1140 static unsigned long *
1141 xfs_alloc_bucket_bitmap(
1142 struct xfs_mount *mp)
1143 {
1144 return kvmalloc_array(BITS_TO_LONGS(mp->m_sb.sb_rgcount),
1145 sizeof(unsigned long), GFP_KERNEL | __GFP_ZERO);
1146 }
1147
1148 static struct xfs_zone_info *
1149 xfs_alloc_zone_info(
1150 struct xfs_mount *mp)
1151 {
1152 struct xfs_zone_info *zi;
1153 int i;
1154
1155 zi = kzalloc(sizeof(*zi), GFP_KERNEL);
1156 if (!zi)
1157 return NULL;
1158 INIT_LIST_HEAD(&zi->zi_open_zones);
1159 INIT_LIST_HEAD(&zi->zi_reclaim_reservations);
1160 spin_lock_init(&zi->zi_reset_list_lock);
1161 spin_lock_init(&zi->zi_open_zones_lock);
1162 spin_lock_init(&zi->zi_reservation_lock);
1163 init_waitqueue_head(&zi->zi_zone_wait);
1164 spin_lock_init(&zi->zi_used_buckets_lock);
1165 for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++) {
1166 zi->zi_used_bucket_bitmap[i] = xfs_alloc_bucket_bitmap(mp);
1167 if (!zi->zi_used_bucket_bitmap[i])
1168 goto out_free_bitmaps;
1169 }
1170 return zi;
1171
1172 out_free_bitmaps:
1173 while (--i >= 0)
1174 kvfree(zi->zi_used_bucket_bitmap[i]);
1175 kfree(zi);
1176 return NULL;
1177 }
1178
1179 static void
1180 xfs_free_zone_info(
1181 struct xfs_zone_info *zi)
1182 {
1183 int i;
1184
1185 xfs_free_open_zones(zi);
1186 for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++)
1187 kvfree(zi->zi_used_bucket_bitmap[i]);
1188 kfree(zi);
1189 }
1190
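/*
 * Mount time setup of the zone allocator: validate the on-disk geometry,
 * size the open zone limit, build the zone_info structure, populate the
 * per-zone state from the zone report (or the rmap for conventional
 * devices), and start the garbage collection thread.
 */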
1191 int
1192 xfs_mount_zones(
1193 struct xfs_mount *mp)
1194 {
1195 struct xfs_init_zones iz = {
1196 .mp = mp,
1197 };
1198 struct xfs_buftarg *bt = mp->m_rtdev_targp;
1199 int error;
1200
1201 if (!bt) {
1202 xfs_notice(mp, "RT device missing.");
1203 return -EINVAL;
1204 }
1205
1206 if (!xfs_has_rtgroups(mp) || !xfs_has_rmapbt(mp)) {
1207 xfs_notice(mp, "invalid flag combination.");
1208 return -EFSCORRUPTED;
1209 }
1210 if (mp->m_sb.sb_rextsize != 1) {
1211 xfs_notice(mp, "zoned file systems do not support rextsize.");
1212 return -EFSCORRUPTED;
1213 }
1214 if (mp->m_sb.sb_rgcount < XFS_MIN_ZONES) {
1215 xfs_notice(mp,
1216 "zoned file systems need to have at least %u zones.", XFS_MIN_ZONES);
1217 return -EFSCORRUPTED;
1218 }
1219
1220 error = xfs_calc_open_zones(mp);
1221 if (error)
1222 return error;
1223
1224 mp->m_zone_info = xfs_alloc_zone_info(mp);
1225 if (!mp->m_zone_info)
1226 return -ENOMEM;
1227
1228 xfs_info(mp, "%u zones of %u blocks (%u max open zones)",
1229 mp->m_sb.sb_rgcount, mp->m_groups[XG_TYPE_RTG].blocks,
1230 mp->m_max_open_zones);
1231 trace_xfs_zones_mount(mp);
1232
1233 if (bdev_is_zoned(bt->bt_bdev)) {
1234 error = blkdev_report_zones(bt->bt_bdev,
1235 XFS_FSB_TO_BB(mp, mp->m_sb.sb_rtstart),
1236 mp->m_sb.sb_rgcount, xfs_get_zone_info_cb, &iz);
1237 if (error < 0)
1238 goto out_free_zone_info;
1239 } else {
1240 struct xfs_rtgroup *rtg = NULL;
1241
1242 while ((rtg = xfs_rtgroup_next(mp, rtg))) {
1243 error = xfs_init_zone(&iz, rtg, NULL);
1244 if (error)
1245 goto out_free_zone_info;
1246 }
1247 }
1248
1249 xfs_set_freecounter(mp, XC_FREE_RTAVAILABLE, iz.available);
1250 xfs_set_freecounter(mp, XC_FREE_RTEXTENTS,
1251 iz.available + iz.reclaimable);
1252
1253 /*
1254 * The user may configure GC to free up a percentage of unused blocks.
1255 * By default this is 0. GC will always trigger at the minimum level
1256 * for keeping max_open_zones available for data placement.
1257 */
1258 mp->m_zonegc_low_space = 0;
1259
1260 error = xfs_zone_gc_mount(mp);
1261 if (error)
1262 goto out_free_zone_info;
1263 return 0;
1264
1265 out_free_zone_info:
1266 xfs_free_zone_info(mp->m_zone_info);
1267 return error;
1268 }
1269
1270 void
1271 xfs_unmount_zones(
1272 struct xfs_mount *mp)
1273 {
1274 xfs_zone_gc_unmount(mp);
1275 xfs_free_zone_info(mp->m_zone_info);
1276 }
1277