1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2023-2025 Christoph Hellwig.
4 * Copyright (c) 2024-2025, Western Digital Corporation or its affiliates.
5 */
6 #include "xfs.h"
7 #include "xfs_shared.h"
8 #include "xfs_format.h"
9 #include "xfs_log_format.h"
10 #include "xfs_error.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_iomap.h"
15 #include "xfs_trans.h"
16 #include "xfs_alloc.h"
17 #include "xfs_bmap.h"
18 #include "xfs_bmap_btree.h"
19 #include "xfs_trans_space.h"
20 #include "xfs_refcount.h"
21 #include "xfs_rtbitmap.h"
22 #include "xfs_rtrmap_btree.h"
23 #include "xfs_zone_alloc.h"
24 #include "xfs_zone_priv.h"
25 #include "xfs_zones.h"
26 #include "xfs_trace.h"
27 #include "xfs_mru_cache.h"
28
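/*
 * Drop a reference to an open zone. The zone structure is freed and its
 * rtgroup reference released once the last reference goes away.
 */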
void
xfs_open_zone_put(
	struct xfs_open_zone	*oz)
{
33 if (atomic_dec_and_test(&oz->oz_ref)) {
34 xfs_rtgroup_rele(oz->oz_rtg);
35 kfree(oz);
36 }
37 }
38
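/*
 * Map a used block count to one of the XFS_ZONE_USED_BUCKETS used-space
 * buckets.
 */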
static inline uint32_t
xfs_zone_bucket(
	struct xfs_mount	*mp,
	uint32_t		used_blocks)
{
44 return XFS_ZONE_USED_BUCKETS * used_blocks /
45 mp->m_groups[XG_TYPE_RTG].blocks;
46 }
47
static inline void
xfs_zone_add_to_bucket(
	struct xfs_zone_info	*zi,
	xfs_rgnumber_t		rgno,
	uint32_t		to_bucket)
{
54 __set_bit(rgno, zi->zi_used_bucket_bitmap[to_bucket]);
55 zi->zi_used_bucket_entries[to_bucket]++;
56 }
57
static inline void
xfs_zone_remove_from_bucket(
	struct xfs_zone_info	*zi,
	xfs_rgnumber_t		rgno,
	uint32_t		from_bucket)
{
64 __clear_bit(rgno, zi->zi_used_bucket_bitmap[from_bucket]);
65 zi->zi_used_bucket_entries[from_bucket]--;
66 }
67
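/*
 * Account @freed blocks that are no longer used in @rtg: move the zone to the
 * used-space bucket matching its new usage, queue it for a zone reset once it
 * is completely empty, and wake the GC thread if there is new work for it.
 */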
static void
xfs_zone_account_reclaimable(
	struct xfs_rtgroup	*rtg,
	uint32_t		freed)
{
73 struct xfs_group *xg = &rtg->rtg_group;
74 struct xfs_mount *mp = rtg_mount(rtg);
75 struct xfs_zone_info *zi = mp->m_zone_info;
76 uint32_t used = rtg_rmap(rtg)->i_used_blocks;
77 xfs_rgnumber_t rgno = rtg_rgno(rtg);
78 uint32_t from_bucket = xfs_zone_bucket(mp, used + freed);
79 uint32_t to_bucket = xfs_zone_bucket(mp, used);
80 bool was_full = (used + freed == rtg_blocks(rtg));
81
	/*
	 * This can be called from log recovery, where the zone_info structure
	 * hasn't been allocated yet. Skip all work as xfs_mount_zones will
	 * add the zones to the right buckets before the file system becomes
	 * active.
	 */
88 if (!zi)
89 return;
90
91 if (!used) {
92 /*
93 * The zone is now empty, remove it from the bottom bucket and
94 * trigger a reset.
95 */
96 trace_xfs_zone_emptied(rtg);
97
98 if (!was_full)
99 xfs_group_clear_mark(xg, XFS_RTG_RECLAIMABLE);
100
101 spin_lock(&zi->zi_used_buckets_lock);
102 if (!was_full)
103 xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
104 spin_unlock(&zi->zi_used_buckets_lock);
105
106 spin_lock(&zi->zi_reset_list_lock);
107 xg->xg_next_reset = zi->zi_reset_list;
108 zi->zi_reset_list = xg;
109 spin_unlock(&zi->zi_reset_list_lock);
110
111 if (zi->zi_gc_thread)
112 wake_up_process(zi->zi_gc_thread);
113 } else if (was_full) {
		/*
		 * The zone transitioned from full, mark it as reclaimable
		 * and wake up GC which might be waiting for zones to reclaim.
		 */
118 spin_lock(&zi->zi_used_buckets_lock);
119 xfs_zone_add_to_bucket(zi, rgno, to_bucket);
120 spin_unlock(&zi->zi_used_buckets_lock);
121
122 xfs_group_set_mark(xg, XFS_RTG_RECLAIMABLE);
123 if (zi->zi_gc_thread && xfs_zoned_need_gc(mp))
124 wake_up_process(zi->zi_gc_thread);
125 } else if (to_bucket != from_bucket) {
126 /*
127 * Move the zone to a new bucket if it dropped below the
128 * threshold.
129 */
130 spin_lock(&zi->zi_used_buckets_lock);
131 xfs_zone_add_to_bucket(zi, rgno, to_bucket);
132 xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
133 spin_unlock(&zi->zi_used_buckets_lock);
134 }
135 }
136
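/*
 * All blocks in the zone backing @oz have been written. Remove it from the
 * open zone list (or the GC slot), drop the list reference, and account any
 * blocks that were freed again in the meantime as reclaimable.
 */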
static void
xfs_open_zone_mark_full(
	struct xfs_open_zone	*oz)
{
141 struct xfs_rtgroup *rtg = oz->oz_rtg;
142 struct xfs_mount *mp = rtg_mount(rtg);
143 struct xfs_zone_info *zi = mp->m_zone_info;
144 uint32_t used = rtg_rmap(rtg)->i_used_blocks;
145
146 trace_xfs_zone_full(rtg);
147
148 WRITE_ONCE(rtg->rtg_open_zone, NULL);
149
150 spin_lock(&zi->zi_open_zones_lock);
151 if (oz->oz_is_gc) {
152 ASSERT(current == zi->zi_gc_thread);
153 zi->zi_open_gc_zone = NULL;
154 } else {
155 zi->zi_nr_open_zones--;
156 list_del_init(&oz->oz_entry);
157 }
158 spin_unlock(&zi->zi_open_zones_lock);
159 xfs_open_zone_put(oz);
160
161 wake_up_all(&zi->zi_zone_wait);
162 if (used < rtg_blocks(rtg))
163 xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
164 }
165
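/*
 * Record blocks written at @fsbno for @oz: bump the used block count in the
 * zone rmap inode and the per-zone written counter, and mark the zone as full
 * once every block in it has been written.
 */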
static void
xfs_zone_record_blocks(
	struct xfs_trans	*tp,
	struct xfs_open_zone	*oz,
	xfs_fsblock_t		fsbno,
	xfs_filblks_t		len)
{
173 struct xfs_mount *mp = tp->t_mountp;
174 struct xfs_rtgroup *rtg = oz->oz_rtg;
175 struct xfs_inode *rmapip = rtg_rmap(rtg);
176
177 trace_xfs_zone_record_blocks(oz, xfs_rtb_to_rgbno(mp, fsbno), len);
178
179 xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
180 xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);
181 rmapip->i_used_blocks += len;
182 ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg));
183 oz->oz_written += len;
184 if (oz->oz_written == rtg_blocks(rtg))
185 xfs_open_zone_mark_full(oz);
186 xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
187 }
188
189 /*
190 * Called for blocks that have been written to disk, but not actually linked to
191 * an inode, which can happen when garbage collection races with user data
192 * writes to a file.
193 */
static void
xfs_zone_skip_blocks(
	struct xfs_open_zone	*oz,
	xfs_filblks_t		len)
{
199 struct xfs_rtgroup *rtg = oz->oz_rtg;
200
201 trace_xfs_zone_skip_blocks(oz, 0, len);
202
203 xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
204 oz->oz_written += len;
205 if (oz->oz_written == rtg_blocks(rtg))
206 xfs_open_zone_mark_full(oz);
207 xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
208
209 xfs_add_frextents(rtg_mount(rtg), len);
210 }
211
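/*
 * Map the newly written blocks described by @new into the data fork of @ip.
 * For garbage collection writes, @old_startblock is the block the data was
 * moved from; if the data fork no longer points to it, a racing write won and
 * the new blocks are only accounted as skipped.
 */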
static int
xfs_zoned_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*new,
	struct xfs_open_zone	*oz,
	xfs_fsblock_t		old_startblock)
{
220 struct xfs_bmbt_irec data;
221 int nmaps = 1;
222 int error;
223
224 /* Grab the corresponding mapping in the data fork. */
225 error = xfs_bmapi_read(ip, new->br_startoff, new->br_blockcount, &data,
226 &nmaps, 0);
227 if (error)
228 return error;
229
230 /*
231 * Cap the update to the existing extent in the data fork because we can
232 * only overwrite one extent at a time.
233 */
234 ASSERT(new->br_blockcount >= data.br_blockcount);
235 new->br_blockcount = data.br_blockcount;
236
237 /*
238 * If a data write raced with this GC write, keep the existing data in
239 * the data fork, mark our newly written GC extent as reclaimable, then
240 * move on to the next extent.
241 */
242 if (old_startblock != NULLFSBLOCK &&
243 old_startblock != data.br_startblock)
244 goto skip;
245
246 trace_xfs_reflink_cow_remap_from(ip, new);
247 trace_xfs_reflink_cow_remap_to(ip, &data);
248
249 error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
250 XFS_IEXT_REFLINK_END_COW_CNT);
251 if (error)
252 return error;
253
254 if (data.br_startblock != HOLESTARTBLOCK) {
255 ASSERT(data.br_startblock != DELAYSTARTBLOCK);
256 ASSERT(!isnullstartblock(data.br_startblock));
257
258 xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &data);
259 if (xfs_is_reflink_inode(ip)) {
260 xfs_refcount_decrease_extent(tp, true, &data);
261 } else {
262 error = xfs_free_extent_later(tp, data.br_startblock,
263 data.br_blockcount, NULL,
264 XFS_AG_RESV_NONE,
265 XFS_FREE_EXTENT_REALTIME);
266 if (error)
267 return error;
268 }
269 }
270
271 xfs_zone_record_blocks(tp, oz, new->br_startblock, new->br_blockcount);
272
273 /* Map the new blocks into the data fork. */
274 xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, new);
275 return 0;
276
277 skip:
278 trace_xfs_reflink_cow_remap_skip(ip, new);
279 xfs_zone_skip_blocks(oz, new->br_blockcount);
280 return 0;
281 }
282
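/*
 * I/O completion handler for zoned writes: remap the blocks written at @daddr
 * into the data fork of @ip, using one transaction per existing data fork
 * extent that gets overwritten.
 */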
int
xfs_zoned_end_io(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count,
	xfs_daddr_t		daddr,
	struct xfs_open_zone	*oz,
	xfs_fsblock_t		old_startblock)
{
292 struct xfs_mount *mp = ip->i_mount;
293 xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count);
294 struct xfs_bmbt_irec new = {
295 .br_startoff = XFS_B_TO_FSBT(mp, offset),
296 .br_startblock = xfs_daddr_to_rtb(mp, daddr),
297 .br_state = XFS_EXT_NORM,
298 };
299 unsigned int resblks =
300 XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
301 struct xfs_trans *tp;
302 int error;
303
304 if (xfs_is_shutdown(mp))
305 return -EIO;
306
307 while (new.br_startoff < end_fsb) {
308 new.br_blockcount = end_fsb - new.br_startoff;
309
310 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
311 XFS_TRANS_RESERVE | XFS_TRANS_RES_FDBLKS, &tp);
312 if (error)
313 return error;
314 xfs_ilock(ip, XFS_ILOCK_EXCL);
315 xfs_trans_ijoin(tp, ip, 0);
316
317 error = xfs_zoned_map_extent(tp, ip, &new, oz, old_startblock);
318 if (error)
319 xfs_trans_cancel(tp);
320 else
321 error = xfs_trans_commit(tp);
322 xfs_iunlock(ip, XFS_ILOCK_EXCL);
323 if (error)
324 return error;
325
326 new.br_startoff += new.br_blockcount;
327 new.br_startblock += new.br_blockcount;
328 if (old_startblock != NULLFSBLOCK)
329 old_startblock += new.br_blockcount;
330 }
331
332 return 0;
333 }
334
335 /*
336 * "Free" blocks allocated in a zone.
337 *
338 * Just decrement the used blocks counter and report the space as freed.
339 */
int
xfs_zone_free_blocks(
	struct xfs_trans	*tp,
	struct xfs_rtgroup	*rtg,
	xfs_fsblock_t		fsbno,
	xfs_filblks_t		len)
{
347 struct xfs_mount *mp = tp->t_mountp;
348 struct xfs_inode *rmapip = rtg_rmap(rtg);
349
350 xfs_assert_ilocked(rmapip, XFS_ILOCK_EXCL);
351
352 if (len > rmapip->i_used_blocks) {
353 xfs_err(mp,
354 "trying to free more blocks (%lld) than used counter (%u).",
355 len, rmapip->i_used_blocks);
356 ASSERT(len <= rmapip->i_used_blocks);
357 xfs_rtginode_mark_sick(rtg, XFS_RTGI_RMAP);
358 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
359 return -EFSCORRUPTED;
360 }
361
362 trace_xfs_zone_free_blocks(rtg, xfs_rtb_to_rgbno(mp, fsbno), len);
363
364 rmapip->i_used_blocks -= len;
365 /*
366 * Don't add open zones to the reclaimable buckets. The I/O completion
367 * for writing the last block will take care of accounting for already
368 * unused blocks instead.
369 */
370 if (!READ_ONCE(rtg->rtg_open_zone))
371 xfs_zone_account_reclaimable(rtg, len);
372 xfs_add_frextents(mp, len);
373 xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
374 return 0;
375 }
376
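/*
 * Find a zone marked free in the range of rtgroups between @start and @end,
 * take an active reference to it and clear its free mark.
 */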
static struct xfs_group *
xfs_find_free_zone(
	struct xfs_mount	*mp,
	unsigned long		start,
	unsigned long		end)
{
383 struct xfs_zone_info *zi = mp->m_zone_info;
384 XA_STATE (xas, &mp->m_groups[XG_TYPE_RTG].xa, start);
385 struct xfs_group *xg;
386
387 xas_lock(&xas);
388 xas_for_each_marked(&xas, xg, end, XFS_RTG_FREE)
389 if (atomic_inc_not_zero(&xg->xg_active_ref))
390 goto found;
391 xas_unlock(&xas);
392 return NULL;
393
394 found:
395 xas_clear_mark(&xas, XFS_RTG_FREE);
396 atomic_dec(&zi->zi_nr_free_zones);
397 zi->zi_free_zone_cursor = xg->xg_gno;
398 xas_unlock(&xas);
399 return xg;
400 }
401
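/*
 * Allocate and initialize an open zone structure for @rtg, with the
 * allocation and written cursors starting at @write_pointer.
 */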
static struct xfs_open_zone *
xfs_init_open_zone(
	struct xfs_rtgroup	*rtg,
	xfs_rgblock_t		write_pointer,
	enum rw_hint		write_hint,
	bool			is_gc)
{
409 struct xfs_open_zone *oz;
410
411 oz = kzalloc(sizeof(*oz), GFP_NOFS | __GFP_NOFAIL);
412 spin_lock_init(&oz->oz_alloc_lock);
413 atomic_set(&oz->oz_ref, 1);
414 oz->oz_rtg = rtg;
415 oz->oz_allocated = write_pointer;
416 oz->oz_written = write_pointer;
417 oz->oz_write_hint = write_hint;
418 oz->oz_is_gc = is_gc;
419
420 /*
421 * All dereferences of rtg->rtg_open_zone hold the ILOCK for the rmap
422 * inode, but we don't really want to take that here because we are
423 * under the zone_list_lock. Ensure the pointer is only set for a fully
424 * initialized open zone structure so that a racy lookup finding it is
425 * fine.
426 */
427 WRITE_ONCE(rtg->rtg_open_zone, oz);
428 return oz;
429 }
430
431 /*
432 * Find a completely free zone, open it, and return a reference.
433 */
struct xfs_open_zone *
xfs_open_zone(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint,
	bool			is_gc)
{
440 struct xfs_zone_info *zi = mp->m_zone_info;
441 struct xfs_group *xg;
442
443 xg = xfs_find_free_zone(mp, zi->zi_free_zone_cursor, ULONG_MAX);
444 if (!xg)
445 xg = xfs_find_free_zone(mp, 0, zi->zi_free_zone_cursor);
446 if (!xg)
447 return NULL;
448
449 set_current_state(TASK_RUNNING);
450 return xfs_init_open_zone(to_rtg(xg), 0, write_hint, is_gc);
451 }
452
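/*
 * Try to open a new zone for user data, provided the open zone limit and the
 * free zone reserve for GC allow it. Called and returns with
 * zi_open_zones_lock held, but drops the lock while opening the zone.
 */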
static struct xfs_open_zone *
xfs_try_open_zone(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint)
{
458 struct xfs_zone_info *zi = mp->m_zone_info;
459 struct xfs_open_zone *oz;
460
461 if (zi->zi_nr_open_zones >= mp->m_max_open_zones - XFS_OPEN_GC_ZONES)
462 return NULL;
463 if (atomic_read(&zi->zi_nr_free_zones) <
464 XFS_GC_ZONES - XFS_OPEN_GC_ZONES)
465 return NULL;
466
467 /*
468 * Increment the open zone count to reserve our slot before dropping
469 * zi_open_zones_lock.
470 */
471 zi->zi_nr_open_zones++;
472 spin_unlock(&zi->zi_open_zones_lock);
473 oz = xfs_open_zone(mp, write_hint, false);
474 spin_lock(&zi->zi_open_zones_lock);
475 if (!oz) {
476 zi->zi_nr_open_zones--;
477 return NULL;
478 }
479
480 atomic_inc(&oz->oz_ref);
481 list_add_tail(&oz->oz_entry, &zi->zi_open_zones);
482
483 /*
484 * If this was the last free zone, other waiters might be waiting
485 * on us to write to it as well.
486 */
487 wake_up_all(&zi->zi_zone_wait);
488
489 if (xfs_zoned_need_gc(mp))
490 wake_up_process(zi->zi_gc_thread);
491
492 trace_xfs_zone_opened(oz->oz_rtg);
493 return oz;
494 }
495
496 enum xfs_zone_alloc_score {
497 /* Any open zone will do it, we're desperate */
498 XFS_ZONE_ALLOC_ANY = 0,
499
500 /* It better fit somehow */
501 XFS_ZONE_ALLOC_OK = 1,
502
503 /* Only reuse a zone if it fits really well. */
504 XFS_ZONE_ALLOC_GOOD = 2,
505 };
506
507 /*
508 * Life time hint co-location matrix. Fields not set default to 0
509 * aka XFS_ZONE_ALLOC_ANY.
510 */
511 static const unsigned int
512 xfs_zoned_hint_score[WRITE_LIFE_HINT_NR][WRITE_LIFE_HINT_NR] = {
513 [WRITE_LIFE_NOT_SET] = {
514 [WRITE_LIFE_NOT_SET] = XFS_ZONE_ALLOC_OK,
515 },
516 [WRITE_LIFE_NONE] = {
517 [WRITE_LIFE_NONE] = XFS_ZONE_ALLOC_OK,
518 },
519 [WRITE_LIFE_SHORT] = {
520 [WRITE_LIFE_SHORT] = XFS_ZONE_ALLOC_GOOD,
521 },
522 [WRITE_LIFE_MEDIUM] = {
523 [WRITE_LIFE_MEDIUM] = XFS_ZONE_ALLOC_GOOD,
524 },
525 [WRITE_LIFE_LONG] = {
526 [WRITE_LIFE_LONG] = XFS_ZONE_ALLOC_OK,
527 [WRITE_LIFE_EXTREME] = XFS_ZONE_ALLOC_OK,
528 },
529 [WRITE_LIFE_EXTREME] = {
530 [WRITE_LIFE_LONG] = XFS_ZONE_ALLOC_OK,
531 [WRITE_LIFE_EXTREME] = XFS_ZONE_ALLOC_OK,
532 },
533 };
534
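/*
 * Check whether @oz still has free space and matches @file_hint at least as
 * well as @goodness requires, and if so take a reference and move it to the
 * end of the open zone list.
 */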
static bool
xfs_try_use_zone(
	struct xfs_zone_info	*zi,
	enum rw_hint		file_hint,
	struct xfs_open_zone	*oz,
	unsigned int		goodness)
{
542 if (oz->oz_allocated == rtg_blocks(oz->oz_rtg))
543 return false;
544
545 if (xfs_zoned_hint_score[oz->oz_write_hint][file_hint] < goodness)
546 return false;
547
548 if (!atomic_inc_not_zero(&oz->oz_ref))
549 return false;
550
551 /*
552 * If we have a hint set for the data, use that for the zone even if
553 * some data was written already without any hint set, but don't change
554 * the temperature after that as that would make little sense without
555 * tracking per-temperature class written block counts, which is
556 * probably overkill anyway.
557 */
558 if (file_hint != WRITE_LIFE_NOT_SET &&
559 oz->oz_write_hint == WRITE_LIFE_NOT_SET)
560 oz->oz_write_hint = file_hint;
561
562 /*
563 * If we couldn't match by inode or life time we just pick the first
564 * zone with enough space above. For that we want the least busy zone
565 * for some definition of "least" busy. For now this simple LRU
566 * algorithm that rotates every zone to the end of the list will do it,
567 * even if it isn't exactly cache friendly.
568 */
569 if (!list_is_last(&oz->oz_entry, &zi->zi_open_zones))
570 list_move_tail(&oz->oz_entry, &zi->zi_open_zones);
571 return true;
572 }
573
static struct xfs_open_zone *
xfs_select_open_zone_lru(
	struct xfs_zone_info	*zi,
	enum rw_hint		file_hint,
	unsigned int		goodness)
{
580 struct xfs_open_zone *oz;
581
582 lockdep_assert_held(&zi->zi_open_zones_lock);
583
584 list_for_each_entry(oz, &zi->zi_open_zones, oz_entry)
585 if (xfs_try_use_zone(zi, file_hint, oz, goodness))
586 return oz;
587
588 cond_resched_lock(&zi->zi_open_zones_lock);
589 return NULL;
590 }
591
static struct xfs_open_zone *
xfs_select_open_zone_mru(
	struct xfs_zone_info	*zi,
	enum rw_hint		file_hint)
{
597 struct xfs_open_zone *oz;
598
599 lockdep_assert_held(&zi->zi_open_zones_lock);
600
601 list_for_each_entry_reverse(oz, &zi->zi_open_zones, oz_entry)
		if (xfs_try_use_zone(zi, file_hint, oz, XFS_ZONE_ALLOC_ANY))
603 return oz;
604
605 cond_resched_lock(&zi->zi_open_zones_lock);
606 return NULL;
607 }
608
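/*
 * Write lifetime hint used to place data of @ip, unless lifetime hints are
 * disabled for this file system.
 */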
static inline enum rw_hint xfs_inode_write_hint(struct xfs_inode *ip)
610 {
611 if (xfs_has_nolifetime(ip->i_mount))
612 return WRITE_LIFE_NOT_SET;
613 return VFS_I(ip)->i_write_hint;
614 }
615
/*
 * Try to pack inodes that are written back after they were closed tightly
 * together instead of trying to open new zones for them or spreading them to
 * the least recently used zone. This optimizes the data layout for workloads
 * that untar or copy a lot of small files. Right now this does not separate
 * multiple such streams.
 */
static inline bool xfs_zoned_pack_tight(struct xfs_inode *ip)
624 {
625 return !inode_is_open_for_write(VFS_I(ip)) &&
626 !(ip->i_diflags & XFS_DIFLAG_APPEND);
627 }
628
static struct xfs_open_zone *
xfs_select_zone_nowait(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint,
	bool			pack_tight)
{
635 struct xfs_zone_info *zi = mp->m_zone_info;
636 struct xfs_open_zone *oz = NULL;
637
638 if (xfs_is_shutdown(mp))
639 return NULL;
640
641 /*
642 * Try to fill up open zones with matching temperature if available. It
643 * is better to try to co-locate data when this is favorable, so we can
644 * activate empty zones when it is statistically better to separate
645 * data.
646 */
647 spin_lock(&zi->zi_open_zones_lock);
648 oz = xfs_select_open_zone_lru(zi, write_hint, XFS_ZONE_ALLOC_GOOD);
649 if (oz)
650 goto out_unlock;
651
652 if (pack_tight)
653 oz = xfs_select_open_zone_mru(zi, write_hint);
654 if (oz)
655 goto out_unlock;
656
657 /*
658 * See if we can open a new zone and use that so that data for different
659 * files is mixed as little as possible.
660 */
661 oz = xfs_try_open_zone(mp, write_hint);
662 if (oz)
663 goto out_unlock;
664
	/*
	 * Try to find a zone that is an ok match to colocate data with.
	 */
668 oz = xfs_select_open_zone_lru(zi, write_hint, XFS_ZONE_ALLOC_OK);
669 if (oz)
670 goto out_unlock;
671
672 /*
673 * Pick the least recently used zone, regardless of hint match
674 */
675 oz = xfs_select_open_zone_lru(zi, write_hint, XFS_ZONE_ALLOC_ANY);
676 out_unlock:
677 spin_unlock(&zi->zi_open_zones_lock);
678 return oz;
679 }
680
static struct xfs_open_zone *
xfs_select_zone(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint,
	bool			pack_tight)
{
687 struct xfs_zone_info *zi = mp->m_zone_info;
688 DEFINE_WAIT (wait);
689 struct xfs_open_zone *oz;
690
691 oz = xfs_select_zone_nowait(mp, write_hint, pack_tight);
692 if (oz)
693 return oz;
694
695 for (;;) {
696 prepare_to_wait(&zi->zi_zone_wait, &wait, TASK_UNINTERRUPTIBLE);
697 oz = xfs_select_zone_nowait(mp, write_hint, pack_tight);
698 if (oz || xfs_is_shutdown(mp))
699 break;
700 schedule();
701 }
702 finish_wait(&zi->zi_zone_wait, &wait);
703 return oz;
704 }
705
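/*
 * Allocate up to @count_fsb blocks at the current allocation cursor of @oz.
 * Returns the number of allocated bytes, and sets @sector to the start of the
 * I/O and @is_seq to whether it must be issued as a zone append.
 */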
static unsigned int
xfs_zone_alloc_blocks(
	struct xfs_open_zone	*oz,
	xfs_filblks_t		count_fsb,
	sector_t		*sector,
	bool			*is_seq)
{
713 struct xfs_rtgroup *rtg = oz->oz_rtg;
714 struct xfs_mount *mp = rtg_mount(rtg);
715 xfs_rgblock_t allocated;
716
717 spin_lock(&oz->oz_alloc_lock);
718 count_fsb = min3(count_fsb, XFS_MAX_BMBT_EXTLEN,
719 (xfs_filblks_t)rtg_blocks(rtg) - oz->oz_allocated);
720 if (!count_fsb) {
721 spin_unlock(&oz->oz_alloc_lock);
722 return 0;
723 }
724 allocated = oz->oz_allocated;
725 oz->oz_allocated += count_fsb;
726 spin_unlock(&oz->oz_alloc_lock);
727
728 trace_xfs_zone_alloc_blocks(oz, allocated, count_fsb);
729
730 *sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0);
731 *is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *sector);
732 if (!*is_seq)
733 *sector += XFS_FSB_TO_BB(mp, allocated);
734 return XFS_FSB_TO_B(mp, count_fsb);
735 }
736
void
xfs_mark_rtg_boundary(
	struct iomap_ioend	*ioend)
{
741 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
742 sector_t sector = ioend->io_bio.bi_iter.bi_sector;
743
744 if (xfs_rtb_to_rgbno(mp, xfs_daddr_to_rtb(mp, sector)) == 0)
745 ioend->io_flags |= IOMAP_IOEND_BOUNDARY;
746 }
747
748 /*
749 * Cache the last zone written to for an inode so that it is considered first
750 * for subsequent writes.
751 */
752 struct xfs_zone_cache_item {
753 struct xfs_mru_cache_elem mru;
754 struct xfs_open_zone *oz;
755 };
756
static inline struct xfs_zone_cache_item *
xfs_zone_cache_item(struct xfs_mru_cache_elem *mru)
{
760 return container_of(mru, struct xfs_zone_cache_item, mru);
761 }
762
static void
xfs_zone_cache_free_func(
	void			*data,
	struct xfs_mru_cache_elem *mru)
{
768 struct xfs_zone_cache_item *item = xfs_zone_cache_item(mru);
769
770 xfs_open_zone_put(item->oz);
771 kfree(item);
772 }
773
774 /*
775 * Check if we have a cached last open zone available for the inode and
776 * if yes return a reference to it.
777 */
static struct xfs_open_zone *
xfs_cached_zone(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip)
{
783 struct xfs_mru_cache_elem *mru;
784 struct xfs_open_zone *oz;
785
786 mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino);
787 if (!mru)
788 return NULL;
789 oz = xfs_zone_cache_item(mru)->oz;
790 if (oz) {
791 /*
792 * GC only steals open zones at mount time, so no GC zones
793 * should end up in the cache.
794 */
795 ASSERT(!oz->oz_is_gc);
796 ASSERT(atomic_read(&oz->oz_ref) > 0);
797 atomic_inc(&oz->oz_ref);
798 }
799 xfs_mru_cache_done(mp->m_zone_cache);
800 return oz;
801 }
802
803 /*
804 * Update the last used zone cache for a given inode.
805 *
806 * The caller must have a reference on the open zone.
807 */
static void
xfs_zone_cache_create_association(
	struct xfs_inode	*ip,
	struct xfs_open_zone	*oz)
{
813 struct xfs_mount *mp = ip->i_mount;
814 struct xfs_zone_cache_item *item = NULL;
815 struct xfs_mru_cache_elem *mru;
816
817 ASSERT(atomic_read(&oz->oz_ref) > 0);
818 atomic_inc(&oz->oz_ref);
819
820 mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino);
821 if (mru) {
822 /*
823 * If we have an association already, update it to point to the
824 * new zone.
825 */
826 item = xfs_zone_cache_item(mru);
827 xfs_open_zone_put(item->oz);
828 item->oz = oz;
829 xfs_mru_cache_done(mp->m_zone_cache);
830 return;
831 }
832
833 item = kmalloc(sizeof(*item), GFP_KERNEL);
834 if (!item) {
835 xfs_open_zone_put(oz);
836 return;
837 }
838 item->oz = oz;
839 xfs_mru_cache_insert(mp->m_zone_cache, ip->i_ino, &item->mru);
840 }
841
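/*
 * Submit the bio for @ioend, turning it into a zone append operation for
 * sequential write required zones and keeping it as a regular write otherwise.
 */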
static void
xfs_submit_zoned_bio(
	struct iomap_ioend	*ioend,
	struct xfs_open_zone	*oz,
	bool			is_seq)
{
848 ioend->io_bio.bi_iter.bi_sector = ioend->io_sector;
849 ioend->io_private = oz;
850 atomic_inc(&oz->oz_ref); /* for xfs_zoned_end_io */
851
852 if (is_seq) {
853 ioend->io_bio.bi_opf &= ~REQ_OP_WRITE;
854 ioend->io_bio.bi_opf |= REQ_OP_ZONE_APPEND;
855 } else {
856 xfs_mark_rtg_boundary(ioend);
857 }
858
859 submit_bio(&ioend->io_bio);
860 }
861
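/*
 * Allocate space for the zoned write described by @ioend and submit it,
 * splitting the ioend whenever an allocation does not cover all of the
 * remaining data or zone append size limits require it.
 */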
void
xfs_zone_alloc_and_submit(
	struct iomap_ioend	*ioend,
	struct xfs_open_zone	**oz)
{
867 struct xfs_inode *ip = XFS_I(ioend->io_inode);
868 struct xfs_mount *mp = ip->i_mount;
869 enum rw_hint write_hint = xfs_inode_write_hint(ip);
870 bool pack_tight = xfs_zoned_pack_tight(ip);
871 unsigned int alloc_len;
872 struct iomap_ioend *split;
873 bool is_seq;
874
875 if (xfs_is_shutdown(mp))
876 goto out_error;
877
878 /*
879 * If we don't have a locally cached zone in this write context, see if
880 * the inode is still associated with a zone and use that if so.
881 */
882 if (!*oz)
883 *oz = xfs_cached_zone(mp, ip);
884
885 if (!*oz) {
886 select_zone:
887 *oz = xfs_select_zone(mp, write_hint, pack_tight);
888 if (!*oz)
889 goto out_error;
890
891 xfs_zone_cache_create_association(ip, *oz);
892 }
893
894 alloc_len = xfs_zone_alloc_blocks(*oz, XFS_B_TO_FSB(mp, ioend->io_size),
895 &ioend->io_sector, &is_seq);
896 if (!alloc_len) {
897 xfs_open_zone_put(*oz);
898 goto select_zone;
899 }
900
901 while ((split = iomap_split_ioend(ioend, alloc_len, is_seq))) {
902 if (IS_ERR(split))
903 goto out_split_error;
904 alloc_len -= split->io_bio.bi_iter.bi_size;
905 xfs_submit_zoned_bio(split, *oz, is_seq);
906 if (!alloc_len) {
907 xfs_open_zone_put(*oz);
908 goto select_zone;
909 }
910 }
911
912 xfs_submit_zoned_bio(ioend, *oz, is_seq);
913 return;
914
915 out_split_error:
916 ioend->io_bio.bi_status = errno_to_blk_status(PTR_ERR(split));
917 out_error:
918 bio_io_error(&ioend->io_bio);
919 }
920
921 /*
922 * Wake up all threads waiting for a zoned space allocation when the file system
923 * is shut down.
924 */
void
xfs_zoned_wake_all(
	struct xfs_mount	*mp)
{
929 /*
930 * Don't wake up if there is no m_zone_info. This is complicated by the
931 * fact that unmount can't atomically clear m_zone_info and thus we need
932 * to check SB_ACTIVE for that, but mount temporarily enables SB_ACTIVE
933 * during log recovery so we can't entirely rely on that either.
934 */
935 if ((mp->m_super->s_flags & SB_ACTIVE) && mp->m_zone_info)
936 wake_up_all(&mp->m_zone_info->zi_zone_wait);
937 }
938
/*
 * Check if @rgbno in @rtg is a potentially valid block. It might still be
 * unused, but that information is only found in the rmap.
 */
bool
xfs_zone_rgbno_is_valid(
	struct xfs_rtgroup	*rtg,
	xfs_rgnumber_t		rgbno)
{
948 lockdep_assert_held(&rtg_rmap(rtg)->i_lock);
949
950 if (rtg->rtg_open_zone)
951 return rgbno < rtg->rtg_open_zone->oz_allocated;
952 return !xa_get_mark(&rtg_mount(rtg)->m_groups[XG_TYPE_RTG].xa,
953 rtg_rgno(rtg), XFS_RTG_FREE);
954 }
955
static void
xfs_free_open_zones(
	struct xfs_zone_info	*zi)
{
960 struct xfs_open_zone *oz;
961
962 spin_lock(&zi->zi_open_zones_lock);
963 while ((oz = list_first_entry_or_null(&zi->zi_open_zones,
964 struct xfs_open_zone, oz_entry))) {
965 list_del(&oz->oz_entry);
966 xfs_open_zone_put(oz);
967 }
968 spin_unlock(&zi->zi_open_zones_lock);
969 }
970
971 struct xfs_init_zones {
972 struct xfs_mount *mp;
973 uint64_t available;
974 uint64_t reclaimable;
975 };
976
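/*
 * Set up the in-memory state for a single zone at mount time, based on the
 * hardware zone report (if any) and the used block count recorded in the
 * zone rmap inode.
 */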
static int
xfs_init_zone(
	struct xfs_init_zones	*iz,
	struct xfs_rtgroup	*rtg,
	struct blk_zone		*zone)
{
983 struct xfs_mount *mp = rtg_mount(rtg);
984 struct xfs_zone_info *zi = mp->m_zone_info;
985 uint32_t used = rtg_rmap(rtg)->i_used_blocks;
986 xfs_rgblock_t write_pointer, highest_rgbno;
987 int error;
988
989 if (zone && !xfs_zone_validate(zone, rtg, &write_pointer))
990 return -EFSCORRUPTED;
991
992 /*
993 * For sequential write required zones we retrieved the hardware write
994 * pointer above.
995 *
996 * For conventional zones or conventional devices we don't have that
997 * luxury. Instead query the rmap to find the highest recorded block
998 * and set the write pointer to the block after that. In case of a
999 * power loss this misses blocks where the data I/O has completed but
1000 * not recorded in the rmap yet, and it also rewrites blocks if the most
1001 * recently written ones got deleted again before unmount, but this is
1002 * the best we can do without hardware support.
1003 */
1004 if (!zone || zone->cond == BLK_ZONE_COND_NOT_WP) {
1005 xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
1006 highest_rgbno = xfs_rtrmap_highest_rgbno(rtg);
1007 if (highest_rgbno == NULLRGBLOCK)
1008 write_pointer = 0;
1009 else
1010 write_pointer = highest_rgbno + 1;
1011 xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
1012 }
1013
	/*
	 * If there are no used blocks, but the zone is not in the empty state
	 * yet, we lost power before the zone reset completed. In that case
	 * finish the work here.
	 */
1019 if (write_pointer == rtg_blocks(rtg) && used == 0) {
1020 error = xfs_zone_gc_reset_sync(rtg);
1021 if (error)
1022 return error;
1023 write_pointer = 0;
1024 }
1025
1026 if (write_pointer == 0) {
1027 /* zone is empty */
1028 atomic_inc(&zi->zi_nr_free_zones);
1029 xfs_group_set_mark(&rtg->rtg_group, XFS_RTG_FREE);
1030 iz->available += rtg_blocks(rtg);
1031 } else if (write_pointer < rtg_blocks(rtg)) {
1032 /* zone is open */
1033 struct xfs_open_zone *oz;
1034
1035 atomic_inc(&rtg_group(rtg)->xg_active_ref);
1036 oz = xfs_init_open_zone(rtg, write_pointer, WRITE_LIFE_NOT_SET,
1037 false);
1038 list_add_tail(&oz->oz_entry, &zi->zi_open_zones);
1039 zi->zi_nr_open_zones++;
1040
1041 iz->available += (rtg_blocks(rtg) - write_pointer);
1042 iz->reclaimable += write_pointer - used;
1043 } else if (used < rtg_blocks(rtg)) {
1044 /* zone fully written, but has freed blocks */
1045 xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
1046 iz->reclaimable += (rtg_blocks(rtg) - used);
1047 }
1048
1049 return 0;
1050 }
1051
static int
xfs_get_zone_info_cb(
	struct blk_zone		*zone,
	unsigned int		idx,
	void			*data)
{
1058 struct xfs_init_zones *iz = data;
1059 struct xfs_mount *mp = iz->mp;
1060 xfs_fsblock_t zsbno = xfs_daddr_to_rtb(mp, zone->start);
1061 xfs_rgnumber_t rgno;
1062 struct xfs_rtgroup *rtg;
1063 int error;
1064
1065 if (xfs_rtb_to_rgbno(mp, zsbno) != 0) {
1066 xfs_warn(mp, "mismatched zone start 0x%llx.", zsbno);
1067 return -EFSCORRUPTED;
1068 }
1069
1070 rgno = xfs_rtb_to_rgno(mp, zsbno);
1071 rtg = xfs_rtgroup_grab(mp, rgno);
1072 if (!rtg) {
1073 xfs_warn(mp, "realtime group not found for zone %u.", rgno);
1074 return -EFSCORRUPTED;
1075 }
1076 error = xfs_init_zone(iz, rtg, zone);
1077 xfs_rtgroup_rele(rtg);
1078 return error;
1079 }
1080
/*
 * Calculate the max open zone limit based on the number of backing zones
 * available.
 */
static inline uint32_t
xfs_max_open_zones(
	struct xfs_mount	*mp)
{
1089 unsigned int max_open, max_open_data_zones;
1090
1091 /*
1092 * We need two zones for every open data zone, one in reserve as we
1093 * don't reclaim open zones. One data zone and its spare is included
1094 * in XFS_MIN_ZONES to support at least one user data writer.
1095 */
1096 max_open_data_zones = (mp->m_sb.sb_rgcount - XFS_MIN_ZONES) / 2 + 1;
1097 max_open = max_open_data_zones + XFS_OPEN_GC_ZONES;
1098
1099 /*
1100 * Cap the max open limit to 1/4 of available space. Without this we'd
1101 * run out of easy reclaim targets too quickly and storage devices don't
1102 * handle huge numbers of concurrent write streams overly well.
1103 */
1104 max_open = min(max_open, mp->m_sb.sb_rgcount / 4);
1105
1106 return max(XFS_MIN_OPEN_ZONES, max_open);
1107 }
1108
1109 /*
1110 * Normally we use the open zone limit that the device reports. If there is
1111 * none let the user pick one from the command line.
1112 *
1113 * If the device doesn't report an open zone limit and there is no override,
 * allow holding about a quarter of the zones open. In theory we could allow
1115 * all to be open, but at that point we run into GC deadlocks because we can't
1116 * reclaim open zones.
1117 *
1118 * When used on conventional SSDs a lower open limit is advisable as we'll
1119 * otherwise overwhelm the FTL just as much as a conventional block allocator.
1120 *
1121 * Note: To debug the open zone management code, force max_open to 1 here.
1122 */
static int
xfs_calc_open_zones(
	struct xfs_mount	*mp)
{
1127 struct block_device *bdev = mp->m_rtdev_targp->bt_bdev;
1128 unsigned int bdev_open_zones = bdev_max_open_zones(bdev);
1129
1130 if (!mp->m_max_open_zones) {
1131 if (bdev_open_zones)
1132 mp->m_max_open_zones = bdev_open_zones;
1133 else
1134 mp->m_max_open_zones = XFS_DEFAULT_MAX_OPEN_ZONES;
1135 }
1136
1137 if (mp->m_max_open_zones < XFS_MIN_OPEN_ZONES) {
1138 xfs_notice(mp, "need at least %u open zones.",
1139 XFS_MIN_OPEN_ZONES);
1140 return -EIO;
1141 }
1142
1143 if (bdev_open_zones && bdev_open_zones < mp->m_max_open_zones) {
1144 mp->m_max_open_zones = bdev_open_zones;
1145 xfs_info(mp, "limiting open zones to %u due to hardware limit.\n",
1146 bdev_open_zones);
1147 }
1148
1149 if (mp->m_max_open_zones > xfs_max_open_zones(mp)) {
1150 mp->m_max_open_zones = xfs_max_open_zones(mp);
1151 xfs_info(mp,
1152 "limiting open zones to %u due to total zone count (%u)",
1153 mp->m_max_open_zones, mp->m_sb.sb_rgcount);
1154 }
1155
1156 return 0;
1157 }
1158
static unsigned long *
xfs_alloc_bucket_bitmap(
	struct xfs_mount	*mp)
{
1163 return kvmalloc_array(BITS_TO_LONGS(mp->m_sb.sb_rgcount),
1164 sizeof(unsigned long), GFP_KERNEL | __GFP_ZERO);
1165 }
1166
static struct xfs_zone_info *
xfs_alloc_zone_info(
	struct xfs_mount	*mp)
{
1171 struct xfs_zone_info *zi;
1172 int i;
1173
1174 zi = kzalloc(sizeof(*zi), GFP_KERNEL);
1175 if (!zi)
1176 return NULL;
1177 INIT_LIST_HEAD(&zi->zi_open_zones);
1178 INIT_LIST_HEAD(&zi->zi_reclaim_reservations);
1179 spin_lock_init(&zi->zi_reset_list_lock);
1180 spin_lock_init(&zi->zi_open_zones_lock);
1181 spin_lock_init(&zi->zi_reservation_lock);
1182 init_waitqueue_head(&zi->zi_zone_wait);
1183 spin_lock_init(&zi->zi_used_buckets_lock);
1184 for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++) {
1185 zi->zi_used_bucket_bitmap[i] = xfs_alloc_bucket_bitmap(mp);
1186 if (!zi->zi_used_bucket_bitmap[i])
1187 goto out_free_bitmaps;
1188 }
1189 return zi;
1190
1191 out_free_bitmaps:
	while (--i >= 0)
1193 kvfree(zi->zi_used_bucket_bitmap[i]);
1194 kfree(zi);
1195 return NULL;
1196 }
1197
static void
xfs_free_zone_info(
	struct xfs_zone_info	*zi)
{
1202 int i;
1203
1204 xfs_free_open_zones(zi);
1205 for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++)
1206 kvfree(zi->zi_used_bucket_bitmap[i]);
1207 kfree(zi);
1208 }
1209
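/*
 * Set up the zone allocator at mount time: validate the geometry, calculate
 * the open zone limit, initialize the per-zone state from the device zone
 * report (or the rmap for conventional devices), and start garbage collection.
 */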
int
xfs_mount_zones(
	struct xfs_mount	*mp)
{
1214 struct xfs_init_zones iz = {
1215 .mp = mp,
1216 };
1217 struct xfs_buftarg *bt = mp->m_rtdev_targp;
1218 int error;
1219
1220 if (!bt) {
1221 xfs_notice(mp, "RT device missing.");
1222 return -EINVAL;
1223 }
1224
1225 if (!xfs_has_rtgroups(mp) || !xfs_has_rmapbt(mp)) {
1226 xfs_notice(mp, "invalid flag combination.");
1227 return -EFSCORRUPTED;
1228 }
1229 if (mp->m_sb.sb_rextsize != 1) {
1230 xfs_notice(mp, "zoned file systems do not support rextsize.");
1231 return -EFSCORRUPTED;
1232 }
1233 if (mp->m_sb.sb_rgcount < XFS_MIN_ZONES) {
1234 xfs_notice(mp,
1235 "zoned file systems need to have at least %u zones.", XFS_MIN_ZONES);
1236 return -EFSCORRUPTED;
1237 }
1238
1239 error = xfs_calc_open_zones(mp);
1240 if (error)
1241 return error;
1242
1243 mp->m_zone_info = xfs_alloc_zone_info(mp);
1244 if (!mp->m_zone_info)
1245 return -ENOMEM;
1246
1247 xfs_info(mp, "%u zones of %u blocks (%u max open zones)",
1248 mp->m_sb.sb_rgcount, mp->m_groups[XG_TYPE_RTG].blocks,
1249 mp->m_max_open_zones);
1250 trace_xfs_zones_mount(mp);
1251
1252 if (bdev_is_zoned(bt->bt_bdev)) {
1253 error = blkdev_report_zones(bt->bt_bdev,
1254 XFS_FSB_TO_BB(mp, mp->m_sb.sb_rtstart),
1255 mp->m_sb.sb_rgcount, xfs_get_zone_info_cb, &iz);
1256 if (error < 0)
1257 goto out_free_zone_info;
1258 } else {
1259 struct xfs_rtgroup *rtg = NULL;
1260
1261 while ((rtg = xfs_rtgroup_next(mp, rtg))) {
1262 error = xfs_init_zone(&iz, rtg, NULL);
1263 if (error)
1264 goto out_free_zone_info;
1265 }
1266 }
1267
1268 xfs_set_freecounter(mp, XC_FREE_RTAVAILABLE, iz.available);
1269 xfs_set_freecounter(mp, XC_FREE_RTEXTENTS,
1270 iz.available + iz.reclaimable);
1271
1272 /*
1273 * The user may configure GC to free up a percentage of unused blocks.
1274 * By default this is 0. GC will always trigger at the minimum level
1275 * for keeping max_open_zones available for data placement.
1276 */
1277 mp->m_zonegc_low_space = 0;
1278
1279 error = xfs_zone_gc_mount(mp);
1280 if (error)
1281 goto out_free_zone_info;
1282
	/*
	 * Set up an MRU cache to track inode to open zone for data placement
	 * purposes. The magic values for group count and life time are the
	 * same as the defaults for file streams, which seems sane enough.
	 */
1288 xfs_mru_cache_create(&mp->m_zone_cache, mp,
1289 5000, 10, xfs_zone_cache_free_func);
1290 return 0;
1291
1292 out_free_zone_info:
1293 xfs_free_zone_info(mp->m_zone_info);
1294 return error;
1295 }
1296
void
xfs_unmount_zones(
	struct xfs_mount	*mp)
{
1301 xfs_zone_gc_unmount(mp);
1302 xfs_free_zone_info(mp->m_zone_info);
1303 xfs_mru_cache_destroy(mp->m_zone_cache);
1304 }
1305