// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023-2025 Christoph Hellwig.
 * Copyright (c) 2024-2025, Western Digital Corporation or its affiliates.
 */
#include "xfs_platform.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_error.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_iomap.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_refcount.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_zone_alloc.h"
#include "xfs_zone_priv.h"
#include "xfs_zones.h"
#include "xfs_trace.h"
#include "xfs_mru_cache.h"

static void
xfs_open_zone_free_rcu(
	struct callback_head	*cb)
{
	struct xfs_open_zone	*oz = container_of(cb, typeof(*oz), oz_rcu);

	xfs_rtgroup_rele(oz->oz_rtg);
	kfree(oz);
}

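/*
 * Drop a reference to an open zone.  The structure is freed via RCU so that
 * lockless lookups (e.g. the cached zone lookup in xfs_get_cached_zone) can
 * safely race with the final put.
 */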
void
xfs_open_zone_put(
	struct xfs_open_zone	*oz)
{
	if (atomic_dec_and_test(&oz->oz_ref))
		call_rcu(&oz->oz_rcu, xfs_open_zone_free_rcu);
}

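/*
 * Map a used block count to one of the XFS_ZONE_USED_BUCKETS usage buckets.
 * E.g. if XFS_ZONE_USED_BUCKETS were 16, a zone with less than 1/16th of its
 * blocks used would land in bucket 0, and a nearly full zone in the last
 * bucket.
 */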
static inline uint32_t
xfs_zone_bucket(
	struct xfs_mount	*mp,
	uint32_t		used_blocks)
{
	return XFS_ZONE_USED_BUCKETS * used_blocks /
		mp->m_groups[XG_TYPE_RTG].blocks;
}

static inline void
xfs_zone_add_to_bucket(
	struct xfs_zone_info	*zi,
	xfs_rgnumber_t		rgno,
	uint32_t		to_bucket)
{
	__set_bit(rgno, zi->zi_used_bucket_bitmap[to_bucket]);
	zi->zi_used_bucket_entries[to_bucket]++;
}

static inline void
xfs_zone_remove_from_bucket(
	struct xfs_zone_info	*zi,
	xfs_rgnumber_t		rgno,
	uint32_t		from_bucket)
{
	__clear_bit(rgno, zi->zi_used_bucket_bitmap[from_bucket]);
	zi->zi_used_bucket_entries[from_bucket]--;
}

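/*
 * Re-account a zone in the used-block buckets after @freed blocks have been
 * released.  The bucket bitmaps allow picking reclaim candidates by usage
 * level without scanning every zone.
 */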
static void
xfs_zone_account_reclaimable(
	struct xfs_rtgroup	*rtg,
	uint32_t		freed)
{
	struct xfs_group	*xg = &rtg->rtg_group;
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_zone_info	*zi = mp->m_zone_info;
	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;
	xfs_rgnumber_t		rgno = rtg_rgno(rtg);
	uint32_t		from_bucket = xfs_zone_bucket(mp, used + freed);
	uint32_t		to_bucket = xfs_zone_bucket(mp, used);
	bool			was_full = (used + freed == rtg_blocks(rtg));

	/*
	 * This can be called from log recovery, where the zone_info structure
	 * hasn't been allocated yet.  Skip all work as xfs_mount_zones will
	 * add the zones to the right buckets before the file system becomes
	 * active.
	 */
	if (!zi)
		return;

	if (!used) {
		/*
		 * The zone is now empty, remove it from the bottom bucket and
		 * trigger a reset.
		 */
		trace_xfs_zone_emptied(rtg);

		spin_lock(&zi->zi_used_buckets_lock);
		if (!was_full)
			xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
		spin_unlock(&zi->zi_used_buckets_lock);

		spin_lock(&zi->zi_reset_list_lock);
		xg->xg_next_reset = zi->zi_reset_list;
		zi->zi_reset_list = xg;
		spin_unlock(&zi->zi_reset_list_lock);

		if (zi->zi_gc_thread)
			wake_up_process(zi->zi_gc_thread);
	} else if (was_full) {
		/*
		 * The zone transitioned from full, mark it as reclaimable and
		 * wake up GC, which might be waiting for zones to reclaim.
		 */
		spin_lock(&zi->zi_used_buckets_lock);
		xfs_zone_add_to_bucket(zi, rgno, to_bucket);
		spin_unlock(&zi->zi_used_buckets_lock);

		if (zi->zi_gc_thread && xfs_zoned_need_gc(mp))
			wake_up_process(zi->zi_gc_thread);
	} else if (to_bucket != from_bucket) {
		/*
		 * Move the zone to a new bucket if it dropped below the
		 * threshold.
		 */
		spin_lock(&zi->zi_used_buckets_lock);
		xfs_zone_add_to_bucket(zi, rgno, to_bucket);
		xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
		spin_unlock(&zi->zi_used_buckets_lock);
	}
}

/*
 * Check if we have any zones that can be reclaimed by looking at the entry
 * counters for the zone buckets.
 */
bool
xfs_zoned_have_reclaimable(
	struct xfs_zone_info	*zi)
{
	int			i;

	spin_lock(&zi->zi_used_buckets_lock);
	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++) {
		if (zi->zi_used_bucket_entries[i]) {
			spin_unlock(&zi->zi_used_buckets_lock);
			return true;
		}
	}
	spin_unlock(&zi->zi_used_buckets_lock);

	return false;
}

static void
xfs_open_zone_mark_full(
	struct xfs_open_zone	*oz)
{
	struct xfs_rtgroup	*rtg = oz->oz_rtg;
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_zone_info	*zi = mp->m_zone_info;
	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;

	trace_xfs_zone_full(rtg);

	WRITE_ONCE(rtg->rtg_open_zone, NULL);

	spin_lock(&zi->zi_open_zones_lock);
	if (oz->oz_is_gc) {
		ASSERT(current == zi->zi_gc_thread);
		zi->zi_open_gc_zone = NULL;
	} else {
		zi->zi_nr_open_zones--;
		list_del_init(&oz->oz_entry);
	}
	spin_unlock(&zi->zi_open_zones_lock);
	xfs_open_zone_put(oz);

	wake_up_all(&zi->zi_zone_wait);
	if (used < rtg_blocks(rtg))
		xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
}

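/*
 * Account blocks that were just written and remapped into an inode: bump the
 * used block count in the rmap inode and the per-zone written counter, and
 * mark the zone full once every block in it has been written.
 */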
static void
xfs_zone_record_blocks(
	struct xfs_trans	*tp,
	struct xfs_open_zone	*oz,
	xfs_fsblock_t		fsbno,
	xfs_filblks_t		len)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_rtgroup	*rtg = oz->oz_rtg;
	struct xfs_inode	*rmapip = rtg_rmap(rtg);

	trace_xfs_zone_record_blocks(oz, xfs_rtb_to_rgbno(mp, fsbno), len);

	xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
	xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);
	rmapip->i_used_blocks += len;
	ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg));
	oz->oz_written += len;
	if (oz->oz_written == rtg_blocks(rtg))
		xfs_open_zone_mark_full(oz);
	xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
}

/*
 * Called for blocks that have been written to disk, but not actually linked to
 * an inode, which can happen when garbage collection races with user data
 * writes to a file.
 */
static void
xfs_zone_skip_blocks(
	struct xfs_open_zone	*oz,
	xfs_filblks_t		len)
{
	struct xfs_rtgroup	*rtg = oz->oz_rtg;

	trace_xfs_zone_skip_blocks(oz, 0, len);

	xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
	oz->oz_written += len;
	if (oz->oz_written == rtg_blocks(rtg))
		xfs_open_zone_mark_full(oz);
	xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);

	xfs_add_frextents(rtg_mount(rtg), len);
}

static int
xfs_zoned_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*new,
	struct xfs_open_zone	*oz,
	xfs_fsblock_t		old_startblock)
{
	struct xfs_bmbt_irec	data;
	int			nmaps = 1;
	int			error;

	/* Grab the corresponding mapping in the data fork. */
	error = xfs_bmapi_read(ip, new->br_startoff, new->br_blockcount, &data,
			&nmaps, 0);
	if (error)
		return error;

	/*
	 * Cap the update to the existing extent in the data fork because we
	 * can only overwrite one extent at a time.
	 */
	ASSERT(new->br_blockcount >= data.br_blockcount);
	new->br_blockcount = data.br_blockcount;

	/*
	 * If a data write raced with this GC write, keep the existing data in
	 * the data fork, mark our newly written GC extent as reclaimable, then
	 * move on to the next extent.
	 *
	 * Note that this can also happen when racing with operations that do
	 * not actually invalidate the data, but just move it to a different
	 * inode (XFS_IOC_EXCHANGE_RANGE), or to a different offset inside the
	 * inode (FALLOC_FL_COLLAPSE_RANGE / FALLOC_FL_INSERT_RANGE).  If the
	 * data was just moved around, GC fails to free the zone, but the zone
	 * becomes a GC candidate again as soon as all previous GC I/O has
	 * finished and these blocks will be moved out eventually.
	 */
	if (old_startblock != NULLFSBLOCK &&
	    old_startblock != data.br_startblock)
		goto skip;

	trace_xfs_reflink_cow_remap_from(ip, new);
	trace_xfs_reflink_cow_remap_to(ip, &data);

	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
			XFS_IEXT_REFLINK_END_COW_CNT);
	if (error)
		return error;

	if (data.br_startblock != HOLESTARTBLOCK) {
		ASSERT(data.br_startblock != DELAYSTARTBLOCK);
		ASSERT(!isnullstartblock(data.br_startblock));

		xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &data);
		if (xfs_is_reflink_inode(ip)) {
			xfs_refcount_decrease_extent(tp, true, &data);
		} else {
			error = xfs_free_extent_later(tp, data.br_startblock,
					data.br_blockcount, NULL,
					XFS_AG_RESV_NONE,
					XFS_FREE_EXTENT_REALTIME);
			if (error)
				return error;
		}
	}

	xfs_zone_record_blocks(tp, oz, new->br_startblock, new->br_blockcount);

	/* Map the new blocks into the data fork. */
	xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, new);
	return 0;

skip:
	trace_xfs_reflink_cow_remap_skip(ip, new);
	xfs_zone_skip_blocks(oz, new->br_blockcount);
	return 0;
}

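/*
 * I/O completion for zoned writes: remap the blocks written at @daddr into
 * the data fork of @ip, using one transaction per existing data fork extent.
 * Each loop iteration starts with the full remaining range and lets
 * xfs_zoned_map_extent() trim it down to a single extent.
 */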
int
xfs_zoned_end_io(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count,
	xfs_daddr_t		daddr,
	struct xfs_open_zone	*oz,
	xfs_fsblock_t		old_startblock)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	struct xfs_bmbt_irec	new = {
		.br_startoff	= XFS_B_TO_FSBT(mp, offset),
		.br_startblock	= xfs_daddr_to_rtb(mp, daddr),
		.br_state	= XFS_EXT_NORM,
	};
	unsigned int		resblks =
		XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
	struct xfs_trans	*tp;
	int			error;

	if (xfs_is_shutdown(mp))
		return -EIO;

	while (new.br_startoff < end_fsb) {
		new.br_blockcount = end_fsb - new.br_startoff;

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_RES_FDBLKS, &tp);
		if (error)
			return error;
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		error = xfs_zoned_map_extent(tp, ip, &new, oz, old_startblock);
		if (error)
			xfs_trans_cancel(tp);
		else
			error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		new.br_startoff += new.br_blockcount;
		new.br_startblock += new.br_blockcount;
		if (old_startblock != NULLFSBLOCK)
			old_startblock += new.br_blockcount;
	}

	return 0;
}

/*
 * "Free" blocks allocated in a zone.
 *
 * Just decrement the used blocks counter and report the space as freed.
 */
int
xfs_zone_free_blocks(
	struct xfs_trans	*tp,
	struct xfs_rtgroup	*rtg,
	xfs_fsblock_t		fsbno,
	xfs_filblks_t		len)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*rmapip = rtg_rmap(rtg);

	xfs_assert_ilocked(rmapip, XFS_ILOCK_EXCL);

	if (len > rmapip->i_used_blocks) {
		xfs_err(mp,
"trying to free more blocks (%lld) than used counter (%u).",
			len, rmapip->i_used_blocks);
		ASSERT(len <= rmapip->i_used_blocks);
		xfs_rtginode_mark_sick(rtg, XFS_RTGI_RMAP);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return -EFSCORRUPTED;
	}

	trace_xfs_zone_free_blocks(rtg, xfs_rtb_to_rgbno(mp, fsbno), len);

	rmapip->i_used_blocks -= len;
	/*
	 * Don't add open zones to the reclaimable buckets.  The I/O completion
	 * for writing the last block will take care of accounting for already
	 * unused blocks instead.
	 */
	if (!READ_ONCE(rtg->rtg_open_zone))
		xfs_zone_account_reclaimable(rtg, len);
	xfs_add_frextents(mp, len);
	xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
	return 0;
}

static struct xfs_open_zone *
xfs_init_open_zone(
	struct xfs_rtgroup	*rtg,
	xfs_rgblock_t		write_pointer,
	enum rw_hint		write_hint,
	bool			is_gc)
{
	struct xfs_open_zone	*oz;

	oz = kzalloc_obj(*oz, GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&oz->oz_alloc_lock);
	atomic_set(&oz->oz_ref, 1);
	oz->oz_rtg = rtg;
	oz->oz_allocated = write_pointer;
	oz->oz_written = write_pointer;
	oz->oz_write_hint = write_hint;
	oz->oz_is_gc = is_gc;

	/*
	 * All dereferences of rtg->rtg_open_zone hold the ILOCK for the rmap
	 * inode, but we don't really want to take that here because we are
	 * under the zone_list_lock.  Ensure the pointer is only set for a
	 * fully initialized open zone structure so that a racy lookup finding
	 * it is fine.
	 */
	WRITE_ONCE(rtg->rtg_open_zone, oz);
	return oz;
}

/*
 * Find a completely free zone, open it, and return a reference.
 */
struct xfs_open_zone *
xfs_open_zone(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint,
	bool			is_gc)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	XA_STATE(xas, &mp->m_groups[XG_TYPE_RTG].xa, 0);
	struct xfs_group	*xg;

	/*
	 * Pick the free zone with the lowest index.  Zones at the beginning
	 * of the address space typically provide higher bandwidth than those
	 * at the end of the address space on HDDs.
	 */
	xas_lock(&xas);
	xas_for_each_marked(&xas, xg, ULONG_MAX, XFS_RTG_FREE)
		if (atomic_inc_not_zero(&xg->xg_active_ref))
			goto found;
	xas_unlock(&xas);
	return NULL;

found:
	xas_clear_mark(&xas, XFS_RTG_FREE);
	atomic_dec(&zi->zi_nr_free_zones);
	xas_unlock(&xas);

	set_current_state(TASK_RUNNING);
	return xfs_init_open_zone(to_rtg(xg), 0, write_hint, is_gc);
}

static struct xfs_open_zone *
xfs_try_open_zone(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	struct xfs_open_zone	*oz;

	if (zi->zi_nr_open_zones >= mp->m_max_open_zones - XFS_OPEN_GC_ZONES)
		return NULL;
	if (atomic_read(&zi->zi_nr_free_zones) <
	    XFS_GC_ZONES - XFS_OPEN_GC_ZONES)
		return NULL;

	/*
	 * Increment the open zone count to reserve our slot before dropping
	 * zi_open_zones_lock.
	 */
	zi->zi_nr_open_zones++;
	spin_unlock(&zi->zi_open_zones_lock);
	oz = xfs_open_zone(mp, write_hint, false);
	spin_lock(&zi->zi_open_zones_lock);
	if (!oz) {
		zi->zi_nr_open_zones--;
		return NULL;
	}

	atomic_inc(&oz->oz_ref);
	list_add_tail(&oz->oz_entry, &zi->zi_open_zones);

	/*
	 * If this was the last free zone, other waiters might be waiting
	 * on us to write to it as well.
	 */
	wake_up_all(&zi->zi_zone_wait);

	if (xfs_zoned_need_gc(mp))
		wake_up_process(zi->zi_gc_thread);

	trace_xfs_zone_opened(oz->oz_rtg);
	return oz;
}

enum xfs_zone_alloc_score {
	/* Any open zone will do; we're desperate. */
	XFS_ZONE_ALLOC_ANY	= 0,

	/* It had better fit somehow. */
	XFS_ZONE_ALLOC_OK	= 1,

	/* Only reuse a zone if it fits really well. */
	XFS_ZONE_ALLOC_GOOD	= 2,
};

/*
 * Lifetime hint co-location matrix.  Fields not set default to 0, aka
 * XFS_ZONE_ALLOC_ANY.
 */
static const unsigned int
xfs_zoned_hint_score[WRITE_LIFE_HINT_NR][WRITE_LIFE_HINT_NR] = {
	[WRITE_LIFE_NOT_SET]	= {
		[WRITE_LIFE_NOT_SET]	= XFS_ZONE_ALLOC_OK,
	},
	[WRITE_LIFE_NONE]	= {
		[WRITE_LIFE_NONE]	= XFS_ZONE_ALLOC_OK,
	},
	[WRITE_LIFE_SHORT]	= {
		[WRITE_LIFE_SHORT]	= XFS_ZONE_ALLOC_GOOD,
	},
	[WRITE_LIFE_MEDIUM]	= {
		[WRITE_LIFE_MEDIUM]	= XFS_ZONE_ALLOC_GOOD,
	},
	[WRITE_LIFE_LONG]	= {
		[WRITE_LIFE_LONG]	= XFS_ZONE_ALLOC_OK,
		[WRITE_LIFE_EXTREME]	= XFS_ZONE_ALLOC_OK,
	},
	[WRITE_LIFE_EXTREME]	= {
		[WRITE_LIFE_LONG]	= XFS_ZONE_ALLOC_OK,
		[WRITE_LIFE_EXTREME]	= XFS_ZONE_ALLOC_OK,
	},
};

static bool
xfs_try_use_zone(
	struct xfs_zone_info	*zi,
	enum rw_hint		file_hint,
	struct xfs_open_zone	*oz,
	unsigned int		goodness)
{
	if (oz->oz_allocated == rtg_blocks(oz->oz_rtg))
		return false;

	if (xfs_zoned_hint_score[oz->oz_write_hint][file_hint] < goodness)
		return false;

	if (!atomic_inc_not_zero(&oz->oz_ref))
		return false;

	/*
	 * If we have a hint set for the data, use that for the zone even if
	 * some data was written already without any hint set, but don't change
	 * the temperature after that as that would make little sense without
	 * tracking per-temperature class written block counts, which is
	 * probably overkill anyway.
	 */
	if (file_hint != WRITE_LIFE_NOT_SET &&
	    oz->oz_write_hint == WRITE_LIFE_NOT_SET)
		oz->oz_write_hint = file_hint;

	/*
	 * If we couldn't match by inode or life time we just pick the first
	 * zone with enough space above.  For that we want the least busy zone
	 * for some definition of "least" busy.  For now this simple LRU
	 * algorithm that rotates every zone to the end of the list will do it,
	 * even if it isn't exactly cache friendly.
	 */
	if (!list_is_last(&oz->oz_entry, &zi->zi_open_zones))
		list_move_tail(&oz->oz_entry, &zi->zi_open_zones);
	return true;
}

static struct xfs_open_zone *
xfs_select_open_zone_lru(
	struct xfs_zone_info	*zi,
	enum rw_hint		file_hint,
	unsigned int		goodness)
{
	struct xfs_open_zone	*oz;

	lockdep_assert_held(&zi->zi_open_zones_lock);

	list_for_each_entry(oz, &zi->zi_open_zones, oz_entry)
		if (xfs_try_use_zone(zi, file_hint, oz, goodness))
			return oz;

	cond_resched_lock(&zi->zi_open_zones_lock);
	return NULL;
}

static struct xfs_open_zone *
xfs_select_open_zone_mru(
	struct xfs_zone_info	*zi,
	enum rw_hint		file_hint)
{
	struct xfs_open_zone	*oz;

	lockdep_assert_held(&zi->zi_open_zones_lock);

	list_for_each_entry_reverse(oz, &zi->zi_open_zones, oz_entry)
		if (xfs_try_use_zone(zi, file_hint, oz, XFS_ZONE_ALLOC_OK))
			return oz;

	cond_resched_lock(&zi->zi_open_zones_lock);
	return NULL;
}

static inline enum rw_hint xfs_inode_write_hint(struct xfs_inode *ip)
{
	if (xfs_has_nolifetime(ip->i_mount))
		return WRITE_LIFE_NOT_SET;
	return VFS_I(ip)->i_write_hint;
}

/*
 * Try to tightly pack small files that are written back after they were closed
 * instead of trying to open new zones for them or spread them to the least
 * recently used zone.  This optimizes the data layout for workloads that untar
 * or copy a lot of small files.  Right now this does not separate multiple such
 * streams.
 */
static inline bool xfs_zoned_pack_tight(struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	size_t			zone_capacity =
		XFS_FSB_TO_B(mp, mp->m_groups[XG_TYPE_RTG].blocks);

	/*
	 * Do not tightly pack files that already fill a whole zone, to avoid
	 * fragmentation.
	 */
	if (i_size_read(VFS_I(ip)) >= zone_capacity)
		return false;

	return !inode_is_open_for_write(VFS_I(ip)) &&
		!(ip->i_diflags & XFS_DIFLAG_APPEND);
}

static struct xfs_open_zone *
xfs_select_zone_nowait(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint,
	bool			pack_tight)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	struct xfs_open_zone	*oz = NULL;

	if (xfs_is_shutdown(mp))
		return NULL;

	/*
	 * Try to fill up open zones with matching temperature if available.
	 * It is better to try to co-locate data when this is favorable, so we
	 * can activate empty zones when it is statistically better to separate
	 * data.
	 */
	spin_lock(&zi->zi_open_zones_lock);
	oz = xfs_select_open_zone_lru(zi, write_hint, XFS_ZONE_ALLOC_GOOD);
	if (oz)
		goto out_unlock;

	if (pack_tight)
		oz = xfs_select_open_zone_mru(zi, write_hint);
	if (oz)
		goto out_unlock;

	/*
	 * See if we can open a new zone and use that so that data for
	 * different files is mixed as little as possible.
	 */
	oz = xfs_try_open_zone(mp, write_hint);
	if (oz)
		goto out_unlock;

	/*
	 * Try to find a zone that is an ok match to co-locate data with.
	 */
	oz = xfs_select_open_zone_lru(zi, write_hint, XFS_ZONE_ALLOC_OK);
	if (oz)
		goto out_unlock;

	/*
	 * Pick the least recently used zone, regardless of hint match.
	 */
	oz = xfs_select_open_zone_lru(zi, write_hint, XFS_ZONE_ALLOC_ANY);
out_unlock:
	spin_unlock(&zi->zi_open_zones_lock);
	return oz;
}

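/*
 * Blocking variant of the zone selection: if no open zone can be used or
 * opened right now, sleep on zi_zone_wait until space frees up or the file
 * system shuts down.
 */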
static struct xfs_open_zone *
xfs_select_zone(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint,
	bool			pack_tight)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	DEFINE_WAIT(wait);
	struct xfs_open_zone	*oz;

	oz = xfs_select_zone_nowait(mp, write_hint, pack_tight);
	if (oz)
		return oz;

	for (;;) {
		prepare_to_wait(&zi->zi_zone_wait, &wait, TASK_UNINTERRUPTIBLE);
		oz = xfs_select_zone_nowait(mp, write_hint, pack_tight);
		if (oz || xfs_is_shutdown(mp))
			break;
		schedule();
	}
	finish_wait(&zi->zi_zone_wait, &wait);
	return oz;
}

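/*
 * Carve up to count_fsb blocks out of the open zone by advancing the
 * allocation pointer under oz_alloc_lock.  For conventional zones the caller
 * gets the exact start sector; for sequential write required zones only the
 * zone's start sector is returned, as the actual location is determined by
 * the zone append completion.
 */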
static unsigned int
xfs_zone_alloc_blocks(
	struct xfs_open_zone	*oz,
	xfs_filblks_t		count_fsb,
	sector_t		*sector,
	bool			*is_seq)
{
	struct xfs_rtgroup	*rtg = oz->oz_rtg;
	struct xfs_mount	*mp = rtg_mount(rtg);
	xfs_rgblock_t		allocated;

	spin_lock(&oz->oz_alloc_lock);
	count_fsb = min3(count_fsb, XFS_MAX_BMBT_EXTLEN,
		(xfs_filblks_t)rtg_blocks(rtg) - oz->oz_allocated);
	if (!count_fsb) {
		spin_unlock(&oz->oz_alloc_lock);
		return 0;
	}
	allocated = oz->oz_allocated;
	oz->oz_allocated += count_fsb;
	spin_unlock(&oz->oz_alloc_lock);

	trace_xfs_zone_alloc_blocks(oz, allocated, count_fsb);

	*sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0);
	*is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *sector);
	if (!*is_seq)
		*sector += XFS_FSB_TO_BB(mp, allocated);
	return XFS_FSB_TO_B(mp, count_fsb);
}

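/*
 * Flag an ioend whose first block is also the first block of a realtime
 * group, i.e. the start of a new zone, so that it is not merged with I/O
 * for the previous zone.
 */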
void
xfs_mark_rtg_boundary(
	struct iomap_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	sector_t		sector = ioend->io_bio.bi_iter.bi_sector;

	if (xfs_rtb_to_rgbno(mp, xfs_daddr_to_rtb(mp, sector)) == 0)
		ioend->io_flags |= IOMAP_IOEND_BOUNDARY;
}

/*
 * Check if we have a cached last open zone available for the inode and
 * if yes return a reference to it.
 */
static struct xfs_open_zone *
xfs_get_cached_zone(
	struct xfs_inode	*ip)
{
	struct xfs_open_zone	*oz;

	rcu_read_lock();
	oz = VFS_I(ip)->i_private;
	if (oz) {
		/*
		 * GC only steals open zones at mount time, so no GC zones
		 * should end up in the cache.
		 */
		ASSERT(!oz->oz_is_gc);
		if (!atomic_inc_not_zero(&oz->oz_ref))
			oz = NULL;
	}
	rcu_read_unlock();

	return oz;
}

/*
 * Stash our zone in the inode so that it is reused for future allocations.
 *
 * The open_zone structure will be pinned until either the inode is freed or
 * until the cached open zone is replaced with a different one because the
 * current one was full when we tried to use it.  This means we keep any
 * open zone around forever as long as any inode that used it for the last
 * write is cached, which slightly increases the memory use of cached inodes
 * that were ever written to, but significantly simplifies the cached zone
 * lookup.  Because the open_zone is clearly marked as full when all data
 * in the underlying RTG was written, the caching is always safe.
 */
static void
xfs_set_cached_zone(
	struct xfs_inode	*ip,
	struct xfs_open_zone	*oz)
{
	struct xfs_open_zone	*old_oz;

	atomic_inc(&oz->oz_ref);
	old_oz = xchg(&VFS_I(ip)->i_private, oz);
	if (old_oz)
		xfs_open_zone_put(old_oz);
}

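/*
 * Submit the bio for an ioend.  On sequential write required zones the write
 * is converted to a zone append, so the device picks the actual write
 * location and reports it back in the bio on completion.
 */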
static void
xfs_submit_zoned_bio(
	struct iomap_ioend	*ioend,
	struct xfs_open_zone	*oz,
	bool			is_seq)
{
	ioend->io_bio.bi_iter.bi_sector = ioend->io_sector;
	ioend->io_private = oz;
	atomic_inc(&oz->oz_ref); /* for xfs_zoned_end_io */

	if (is_seq) {
		ioend->io_bio.bi_opf &= ~REQ_OP_WRITE;
		ioend->io_bio.bi_opf |= REQ_OP_ZONE_APPEND;
	} else {
		xfs_mark_rtg_boundary(ioend);
	}

	submit_bio(&ioend->io_bio);
}

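/*
 * Allocate space for a writeback ioend and submit it.  If the selected zone
 * cannot fit the entire ioend, split the ioend at the allocated length and
 * keep going with another (possibly newly selected) zone until all data has
 * been submitted.
 */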
void
xfs_zone_alloc_and_submit(
	struct iomap_ioend	*ioend,
	struct xfs_open_zone	**oz)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_mount	*mp = ip->i_mount;
	enum rw_hint		write_hint = xfs_inode_write_hint(ip);
	bool			pack_tight = xfs_zoned_pack_tight(ip);
	unsigned int		alloc_len;
	struct iomap_ioend	*split;
	bool			is_seq;

	if (xfs_is_shutdown(mp))
		goto out_error;

	/*
	 * If we don't have a locally cached zone in this write context, see if
	 * the inode is still associated with a zone and use that if so.
	 */
	if (!*oz)
		*oz = xfs_get_cached_zone(ip);

	if (!*oz) {
select_zone:
		*oz = xfs_select_zone(mp, write_hint, pack_tight);
		if (!*oz)
			goto out_error;
		xfs_set_cached_zone(ip, *oz);
	}

	alloc_len = xfs_zone_alloc_blocks(*oz, XFS_B_TO_FSB(mp, ioend->io_size),
			&ioend->io_sector, &is_seq);
	if (!alloc_len) {
		xfs_open_zone_put(*oz);
		goto select_zone;
	}

	while ((split = iomap_split_ioend(ioend, alloc_len, is_seq))) {
		if (IS_ERR(split))
			goto out_split_error;
		alloc_len -= split->io_bio.bi_iter.bi_size;
		xfs_submit_zoned_bio(split, *oz, is_seq);
		if (!alloc_len) {
			xfs_open_zone_put(*oz);
			goto select_zone;
		}
	}

	xfs_submit_zoned_bio(ioend, *oz, is_seq);
	return;

out_split_error:
	ioend->io_bio.bi_status = errno_to_blk_status(PTR_ERR(split));
out_error:
	bio_io_error(&ioend->io_bio);
}

/*
 * Wake up all threads waiting for a zoned space allocation when the file
 * system is shut down.
 */
void
xfs_zoned_wake_all(
	struct xfs_mount	*mp)
{
	/*
	 * Don't wake up if there is no m_zone_info.  This is complicated by
	 * the fact that unmount can't atomically clear m_zone_info and thus we
	 * need to check SB_ACTIVE for that, but mount temporarily enables
	 * SB_ACTIVE during log recovery so we can't entirely rely on that
	 * either.
	 */
	if ((mp->m_super->s_flags & SB_ACTIVE) && mp->m_zone_info)
		wake_up_all(&mp->m_zone_info->zi_zone_wait);
}

/*
 * Check if @rgbno in @rtg is a potentially valid block.  It might still be
 * unused, but that information is only found in the rmap.
 */
bool
xfs_zone_rgbno_is_valid(
	struct xfs_rtgroup	*rtg,
	xfs_rgnumber_t		rgbno)
{
	lockdep_assert_held(&rtg_rmap(rtg)->i_lock);

	if (rtg->rtg_open_zone)
		return rgbno < rtg->rtg_open_zone->oz_allocated;
	return !xa_get_mark(&rtg_mount(rtg)->m_groups[XG_TYPE_RTG].xa,
			rtg_rgno(rtg), XFS_RTG_FREE);
}

static void
xfs_free_open_zones(
	struct xfs_zone_info	*zi)
{
	struct xfs_open_zone	*oz;

	spin_lock(&zi->zi_open_zones_lock);
	while ((oz = list_first_entry_or_null(&zi->zi_open_zones,
			struct xfs_open_zone, oz_entry))) {
		list_del(&oz->oz_entry);
		xfs_open_zone_put(oz);
	}
	spin_unlock(&zi->zi_open_zones_lock);

	/*
	 * Wait for all open zones to be freed so that they drop the group
	 * references.
	 */
	rcu_barrier();
}

struct xfs_init_zones {
	uint32_t		zone_size;
	uint32_t		zone_capacity;
	uint64_t		available;
	uint64_t		reclaimable;
};

/*
 * For sequential write required zones, we restart writing at the hardware
 * write pointer returned by xfs_validate_blk_zone().
 *
 * For conventional zones or conventional devices we have to query the rmap to
 * find the highest recorded block and set the write pointer to the block after
 * that.  In case of a power loss this misses blocks where the data I/O has
 * completed but has not been recorded in the rmap yet, and it also rewrites
 * blocks if the most recently written ones got deleted again before unmount,
 * but this is the best we can do without hardware support.
 */
static int
xfs_query_write_pointer(
	struct xfs_init_zones	*iz,
	struct xfs_rtgroup	*rtg,
	xfs_rgblock_t		*write_pointer)
{
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct block_device	*bdev = mp->m_rtdev_targp->bt_bdev;
	sector_t		start = xfs_gbno_to_daddr(&rtg->rtg_group, 0);
	xfs_rgblock_t		highest_rgbno;
	struct blk_zone		zone = {};
	int			error;

	if (bdev_is_zoned(bdev)) {
		error = blkdev_get_zone_info(bdev, start, &zone);
		if (error)
			return error;
		if (zone.start != start) {
			xfs_warn(mp, "mismatched zone start: 0x%llx/0x%llx.",
					zone.start, start);
			return -EFSCORRUPTED;
		}

		if (!xfs_validate_blk_zone(mp, &zone, rtg_rgno(rtg),
				iz->zone_size, iz->zone_capacity,
				write_pointer))
			return -EFSCORRUPTED;

		/*
		 * Use the hardware write pointer returned by
		 * xfs_validate_blk_zone for sequential write required zones,
		 * else fall through to the rmap-based estimation below.
		 */
		if (zone.cond != BLK_ZONE_COND_NOT_WP)
			return 0;
	}

	xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
	highest_rgbno = xfs_rtrmap_highest_rgbno(rtg);
	xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);

	if (highest_rgbno == NULLRGBLOCK)
		*write_pointer = 0;
	else
		*write_pointer = highest_rgbno + 1;
	return 0;
}

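/*
 * Validate the write pointer and used counter for a zone at mount time, then
 * insert the zone into the right data structure: the free zone mark, the
 * open zones list, or the reclaimable buckets.
 */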
static int
xfs_init_zone(
	struct xfs_init_zones	*iz,
	struct xfs_rtgroup	*rtg,
	xfs_rgblock_t		write_pointer)
{
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_zone_info	*zi = mp->m_zone_info;
	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;
	int			error;

	if (write_pointer > rtg->rtg_extents) {
		xfs_warn(mp, "zone %u has invalid write pointer (0x%x).",
				rtg_rgno(rtg), write_pointer);
		return -EFSCORRUPTED;
	}

	if (used > rtg->rtg_extents) {
		xfs_warn(mp,
"zone %u has used counter (0x%x) larger than zone capacity (0x%llx).",
				rtg_rgno(rtg), used, rtg->rtg_extents);
		return -EFSCORRUPTED;
	}

	if (used > write_pointer) {
		xfs_warn(mp,
"zone %u has used counter (0x%x) larger than write pointer (0x%x).",
				rtg_rgno(rtg), used, write_pointer);
		return -EFSCORRUPTED;
	}

	if (write_pointer == 0 && used != 0) {
		xfs_warn(mp, "empty zone %u has non-zero used counter (0x%x).",
				rtg_rgno(rtg), used);
		return -EFSCORRUPTED;
	}

	/*
	 * If there are no used blocks, but the zone is not in the empty state
	 * yet, we lost power before the zone reset.  In that case finish the
	 * work here.
	 */
	if (write_pointer == rtg_blocks(rtg) && used == 0) {
		error = xfs_zone_gc_reset_sync(rtg);
		if (error)
			return error;
		write_pointer = 0;
	}

	if (write_pointer == 0) {
		/* zone is empty */
		atomic_inc(&zi->zi_nr_free_zones);
		xfs_group_set_mark(&rtg->rtg_group, XFS_RTG_FREE);
		iz->available += rtg_blocks(rtg);
	} else if (write_pointer < rtg_blocks(rtg)) {
		/* zone is open */
		struct xfs_open_zone	*oz;

		atomic_inc(&rtg_group(rtg)->xg_active_ref);
		oz = xfs_init_open_zone(rtg, write_pointer, WRITE_LIFE_NOT_SET,
				false);
		list_add_tail(&oz->oz_entry, &zi->zi_open_zones);
		zi->zi_nr_open_zones++;

		iz->available += (rtg_blocks(rtg) - write_pointer);
		iz->reclaimable += write_pointer - used;
	} else if (used < rtg_blocks(rtg)) {
		/* zone fully written, but has freed blocks */
		xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
		iz->reclaimable += (rtg_blocks(rtg) - used);
	}

	return 0;
}

/*
 * Calculate the max open zone limit based on the number of backing zones
 * available.
 */
static inline uint32_t
xfs_max_open_zones(
	struct xfs_mount	*mp)
{
	unsigned int		max_open, max_open_data_zones;

	/*
	 * We need two zones for every open data zone, one in reserve as we
	 * don't reclaim open zones.  One data zone and its spare is included
	 * in XFS_MIN_ZONES to support at least one user data writer.
	 */
	max_open_data_zones = (mp->m_sb.sb_rgcount - XFS_MIN_ZONES) / 2 + 1;
	max_open = max_open_data_zones + XFS_OPEN_GC_ZONES;

	/*
	 * Cap the max open limit to 1/4 of available space.  Without this
	 * we'd run out of easy reclaim targets too quickly, and storage
	 * devices don't handle huge numbers of concurrent write streams
	 * overly well.
	 */
	max_open = min(max_open, mp->m_sb.sb_rgcount / 4);

	return max(XFS_MIN_OPEN_ZONES, max_open);
}

/*
 * Normally we use the open zone limit that the device reports.  If there is
 * none let the user pick one from the command line.
 *
 * If the device doesn't report an open zone limit and there is no override,
 * allow holding about a quarter of the zones open.  In theory we could allow
 * all to be open, but at that point we run into GC deadlocks because we can't
 * reclaim open zones.
 *
 * When used on conventional SSDs a lower open limit is advisable as we'll
 * otherwise overwhelm the FTL just as much as a conventional block allocator.
 *
 * Note: To debug the open zone management code, force max_open to 1 here.
 */
static int
xfs_calc_open_zones(
	struct xfs_mount	*mp)
{
	struct block_device	*bdev = mp->m_rtdev_targp->bt_bdev;
	unsigned int		bdev_open_zones = bdev_max_open_zones(bdev);

	if (!mp->m_max_open_zones) {
		if (bdev_open_zones)
			mp->m_max_open_zones = bdev_open_zones;
		else
			mp->m_max_open_zones = XFS_DEFAULT_MAX_OPEN_ZONES;
	}

	if (mp->m_max_open_zones < XFS_MIN_OPEN_ZONES) {
		xfs_notice(mp, "need at least %u open zones.",
				XFS_MIN_OPEN_ZONES);
		return -EIO;
	}

	if (bdev_open_zones && bdev_open_zones < mp->m_max_open_zones) {
		mp->m_max_open_zones = bdev_open_zones;
		xfs_info(mp, "limiting open zones to %u due to hardware limit.",
				bdev_open_zones);
	}

	if (mp->m_max_open_zones > xfs_max_open_zones(mp)) {
		mp->m_max_open_zones = xfs_max_open_zones(mp);
		xfs_info(mp,
"limiting open zones to %u due to total zone count (%u)",
			mp->m_max_open_zones, mp->m_sb.sb_rgcount);
	}

	return 0;
}

static unsigned long *
xfs_alloc_bucket_bitmap(
	struct xfs_mount	*mp)
{
	return kvmalloc_array(BITS_TO_LONGS(mp->m_sb.sb_rgcount),
			sizeof(unsigned long), GFP_KERNEL | __GFP_ZERO);
}

static struct xfs_zone_info *
xfs_alloc_zone_info(
	struct xfs_mount	*mp)
{
	struct xfs_zone_info	*zi;
	int			i;

	zi = kzalloc_obj(*zi, GFP_KERNEL);
	if (!zi)
		return NULL;
	INIT_LIST_HEAD(&zi->zi_open_zones);
	INIT_LIST_HEAD(&zi->zi_reclaim_reservations);
	spin_lock_init(&zi->zi_reset_list_lock);
	spin_lock_init(&zi->zi_open_zones_lock);
	spin_lock_init(&zi->zi_reservation_lock);
	init_waitqueue_head(&zi->zi_zone_wait);
	spin_lock_init(&zi->zi_used_buckets_lock);
	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++) {
		zi->zi_used_bucket_bitmap[i] = xfs_alloc_bucket_bitmap(mp);
		if (!zi->zi_used_bucket_bitmap[i])
			goto out_free_bitmaps;
	}
	return zi;

out_free_bitmaps:
	while (--i >= 0)
		kvfree(zi->zi_used_bucket_bitmap[i]);
	kfree(zi);
	return NULL;
}

static void
xfs_free_zone_info(
	struct xfs_zone_info	*zi)
{
	int			i;

	xfs_free_open_zones(zi);
	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++)
		kvfree(zi->zi_used_bucket_bitmap[i]);
	kfree(zi);
}

int
xfs_mount_zones(
	struct xfs_mount	*mp)
{
	struct xfs_init_zones	iz = {
		.zone_capacity	= mp->m_groups[XG_TYPE_RTG].blocks,
		.zone_size	= xfs_rtgroup_raw_size(mp),
	};
	struct xfs_rtgroup	*rtg = NULL;
	int			error;

	if (!mp->m_rtdev_targp) {
		xfs_notice(mp, "RT device missing.");
		return -EINVAL;
	}

	if (!xfs_has_rtgroups(mp) || !xfs_has_rmapbt(mp)) {
		xfs_notice(mp, "invalid flag combination.");
		return -EFSCORRUPTED;
	}
	if (mp->m_sb.sb_rextsize != 1) {
		xfs_notice(mp, "zoned file systems do not support rextsize.");
		return -EFSCORRUPTED;
	}
	if (mp->m_sb.sb_rgcount < XFS_MIN_ZONES) {
		xfs_notice(mp,
"zoned file systems need to have at least %u zones.", XFS_MIN_ZONES);
		return -EFSCORRUPTED;
	}

	error = xfs_calc_open_zones(mp);
	if (error)
		return error;

	mp->m_zone_info = xfs_alloc_zone_info(mp);
	if (!mp->m_zone_info)
		return -ENOMEM;

	xfs_info(mp, "%u zones of %u blocks (%u max open zones)",
		 mp->m_sb.sb_rgcount, iz.zone_capacity, mp->m_max_open_zones);
	trace_xfs_zones_mount(mp);

	/*
	 * The writeback code switches between inodes regularly to provide
	 * fairness.  The default lower bound is 4MiB, but for zoned file
	 * systems we want to increase that, both to reduce seeks and, more
	 * importantly, so that workloads that write files in a multiple of
	 * the zone size do not get fragmented and require garbage collection
	 * when they shouldn't.  Increase it to the zone size, capped by the
	 * max extent length.
	 *
	 * Note that because s_min_writeback_pages is a superblock field, this
	 * value also gets applied to non-zoned files on the data device if
	 * there are any.  On a typical zoned setup all data is on the RT
	 * device, because using the more efficient sequential write required
	 * zones is the reason for using the zone allocator, and either the RT
	 * device and the (meta)data device are on the same block device, or
	 * the (meta)data device is on a fast SSD while the data on the RT
	 * device is on an SMR HDD.  In any combination of the above cases
	 * enforcing the higher min_writeback_pages for non-RT inodes is
	 * either a noop or beneficial.
	 */
	mp->m_super->s_min_writeback_pages =
		XFS_FSB_TO_B(mp, min(iz.zone_capacity, XFS_MAX_BMBT_EXTLEN)) >>
			PAGE_SHIFT;

	while ((rtg = xfs_rtgroup_next(mp, rtg))) {
		xfs_rgblock_t	write_pointer;

		error = xfs_query_write_pointer(&iz, rtg, &write_pointer);
		if (!error)
			error = xfs_init_zone(&iz, rtg, write_pointer);
		if (error) {
			xfs_rtgroup_rele(rtg);
			goto out_free_zone_info;
		}
	}

	xfs_set_freecounter(mp, XC_FREE_RTAVAILABLE, iz.available);
	xfs_set_freecounter(mp, XC_FREE_RTEXTENTS,
			iz.available + iz.reclaimable);

	/*
	 * The user may configure GC to free up a percentage of unused blocks.
	 * By default this is 0.  GC will always trigger at the minimum level
	 * for keeping max_open_zones available for data placement.
	 */
	mp->m_zonegc_low_space = 0;

	error = xfs_zone_gc_mount(mp);
	if (error)
		goto out_free_zone_info;
	return 0;

out_free_zone_info:
	xfs_free_zone_info(mp->m_zone_info);
	return error;
}

void
xfs_unmount_zones(
	struct xfs_mount	*mp)
{
	xfs_zone_gc_unmount(mp);
	xfs_free_zone_info(mp->m_zone_info);
}