// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023-2025 Christoph Hellwig.
 * Copyright (c) 2024-2025, Western Digital Corporation or its affiliates.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_error.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_iomap.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_refcount.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_zone_alloc.h"
#include "xfs_zone_priv.h"
#include "xfs_zones.h"
#include "xfs_trace.h"
#include "xfs_mru_cache.h"

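/*
 * Drop a reference to an open zone.  The last reference releases the
 * reference on the backing rtgroup and frees the structure.
 */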
void
xfs_open_zone_put(
	struct xfs_open_zone	*oz)
{
	if (atomic_dec_and_test(&oz->oz_ref)) {
		xfs_rtgroup_rele(oz->oz_rtg);
		kfree(oz);
	}
}

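/*
 * Map a used block count to one of the XFS_ZONE_USED_BUCKETS utilization
 * buckets that are used to track reclaim candidates.
 */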
static inline uint32_t
xfs_zone_bucket(
	struct xfs_mount	*mp,
	uint32_t		used_blocks)
{
	return XFS_ZONE_USED_BUCKETS * used_blocks /
		mp->m_groups[XG_TYPE_RTG].blocks;
}

static inline void
xfs_zone_add_to_bucket(
	struct xfs_zone_info	*zi,
	xfs_rgnumber_t		rgno,
	uint32_t		to_bucket)
{
	__set_bit(rgno, zi->zi_used_bucket_bitmap[to_bucket]);
	zi->zi_used_bucket_entries[to_bucket]++;
}

static inline void
xfs_zone_remove_from_bucket(
	struct xfs_zone_info	*zi,
	xfs_rgnumber_t		rgno,
	uint32_t		from_bucket)
{
	__clear_bit(rgno, zi->zi_used_bucket_bitmap[from_bucket]);
	zi->zi_used_bucket_entries[from_bucket]--;
}

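/*
 * Update the reclaim accounting for @rtg after @freed blocks were freed:
 * queue fully emptied zones for a reset, mark zones that transitioned away
 * from full as reclaimable, and move zones between used buckets as their
 * utilization drops.
 */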
static void
xfs_zone_account_reclaimable(
	struct xfs_rtgroup	*rtg,
	uint32_t		freed)
{
	struct xfs_group	*xg = &rtg->rtg_group;
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_zone_info	*zi = mp->m_zone_info;
	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;
	xfs_rgnumber_t		rgno = rtg_rgno(rtg);
	uint32_t		from_bucket = xfs_zone_bucket(mp, used + freed);
	uint32_t		to_bucket = xfs_zone_bucket(mp, used);
	bool			was_full = (used + freed == rtg_blocks(rtg));

	/*
	 * This can be called from log recovery, where the zone_info structure
	 * hasn't been allocated yet.  Skip all work as xfs_mount_zones will
	 * add the zones to the right buckets before the file system becomes
	 * active.
	 */
	if (!zi)
		return;

	if (!used) {
		/*
		 * The zone is now empty, remove it from the bottom bucket and
		 * trigger a reset.
		 */
		trace_xfs_zone_emptied(rtg);

		if (!was_full)
			xfs_group_clear_mark(xg, XFS_RTG_RECLAIMABLE);

		spin_lock(&zi->zi_used_buckets_lock);
		if (!was_full)
			xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
		spin_unlock(&zi->zi_used_buckets_lock);

		spin_lock(&zi->zi_reset_list_lock);
		xg->xg_next_reset = zi->zi_reset_list;
		zi->zi_reset_list = xg;
		spin_unlock(&zi->zi_reset_list_lock);

		if (zi->zi_gc_thread)
			wake_up_process(zi->zi_gc_thread);
	} else if (was_full) {
		/*
		 * The zone transitioned from full, mark it as reclaimable and
		 * wake up GC, which might be waiting for zones to reclaim.
		 */
		spin_lock(&zi->zi_used_buckets_lock);
		xfs_zone_add_to_bucket(zi, rgno, to_bucket);
		spin_unlock(&zi->zi_used_buckets_lock);

		xfs_group_set_mark(xg, XFS_RTG_RECLAIMABLE);
		if (zi->zi_gc_thread && xfs_zoned_need_gc(mp))
			wake_up_process(zi->zi_gc_thread);
	} else if (to_bucket != from_bucket) {
		/*
		 * Move the zone to a new bucket if it dropped below the
		 * threshold.
		 */
		spin_lock(&zi->zi_used_buckets_lock);
		xfs_zone_add_to_bucket(zi, rgno, to_bucket);
		xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
		spin_unlock(&zi->zi_used_buckets_lock);
	}
}

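/*
 * Called when the write pointer reached the end of the zone:  detach the open
 * zone from the rtgroup and from the open zone list (or the GC slot), drop
 * the list reference, wake up any waiters and account already freed blocks
 * as reclaimable.
 */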
static void
xfs_open_zone_mark_full(
	struct xfs_open_zone	*oz)
{
	struct xfs_rtgroup	*rtg = oz->oz_rtg;
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_zone_info	*zi = mp->m_zone_info;
	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;

	trace_xfs_zone_full(rtg);

	WRITE_ONCE(rtg->rtg_open_zone, NULL);

	spin_lock(&zi->zi_open_zones_lock);
	if (oz->oz_is_gc) {
		ASSERT(current == zi->zi_gc_thread);
		zi->zi_open_gc_zone = NULL;
	} else {
		zi->zi_nr_open_zones--;
		list_del_init(&oz->oz_entry);
	}
	spin_unlock(&zi->zi_open_zones_lock);
	xfs_open_zone_put(oz);

	wake_up_all(&zi->zi_zone_wait);
	if (used < rtg_blocks(rtg))
		xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
}

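/*
 * Account blocks that were just written to @oz.  Blocks that ended up used
 * bump the used counter in the rmap inode, while blocks that ended up unused
 * are returned to the free extent counter.  Mark the zone full once all of
 * its blocks have been written.
 */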
static void
xfs_zone_record_blocks(
	struct xfs_trans	*tp,
	xfs_fsblock_t		fsbno,
	xfs_filblks_t		len,
	struct xfs_open_zone	*oz,
	bool			used)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_rtgroup	*rtg = oz->oz_rtg;
	struct xfs_inode	*rmapip = rtg_rmap(rtg);

	trace_xfs_zone_record_blocks(oz, xfs_rtb_to_rgbno(mp, fsbno), len);

	xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
	xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);
	if (used) {
		rmapip->i_used_blocks += len;
		ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg));
	} else {
		xfs_add_frextents(mp, len);
	}
	oz->oz_written += len;
	if (oz->oz_written == rtg_blocks(rtg))
		xfs_open_zone_mark_full(oz);
	xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
}

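/*
 * Map a newly written extent into the data fork, replacing the existing
 * mapping, and account the written blocks to the open zone.  If a data write
 * raced with this (GC) write, the new blocks are only recorded as unused
 * instead.
 */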
static int
xfs_zoned_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*new,
	struct xfs_open_zone	*oz,
	xfs_fsblock_t		old_startblock)
{
	struct xfs_bmbt_irec	data;
	int			nmaps = 1;
	int			error;

	/* Grab the corresponding mapping in the data fork. */
	error = xfs_bmapi_read(ip, new->br_startoff, new->br_blockcount, &data,
			&nmaps, 0);
	if (error)
		return error;

	/*
	 * Cap the update to the existing extent in the data fork because we
	 * can only overwrite one extent at a time.
	 */
	ASSERT(new->br_blockcount >= data.br_blockcount);
	new->br_blockcount = data.br_blockcount;

	/*
	 * If a data write raced with this GC write, keep the existing data in
	 * the data fork, mark our newly written GC extent as reclaimable, then
	 * move on to the next extent.
	 */
	if (old_startblock != NULLFSBLOCK &&
	    old_startblock != data.br_startblock)
		goto skip;

	trace_xfs_reflink_cow_remap_from(ip, new);
	trace_xfs_reflink_cow_remap_to(ip, &data);

	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
			XFS_IEXT_REFLINK_END_COW_CNT);
	if (error)
		return error;

	if (data.br_startblock != HOLESTARTBLOCK) {
		ASSERT(data.br_startblock != DELAYSTARTBLOCK);
		ASSERT(!isnullstartblock(data.br_startblock));

		xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &data);
		if (xfs_is_reflink_inode(ip)) {
			xfs_refcount_decrease_extent(tp, true, &data);
		} else {
			error = xfs_free_extent_later(tp, data.br_startblock,
					data.br_blockcount, NULL,
					XFS_AG_RESV_NONE,
					XFS_FREE_EXTENT_REALTIME);
			if (error)
				return error;
		}
	}

	xfs_zone_record_blocks(tp, new->br_startblock, new->br_blockcount, oz,
			true);

	/* Map the new blocks into the data fork. */
	xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, new);
	return 0;

skip:
	trace_xfs_reflink_cow_remap_skip(ip, new);
	xfs_zone_record_blocks(tp, new->br_startblock, new->br_blockcount, oz,
			false);
	return 0;
}

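/*
 * Remap the blocks written at @daddr back into the data fork of @ip after the
 * write completed, using one transaction per remapped extent.
 */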
int
xfs_zoned_end_io(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count,
	xfs_daddr_t		daddr,
	struct xfs_open_zone	*oz,
	xfs_fsblock_t		old_startblock)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	struct xfs_bmbt_irec	new = {
		.br_startoff	= XFS_B_TO_FSBT(mp, offset),
		.br_startblock	= xfs_daddr_to_rtb(mp, daddr),
		.br_state	= XFS_EXT_NORM,
	};
	unsigned int		resblks =
		XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
	struct xfs_trans	*tp;
	int			error;

	if (xfs_is_shutdown(mp))
		return -EIO;

	while (new.br_startoff < end_fsb) {
		new.br_blockcount = end_fsb - new.br_startoff;

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_RES_FDBLKS, &tp);
		if (error)
			return error;
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		error = xfs_zoned_map_extent(tp, ip, &new, oz, old_startblock);
		if (error)
			xfs_trans_cancel(tp);
		else
			error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		new.br_startoff += new.br_blockcount;
		new.br_startblock += new.br_blockcount;
		if (old_startblock != NULLFSBLOCK)
			old_startblock += new.br_blockcount;
	}

	return 0;
}

/*
 * "Free" blocks allocated in a zone.
 *
 * Just decrement the used blocks counter and report the space as freed.
 */
int
xfs_zone_free_blocks(
	struct xfs_trans	*tp,
	struct xfs_rtgroup	*rtg,
	xfs_fsblock_t		fsbno,
	xfs_filblks_t		len)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*rmapip = rtg_rmap(rtg);

	xfs_assert_ilocked(rmapip, XFS_ILOCK_EXCL);

	if (len > rmapip->i_used_blocks) {
		xfs_err(mp,
"trying to free more blocks (%lld) than used counter (%u).",
			len, rmapip->i_used_blocks);
		ASSERT(len <= rmapip->i_used_blocks);
		xfs_rtginode_mark_sick(rtg, XFS_RTGI_RMAP);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return -EFSCORRUPTED;
	}

	trace_xfs_zone_free_blocks(rtg, xfs_rtb_to_rgbno(mp, fsbno), len);

	rmapip->i_used_blocks -= len;
	/*
	 * Don't add open zones to the reclaimable buckets.  The I/O completion
	 * for writing the last block will take care of accounting for already
	 * unused blocks instead.
	 */
	if (!READ_ONCE(rtg->rtg_open_zone))
		xfs_zone_account_reclaimable(rtg, len);
	xfs_add_frextents(mp, len);
	xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
	return 0;
}

/*
 * Check if the zone containing the data just before the offset we are
 * writing to is still open and has space.
 */
static struct xfs_open_zone *
xfs_last_used_zone(
	struct iomap_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSB(mp, ioend->io_offset);
	struct xfs_rtgroup	*rtg = NULL;
	struct xfs_open_zone	*oz = NULL;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (!xfs_iext_lookup_extent_before(ip, &ip->i_df, &offset_fsb,
			&icur, &got)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return NULL;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	rtg = xfs_rtgroup_grab(mp, xfs_rtb_to_rgno(mp, got.br_startblock));
	if (!rtg)
		return NULL;

	xfs_ilock(rtg_rmap(rtg), XFS_ILOCK_SHARED);
	oz = READ_ONCE(rtg->rtg_open_zone);
	if (oz && (oz->oz_is_gc || !atomic_inc_not_zero(&oz->oz_ref)))
		oz = NULL;
	xfs_iunlock(rtg_rmap(rtg), XFS_ILOCK_SHARED);

	xfs_rtgroup_rele(rtg);
	return oz;
}

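/*
 * Find a zone marked XFS_RTG_FREE in the range [@start, @end], grab an active
 * reference to it, clear the free mark and update the free zone accounting.
 */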
static struct xfs_group *
xfs_find_free_zone(
	struct xfs_mount	*mp,
	unsigned long		start,
	unsigned long		end)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	XA_STATE		(xas, &mp->m_groups[XG_TYPE_RTG].xa, start);
	struct xfs_group	*xg;

	xas_lock(&xas);
	xas_for_each_marked(&xas, xg, end, XFS_RTG_FREE)
		if (atomic_inc_not_zero(&xg->xg_active_ref))
			goto found;
	xas_unlock(&xas);
	return NULL;

found:
	xas_clear_mark(&xas, XFS_RTG_FREE);
	atomic_dec(&zi->zi_nr_free_zones);
	zi->zi_free_zone_cursor = xg->xg_gno;
	xas_unlock(&xas);
	return xg;
}

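/*
 * Allocate and initialize an open zone structure for @rtg and publish it in
 * rtg->rtg_open_zone.  The returned structure holds a single reference.
 */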
static struct xfs_open_zone *
xfs_init_open_zone(
	struct xfs_rtgroup	*rtg,
	xfs_rgblock_t		write_pointer,
	enum rw_hint		write_hint,
	bool			is_gc)
{
	struct xfs_open_zone	*oz;

	oz = kzalloc(sizeof(*oz), GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&oz->oz_alloc_lock);
	atomic_set(&oz->oz_ref, 1);
	oz->oz_rtg = rtg;
	oz->oz_write_pointer = write_pointer;
	oz->oz_written = write_pointer;
	oz->oz_write_hint = write_hint;
	oz->oz_is_gc = is_gc;

	/*
	 * All dereferences of rtg->rtg_open_zone hold the ILOCK for the rmap
	 * inode, but we don't really want to take that here because we are
	 * under the zone_list_lock.  Ensure the pointer is only set for a
	 * fully initialized open zone structure so that a racy lookup finding
	 * it is fine.
	 */
	WRITE_ONCE(rtg->rtg_open_zone, oz);
	return oz;
}

/*
 * Find a completely free zone, open it, and return a reference.
 */
struct xfs_open_zone *
xfs_open_zone(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint,
	bool			is_gc)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	struct xfs_group	*xg;

	xg = xfs_find_free_zone(mp, zi->zi_free_zone_cursor, ULONG_MAX);
	if (!xg)
		xg = xfs_find_free_zone(mp, 0, zi->zi_free_zone_cursor);
	if (!xg)
		return NULL;

	set_current_state(TASK_RUNNING);
	return xfs_init_open_zone(to_rtg(xg), 0, write_hint, is_gc);
}

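/*
 * Try to open a new zone for writing while respecting the open zone limit and
 * the zones that have to stay in reserve for garbage collection.  Called with
 * zi_open_zones_lock held; the lock is dropped and reacquired internally.
 */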
static struct xfs_open_zone *
xfs_try_open_zone(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	struct xfs_open_zone	*oz;

	if (zi->zi_nr_open_zones >= mp->m_max_open_zones - XFS_OPEN_GC_ZONES)
		return NULL;
	if (atomic_read(&zi->zi_nr_free_zones) <
	    XFS_GC_ZONES - XFS_OPEN_GC_ZONES)
		return NULL;

	/*
	 * Increment the open zone count to reserve our slot before dropping
	 * zi_open_zones_lock.
	 */
	zi->zi_nr_open_zones++;
	spin_unlock(&zi->zi_open_zones_lock);
	oz = xfs_open_zone(mp, write_hint, false);
	spin_lock(&zi->zi_open_zones_lock);
	if (!oz) {
		zi->zi_nr_open_zones--;
		return NULL;
	}

	atomic_inc(&oz->oz_ref);
	list_add_tail(&oz->oz_entry, &zi->zi_open_zones);

	/*
	 * If this was the last free zone, other waiters might be waiting
	 * on us to write to it as well.
	 */
	wake_up_all(&zi->zi_zone_wait);

	if (xfs_zoned_need_gc(mp))
		wake_up_process(zi->zi_gc_thread);

	trace_xfs_zone_opened(oz->oz_rtg);
	return oz;
}

/*
 * For data with a short or medium lifetime, try to colocate it into an
 * already open zone with a matching temperature.
 */
static bool
xfs_colocate_eagerly(
	enum rw_hint		file_hint)
{
	switch (file_hint) {
	case WRITE_LIFE_MEDIUM:
	case WRITE_LIFE_SHORT:
	case WRITE_LIFE_NONE:
		return true;
	default:
		return false;
	}
}

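/*
 * Check if data with @file_hint fits into a zone that was opened with
 * @oz->oz_write_hint, grouping data roughly by expected lifetime.
 */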
static bool
xfs_good_hint_match(
	struct xfs_open_zone	*oz,
	enum rw_hint		file_hint)
{
	switch (oz->oz_write_hint) {
	case WRITE_LIFE_LONG:
	case WRITE_LIFE_EXTREME:
		/* colocate long and extreme */
		if (file_hint == WRITE_LIFE_LONG ||
		    file_hint == WRITE_LIFE_EXTREME)
			return true;
		break;
	case WRITE_LIFE_MEDIUM:
		/* colocate medium with medium */
		if (file_hint == WRITE_LIFE_MEDIUM)
			return true;
		break;
	case WRITE_LIFE_SHORT:
	case WRITE_LIFE_NONE:
	case WRITE_LIFE_NOT_SET:
		/* colocate short and none */
		if (file_hint <= WRITE_LIFE_SHORT)
			return true;
		break;
	}
	return false;
}

static bool
xfs_try_use_zone(
	struct xfs_zone_info	*zi,
	enum rw_hint		file_hint,
	struct xfs_open_zone	*oz,
	bool			lowspace)
{
	if (oz->oz_write_pointer == rtg_blocks(oz->oz_rtg))
		return false;
	if (!lowspace && !xfs_good_hint_match(oz, file_hint))
		return false;
	if (!atomic_inc_not_zero(&oz->oz_ref))
		return false;

	/*
	 * If we have a hint set for the data, use that for the zone even if
	 * some data was written already without any hint set, but don't change
	 * the temperature after that as that would make little sense without
	 * tracking per-temperature class written block counts, which is
	 * probably overkill anyway.
	 */
	if (file_hint != WRITE_LIFE_NOT_SET &&
	    oz->oz_write_hint == WRITE_LIFE_NOT_SET)
		oz->oz_write_hint = file_hint;

	/*
	 * If we couldn't match by inode or lifetime we just pick the first
	 * zone with enough space above.  For that we want the least busy zone
	 * for some definition of "least" busy.  For now this simple LRU
	 * algorithm that rotates every zone to the end of the list will do it,
	 * even if it isn't exactly cache friendly.
	 */
	if (!list_is_last(&oz->oz_entry, &zi->zi_open_zones))
		list_move_tail(&oz->oz_entry, &zi->zi_open_zones);
	return true;
}

static struct xfs_open_zone *
xfs_select_open_zone_lru(
	struct xfs_zone_info	*zi,
	enum rw_hint		file_hint,
	bool			lowspace)
{
	struct xfs_open_zone	*oz;

	lockdep_assert_held(&zi->zi_open_zones_lock);

	list_for_each_entry(oz, &zi->zi_open_zones, oz_entry)
		if (xfs_try_use_zone(zi, file_hint, oz, lowspace))
			return oz;

	cond_resched_lock(&zi->zi_open_zones_lock);
	return NULL;
}

static struct xfs_open_zone *
xfs_select_open_zone_mru(
	struct xfs_zone_info	*zi,
	enum rw_hint		file_hint)
{
	struct xfs_open_zone	*oz;

	lockdep_assert_held(&zi->zi_open_zones_lock);

	list_for_each_entry_reverse(oz, &zi->zi_open_zones, oz_entry)
		if (xfs_try_use_zone(zi, file_hint, oz, false))
			return oz;

	cond_resched_lock(&zi->zi_open_zones_lock);
	return NULL;
}

static inline enum rw_hint xfs_inode_write_hint(struct xfs_inode *ip)
{
	if (xfs_has_nolifetime(ip->i_mount))
		return WRITE_LIFE_NOT_SET;
	return VFS_I(ip)->i_write_hint;
}

/*
 * Try to tightly pack inodes that are written back after they were closed
 * instead of trying to open new zones for them or spreading them to the least
 * recently used zone.  This optimizes the data layout for workloads that
 * untar or copy a lot of small files.  Right now this does not separate
 * multiple such streams.
 */
static inline bool xfs_zoned_pack_tight(struct xfs_inode *ip)
{
	return !inode_is_open_for_write(VFS_I(ip)) &&
		!(ip->i_diflags & XFS_DIFLAG_APPEND);
}

/*
 * Pick a new zone for writes.
 *
 * If we aren't using up our budget of open zones just open a new one from the
 * freelist.  Else try to find one that matches the expected data lifetime.
 * If we don't find a good one, pick any zone that is available.
 */
static struct xfs_open_zone *
xfs_select_zone_nowait(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint,
	bool			pack_tight)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	struct xfs_open_zone	*oz = NULL;

	if (xfs_is_shutdown(mp))
		return NULL;

	/*
	 * Try to fill up open zones with matching temperature if available.
	 * It is better to try to co-locate data when this is favorable, so we
	 * can activate empty zones when it is statistically better to separate
	 * data.
	 */
	spin_lock(&zi->zi_open_zones_lock);
	if (xfs_colocate_eagerly(write_hint))
		oz = xfs_select_open_zone_lru(zi, write_hint, false);
	else if (pack_tight)
		oz = xfs_select_open_zone_mru(zi, write_hint);
	if (oz)
		goto out_unlock;

	/*
	 * See if we can open a new zone and use that.
	 */
	oz = xfs_try_open_zone(mp, write_hint);
	if (oz)
		goto out_unlock;

	/*
	 * Try to colocate cold data with other cold data if we failed to open
	 * a new zone for it.
	 */
	if (write_hint != WRITE_LIFE_NOT_SET &&
	    !xfs_colocate_eagerly(write_hint))
		oz = xfs_select_open_zone_lru(zi, write_hint, false);
	if (!oz)
		oz = xfs_select_open_zone_lru(zi, WRITE_LIFE_NOT_SET, false);
	if (!oz)
		oz = xfs_select_open_zone_lru(zi, WRITE_LIFE_NOT_SET, true);
out_unlock:
	spin_unlock(&zi->zi_open_zones_lock);
	return oz;
}

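/*
 * Pick a zone for writing, waiting for one to become available if all open
 * zones are full and no new zone can be opened right now.
 */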
static struct xfs_open_zone *
xfs_select_zone(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint,
	bool			pack_tight)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	DEFINE_WAIT		(wait);
	struct xfs_open_zone	*oz;

	oz = xfs_select_zone_nowait(mp, write_hint, pack_tight);
	if (oz)
		return oz;

	for (;;) {
		prepare_to_wait(&zi->zi_zone_wait, &wait, TASK_UNINTERRUPTIBLE);
		oz = xfs_select_zone_nowait(mp, write_hint, pack_tight);
		if (oz)
			break;
		schedule();
	}
	finish_wait(&zi->zi_zone_wait, &wait);
	return oz;
}

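/*
 * Allocate up to @count_fsb blocks from @oz by advancing the write pointer.
 * Returns the number of bytes allocated and the start sector to write to,
 * which is the zone start for sequential write required zones (where the
 * actual location is picked by the device using zone append) or the write
 * pointer position for conventional zones.
 */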
static unsigned int
xfs_zone_alloc_blocks(
	struct xfs_open_zone	*oz,
	xfs_filblks_t		count_fsb,
	sector_t		*sector,
	bool			*is_seq)
{
	struct xfs_rtgroup	*rtg = oz->oz_rtg;
	struct xfs_mount	*mp = rtg_mount(rtg);
	xfs_rgblock_t		rgbno;

	spin_lock(&oz->oz_alloc_lock);
	count_fsb = min3(count_fsb, XFS_MAX_BMBT_EXTLEN,
		(xfs_filblks_t)rtg_blocks(rtg) - oz->oz_write_pointer);
	if (!count_fsb) {
		spin_unlock(&oz->oz_alloc_lock);
		return 0;
	}
	rgbno = oz->oz_write_pointer;
	oz->oz_write_pointer += count_fsb;
	spin_unlock(&oz->oz_alloc_lock);

	trace_xfs_zone_alloc_blocks(oz, rgbno, count_fsb);

	*sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0);
	*is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *sector);
	if (!*is_seq)
		*sector += XFS_FSB_TO_BB(mp, rgbno);
	return XFS_FSB_TO_B(mp, count_fsb);
}

void
xfs_mark_rtg_boundary(
	struct iomap_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	sector_t		sector = ioend->io_bio.bi_iter.bi_sector;

	if (xfs_rtb_to_rgbno(mp, xfs_daddr_to_rtb(mp, sector)) == 0)
		ioend->io_flags |= IOMAP_IOEND_BOUNDARY;
}

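/*
 * Submit the bio for an ioend, using a zone append operation for sequential
 * write required zones and a regular write otherwise.  Takes an extra open
 * zone reference that is dropped in the I/O completion path.
 */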
static void
xfs_submit_zoned_bio(
	struct iomap_ioend	*ioend,
	struct xfs_open_zone	*oz,
	bool			is_seq)
{
	ioend->io_bio.bi_iter.bi_sector = ioend->io_sector;
	ioend->io_private = oz;
	atomic_inc(&oz->oz_ref); /* for xfs_zoned_end_io */

	if (is_seq) {
		ioend->io_bio.bi_opf &= ~REQ_OP_WRITE;
		ioend->io_bio.bi_opf |= REQ_OP_ZONE_APPEND;
	} else {
		xfs_mark_rtg_boundary(ioend);
	}

	submit_bio(&ioend->io_bio);
}

/*
 * Cache the last zone written to for an inode so that it is considered first
 * for subsequent writes.
 */
struct xfs_zone_cache_item {
	struct xfs_mru_cache_elem	mru;
	struct xfs_open_zone		*oz;
};

static inline struct xfs_zone_cache_item *
xfs_zone_cache_item(struct xfs_mru_cache_elem *mru)
{
	return container_of(mru, struct xfs_zone_cache_item, mru);
}

static void
xfs_zone_cache_free_func(
	void			*data,
	struct xfs_mru_cache_elem *mru)
{
	struct xfs_zone_cache_item *item = xfs_zone_cache_item(mru);

	xfs_open_zone_put(item->oz);
	kfree(item);
}

/*
 * Check if we have a cached last open zone available for the inode and if so,
 * return a reference to it.
 */
static struct xfs_open_zone *
xfs_cached_zone(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip)
{
	struct xfs_mru_cache_elem *mru;
	struct xfs_open_zone	*oz;

	mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino);
	if (!mru)
		return NULL;
	oz = xfs_zone_cache_item(mru)->oz;
	if (oz) {
		/*
		 * GC only steals open zones at mount time, so no GC zones
		 * should end up in the cache.
		 */
		ASSERT(!oz->oz_is_gc);
		ASSERT(atomic_read(&oz->oz_ref) > 0);
		atomic_inc(&oz->oz_ref);
	}
	xfs_mru_cache_done(mp->m_zone_cache);
	return oz;
}

/*
 * Update the last used zone cache for a given inode.
 *
 * The caller must have a reference on the open zone.
 */
static void
xfs_zone_cache_create_association(
	struct xfs_inode	*ip,
	struct xfs_open_zone	*oz)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_zone_cache_item *item = NULL;
	struct xfs_mru_cache_elem *mru;

	ASSERT(atomic_read(&oz->oz_ref) > 0);
	atomic_inc(&oz->oz_ref);

	mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino);
	if (mru) {
		/*
		 * If we have an association already, update it to point to the
		 * new zone.
		 */
		item = xfs_zone_cache_item(mru);
		xfs_open_zone_put(item->oz);
		item->oz = oz;
		xfs_mru_cache_done(mp->m_zone_cache);
		return;
	}

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		xfs_open_zone_put(oz);
		return;
	}
	item->oz = oz;
	xfs_mru_cache_insert(mp->m_zone_cache, ip->i_ino, &item->mru);
}

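/*
 * Pick a zone, allocate space in it and submit the I/O for an ioend, splitting
 * the ioend and moving on to another zone whenever the current zone runs out
 * of space.
 */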
void
xfs_zone_alloc_and_submit(
	struct iomap_ioend	*ioend,
	struct xfs_open_zone	**oz)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_mount	*mp = ip->i_mount;
	enum rw_hint		write_hint = xfs_inode_write_hint(ip);
	bool			pack_tight = xfs_zoned_pack_tight(ip);
	unsigned int		alloc_len;
	struct iomap_ioend	*split;
	bool			is_seq;

	if (xfs_is_shutdown(mp))
		goto out_error;

	/*
	 * If we don't have a cached zone in this write context, see if the
	 * last extent before the one we are writing to points to an active
	 * zone.  If so, just continue writing to it.
	 */
	if (!*oz && ioend->io_offset)
		*oz = xfs_last_used_zone(ioend);
	if (!*oz)
		*oz = xfs_cached_zone(mp, ip);

	if (!*oz) {
select_zone:
		*oz = xfs_select_zone(mp, write_hint, pack_tight);
		if (!*oz)
			goto out_error;

		xfs_zone_cache_create_association(ip, *oz);
	}

	alloc_len = xfs_zone_alloc_blocks(*oz, XFS_B_TO_FSB(mp, ioend->io_size),
			&ioend->io_sector, &is_seq);
	if (!alloc_len) {
		xfs_open_zone_put(*oz);
		goto select_zone;
	}

	while ((split = iomap_split_ioend(ioend, alloc_len, is_seq))) {
		if (IS_ERR(split))
			goto out_split_error;
		alloc_len -= split->io_bio.bi_iter.bi_size;
		xfs_submit_zoned_bio(split, *oz, is_seq);
		if (!alloc_len) {
			xfs_open_zone_put(*oz);
			goto select_zone;
		}
	}

	xfs_submit_zoned_bio(ioend, *oz, is_seq);
	return;

out_split_error:
	ioend->io_bio.bi_status = errno_to_blk_status(PTR_ERR(split));
out_error:
	bio_io_error(&ioend->io_bio);
}

/*
 * Wake up all threads waiting for a zoned space allocation when the file
 * system is shut down.
 */
void
xfs_zoned_wake_all(
	struct xfs_mount	*mp)
{
	/*
	 * Don't wake up if there is no m_zone_info.  This is complicated by
	 * the fact that unmount can't atomically clear m_zone_info and thus
	 * we need to check SB_ACTIVE for that, but mount temporarily enables
	 * SB_ACTIVE during log recovery so we can't entirely rely on that
	 * either.
	 */
	if ((mp->m_super->s_flags & SB_ACTIVE) && mp->m_zone_info)
		wake_up_all(&mp->m_zone_info->zi_zone_wait);
}

/*
 * Check if @rgbno in @rtg is a potentially valid block.  It might still be
 * unused, but that information is only found in the rmap.
 */
bool
xfs_zone_rgbno_is_valid(
	struct xfs_rtgroup	*rtg,
	xfs_rgnumber_t		rgbno)
{
	lockdep_assert_held(&rtg_rmap(rtg)->i_lock);

	if (rtg->rtg_open_zone)
		return rgbno < rtg->rtg_open_zone->oz_write_pointer;
	return !xa_get_mark(&rtg_mount(rtg)->m_groups[XG_TYPE_RTG].xa,
			rtg_rgno(rtg), XFS_RTG_FREE);
}

static void
xfs_free_open_zones(
	struct xfs_zone_info	*zi)
{
	struct xfs_open_zone	*oz;

	spin_lock(&zi->zi_open_zones_lock);
	while ((oz = list_first_entry_or_null(&zi->zi_open_zones,
			struct xfs_open_zone, oz_entry))) {
		list_del(&oz->oz_entry);
		xfs_open_zone_put(oz);
	}
	spin_unlock(&zi->zi_open_zones_lock);
}

struct xfs_init_zones {
	struct xfs_mount	*mp;
	uint64_t		available;
	uint64_t		reclaimable;
};

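/*
 * Set up the in-memory state for a single zone at mount time:  find the write
 * pointer, reset zones that were emptied but not reset before a power loss,
 * and classify the zone as free, open or reclaimable.
 */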
static int
xfs_init_zone(
	struct xfs_init_zones	*iz,
	struct xfs_rtgroup	*rtg,
	struct blk_zone		*zone)
{
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_zone_info	*zi = mp->m_zone_info;
	uint64_t		used = rtg_rmap(rtg)->i_used_blocks;
	xfs_rgblock_t		write_pointer, highest_rgbno;
	int			error;

	if (zone && !xfs_zone_validate(zone, rtg, &write_pointer))
		return -EFSCORRUPTED;

	/*
	 * For sequential write required zones we retrieved the hardware write
	 * pointer above.
	 *
	 * For conventional zones or conventional devices we don't have that
	 * luxury.  Instead query the rmap to find the highest recorded block
	 * and set the write pointer to the block after that.  In case of a
	 * power loss this misses blocks where the data I/O has completed but
	 * not recorded in the rmap yet, and it also rewrites blocks if the
	 * most recently written ones got deleted again before unmount, but
	 * this is the best we can do without hardware support.
	 */
	if (!zone || zone->cond == BLK_ZONE_COND_NOT_WP) {
		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
		highest_rgbno = xfs_rtrmap_highest_rgbno(rtg);
		if (highest_rgbno == NULLRGBLOCK)
			write_pointer = 0;
		else
			write_pointer = highest_rgbno + 1;
		xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
	}

	/*
	 * If there are no used blocks, but the zone is not in the empty state
	 * yet, we lost power before the zone reset.  In that case finish the
	 * work here.
	 */
	if (write_pointer == rtg_blocks(rtg) && used == 0) {
		error = xfs_zone_gc_reset_sync(rtg);
		if (error)
			return error;
		write_pointer = 0;
	}

	if (write_pointer == 0) {
		/* zone is empty */
		atomic_inc(&zi->zi_nr_free_zones);
		xfs_group_set_mark(&rtg->rtg_group, XFS_RTG_FREE);
		iz->available += rtg_blocks(rtg);
	} else if (write_pointer < rtg_blocks(rtg)) {
		/* zone is open */
		struct xfs_open_zone *oz;

		atomic_inc(&rtg_group(rtg)->xg_active_ref);
		oz = xfs_init_open_zone(rtg, write_pointer, WRITE_LIFE_NOT_SET,
				false);
		list_add_tail(&oz->oz_entry, &zi->zi_open_zones);
		zi->zi_nr_open_zones++;

		iz->available += (rtg_blocks(rtg) - write_pointer);
		iz->reclaimable += write_pointer - used;
	} else if (used < rtg_blocks(rtg)) {
		/* zone fully written, but has freed blocks */
		xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
		iz->reclaimable += (rtg_blocks(rtg) - used);
	}

	return 0;
}

static int
xfs_get_zone_info_cb(
	struct blk_zone		*zone,
	unsigned int		idx,
	void			*data)
{
	struct xfs_init_zones	*iz = data;
	struct xfs_mount	*mp = iz->mp;
	xfs_fsblock_t		zsbno = xfs_daddr_to_rtb(mp, zone->start);
	xfs_rgnumber_t		rgno;
	struct xfs_rtgroup	*rtg;
	int			error;

	if (xfs_rtb_to_rgbno(mp, zsbno) != 0) {
		xfs_warn(mp, "mismatched zone start 0x%llx.", zsbno);
		return -EFSCORRUPTED;
	}

	rgno = xfs_rtb_to_rgno(mp, zsbno);
	rtg = xfs_rtgroup_grab(mp, rgno);
	if (!rtg) {
		xfs_warn(mp, "realtime group not found for zone %u.", rgno);
		return -EFSCORRUPTED;
	}
	error = xfs_init_zone(iz, rtg, zone);
	xfs_rtgroup_rele(rtg);
	return error;
}

/*
 * Calculate the max open zone limit based on the number of backing zones
 * available.
 */
static inline uint32_t
xfs_max_open_zones(
	struct xfs_mount	*mp)
{
	unsigned int		max_open, max_open_data_zones;

	/*
	 * We need two zones for every open data zone, one in reserve as we
	 * don't reclaim open zones.  One data zone and its spare are included
	 * in XFS_MIN_ZONES.
	 */
	max_open_data_zones = (mp->m_sb.sb_rgcount - XFS_MIN_ZONES) / 2 + 1;
	max_open = max_open_data_zones + XFS_OPEN_GC_ZONES;

	/*
	 * Cap the max open limit to 1/4 of available space.
	 */
	max_open = min(max_open, mp->m_sb.sb_rgcount / 4);

	return max(XFS_MIN_OPEN_ZONES, max_open);
}

/*
 * Normally we use the open zone limit that the device reports.  If there is
 * none let the user pick one from the command line.
 *
 * If the device doesn't report an open zone limit and there is no override,
 * allow holding about a quarter of the zones open.  In theory we could allow
 * all to be open, but at that point we run into GC deadlocks because we can't
 * reclaim open zones.
 *
 * When used on conventional SSDs a lower open limit is advisable as we'll
 * otherwise overwhelm the FTL just as much as a conventional block allocator.
 *
 * Note: To debug the open zone management code, force max_open to 1 here.
 */
static int
xfs_calc_open_zones(
	struct xfs_mount	*mp)
{
	struct block_device	*bdev = mp->m_rtdev_targp->bt_bdev;
	unsigned int		bdev_open_zones = bdev_max_open_zones(bdev);

	if (!mp->m_max_open_zones) {
		if (bdev_open_zones)
			mp->m_max_open_zones = bdev_open_zones;
		else
			mp->m_max_open_zones = xfs_max_open_zones(mp);
	}

	if (mp->m_max_open_zones < XFS_MIN_OPEN_ZONES) {
		xfs_notice(mp, "need at least %u open zones.",
			XFS_MIN_OPEN_ZONES);
		return -EIO;
	}

	if (bdev_open_zones && bdev_open_zones < mp->m_max_open_zones) {
		mp->m_max_open_zones = bdev_open_zones;
		xfs_info(mp, "limiting open zones to %u due to hardware limit.\n",
			bdev_open_zones);
	}

	if (mp->m_max_open_zones > xfs_max_open_zones(mp)) {
		mp->m_max_open_zones = xfs_max_open_zones(mp);
		xfs_info(mp,
			"limiting open zones to %u due to total zone count (%u)",
			mp->m_max_open_zones, mp->m_sb.sb_rgcount);
	}

	return 0;
}

static unsigned long *
xfs_alloc_bucket_bitmap(
	struct xfs_mount	*mp)
{
	return kvmalloc_array(BITS_TO_LONGS(mp->m_sb.sb_rgcount),
			sizeof(unsigned long), GFP_KERNEL | __GFP_ZERO);
}

static struct xfs_zone_info *
xfs_alloc_zone_info(
	struct xfs_mount	*mp)
{
	struct xfs_zone_info	*zi;
	int			i;

	zi = kzalloc(sizeof(*zi), GFP_KERNEL);
	if (!zi)
		return NULL;
	INIT_LIST_HEAD(&zi->zi_open_zones);
	INIT_LIST_HEAD(&zi->zi_reclaim_reservations);
	spin_lock_init(&zi->zi_reset_list_lock);
	spin_lock_init(&zi->zi_open_zones_lock);
	spin_lock_init(&zi->zi_reservation_lock);
	init_waitqueue_head(&zi->zi_zone_wait);
	spin_lock_init(&zi->zi_used_buckets_lock);
	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++) {
		zi->zi_used_bucket_bitmap[i] = xfs_alloc_bucket_bitmap(mp);
		if (!zi->zi_used_bucket_bitmap[i])
			goto out_free_bitmaps;
	}
	return zi;

out_free_bitmaps:
	while (--i >= 0)
		kvfree(zi->zi_used_bucket_bitmap[i]);
	kfree(zi);
	return NULL;
}

static void
xfs_free_zone_info(
	struct xfs_zone_info	*zi)
{
	int			i;

	xfs_free_open_zones(zi);
	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++)
		kvfree(zi->zi_used_bucket_bitmap[i]);
	kfree(zi);
}

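/*
 * Set up the zone allocator state at mount time:  validate the geometry,
 * calculate the open zone limit, walk all backing zones to build up the
 * free/open/reclaimable state, and set up garbage collection and the
 * inode-to-zone MRU cache.
 */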
int
xfs_mount_zones(
	struct xfs_mount	*mp)
{
	struct xfs_init_zones	iz = {
		.mp		= mp,
	};
	struct xfs_buftarg	*bt = mp->m_rtdev_targp;
	int			error;

	if (!bt) {
		xfs_notice(mp, "RT device missing.");
		return -EINVAL;
	}

	if (!xfs_has_rtgroups(mp) || !xfs_has_rmapbt(mp)) {
		xfs_notice(mp, "invalid flag combination.");
		return -EFSCORRUPTED;
	}
	if (mp->m_sb.sb_rextsize != 1) {
		xfs_notice(mp, "zoned file systems do not support rextsize.");
		return -EFSCORRUPTED;
	}
	if (mp->m_sb.sb_rgcount < XFS_MIN_ZONES) {
		xfs_notice(mp,
"zoned file systems need to have at least %u zones.", XFS_MIN_ZONES);
		return -EFSCORRUPTED;
	}

	error = xfs_calc_open_zones(mp);
	if (error)
		return error;

	mp->m_zone_info = xfs_alloc_zone_info(mp);
	if (!mp->m_zone_info)
		return -ENOMEM;

	xfs_info(mp, "%u zones of %u blocks size (%u max open)",
		 mp->m_sb.sb_rgcount, mp->m_groups[XG_TYPE_RTG].blocks,
		 mp->m_max_open_zones);
	trace_xfs_zones_mount(mp);

	if (bdev_is_zoned(bt->bt_bdev)) {
		error = blkdev_report_zones(bt->bt_bdev,
				XFS_FSB_TO_BB(mp, mp->m_sb.sb_rtstart),
				mp->m_sb.sb_rgcount, xfs_get_zone_info_cb, &iz);
		if (error < 0)
			goto out_free_zone_info;
	} else {
		struct xfs_rtgroup *rtg = NULL;

		while ((rtg = xfs_rtgroup_next(mp, rtg))) {
			error = xfs_init_zone(&iz, rtg, NULL);
			if (error)
				goto out_free_zone_info;
		}
	}

	xfs_set_freecounter(mp, XC_FREE_RTAVAILABLE, iz.available);
	xfs_set_freecounter(mp, XC_FREE_RTEXTENTS,
			iz.available + iz.reclaimable);

	/*
	 * The user may configure GC to free up a percentage of unused blocks.
	 * By default this is 0.  GC will always trigger at the minimum level
	 * for keeping max_open_zones available for data placement.
	 */
	mp->m_zonegc_low_space = 0;

	error = xfs_zone_gc_mount(mp);
	if (error)
		goto out_free_zone_info;

	/*
	 * Set up an MRU cache to track inode to open zone associations for
	 * data placement purposes.  The magic values for group count and life
	 * time are the same as the defaults for file streams, which seems
	 * sane enough.
	 */
	xfs_mru_cache_create(&mp->m_zone_cache, mp,
			5000, 10, xfs_zone_cache_free_func);
	return 0;

out_free_zone_info:
	xfs_free_zone_info(mp->m_zone_info);
	return error;
}

void
xfs_unmount_zones(
	struct xfs_mount	*mp)
{
	xfs_zone_gc_unmount(mp);
	xfs_free_zone_info(mp->m_zone_info);
	xfs_mru_cache_destroy(mp->m_zone_cache);
}