xref: /linux/fs/xfs/xfs_zone_alloc.c (revision 260f6f4fda93c8485c8037865c941b42b9cba5d2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2023-2025 Christoph Hellwig.
4  * Copyright (c) 2024-2025, Western Digital Corporation or its affiliates.
5  */
6 #include "xfs.h"
7 #include "xfs_shared.h"
8 #include "xfs_format.h"
9 #include "xfs_log_format.h"
10 #include "xfs_error.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_iomap.h"
15 #include "xfs_trans.h"
16 #include "xfs_alloc.h"
17 #include "xfs_bmap.h"
18 #include "xfs_bmap_btree.h"
19 #include "xfs_trans_space.h"
20 #include "xfs_refcount.h"
21 #include "xfs_rtbitmap.h"
22 #include "xfs_rtrmap_btree.h"
23 #include "xfs_zone_alloc.h"
24 #include "xfs_zone_priv.h"
25 #include "xfs_zones.h"
26 #include "xfs_trace.h"
27 #include "xfs_mru_cache.h"
28 
29 void
30 xfs_open_zone_put(
31 	struct xfs_open_zone	*oz)
32 {
33 	if (atomic_dec_and_test(&oz->oz_ref)) {
34 		xfs_rtgroup_rele(oz->oz_rtg);
35 		kfree(oz);
36 	}
37 }
38 
39 static inline uint32_t
40 xfs_zone_bucket(
41 	struct xfs_mount	*mp,
42 	uint32_t		used_blocks)
43 {
44 	return XFS_ZONE_USED_BUCKETS * used_blocks /
45 			mp->m_groups[XG_TYPE_RTG].blocks;
46 }
47 
48 static inline void
49 xfs_zone_add_to_bucket(
50 	struct xfs_zone_info	*zi,
51 	xfs_rgnumber_t		rgno,
52 	uint32_t		to_bucket)
53 {
54 	__set_bit(rgno, zi->zi_used_bucket_bitmap[to_bucket]);
55 	zi->zi_used_bucket_entries[to_bucket]++;
56 }
57 
58 static inline void
59 xfs_zone_remove_from_bucket(
60 	struct xfs_zone_info	*zi,
61 	xfs_rgnumber_t		rgno,
62 	uint32_t		from_bucket)
63 {
64 	__clear_bit(rgno, zi->zi_used_bucket_bitmap[from_bucket]);
65 	zi->zi_used_bucket_entries[from_bucket]--;
66 }
67 
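/*
 * Update the reclaimable zone accounting after @freed blocks were freed in
 * @rtg: queue now-empty zones for a zone reset, mark formerly full zones as
 * reclaimable, or move the zone to the bucket matching its new used count.
 */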
68 static void
69 xfs_zone_account_reclaimable(
70 	struct xfs_rtgroup	*rtg,
71 	uint32_t		freed)
72 {
73 	struct xfs_group	*xg = &rtg->rtg_group;
74 	struct xfs_mount	*mp = rtg_mount(rtg);
75 	struct xfs_zone_info	*zi = mp->m_zone_info;
76 	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;
77 	xfs_rgnumber_t		rgno = rtg_rgno(rtg);
78 	uint32_t		from_bucket = xfs_zone_bucket(mp, used + freed);
79 	uint32_t		to_bucket = xfs_zone_bucket(mp, used);
80 	bool			was_full = (used + freed == rtg_blocks(rtg));
81 
82 	/*
83 	 * This can be called from log recovery, where the zone_info structure
84 	 * hasn't been allocated yet.  Skip all work as xfs_mount_zones will
85 	 * add the zones to the right buckets before the file system becomes
86 	 * active.
87 	 */
88 	if (!zi)
89 		return;
90 
91 	if (!used) {
92 		/*
93 		 * The zone is now empty, remove it from the bottom bucket and
94 		 * trigger a reset.
95 		 */
96 		trace_xfs_zone_emptied(rtg);
97 
98 		if (!was_full)
99 			xfs_group_clear_mark(xg, XFS_RTG_RECLAIMABLE);
100 
101 		spin_lock(&zi->zi_used_buckets_lock);
102 		if (!was_full)
103 			xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
104 		spin_unlock(&zi->zi_used_buckets_lock);
105 
106 		spin_lock(&zi->zi_reset_list_lock);
107 		xg->xg_next_reset = zi->zi_reset_list;
108 		zi->zi_reset_list = xg;
109 		spin_unlock(&zi->zi_reset_list_lock);
110 
111 		if (zi->zi_gc_thread)
112 			wake_up_process(zi->zi_gc_thread);
113 	} else if (was_full) {
114 		/*
115 		 * The zone transitioned from full; mark it as reclaimable
116 		 * and wake up GC which might be waiting for zones to reclaim.
117 		 */
118 		spin_lock(&zi->zi_used_buckets_lock);
119 		xfs_zone_add_to_bucket(zi, rgno, to_bucket);
120 		spin_unlock(&zi->zi_used_buckets_lock);
121 
122 		xfs_group_set_mark(xg, XFS_RTG_RECLAIMABLE);
123 		if (zi->zi_gc_thread && xfs_zoned_need_gc(mp))
124 			wake_up_process(zi->zi_gc_thread);
125 	} else if (to_bucket != from_bucket) {
126 		/*
127 		 * Move the zone to a lower bucket now that its used block count
128 		 * dropped across a bucket boundary.
129 		 */
130 		spin_lock(&zi->zi_used_buckets_lock);
131 		xfs_zone_add_to_bucket(zi, rgno, to_bucket);
132 		xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
133 		spin_unlock(&zi->zi_used_buckets_lock);
134 	}
135 }
136 
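/*
 * All blocks in the open zone have been written.  Remove it from the open
 * zone list (or clear the GC slot), drop its reference, wake up waiters for
 * an open zone, and account any already freed blocks as reclaimable.
 */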
137 static void
138 xfs_open_zone_mark_full(
139 	struct xfs_open_zone	*oz)
140 {
141 	struct xfs_rtgroup	*rtg = oz->oz_rtg;
142 	struct xfs_mount	*mp = rtg_mount(rtg);
143 	struct xfs_zone_info	*zi = mp->m_zone_info;
144 	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;
145 
146 	trace_xfs_zone_full(rtg);
147 
148 	WRITE_ONCE(rtg->rtg_open_zone, NULL);
149 
150 	spin_lock(&zi->zi_open_zones_lock);
151 	if (oz->oz_is_gc) {
152 		ASSERT(current == zi->zi_gc_thread);
153 		zi->zi_open_gc_zone = NULL;
154 	} else {
155 		zi->zi_nr_open_zones--;
156 		list_del_init(&oz->oz_entry);
157 	}
158 	spin_unlock(&zi->zi_open_zones_lock);
159 	xfs_open_zone_put(oz);
160 
161 	wake_up_all(&zi->zi_zone_wait);
162 	if (used < rtg_blocks(rtg))
163 		xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
164 }
165 
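/*
 * Account @len just written blocks to the open zone.  If the data is still
 * wanted (@used), bump the used block count in the zone's rmap inode,
 * otherwise return the blocks to the free extent counter.  Mark the zone
 * full once every block in it has been written.
 */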
166 static void
167 xfs_zone_record_blocks(
168 	struct xfs_trans	*tp,
169 	xfs_fsblock_t		fsbno,
170 	xfs_filblks_t		len,
171 	struct xfs_open_zone	*oz,
172 	bool			used)
173 {
174 	struct xfs_mount	*mp = tp->t_mountp;
175 	struct xfs_rtgroup	*rtg = oz->oz_rtg;
176 	struct xfs_inode	*rmapip = rtg_rmap(rtg);
177 
178 	trace_xfs_zone_record_blocks(oz, xfs_rtb_to_rgbno(mp, fsbno), len);
179 
180 	xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
181 	xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);
182 	if (used) {
183 		rmapip->i_used_blocks += len;
184 		ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg));
185 	} else {
186 		xfs_add_frextents(mp, len);
187 	}
188 	oz->oz_written += len;
189 	if (oz->oz_written == rtg_blocks(rtg))
190 		xfs_open_zone_mark_full(oz);
191 	xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
192 }
193 
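/*
 * Map a freshly written extent into the data fork: unmap the old blocks
 * covering the range (dropping their refcount or freeing them), account the
 * new blocks in the zone, and install the new mapping.  If @old_startblock
 * no longer matches the data fork, a racing write made this copy stale and
 * the new blocks are recorded as unused instead.
 */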
194 static int
195 xfs_zoned_map_extent(
196 	struct xfs_trans	*tp,
197 	struct xfs_inode	*ip,
198 	struct xfs_bmbt_irec	*new,
199 	struct xfs_open_zone	*oz,
200 	xfs_fsblock_t		old_startblock)
201 {
202 	struct xfs_bmbt_irec	data;
203 	int			nmaps = 1;
204 	int			error;
205 
206 	/* Grab the corresponding mapping in the data fork. */
207 	error = xfs_bmapi_read(ip, new->br_startoff, new->br_blockcount, &data,
208 			       &nmaps, 0);
209 	if (error)
210 		return error;
211 
212 	/*
213 	 * Cap the update to the existing extent in the data fork because we can
214 	 * only overwrite one extent at a time.
215 	 */
216 	ASSERT(new->br_blockcount >= data.br_blockcount);
217 	new->br_blockcount = data.br_blockcount;
218 
219 	/*
220 	 * If a data write raced with this GC write, keep the existing data in
221 	 * the data fork, mark our newly written GC extent as reclaimable, then
222 	 * move on to the next extent.
223 	 */
224 	if (old_startblock != NULLFSBLOCK &&
225 	    old_startblock != data.br_startblock)
226 		goto skip;
227 
228 	trace_xfs_reflink_cow_remap_from(ip, new);
229 	trace_xfs_reflink_cow_remap_to(ip, &data);
230 
231 	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
232 			XFS_IEXT_REFLINK_END_COW_CNT);
233 	if (error)
234 		return error;
235 
236 	if (data.br_startblock != HOLESTARTBLOCK) {
237 		ASSERT(data.br_startblock != DELAYSTARTBLOCK);
238 		ASSERT(!isnullstartblock(data.br_startblock));
239 
240 		xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &data);
241 		if (xfs_is_reflink_inode(ip)) {
242 			xfs_refcount_decrease_extent(tp, true, &data);
243 		} else {
244 			error = xfs_free_extent_later(tp, data.br_startblock,
245 					data.br_blockcount, NULL,
246 					XFS_AG_RESV_NONE,
247 					XFS_FREE_EXTENT_REALTIME);
248 			if (error)
249 				return error;
250 		}
251 	}
252 
253 	xfs_zone_record_blocks(tp, new->br_startblock, new->br_blockcount, oz,
254 			true);
255 
256 	/* Map the new blocks into the data fork. */
257 	xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, new);
258 	return 0;
259 
260 skip:
261 	trace_xfs_reflink_cow_remap_skip(ip, new);
262 	xfs_zone_record_blocks(tp, new->br_startblock, new->br_blockcount, oz,
263 			false);
264 	return 0;
265 }
266 
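/*
 * I/O completion handling for zoned writes: remap the blocks just written
 * at @daddr into the data fork, one existing extent at a time, using a
 * fresh transaction per iteration.  @old_startblock is only set for garbage
 * collection writes and is used to detect racing regular writes.
 */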
267 int
268 xfs_zoned_end_io(
269 	struct xfs_inode	*ip,
270 	xfs_off_t		offset,
271 	xfs_off_t		count,
272 	xfs_daddr_t		daddr,
273 	struct xfs_open_zone	*oz,
274 	xfs_fsblock_t		old_startblock)
275 {
276 	struct xfs_mount	*mp = ip->i_mount;
277 	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
278 	struct xfs_bmbt_irec	new = {
279 		.br_startoff	= XFS_B_TO_FSBT(mp, offset),
280 		.br_startblock	= xfs_daddr_to_rtb(mp, daddr),
281 		.br_state	= XFS_EXT_NORM,
282 	};
283 	unsigned int		resblks =
284 		XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
285 	struct xfs_trans	*tp;
286 	int			error;
287 
288 	if (xfs_is_shutdown(mp))
289 		return -EIO;
290 
291 	while (new.br_startoff < end_fsb) {
292 		new.br_blockcount = end_fsb - new.br_startoff;
293 
294 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
295 				XFS_TRANS_RESERVE | XFS_TRANS_RES_FDBLKS, &tp);
296 		if (error)
297 			return error;
298 		xfs_ilock(ip, XFS_ILOCK_EXCL);
299 		xfs_trans_ijoin(tp, ip, 0);
300 
301 		error = xfs_zoned_map_extent(tp, ip, &new, oz, old_startblock);
302 		if (error)
303 			xfs_trans_cancel(tp);
304 		else
305 			error = xfs_trans_commit(tp);
306 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
307 		if (error)
308 			return error;
309 
310 		new.br_startoff += new.br_blockcount;
311 		new.br_startblock += new.br_blockcount;
312 		if (old_startblock != NULLFSBLOCK)
313 			old_startblock += new.br_blockcount;
314 	}
315 
316 	return 0;
317 }
318 
319 /*
320  * "Free" blocks allocated in a zone.
321  *
322  * Just decrement the used blocks counter and report the space as freed.
323  */
324 int
325 xfs_zone_free_blocks(
326 	struct xfs_trans	*tp,
327 	struct xfs_rtgroup	*rtg,
328 	xfs_fsblock_t		fsbno,
329 	xfs_filblks_t		len)
330 {
331 	struct xfs_mount	*mp = tp->t_mountp;
332 	struct xfs_inode	*rmapip = rtg_rmap(rtg);
333 
334 	xfs_assert_ilocked(rmapip, XFS_ILOCK_EXCL);
335 
336 	if (len > rmapip->i_used_blocks) {
337 		xfs_err(mp,
338 "trying to free more blocks (%lld) than used counter (%u).",
339 			len, rmapip->i_used_blocks);
340 		ASSERT(len <= rmapip->i_used_blocks);
341 		xfs_rtginode_mark_sick(rtg, XFS_RTGI_RMAP);
342 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
343 		return -EFSCORRUPTED;
344 	}
345 
346 	trace_xfs_zone_free_blocks(rtg, xfs_rtb_to_rgbno(mp, fsbno), len);
347 
348 	rmapip->i_used_blocks -= len;
349 	/*
350 	 * Don't add open zones to the reclaimable buckets.  The I/O completion
351 	 * for writing the last block will take care of accounting for already
352 	 * unused blocks instead.
353 	 */
354 	if (!READ_ONCE(rtg->rtg_open_zone))
355 		xfs_zone_account_reclaimable(rtg, len);
356 	xfs_add_frextents(mp, len);
357 	xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
358 	return 0;
359 }
360 
361 /*
362  * Check if the zone containing the data just before the offset we are
363  * writing to is still open and has space.
364  */
365 static struct xfs_open_zone *
366 xfs_last_used_zone(
367 	struct iomap_ioend	*ioend)
368 {
369 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
370 	struct xfs_mount	*mp = ip->i_mount;
371 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSB(mp, ioend->io_offset);
372 	struct xfs_rtgroup	*rtg = NULL;
373 	struct xfs_open_zone	*oz = NULL;
374 	struct xfs_iext_cursor	icur;
375 	struct xfs_bmbt_irec	got;
376 
377 	xfs_ilock(ip, XFS_ILOCK_SHARED);
378 	if (!xfs_iext_lookup_extent_before(ip, &ip->i_df, &offset_fsb,
379 				&icur, &got)) {
380 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
381 		return NULL;
382 	}
383 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
384 
385 	rtg = xfs_rtgroup_grab(mp, xfs_rtb_to_rgno(mp, got.br_startblock));
386 	if (!rtg)
387 		return NULL;
388 
389 	xfs_ilock(rtg_rmap(rtg), XFS_ILOCK_SHARED);
390 	oz = READ_ONCE(rtg->rtg_open_zone);
391 	if (oz && (oz->oz_is_gc || !atomic_inc_not_zero(&oz->oz_ref)))
392 		oz = NULL;
393 	xfs_iunlock(rtg_rmap(rtg), XFS_ILOCK_SHARED);
394 
395 	xfs_rtgroup_rele(rtg);
396 	return oz;
397 }
398 
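/*
 * Find a zone marked XFS_RTG_FREE in the range [@start, @end], take an
 * active group reference, clear the free mark, decrement the free zone
 * count and remember the position as the starting point for the next
 * search.
 */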
399 static struct xfs_group *
400 xfs_find_free_zone(
401 	struct xfs_mount	*mp,
402 	unsigned long		start,
403 	unsigned long		end)
404 {
405 	struct xfs_zone_info	*zi = mp->m_zone_info;
406 	XA_STATE		(xas, &mp->m_groups[XG_TYPE_RTG].xa, start);
407 	struct xfs_group	*xg;
408 
409 	xas_lock(&xas);
410 	xas_for_each_marked(&xas, xg, end, XFS_RTG_FREE)
411 		if (atomic_inc_not_zero(&xg->xg_active_ref))
412 			goto found;
413 	xas_unlock(&xas);
414 	return NULL;
415 
416 found:
417 	xas_clear_mark(&xas, XFS_RTG_FREE);
418 	atomic_dec(&zi->zi_nr_free_zones);
419 	zi->zi_free_zone_cursor = xg->xg_gno;
420 	xas_unlock(&xas);
421 	return xg;
422 }
423 
424 static struct xfs_open_zone *
425 xfs_init_open_zone(
426 	struct xfs_rtgroup	*rtg,
427 	xfs_rgblock_t		write_pointer,
428 	enum rw_hint		write_hint,
429 	bool			is_gc)
430 {
431 	struct xfs_open_zone	*oz;
432 
433 	oz = kzalloc(sizeof(*oz), GFP_NOFS | __GFP_NOFAIL);
434 	spin_lock_init(&oz->oz_alloc_lock);
435 	atomic_set(&oz->oz_ref, 1);
436 	oz->oz_rtg = rtg;
437 	oz->oz_allocated = write_pointer;
438 	oz->oz_written = write_pointer;
439 	oz->oz_write_hint = write_hint;
440 	oz->oz_is_gc = is_gc;
441 
442 	/*
443 	 * All dereferences of rtg->rtg_open_zone hold the ILOCK for the rmap
444 	 * inode, but we don't really want to take that here because we are
445 	 * under the zone_list_lock.  Ensure the pointer is only set for a fully
446 	 * initialized open zone structure so that a racy lookup finding it is
447 	 * fine.
448 	 */
449 	WRITE_ONCE(rtg->rtg_open_zone, oz);
450 	return oz;
451 }
452 
453 /*
454  * Find a completely free zone, open it, and return a reference.
455  */
456 struct xfs_open_zone *
457 xfs_open_zone(
458 	struct xfs_mount	*mp,
459 	enum rw_hint		write_hint,
460 	bool			is_gc)
461 {
462 	struct xfs_zone_info	*zi = mp->m_zone_info;
463 	struct xfs_group	*xg;
464 
465 	xg = xfs_find_free_zone(mp, zi->zi_free_zone_cursor, ULONG_MAX);
466 	if (!xg)
467 		xg = xfs_find_free_zone(mp, 0, zi->zi_free_zone_cursor);
468 	if (!xg)
469 		return NULL;
470 
471 	set_current_state(TASK_RUNNING);
472 	return xfs_init_open_zone(to_rtg(xg), 0, write_hint, is_gc);
473 }
474 
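/*
 * Try to open a brand new zone for @write_hint while keeping enough open
 * slots and free zones in reserve for garbage collection.  Called with
 * zi_open_zones_lock held, which is dropped and reacquired around the
 * actual zone open.
 */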
475 static struct xfs_open_zone *
476 xfs_try_open_zone(
477 	struct xfs_mount	*mp,
478 	enum rw_hint		write_hint)
479 {
480 	struct xfs_zone_info	*zi = mp->m_zone_info;
481 	struct xfs_open_zone	*oz;
482 
483 	if (zi->zi_nr_open_zones >= mp->m_max_open_zones - XFS_OPEN_GC_ZONES)
484 		return NULL;
485 	if (atomic_read(&zi->zi_nr_free_zones) <
486 	    XFS_GC_ZONES - XFS_OPEN_GC_ZONES)
487 		return NULL;
488 
489 	/*
490 	 * Increment the open zone count to reserve our slot before dropping
491 	 * zi_open_zones_lock.
492 	 */
493 	zi->zi_nr_open_zones++;
494 	spin_unlock(&zi->zi_open_zones_lock);
495 	oz = xfs_open_zone(mp, write_hint, false);
496 	spin_lock(&zi->zi_open_zones_lock);
497 	if (!oz) {
498 		zi->zi_nr_open_zones--;
499 		return NULL;
500 	}
501 
502 	atomic_inc(&oz->oz_ref);
503 	list_add_tail(&oz->oz_entry, &zi->zi_open_zones);
504 
505 	/*
506 	 * If this was the last free zone, other waiters might be waiting
507 	 * on us to write to it as well.
508 	 */
509 	wake_up_all(&zi->zi_zone_wait);
510 
511 	if (xfs_zoned_need_gc(mp))
512 		wake_up_process(zi->zi_gc_thread);
513 
514 	trace_xfs_zone_opened(oz->oz_rtg);
515 	return oz;
516 }
517 
518 /*
519  * For data with short or medium lifetime, try to colocate it into an
520  * already open zone with a matching temperature.
521  */
522 static bool
523 xfs_colocate_eagerly(
524 	enum rw_hint		file_hint)
525 {
526 	switch (file_hint) {
527 	case WRITE_LIFE_MEDIUM:
528 	case WRITE_LIFE_SHORT:
529 	case WRITE_LIFE_NONE:
530 		return true;
531 	default:
532 		return false;
533 	}
534 }
535 
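/*
 * Check whether data with lifetime hint @file_hint is a good temperature
 * match for the open zone @oz: long and extreme are colocated, medium only
 * with medium, and short, none and unset with each other.
 */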
536 static bool
537 xfs_good_hint_match(
538 	struct xfs_open_zone	*oz,
539 	enum rw_hint		file_hint)
540 {
541 	switch (oz->oz_write_hint) {
542 	case WRITE_LIFE_LONG:
543 	case WRITE_LIFE_EXTREME:
544 		/* colocate long and extreme */
545 		if (file_hint == WRITE_LIFE_LONG ||
546 		    file_hint == WRITE_LIFE_EXTREME)
547 			return true;
548 		break;
549 	case WRITE_LIFE_MEDIUM:
550 		/* colocate medium with medium */
551 		if (file_hint == WRITE_LIFE_MEDIUM)
552 			return true;
553 		break;
554 	case WRITE_LIFE_SHORT:
555 	case WRITE_LIFE_NONE:
556 	case WRITE_LIFE_NOT_SET:
557 		/* colocate short and none */
558 		if (file_hint <= WRITE_LIFE_SHORT)
559 			return true;
560 		break;
561 	}
562 	return false;
563 }
564 
565 static bool
566 xfs_try_use_zone(
567 	struct xfs_zone_info	*zi,
568 	enum rw_hint		file_hint,
569 	struct xfs_open_zone	*oz,
570 	bool			lowspace)
571 {
572 	if (oz->oz_allocated == rtg_blocks(oz->oz_rtg))
573 		return false;
574 	if (!lowspace && !xfs_good_hint_match(oz, file_hint))
575 		return false;
576 	if (!atomic_inc_not_zero(&oz->oz_ref))
577 		return false;
578 
579 	/*
580 	 * If we have a hint set for the data, use that for the zone even if
581 	 * some data was written already without any hint set, but don't change
582 	 * the temperature after that as that would make little sense without
583 	 * tracking per-temperature class written block counts, which is
584 	 * probably overkill anyway.
585 	 */
586 	if (file_hint != WRITE_LIFE_NOT_SET &&
587 	    oz->oz_write_hint == WRITE_LIFE_NOT_SET)
588 		oz->oz_write_hint = file_hint;
589 
590 	/*
591 	 * If we couldn't match by inode or lifetime, we just pick the first
592 	 * zone with enough space above.  For that we want the least busy zone
593 	 * for some definition of "least" busy.  For now this simple LRU
594 	 * algorithm that rotates every zone to the end of the list will do it,
595 	 * even if it isn't exactly cache friendly.
596 	 */
597 	if (!list_is_last(&oz->oz_entry, &zi->zi_open_zones))
598 		list_move_tail(&oz->oz_entry, &zi->zi_open_zones);
599 	return true;
600 }
601 
602 static struct xfs_open_zone *
603 xfs_select_open_zone_lru(
604 	struct xfs_zone_info	*zi,
605 	enum rw_hint		file_hint,
606 	bool			lowspace)
607 {
608 	struct xfs_open_zone	*oz;
609 
610 	lockdep_assert_held(&zi->zi_open_zones_lock);
611 
612 	list_for_each_entry(oz, &zi->zi_open_zones, oz_entry)
613 		if (xfs_try_use_zone(zi, file_hint, oz, lowspace))
614 			return oz;
615 
616 	cond_resched_lock(&zi->zi_open_zones_lock);
617 	return NULL;
618 }
619 
620 static struct xfs_open_zone *
621 xfs_select_open_zone_mru(
622 	struct xfs_zone_info	*zi,
623 	enum rw_hint		file_hint)
624 {
625 	struct xfs_open_zone	*oz;
626 
627 	lockdep_assert_held(&zi->zi_open_zones_lock);
628 
629 	list_for_each_entry_reverse(oz, &zi->zi_open_zones, oz_entry)
630 		if (xfs_try_use_zone(zi, file_hint, oz, false))
631 			return oz;
632 
633 	cond_resched_lock(&zi->zi_open_zones_lock);
634 	return NULL;
635 }
636 
637 static inline enum rw_hint xfs_inode_write_hint(struct xfs_inode *ip)
638 {
639 	if (xfs_has_nolifetime(ip->i_mount))
640 		return WRITE_LIFE_NOT_SET;
641 	return VFS_I(ip)->i_write_hint;
642 }
643 
644 /*
645  * Try to tightly pack inodes that are written back after they were closed,
646  * instead of opening new zones for them or spreading them to the least recently
647  * used zone.  This optimizes the data layout for workloads that untar or copy
648  * a lot of small files.  Right now this does not separate multiple such
649  * streams.
650  */
651 static inline bool xfs_zoned_pack_tight(struct xfs_inode *ip)
652 {
653 	return !inode_is_open_for_write(VFS_I(ip)) &&
654 		!(ip->i_diflags & XFS_DIFLAG_APPEND);
655 }
656 
657 static struct xfs_open_zone *
658 xfs_select_zone_nowait(
659 	struct xfs_mount	*mp,
660 	enum rw_hint		write_hint,
661 	bool			pack_tight)
662 {
663 	struct xfs_zone_info	*zi = mp->m_zone_info;
664 	struct xfs_open_zone	*oz = NULL;
665 
666 	if (xfs_is_shutdown(mp))
667 		return NULL;
668 
669 	/*
670 	 * Try to fill up open zones with matching temperature if available.  It
671 	 * is better to co-locate data when this is favorable, so that empty
672 	 * zones can be saved for data that is statistically better off being
673 	 * separated.
674 	 */
675 	spin_lock(&zi->zi_open_zones_lock);
676 	if (xfs_colocate_eagerly(write_hint))
677 		oz = xfs_select_open_zone_lru(zi, write_hint, false);
678 	else if (pack_tight)
679 		oz = xfs_select_open_zone_mru(zi, write_hint);
680 	if (oz)
681 		goto out_unlock;
682 
683 	/*
684 	 * See if we can open a new zone and use that so that data for different
685 	 * files is mixed as little as possible.
686 	 */
687 	oz = xfs_try_open_zone(mp, write_hint);
688 	if (oz)
689 		goto out_unlock;
690 
691 	/*
692 	 * Try to colocate cold data with other cold data if we failed to open a
693 	 * new zone for it.
694 	 */
695 	if (write_hint != WRITE_LIFE_NOT_SET &&
696 	    !xfs_colocate_eagerly(write_hint))
697 		oz = xfs_select_open_zone_lru(zi, write_hint, false);
698 	if (!oz)
699 		oz = xfs_select_open_zone_lru(zi, WRITE_LIFE_NOT_SET, false);
700 	if (!oz)
701 		oz = xfs_select_open_zone_lru(zi, WRITE_LIFE_NOT_SET, true);
702 out_unlock:
703 	spin_unlock(&zi->zi_open_zones_lock);
704 	return oz;
705 }
706 
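/*
 * Like xfs_select_zone_nowait(), but if no zone is available sleep on
 * zi_zone_wait until one becomes usable or the file system shuts down.
 */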
707 static struct xfs_open_zone *
708 xfs_select_zone(
709 	struct xfs_mount	*mp,
710 	enum rw_hint		write_hint,
711 	bool			pack_tight)
712 {
713 	struct xfs_zone_info	*zi = mp->m_zone_info;
714 	DEFINE_WAIT		(wait);
715 	struct xfs_open_zone	*oz;
716 
717 	oz = xfs_select_zone_nowait(mp, write_hint, pack_tight);
718 	if (oz)
719 		return oz;
720 
721 	for (;;) {
722 		prepare_to_wait(&zi->zi_zone_wait, &wait, TASK_UNINTERRUPTIBLE);
723 		oz = xfs_select_zone_nowait(mp, write_hint, pack_tight);
724 		if (oz || xfs_is_shutdown(mp))
725 			break;
726 		schedule();
727 	}
728 	finish_wait(&zi->zi_zone_wait, &wait);
729 	return oz;
730 }
731 
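/*
 * Allocate up to @count_fsb blocks from the open zone by advancing the
 * per-zone allocation pointer under oz_alloc_lock.  For conventional zones
 * the returned sector is the exact write location; for sequential write
 * required zones the zone start is returned and the device picks the
 * location via zone append.  Returns the allocated length in bytes, or 0
 * if the zone is already full.
 */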
732 static unsigned int
733 xfs_zone_alloc_blocks(
734 	struct xfs_open_zone	*oz,
735 	xfs_filblks_t		count_fsb,
736 	sector_t		*sector,
737 	bool			*is_seq)
738 {
739 	struct xfs_rtgroup	*rtg = oz->oz_rtg;
740 	struct xfs_mount	*mp = rtg_mount(rtg);
741 	xfs_rgblock_t		allocated;
742 
743 	spin_lock(&oz->oz_alloc_lock);
744 	count_fsb = min3(count_fsb, XFS_MAX_BMBT_EXTLEN,
745 		(xfs_filblks_t)rtg_blocks(rtg) - oz->oz_allocated);
746 	if (!count_fsb) {
747 		spin_unlock(&oz->oz_alloc_lock);
748 		return 0;
749 	}
750 	allocated = oz->oz_allocated;
751 	oz->oz_allocated += count_fsb;
752 	spin_unlock(&oz->oz_alloc_lock);
753 
754 	trace_xfs_zone_alloc_blocks(oz, allocated, count_fsb);
755 
756 	*sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0);
757 	*is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *sector);
758 	if (!*is_seq)
759 		*sector += XFS_FSB_TO_BB(mp, allocated);
760 	return XFS_FSB_TO_B(mp, count_fsb);
761 }
762 
763 void
764 xfs_mark_rtg_boundary(
765 	struct iomap_ioend	*ioend)
766 {
767 	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
768 	sector_t		sector = ioend->io_bio.bi_iter.bi_sector;
769 
770 	if (xfs_rtb_to_rgbno(mp, xfs_daddr_to_rtb(mp, sector)) == 0)
771 		ioend->io_flags |= IOMAP_IOEND_BOUNDARY;
772 }
773 
774 /*
775  * Cache the last zone written to for an inode so that it is considered first
776  * for subsequent writes.
777  */
778 struct xfs_zone_cache_item {
779 	struct xfs_mru_cache_elem	mru;
780 	struct xfs_open_zone		*oz;
781 };
782 
783 static inline struct xfs_zone_cache_item *
784 xfs_zone_cache_item(struct xfs_mru_cache_elem *mru)
785 {
786 	return container_of(mru, struct xfs_zone_cache_item, mru);
787 }
788 
789 static void
790 xfs_zone_cache_free_func(
791 	void				*data,
792 	struct xfs_mru_cache_elem	*mru)
793 {
794 	struct xfs_zone_cache_item	*item = xfs_zone_cache_item(mru);
795 
796 	xfs_open_zone_put(item->oz);
797 	kfree(item);
798 }
799 
800 /*
801  * Check if we have a cached last open zone available for the inode and
802  * if so, return a reference to it.
803  */
804 static struct xfs_open_zone *
805 xfs_cached_zone(
806 	struct xfs_mount		*mp,
807 	struct xfs_inode		*ip)
808 {
809 	struct xfs_mru_cache_elem	*mru;
810 	struct xfs_open_zone		*oz;
811 
812 	mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino);
813 	if (!mru)
814 		return NULL;
815 	oz = xfs_zone_cache_item(mru)->oz;
816 	if (oz) {
817 		/*
818 		 * GC only steals open zones at mount time, so no GC zones
819 		 * should end up in the cache.
820 		 */
821 		ASSERT(!oz->oz_is_gc);
822 		ASSERT(atomic_read(&oz->oz_ref) > 0);
823 		atomic_inc(&oz->oz_ref);
824 	}
825 	xfs_mru_cache_done(mp->m_zone_cache);
826 	return oz;
827 }
828 
829 /*
830  * Update the last used zone cache for a given inode.
831  *
832  * The caller must have a reference on the open zone.
833  */
834 static void
835 xfs_zone_cache_create_association(
836 	struct xfs_inode		*ip,
837 	struct xfs_open_zone		*oz)
838 {
839 	struct xfs_mount		*mp = ip->i_mount;
840 	struct xfs_zone_cache_item	*item = NULL;
841 	struct xfs_mru_cache_elem	*mru;
842 
843 	ASSERT(atomic_read(&oz->oz_ref) > 0);
844 	atomic_inc(&oz->oz_ref);
845 
846 	mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino);
847 	if (mru) {
848 		/*
849 		 * If we have an association already, update it to point to the
850 		 * new zone.
851 		 */
852 		item = xfs_zone_cache_item(mru);
853 		xfs_open_zone_put(item->oz);
854 		item->oz = oz;
855 		xfs_mru_cache_done(mp->m_zone_cache);
856 		return;
857 	}
858 
859 	item = kmalloc(sizeof(*item), GFP_KERNEL);
860 	if (!item) {
861 		xfs_open_zone_put(oz);
862 		return;
863 	}
864 	item->oz = oz;
865 	xfs_mru_cache_insert(mp->m_zone_cache, ip->i_ino, &item->mru);
866 }
867 
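/*
 * Submit one (split) ioend bio: stamp it with the allocated start sector
 * and a zone reference for I/O completion.  Writes to sequential write
 * required zones are converted to zone appends so that the device reports
 * the actual write location on completion.
 */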
868 static void
869 xfs_submit_zoned_bio(
870 	struct iomap_ioend	*ioend,
871 	struct xfs_open_zone	*oz,
872 	bool			is_seq)
873 {
874 	ioend->io_bio.bi_iter.bi_sector = ioend->io_sector;
875 	ioend->io_private = oz;
876 	atomic_inc(&oz->oz_ref); /* for xfs_zoned_end_io */
877 
878 	if (is_seq) {
879 		ioend->io_bio.bi_opf &= ~REQ_OP_WRITE;
880 		ioend->io_bio.bi_opf |= REQ_OP_ZONE_APPEND;
881 	} else {
882 		xfs_mark_rtg_boundary(ioend);
883 	}
884 
885 	submit_bio(&ioend->io_bio);
886 }
887 
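/*
 * Top-level helper for submitting zoned writeback I/O: find an open zone
 * for the ioend (cached, the zone last used by the preceding extent, or a
 * freshly selected one), allocate space in it, split the ioend at the
 * allocation boundary and submit the pieces, moving on to another zone
 * whenever the current one runs out of space.  Newly selected zones are
 * remembered in the per-inode zone cache.
 */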
888 void
889 xfs_zone_alloc_and_submit(
890 	struct iomap_ioend	*ioend,
891 	struct xfs_open_zone	**oz)
892 {
893 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
894 	struct xfs_mount	*mp = ip->i_mount;
895 	enum rw_hint		write_hint = xfs_inode_write_hint(ip);
896 	bool			pack_tight = xfs_zoned_pack_tight(ip);
897 	unsigned int		alloc_len;
898 	struct iomap_ioend	*split;
899 	bool			is_seq;
900 
901 	if (xfs_is_shutdown(mp))
902 		goto out_error;
903 
904 	/*
905 	 * If we don't have a cached zone in this write context, see if the
906 	 * last extent before the one we are writing to points to an active
907 	 * zone.  If so, just continue writing to it.
908 	 */
909 	if (!*oz && ioend->io_offset)
910 		*oz = xfs_last_used_zone(ioend);
911 	if (!*oz)
912 		*oz = xfs_cached_zone(mp, ip);
913 
914 	if (!*oz) {
915 select_zone:
916 		*oz = xfs_select_zone(mp, write_hint, pack_tight);
917 		if (!*oz)
918 			goto out_error;
919 
920 		xfs_zone_cache_create_association(ip, *oz);
921 	}
922 
923 	alloc_len = xfs_zone_alloc_blocks(*oz, XFS_B_TO_FSB(mp, ioend->io_size),
924 			&ioend->io_sector, &is_seq);
925 	if (!alloc_len) {
926 		xfs_open_zone_put(*oz);
927 		goto select_zone;
928 	}
929 
930 	while ((split = iomap_split_ioend(ioend, alloc_len, is_seq))) {
931 		if (IS_ERR(split))
932 			goto out_split_error;
933 		alloc_len -= split->io_bio.bi_iter.bi_size;
934 		xfs_submit_zoned_bio(split, *oz, is_seq);
935 		if (!alloc_len) {
936 			xfs_open_zone_put(*oz);
937 			goto select_zone;
938 		}
939 	}
940 
941 	xfs_submit_zoned_bio(ioend, *oz, is_seq);
942 	return;
943 
944 out_split_error:
945 	ioend->io_bio.bi_status = errno_to_blk_status(PTR_ERR(split));
946 out_error:
947 	bio_io_error(&ioend->io_bio);
948 }
949 
950 /*
951  * Wake up all threads waiting for a zoned space allocation when the file system
952  * is shut down.
953  */
954 void
955 xfs_zoned_wake_all(
956 	struct xfs_mount	*mp)
957 {
958 	/*
959 	 * Don't wake up if there is no m_zone_info.  This is complicated by the
960 	 * fact that unmount can't atomically clear m_zone_info and thus we need
961 	 * to check SB_ACTIVE for that, but mount temporarily enables SB_ACTIVE
962 	 * during log recovery so we can't entirely rely on that either.
963 	 */
964 	if ((mp->m_super->s_flags & SB_ACTIVE) && mp->m_zone_info)
965 		wake_up_all(&mp->m_zone_info->zi_zone_wait);
966 }
967 
968 /*
969  * Check if @rgbno in @rtg is a potentially valid block.  It might still be
970  * unused, but that information is only found in the rmap.
971  */
972 bool
973 xfs_zone_rgbno_is_valid(
974 	struct xfs_rtgroup	*rtg,
975 	xfs_rgnumber_t		rgbno)
976 {
977 	lockdep_assert_held(&rtg_rmap(rtg)->i_lock);
978 
979 	if (rtg->rtg_open_zone)
980 		return rgbno < rtg->rtg_open_zone->oz_allocated;
981 	return !xa_get_mark(&rtg_mount(rtg)->m_groups[XG_TYPE_RTG].xa,
982 			rtg_rgno(rtg), XFS_RTG_FREE);
983 }
984 
985 static void
986 xfs_free_open_zones(
987 	struct xfs_zone_info	*zi)
988 {
989 	struct xfs_open_zone	*oz;
990 
991 	spin_lock(&zi->zi_open_zones_lock);
992 	while ((oz = list_first_entry_or_null(&zi->zi_open_zones,
993 			struct xfs_open_zone, oz_entry))) {
994 		list_del(&oz->oz_entry);
995 		xfs_open_zone_put(oz);
996 	}
997 	spin_unlock(&zi->zi_open_zones_lock);
998 }
999 
1000 struct xfs_init_zones {
1001 	struct xfs_mount	*mp;
1002 	uint64_t		available;
1003 	uint64_t		reclaimable;
1004 };
1005 
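/*
 * Set up the in-memory state for one zone at mount time: determine the
 * write pointer from the hardware zone report or, for conventional zones,
 * from the highest block recorded in the rmap; reset zones that are fully
 * written but contain no used blocks; and classify the zone as free, open
 * or (partially) reclaimable.
 */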
1006 static int
1007 xfs_init_zone(
1008 	struct xfs_init_zones	*iz,
1009 	struct xfs_rtgroup	*rtg,
1010 	struct blk_zone		*zone)
1011 {
1012 	struct xfs_mount	*mp = rtg_mount(rtg);
1013 	struct xfs_zone_info	*zi = mp->m_zone_info;
1014 	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;
1015 	xfs_rgblock_t		write_pointer, highest_rgbno;
1016 	int			error;
1017 
1018 	if (zone && !xfs_zone_validate(zone, rtg, &write_pointer))
1019 		return -EFSCORRUPTED;
1020 
1021 	/*
1022 	 * For sequential write required zones we retrieved the hardware write
1023 	 * pointer above.
1024 	 *
1025 	 * For conventional zones or conventional devices we don't have that
1026 	 * luxury.  Instead query the rmap to find the highest recorded block
1027 	 * and set the write pointer to the block after that.  In case of a
1028 	 * power loss this misses blocks where the data I/O has completed but
1029 	 * not recorded in the rmap yet, and it also rewrites blocks if the most
1030 	 * recently written ones got deleted again before unmount, but this is
1031 	 * the best we can do without hardware support.
1032 	 */
1033 	if (!zone || zone->cond == BLK_ZONE_COND_NOT_WP) {
1034 		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
1035 		highest_rgbno = xfs_rtrmap_highest_rgbno(rtg);
1036 		if (highest_rgbno == NULLRGBLOCK)
1037 			write_pointer = 0;
1038 		else
1039 			write_pointer = highest_rgbno + 1;
1040 		xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
1041 	}
1042 
1043 	/*
1044 	 * If there are no used blocks, but the zone is not in the empty state
1045 	 * yet, we lost power before the zone reset.  In that case finish the work
1046 	 * here.
1047 	 */
1048 	if (write_pointer == rtg_blocks(rtg) && used == 0) {
1049 		error = xfs_zone_gc_reset_sync(rtg);
1050 		if (error)
1051 			return error;
1052 		write_pointer = 0;
1053 	}
1054 
1055 	if (write_pointer == 0) {
1056 		/* zone is empty */
1057 		atomic_inc(&zi->zi_nr_free_zones);
1058 		xfs_group_set_mark(&rtg->rtg_group, XFS_RTG_FREE);
1059 		iz->available += rtg_blocks(rtg);
1060 	} else if (write_pointer < rtg_blocks(rtg)) {
1061 		/* zone is open */
1062 		struct xfs_open_zone *oz;
1063 
1064 		atomic_inc(&rtg_group(rtg)->xg_active_ref);
1065 		oz = xfs_init_open_zone(rtg, write_pointer, WRITE_LIFE_NOT_SET,
1066 				false);
1067 		list_add_tail(&oz->oz_entry, &zi->zi_open_zones);
1068 		zi->zi_nr_open_zones++;
1069 
1070 		iz->available += (rtg_blocks(rtg) - write_pointer);
1071 		iz->reclaimable += write_pointer - used;
1072 	} else if (used < rtg_blocks(rtg)) {
1073 		/* zone fully written, but has freed blocks */
1074 		xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
1075 		iz->reclaimable += (rtg_blocks(rtg) - used);
1076 	}
1077 
1078 	return 0;
1079 }
1080 
1081 static int
1082 xfs_get_zone_info_cb(
1083 	struct blk_zone		*zone,
1084 	unsigned int		idx,
1085 	void			*data)
1086 {
1087 	struct xfs_init_zones	*iz = data;
1088 	struct xfs_mount	*mp = iz->mp;
1089 	xfs_fsblock_t		zsbno = xfs_daddr_to_rtb(mp, zone->start);
1090 	xfs_rgnumber_t		rgno;
1091 	struct xfs_rtgroup	*rtg;
1092 	int			error;
1093 
1094 	if (xfs_rtb_to_rgbno(mp, zsbno) != 0) {
1095 		xfs_warn(mp, "mismatched zone start 0x%llx.", zsbno);
1096 		return -EFSCORRUPTED;
1097 	}
1098 
1099 	rgno = xfs_rtb_to_rgno(mp, zsbno);
1100 	rtg = xfs_rtgroup_grab(mp, rgno);
1101 	if (!rtg) {
1102 		xfs_warn(mp, "realtime group not found for zone %u.", rgno);
1103 		return -EFSCORRUPTED;
1104 	}
1105 	error = xfs_init_zone(iz, rtg, zone);
1106 	xfs_rtgroup_rele(rtg);
1107 	return error;
1108 }
1109 
1110 /*
1111  * Calculate the max open zone limit based on the number of backing zones
1112  * available.
1113  */
1114 static inline uint32_t
1115 xfs_max_open_zones(
1116 	struct xfs_mount	*mp)
1117 {
1118 	unsigned int		max_open, max_open_data_zones;
1119 
1120 	/*
1121 	 * We need two zones for every open data zone, one in reserve as we
1122 	 * don't reclaim open zones.  One data zone and its spare is included
1123 	 * in XFS_MIN_ZONES to support at least one user data writer.
1124 	 */
1125 	max_open_data_zones = (mp->m_sb.sb_rgcount - XFS_MIN_ZONES) / 2 + 1;
1126 	max_open = max_open_data_zones + XFS_OPEN_GC_ZONES;
1127 
1128 	/*
1129 	 * Cap the max open limit to 1/4 of the available zones.  Without this we'd
1130 	 * run out of easy reclaim targets too quickly and storage devices don't
1131 	 * handle huge numbers of concurrent write streams overly well.
1132 	 */
1133 	max_open = min(max_open, mp->m_sb.sb_rgcount / 4);
1134 
1135 	return max(XFS_MIN_OPEN_ZONES, max_open);
1136 }
1137 
1138 /*
1139  * Normally we use the open zone limit that the device reports.  If there is
1140  * none, let the user pick one from the command line.
1141  *
1142  * If the device doesn't report an open zone limit and there is no override,
1143  * allow holding about a quarter of the zones open.  In theory we could allow
1144  * all to be open, but at that point we run into GC deadlocks because we can't
1145  * reclaim open zones.
1146  *
1147  * When used on conventional SSDs a lower open limit is advisable as we'll
1148  * otherwise overwhelm the FTL just as much as a conventional block allocator.
1149  *
1150  * Note: To debug the open zone management code, force max_open to 1 here.
1151  */
1152 static int
1153 xfs_calc_open_zones(
1154 	struct xfs_mount	*mp)
1155 {
1156 	struct block_device	*bdev = mp->m_rtdev_targp->bt_bdev;
1157 	unsigned int		bdev_open_zones = bdev_max_open_zones(bdev);
1158 
1159 	if (!mp->m_max_open_zones) {
1160 		if (bdev_open_zones)
1161 			mp->m_max_open_zones = bdev_open_zones;
1162 		else
1163 			mp->m_max_open_zones = xfs_max_open_zones(mp);
1164 	}
1165 
1166 	if (mp->m_max_open_zones < XFS_MIN_OPEN_ZONES) {
1167 		xfs_notice(mp, "need at least %u open zones.",
1168 			XFS_MIN_OPEN_ZONES);
1169 		return -EIO;
1170 	}
1171 
1172 	if (bdev_open_zones && bdev_open_zones < mp->m_max_open_zones) {
1173 		mp->m_max_open_zones = bdev_open_zones;
1174 		xfs_info(mp, "limiting open zones to %u due to hardware limit.",
1175 			bdev_open_zones);
1176 	}
1177 
1178 	if (mp->m_max_open_zones > xfs_max_open_zones(mp)) {
1179 		mp->m_max_open_zones = xfs_max_open_zones(mp);
1180 		xfs_info(mp,
1181 "limiting open zones to %u due to total zone count (%u)",
1182 			mp->m_max_open_zones, mp->m_sb.sb_rgcount);
1183 	}
1184 
1185 	return 0;
1186 }
1187 
1188 static unsigned long *
1189 xfs_alloc_bucket_bitmap(
1190 	struct xfs_mount	*mp)
1191 {
1192 	return kvmalloc_array(BITS_TO_LONGS(mp->m_sb.sb_rgcount),
1193 			sizeof(unsigned long), GFP_KERNEL | __GFP_ZERO);
1194 }
1195 
1196 static struct xfs_zone_info *
1197 xfs_alloc_zone_info(
1198 	struct xfs_mount	*mp)
1199 {
1200 	struct xfs_zone_info	*zi;
1201 	int			i;
1202 
1203 	zi = kzalloc(sizeof(*zi), GFP_KERNEL);
1204 	if (!zi)
1205 		return NULL;
1206 	INIT_LIST_HEAD(&zi->zi_open_zones);
1207 	INIT_LIST_HEAD(&zi->zi_reclaim_reservations);
1208 	spin_lock_init(&zi->zi_reset_list_lock);
1209 	spin_lock_init(&zi->zi_open_zones_lock);
1210 	spin_lock_init(&zi->zi_reservation_lock);
1211 	init_waitqueue_head(&zi->zi_zone_wait);
1212 	spin_lock_init(&zi->zi_used_buckets_lock);
1213 	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++) {
1214 		zi->zi_used_bucket_bitmap[i] = xfs_alloc_bucket_bitmap(mp);
1215 		if (!zi->zi_used_bucket_bitmap[i])
1216 			goto out_free_bitmaps;
1217 	}
1218 	return zi;
1219 
1220 out_free_bitmaps:
1221 	while (--i >= 0)
1222 		kvfree(zi->zi_used_bucket_bitmap[i]);
1223 	kfree(zi);
1224 	return NULL;
1225 }
1226 
1227 static void
1228 xfs_free_zone_info(
1229 	struct xfs_zone_info	*zi)
1230 {
1231 	int			i;
1232 
1233 	xfs_free_open_zones(zi);
1234 	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++)
1235 		kvfree(zi->zi_used_bucket_bitmap[i]);
1236 	kfree(zi);
1237 }
1238 
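/*
 * Mount time setup of the zone allocator: validate the zoned geometry,
 * calculate the open zone limit, build the per-zone state from a zone
 * report (or from the rmap on conventional devices), initialize the free
 * space counters, and start garbage collection and the inode-to-zone
 * cache.
 */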
1239 int
1240 xfs_mount_zones(
1241 	struct xfs_mount	*mp)
1242 {
1243 	struct xfs_init_zones	iz = {
1244 		.mp		= mp,
1245 	};
1246 	struct xfs_buftarg	*bt = mp->m_rtdev_targp;
1247 	int			error;
1248 
1249 	if (!bt) {
1250 		xfs_notice(mp, "RT device missing.");
1251 		return -EINVAL;
1252 	}
1253 
1254 	if (!xfs_has_rtgroups(mp) || !xfs_has_rmapbt(mp)) {
1255 		xfs_notice(mp, "invalid flag combination.");
1256 		return -EFSCORRUPTED;
1257 	}
1258 	if (mp->m_sb.sb_rextsize != 1) {
1259 		xfs_notice(mp, "zoned file systems do not support rextsize.");
1260 		return -EFSCORRUPTED;
1261 	}
1262 	if (mp->m_sb.sb_rgcount < XFS_MIN_ZONES) {
1263 		xfs_notice(mp,
1264 "zoned file systems need to have at least %u zones.", XFS_MIN_ZONES);
1265 		return -EFSCORRUPTED;
1266 	}
1267 
1268 	error = xfs_calc_open_zones(mp);
1269 	if (error)
1270 		return error;
1271 
1272 	mp->m_zone_info = xfs_alloc_zone_info(mp);
1273 	if (!mp->m_zone_info)
1274 		return -ENOMEM;
1275 
1276 	xfs_info(mp, "%u zones of %u blocks each (%u max open)",
1277 		 mp->m_sb.sb_rgcount, mp->m_groups[XG_TYPE_RTG].blocks,
1278 		 mp->m_max_open_zones);
1279 	trace_xfs_zones_mount(mp);
1280 
1281 	if (bdev_is_zoned(bt->bt_bdev)) {
1282 		error = blkdev_report_zones(bt->bt_bdev,
1283 				XFS_FSB_TO_BB(mp, mp->m_sb.sb_rtstart),
1284 				mp->m_sb.sb_rgcount, xfs_get_zone_info_cb, &iz);
1285 		if (error < 0)
1286 			goto out_free_zone_info;
1287 	} else {
1288 		struct xfs_rtgroup	*rtg = NULL;
1289 
1290 		while ((rtg = xfs_rtgroup_next(mp, rtg))) {
1291 			error = xfs_init_zone(&iz, rtg, NULL);
1292 			if (error)
1293 				goto out_free_zone_info;
1294 		}
1295 	}
1296 
1297 	xfs_set_freecounter(mp, XC_FREE_RTAVAILABLE, iz.available);
1298 	xfs_set_freecounter(mp, XC_FREE_RTEXTENTS,
1299 			iz.available + iz.reclaimable);
1300 
1301 	/*
1302 	 * The user may configure GC to free up a percentage of unused blocks.
1303 	 * By default this is 0.  GC will always trigger at the minimum level
1304 	 * required to keep max_open_zones available for data placement.
1305 	 */
1306 	mp->m_zonegc_low_space = 0;
1307 
1308 	error = xfs_zone_gc_mount(mp);
1309 	if (error)
1310 		goto out_free_zone_info;
1311 
1312 	/*
1313 	 * Set up a mru cache to track inode to open zone for data placement
1314 	 * purposes.  The magic values for group count and lifetime are the
1315 	 * same as the defaults for file streams, which seems sane enough.
1316 	 */
1317 	xfs_mru_cache_create(&mp->m_zone_cache, mp,
1318 			5000, 10, xfs_zone_cache_free_func);
1319 	return 0;
1320 
1321 out_free_zone_info:
1322 	xfs_free_zone_info(mp->m_zone_info);
1323 	return error;
1324 }
1325 
1326 void
1327 xfs_unmount_zones(
1328 	struct xfs_mount	*mp)
1329 {
1330 	xfs_zone_gc_unmount(mp);
1331 	xfs_free_zone_info(mp->m_zone_info);
1332 	xfs_mru_cache_destroy(mp->m_zone_cache);
1333 }
1334