xref: /linux/fs/xfs/xfs_zone_alloc.c (revision 22c55fb9eb92395d999b8404d73e58540d11bdd8)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2023-2025 Christoph Hellwig.
4  * Copyright (c) 2024-2025, Western Digital Corporation or its affiliates.
5  */
6 #include "xfs.h"
7 #include "xfs_shared.h"
8 #include "xfs_format.h"
9 #include "xfs_log_format.h"
10 #include "xfs_error.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_iomap.h"
15 #include "xfs_trans.h"
16 #include "xfs_alloc.h"
17 #include "xfs_bmap.h"
18 #include "xfs_bmap_btree.h"
19 #include "xfs_trans_space.h"
20 #include "xfs_refcount.h"
21 #include "xfs_rtbitmap.h"
22 #include "xfs_rtrmap_btree.h"
23 #include "xfs_zone_alloc.h"
24 #include "xfs_zone_priv.h"
25 #include "xfs_zones.h"
26 #include "xfs_trace.h"
27 #include "xfs_mru_cache.h"
28 
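/*
 * Drop a reference to an open zone.  Once the last reference goes away the
 * reference on the backing rtgroup is dropped and the structure is freed.
 */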
29 void
30 xfs_open_zone_put(
31 	struct xfs_open_zone	*oz)
32 {
33 	if (atomic_dec_and_test(&oz->oz_ref)) {
34 		xfs_rtgroup_rele(oz->oz_rtg);
35 		kfree(oz);
36 	}
37 }
38 
39 static inline uint32_t
40 xfs_zone_bucket(
41 	struct xfs_mount	*mp,
42 	uint32_t		used_blocks)
43 {
44 	return XFS_ZONE_USED_BUCKETS * used_blocks /
45 			mp->m_groups[XG_TYPE_RTG].blocks;
46 }
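/*
 * Worked example with made-up numbers: if a zone holds 65536 blocks and
 * XFS_ZONE_USED_BUCKETS were 4, a zone with 40000 used blocks would land in
 * bucket 4 * 40000 / 65536 = 2, i.e. the 50-75% used bucket.
 */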
47 
48 static inline void
49 xfs_zone_add_to_bucket(
50 	struct xfs_zone_info	*zi,
51 	xfs_rgnumber_t		rgno,
52 	uint32_t		to_bucket)
53 {
54 	__set_bit(rgno, zi->zi_used_bucket_bitmap[to_bucket]);
55 	zi->zi_used_bucket_entries[to_bucket]++;
56 }
57 
58 static inline void
59 xfs_zone_remove_from_bucket(
60 	struct xfs_zone_info	*zi,
61 	xfs_rgnumber_t		rgno,
62 	uint32_t		from_bucket)
63 {
64 	__clear_bit(rgno, zi->zi_used_bucket_bitmap[from_bucket]);
65 	zi->zi_used_bucket_entries[from_bucket]--;
66 }
67 
68 static void
69 xfs_zone_account_reclaimable(
70 	struct xfs_rtgroup	*rtg,
71 	uint32_t		freed)
72 {
73 	struct xfs_group	*xg = &rtg->rtg_group;
74 	struct xfs_mount	*mp = rtg_mount(rtg);
75 	struct xfs_zone_info	*zi = mp->m_zone_info;
76 	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;
77 	xfs_rgnumber_t		rgno = rtg_rgno(rtg);
78 	uint32_t		from_bucket = xfs_zone_bucket(mp, used + freed);
79 	uint32_t		to_bucket = xfs_zone_bucket(mp, used);
80 	bool			was_full = (used + freed == rtg_blocks(rtg));
81 
82 	/*
83 	 * This can be called from log recovery, where the zone_info structure
84 	 * hasn't been allocated yet.  Skip all work as xfs_mount_zones will
85 	 * add the zones to the right buckets before the file system becomes
86 	 * active.
87 	 */
88 	if (!zi)
89 		return;
90 
91 	if (!used) {
92 		/*
93 		 * The zone is now empty, remove it from the bottom bucket and
94 		 * trigger a reset.
95 		 */
96 		trace_xfs_zone_emptied(rtg);
97 
98 		if (!was_full)
99 			xfs_group_clear_mark(xg, XFS_RTG_RECLAIMABLE);
100 
101 		spin_lock(&zi->zi_used_buckets_lock);
102 		if (!was_full)
103 			xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
104 		spin_unlock(&zi->zi_used_buckets_lock);
105 
106 		spin_lock(&zi->zi_reset_list_lock);
107 		xg->xg_next_reset = zi->zi_reset_list;
108 		zi->zi_reset_list = xg;
109 		spin_unlock(&zi->zi_reset_list_lock);
110 
111 		if (zi->zi_gc_thread)
112 			wake_up_process(zi->zi_gc_thread);
113 	} else if (was_full) {
114 		/*
115 	 * The zone transitioned from full; mark it as reclaimable
116 		 * and wake up GC which might be waiting for zones to reclaim.
117 		 */
118 		spin_lock(&zi->zi_used_buckets_lock);
119 		xfs_zone_add_to_bucket(zi, rgno, to_bucket);
120 		spin_unlock(&zi->zi_used_buckets_lock);
121 
122 		xfs_group_set_mark(xg, XFS_RTG_RECLAIMABLE);
123 		if (zi->zi_gc_thread && xfs_zoned_need_gc(mp))
124 			wake_up_process(zi->zi_gc_thread);
125 	} else if (to_bucket != from_bucket) {
126 		/*
127 		 * Move the zone to a new bucket if it dropped below the
128 		 * threshold.
129 		 */
130 		spin_lock(&zi->zi_used_buckets_lock);
131 		xfs_zone_add_to_bucket(zi, rgno, to_bucket);
132 		xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
133 		spin_unlock(&zi->zi_used_buckets_lock);
134 	}
135 }
136 
137 static void
138 xfs_open_zone_mark_full(
139 	struct xfs_open_zone	*oz)
140 {
141 	struct xfs_rtgroup	*rtg = oz->oz_rtg;
142 	struct xfs_mount	*mp = rtg_mount(rtg);
143 	struct xfs_zone_info	*zi = mp->m_zone_info;
144 	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;
145 
146 	trace_xfs_zone_full(rtg);
147 
148 	WRITE_ONCE(rtg->rtg_open_zone, NULL);
149 
150 	spin_lock(&zi->zi_open_zones_lock);
151 	if (oz->oz_is_gc) {
152 		ASSERT(current == zi->zi_gc_thread);
153 		zi->zi_open_gc_zone = NULL;
154 	} else {
155 		zi->zi_nr_open_zones--;
156 		list_del_init(&oz->oz_entry);
157 	}
158 	spin_unlock(&zi->zi_open_zones_lock);
159 	xfs_open_zone_put(oz);
160 
161 	wake_up_all(&zi->zi_zone_wait);
162 	if (used < rtg_blocks(rtg))
163 		xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
164 }
165 
166 static void
167 xfs_zone_record_blocks(
168 	struct xfs_trans	*tp,
169 	struct xfs_open_zone	*oz,
170 	xfs_fsblock_t		fsbno,
171 	xfs_filblks_t		len)
172 {
173 	struct xfs_mount	*mp = tp->t_mountp;
174 	struct xfs_rtgroup	*rtg = oz->oz_rtg;
175 	struct xfs_inode	*rmapip = rtg_rmap(rtg);
176 
177 	trace_xfs_zone_record_blocks(oz, xfs_rtb_to_rgbno(mp, fsbno), len);
178 
179 	xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
180 	xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);
181 	rmapip->i_used_blocks += len;
182 	ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg));
183 	oz->oz_written += len;
184 	if (oz->oz_written == rtg_blocks(rtg))
185 		xfs_open_zone_mark_full(oz);
186 	xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
187 }
188 
189 /*
190  * Called for blocks that have been written to disk, but not actually linked to
191  * an inode, which can happen when garbage collection races with user data
192  * writes to a file.
193  */
194 static void
195 xfs_zone_skip_blocks(
196 	struct xfs_open_zone	*oz,
197 	xfs_filblks_t		len)
198 {
199 	struct xfs_rtgroup	*rtg = oz->oz_rtg;
200 
201 	trace_xfs_zone_skip_blocks(oz, 0, len);
202 
203 	xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
204 	oz->oz_written += len;
205 	if (oz->oz_written == rtg_blocks(rtg))
206 		xfs_open_zone_mark_full(oz);
207 	xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
208 
209 	xfs_add_frextents(rtg_mount(rtg), len);
210 }
211 
212 static int
213 xfs_zoned_map_extent(
214 	struct xfs_trans	*tp,
215 	struct xfs_inode	*ip,
216 	struct xfs_bmbt_irec	*new,
217 	struct xfs_open_zone	*oz,
218 	xfs_fsblock_t		old_startblock)
219 {
220 	struct xfs_bmbt_irec	data;
221 	int			nmaps = 1;
222 	int			error;
223 
224 	/* Grab the corresponding mapping in the data fork. */
225 	error = xfs_bmapi_read(ip, new->br_startoff, new->br_blockcount, &data,
226 			       &nmaps, 0);
227 	if (error)
228 		return error;
229 
230 	/*
231 	 * Cap the update to the existing extent in the data fork because we can
232 	 * only overwrite one extent at a time.
233 	 */
234 	ASSERT(new->br_blockcount >= data.br_blockcount);
235 	new->br_blockcount = data.br_blockcount;
236 
237 	/*
238 	 * If a data write raced with this GC write, keep the existing data in
239 	 * the data fork, mark our newly written GC extent as reclaimable, then
240 	 * move on to the next extent.
241 	 */
242 	if (old_startblock != NULLFSBLOCK &&
243 	    old_startblock != data.br_startblock)
244 		goto skip;
245 
246 	trace_xfs_reflink_cow_remap_from(ip, new);
247 	trace_xfs_reflink_cow_remap_to(ip, &data);
248 
249 	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
250 			XFS_IEXT_REFLINK_END_COW_CNT);
251 	if (error)
252 		return error;
253 
254 	if (data.br_startblock != HOLESTARTBLOCK) {
255 		ASSERT(data.br_startblock != DELAYSTARTBLOCK);
256 		ASSERT(!isnullstartblock(data.br_startblock));
257 
258 		xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &data);
259 		if (xfs_is_reflink_inode(ip)) {
260 			xfs_refcount_decrease_extent(tp, true, &data);
261 		} else {
262 			error = xfs_free_extent_later(tp, data.br_startblock,
263 					data.br_blockcount, NULL,
264 					XFS_AG_RESV_NONE,
265 					XFS_FREE_EXTENT_REALTIME);
266 			if (error)
267 				return error;
268 		}
269 	}
270 
271 	xfs_zone_record_blocks(tp, oz, new->br_startblock, new->br_blockcount);
272 
273 	/* Map the new blocks into the data fork. */
274 	xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, new);
275 	return 0;
276 
277 skip:
278 	trace_xfs_reflink_cow_remap_skip(ip, new);
279 	xfs_zone_skip_blocks(oz, new->br_blockcount);
280 	return 0;
281 }
282 
283 int
284 xfs_zoned_end_io(
285 	struct xfs_inode	*ip,
286 	xfs_off_t		offset,
287 	xfs_off_t		count,
288 	xfs_daddr_t		daddr,
289 	struct xfs_open_zone	*oz,
290 	xfs_fsblock_t		old_startblock)
291 {
292 	struct xfs_mount	*mp = ip->i_mount;
293 	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
294 	struct xfs_bmbt_irec	new = {
295 		.br_startoff	= XFS_B_TO_FSBT(mp, offset),
296 		.br_startblock	= xfs_daddr_to_rtb(mp, daddr),
297 		.br_state	= XFS_EXT_NORM,
298 	};
299 	unsigned int		resblks =
300 		XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
301 	struct xfs_trans	*tp;
302 	int			error;
303 
304 	if (xfs_is_shutdown(mp))
305 		return -EIO;
306 
307 	while (new.br_startoff < end_fsb) {
308 		new.br_blockcount = end_fsb - new.br_startoff;
309 
310 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
311 				XFS_TRANS_RESERVE | XFS_TRANS_RES_FDBLKS, &tp);
312 		if (error)
313 			return error;
314 		xfs_ilock(ip, XFS_ILOCK_EXCL);
315 		xfs_trans_ijoin(tp, ip, 0);
316 
317 		error = xfs_zoned_map_extent(tp, ip, &new, oz, old_startblock);
318 		if (error)
319 			xfs_trans_cancel(tp);
320 		else
321 			error = xfs_trans_commit(tp);
322 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
323 		if (error)
324 			return error;
325 
326 		new.br_startoff += new.br_blockcount;
327 		new.br_startblock += new.br_blockcount;
328 		if (old_startblock != NULLFSBLOCK)
329 			old_startblock += new.br_blockcount;
330 	}
331 
332 	return 0;
333 }
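/*
 * Hypothetical usage sketch (the real callers live outside this file): once
 * a zoned write bio covering the byte range [offset, offset + count) has
 * completed at sector daddr, the completion path remaps the blocks with
 *
 *	error = xfs_zoned_end_io(ip, offset, count, daddr, oz, NULLFSBLOCK);
 *
 * where old_startblock is NULLFSBLOCK for regular writes, or the pre-GC
 * start block for garbage collection writes so that races with user I/O
 * can be detected and skipped.
 */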
334 
335 /*
336  * "Free" blocks allocated in a zone.
337  *
338  * Just decrement the used blocks counter and report the space as freed.
339  */
340 int
341 xfs_zone_free_blocks(
342 	struct xfs_trans	*tp,
343 	struct xfs_rtgroup	*rtg,
344 	xfs_fsblock_t		fsbno,
345 	xfs_filblks_t		len)
346 {
347 	struct xfs_mount	*mp = tp->t_mountp;
348 	struct xfs_inode	*rmapip = rtg_rmap(rtg);
349 
350 	xfs_assert_ilocked(rmapip, XFS_ILOCK_EXCL);
351 
352 	if (len > rmapip->i_used_blocks) {
353 		xfs_err(mp,
354 "trying to free more blocks (%lld) than used counter (%u).",
355 			len, rmapip->i_used_blocks);
356 		ASSERT(len <= rmapip->i_used_blocks);
357 		xfs_rtginode_mark_sick(rtg, XFS_RTGI_RMAP);
358 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
359 		return -EFSCORRUPTED;
360 	}
361 
362 	trace_xfs_zone_free_blocks(rtg, xfs_rtb_to_rgbno(mp, fsbno), len);
363 
364 	rmapip->i_used_blocks -= len;
365 	/*
366 	 * Don't add open zones to the reclaimable buckets.  The I/O completion
367 	 * for writing the last block will take care of accounting for already
368 	 * unused blocks instead.
369 	 */
370 	if (!READ_ONCE(rtg->rtg_open_zone))
371 		xfs_zone_account_reclaimable(rtg, len);
372 	xfs_add_frextents(mp, len);
373 	xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
374 	return 0;
375 }
376 
377 /*
378  * Check if the zone containing the data just before the offset we are
379  * writing to is still open and has space.
380  */
381 static struct xfs_open_zone *
382 xfs_last_used_zone(
383 	struct iomap_ioend	*ioend)
384 {
385 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
386 	struct xfs_mount	*mp = ip->i_mount;
387 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSB(mp, ioend->io_offset);
388 	struct xfs_rtgroup	*rtg = NULL;
389 	struct xfs_open_zone	*oz = NULL;
390 	struct xfs_iext_cursor	icur;
391 	struct xfs_bmbt_irec	got;
392 
393 	xfs_ilock(ip, XFS_ILOCK_SHARED);
394 	if (!xfs_iext_lookup_extent_before(ip, &ip->i_df, &offset_fsb,
395 				&icur, &got)) {
396 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
397 		return NULL;
398 	}
399 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
400 
401 	rtg = xfs_rtgroup_grab(mp, xfs_rtb_to_rgno(mp, got.br_startblock));
402 	if (!rtg)
403 		return NULL;
404 
405 	xfs_ilock(rtg_rmap(rtg), XFS_ILOCK_SHARED);
406 	oz = READ_ONCE(rtg->rtg_open_zone);
407 	if (oz && (oz->oz_is_gc || !atomic_inc_not_zero(&oz->oz_ref)))
408 		oz = NULL;
409 	xfs_iunlock(rtg_rmap(rtg), XFS_ILOCK_SHARED);
410 
411 	xfs_rtgroup_rele(rtg);
412 	return oz;
413 }
414 
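/*
 * Scan the rtgroup xarray in [start, end] for a group marked XFS_RTG_FREE,
 * take an active reference on it, clear the free mark and advance the free
 * zone cursor.  Returns NULL if no free zone was found in the range.
 */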
415 static struct xfs_group *
416 xfs_find_free_zone(
417 	struct xfs_mount	*mp,
418 	unsigned long		start,
419 	unsigned long		end)
420 {
421 	struct xfs_zone_info	*zi = mp->m_zone_info;
422 	XA_STATE		(xas, &mp->m_groups[XG_TYPE_RTG].xa, start);
423 	struct xfs_group	*xg;
424 
425 	xas_lock(&xas);
426 	xas_for_each_marked(&xas, xg, end, XFS_RTG_FREE)
427 		if (atomic_inc_not_zero(&xg->xg_active_ref))
428 			goto found;
429 	xas_unlock(&xas);
430 	return NULL;
431 
432 found:
433 	xas_clear_mark(&xas, XFS_RTG_FREE);
434 	atomic_dec(&zi->zi_nr_free_zones);
435 	zi->zi_free_zone_cursor = xg->xg_gno;
436 	xas_unlock(&xas);
437 	return xg;
438 }
439 
440 static struct xfs_open_zone *
441 xfs_init_open_zone(
442 	struct xfs_rtgroup	*rtg,
443 	xfs_rgblock_t		write_pointer,
444 	enum rw_hint		write_hint,
445 	bool			is_gc)
446 {
447 	struct xfs_open_zone	*oz;
448 
449 	oz = kzalloc(sizeof(*oz), GFP_NOFS | __GFP_NOFAIL);
450 	spin_lock_init(&oz->oz_alloc_lock);
451 	atomic_set(&oz->oz_ref, 1);
452 	oz->oz_rtg = rtg;
453 	oz->oz_allocated = write_pointer;
454 	oz->oz_written = write_pointer;
455 	oz->oz_write_hint = write_hint;
456 	oz->oz_is_gc = is_gc;
457 
458 	/*
459 	 * All dereferences of rtg->rtg_open_zone hold the ILOCK for the rmap
460 	 * inode, but we don't really want to take that here because we are
461 	 * under the zone_list_lock.  Ensure the pointer is only set for a fully
462 	 * initialized open zone structure so that a racy lookup finding it is
463 	 * fine.
464 	 */
465 	WRITE_ONCE(rtg->rtg_open_zone, oz);
466 	return oz;
467 }
468 
469 /*
470  * Find a completely free zone, open it, and return a reference.
471  */
472 struct xfs_open_zone *
473 xfs_open_zone(
474 	struct xfs_mount	*mp,
475 	enum rw_hint		write_hint,
476 	bool			is_gc)
477 {
478 	struct xfs_zone_info	*zi = mp->m_zone_info;
479 	struct xfs_group	*xg;
480 
481 	xg = xfs_find_free_zone(mp, zi->zi_free_zone_cursor, ULONG_MAX);
482 	if (!xg)
483 		xg = xfs_find_free_zone(mp, 0, zi->zi_free_zone_cursor);
484 	if (!xg)
485 		return NULL;
486 
487 	set_current_state(TASK_RUNNING);
488 	return xfs_init_open_zone(to_rtg(xg), 0, write_hint, is_gc);
489 }
490 
491 static struct xfs_open_zone *
492 xfs_try_open_zone(
493 	struct xfs_mount	*mp,
494 	enum rw_hint		write_hint)
495 {
496 	struct xfs_zone_info	*zi = mp->m_zone_info;
497 	struct xfs_open_zone	*oz;
498 
499 	if (zi->zi_nr_open_zones >= mp->m_max_open_zones - XFS_OPEN_GC_ZONES)
500 		return NULL;
501 	if (atomic_read(&zi->zi_nr_free_zones) <
502 	    XFS_GC_ZONES - XFS_OPEN_GC_ZONES)
503 		return NULL;
504 
505 	/*
506 	 * Increment the open zone count to reserve our slot before dropping
507 	 * zi_open_zones_lock.
508 	 */
509 	zi->zi_nr_open_zones++;
510 	spin_unlock(&zi->zi_open_zones_lock);
511 	oz = xfs_open_zone(mp, write_hint, false);
512 	spin_lock(&zi->zi_open_zones_lock);
513 	if (!oz) {
514 		zi->zi_nr_open_zones--;
515 		return NULL;
516 	}
517 
518 	atomic_inc(&oz->oz_ref);
519 	list_add_tail(&oz->oz_entry, &zi->zi_open_zones);
520 
521 	/*
522 	 * If this was the last free zone, other waiters might be waiting
523 	 * on us to write to it as well.
524 	 */
525 	wake_up_all(&zi->zi_zone_wait);
526 
527 	if (xfs_zoned_need_gc(mp))
528 		wake_up_process(zi->zi_gc_thread);
529 
530 	trace_xfs_zone_opened(oz->oz_rtg);
531 	return oz;
532 }
533 
534 /*
535  * For data with short or medium lifetime, try to colocate it into an
536  * already open zone with a matching temperature.
537  */
538 static bool
539 xfs_colocate_eagerly(
540 	enum rw_hint		file_hint)
541 {
542 	switch (file_hint) {
543 	case WRITE_LIFE_MEDIUM:
544 	case WRITE_LIFE_SHORT:
545 	case WRITE_LIFE_NONE:
546 		return true;
547 	default:
548 		return false;
549 	}
550 }
551 
552 static bool
553 xfs_good_hint_match(
554 	struct xfs_open_zone	*oz,
555 	enum rw_hint		file_hint)
556 {
557 	switch (oz->oz_write_hint) {
558 	case WRITE_LIFE_LONG:
559 	case WRITE_LIFE_EXTREME:
560 		/* colocate long and extreme */
561 		if (file_hint == WRITE_LIFE_LONG ||
562 		    file_hint == WRITE_LIFE_EXTREME)
563 			return true;
564 		break;
565 	case WRITE_LIFE_MEDIUM:
566 		/* colocate medium with medium */
567 		if (file_hint == WRITE_LIFE_MEDIUM)
568 			return true;
569 		break;
570 	case WRITE_LIFE_SHORT:
571 	case WRITE_LIFE_NONE:
572 	case WRITE_LIFE_NOT_SET:
573 		/* colocate short and none */
574 		if (file_hint <= WRITE_LIFE_SHORT)
575 			return true;
576 		break;
577 	}
578 	return false;
579 }
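/*
 * Summary of the matching above (open zone hint vs. acceptable file hints):
 *
 *	LONG/EXTREME zone	<- LONG or EXTREME data
 *	MEDIUM zone		<- MEDIUM data
 *	SHORT/NONE/NOT_SET zone	<- SHORT, NONE or NOT_SET data
 */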
580 
581 static bool
582 xfs_try_use_zone(
583 	struct xfs_zone_info	*zi,
584 	enum rw_hint		file_hint,
585 	struct xfs_open_zone	*oz,
586 	bool			lowspace)
587 {
588 	if (oz->oz_allocated == rtg_blocks(oz->oz_rtg))
589 		return false;
590 	if (!lowspace && !xfs_good_hint_match(oz, file_hint))
591 		return false;
592 	if (!atomic_inc_not_zero(&oz->oz_ref))
593 		return false;
594 
595 	/*
596 	 * If we have a hint set for the data, use that for the zone even if
597 	 * some data was written already without any hint set, but don't change
598 	 * the temperature after that as that would make little sense without
599 	 * tracking per-temperature class written block counts, which is
600 	 * probably overkill anyway.
601 	 */
602 	if (file_hint != WRITE_LIFE_NOT_SET &&
603 	    oz->oz_write_hint == WRITE_LIFE_NOT_SET)
604 		oz->oz_write_hint = file_hint;
605 
606 	/*
607 	 * If we couldn't match by inode or lifetime, we just pick the first
608 	 * zone with enough space above.  For that we want the least busy zone
609 	 * for some definition of "least" busy.  For now this simple LRU
610 	 * algorithm that rotates every zone to the end of the list will do it,
611 	 * even if it isn't exactly cache friendly.
612 	 */
613 	if (!list_is_last(&oz->oz_entry, &zi->zi_open_zones))
614 		list_move_tail(&oz->oz_entry, &zi->zi_open_zones);
615 	return true;
616 }
617 
618 static struct xfs_open_zone *
619 xfs_select_open_zone_lru(
620 	struct xfs_zone_info	*zi,
621 	enum rw_hint		file_hint,
622 	bool			lowspace)
623 {
624 	struct xfs_open_zone	*oz;
625 
626 	lockdep_assert_held(&zi->zi_open_zones_lock);
627 
628 	list_for_each_entry(oz, &zi->zi_open_zones, oz_entry)
629 		if (xfs_try_use_zone(zi, file_hint, oz, lowspace))
630 			return oz;
631 
632 	cond_resched_lock(&zi->zi_open_zones_lock);
633 	return NULL;
634 }
635 
636 static struct xfs_open_zone *
637 xfs_select_open_zone_mru(
638 	struct xfs_zone_info	*zi,
639 	enum rw_hint		file_hint)
640 {
641 	struct xfs_open_zone	*oz;
642 
643 	lockdep_assert_held(&zi->zi_open_zones_lock);
644 
645 	list_for_each_entry_reverse(oz, &zi->zi_open_zones, oz_entry)
646 		if (xfs_try_use_zone(zi, file_hint, oz, false))
647 			return oz;
648 
649 	cond_resched_lock(&zi->zi_open_zones_lock);
650 	return NULL;
651 }
652 
653 static inline enum rw_hint xfs_inode_write_hint(struct xfs_inode *ip)
654 {
655 	if (xfs_has_nolifetime(ip->i_mount))
656 		return WRITE_LIFE_NOT_SET;
657 	return VFS_I(ip)->i_write_hint;
658 }
659 
660 /*
661  * Try to tightly pack inodes that are written back after they were closed,
662  * instead of opening new zones for them or spreading them to the least recently
663  * used zone.  This optimizes the data layout for workloads that untar or copy
664  * a lot of small files.  Right now this does not separate multiple such
665  * streams.
666  */
667 static inline bool xfs_zoned_pack_tight(struct xfs_inode *ip)
668 {
669 	return !inode_is_open_for_write(VFS_I(ip)) &&
670 		!(ip->i_diflags & XFS_DIFLAG_APPEND);
671 }
672 
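/*
 * Summary of the zone selection policy below, in order of preference:
 *
 *  1) short/medium lifetime data: an open zone with a matching hint (LRU)
 *  2) pack-tight writeback: the most recently used open zone
 *  3) a freshly opened zone, if the open zone limit allows it
 *  4) other hinted (cold) data: an open zone with a matching hint (LRU)
 *  5) any open zone whose hint is short, none or not set
 *  6) any open zone with space left, ignoring hints (lowspace fallback)
 */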
673 static struct xfs_open_zone *
674 xfs_select_zone_nowait(
675 	struct xfs_mount	*mp,
676 	enum rw_hint		write_hint,
677 	bool			pack_tight)
678 {
679 	struct xfs_zone_info	*zi = mp->m_zone_info;
680 	struct xfs_open_zone	*oz = NULL;
681 
682 	if (xfs_is_shutdown(mp))
683 		return NULL;
684 
685 	/*
686 	 * Try to fill up open zones with matching temperature if available.  It
687 	 * is better to try to co-locate data when this is favorable, so that
688 	 * empty zones can be saved for cases where it is statistically better
689 	 * to separate data.
690 	 */
691 	spin_lock(&zi->zi_open_zones_lock);
692 	if (xfs_colocate_eagerly(write_hint))
693 		oz = xfs_select_open_zone_lru(zi, write_hint, false);
694 	else if (pack_tight)
695 		oz = xfs_select_open_zone_mru(zi, write_hint);
696 	if (oz)
697 		goto out_unlock;
698 
699 	/*
700 	 * See if we can open a new zone and use that so that data for different
701 	 * files is mixed as little as possible.
702 	 */
703 	oz = xfs_try_open_zone(mp, write_hint);
704 	if (oz)
705 		goto out_unlock;
706 
707 	/*
708 	 * Try to colocate cold data with other cold data if we failed to open a
709 	 * new zone for it.
710 	 */
711 	if (write_hint != WRITE_LIFE_NOT_SET &&
712 	    !xfs_colocate_eagerly(write_hint))
713 		oz = xfs_select_open_zone_lru(zi, write_hint, false);
714 	if (!oz)
715 		oz = xfs_select_open_zone_lru(zi, WRITE_LIFE_NOT_SET, false);
716 	if (!oz)
717 		oz = xfs_select_open_zone_lru(zi, WRITE_LIFE_NOT_SET, true);
718 out_unlock:
719 	spin_unlock(&zi->zi_open_zones_lock);
720 	return oz;
721 }
722 
723 static struct xfs_open_zone *
724 xfs_select_zone(
725 	struct xfs_mount	*mp,
726 	enum rw_hint		write_hint,
727 	bool			pack_tight)
728 {
729 	struct xfs_zone_info	*zi = mp->m_zone_info;
730 	DEFINE_WAIT		(wait);
731 	struct xfs_open_zone	*oz;
732 
733 	oz = xfs_select_zone_nowait(mp, write_hint, pack_tight);
734 	if (oz)
735 		return oz;
736 
737 	for (;;) {
738 		prepare_to_wait(&zi->zi_zone_wait, &wait, TASK_UNINTERRUPTIBLE);
739 		oz = xfs_select_zone_nowait(mp, write_hint, pack_tight);
740 		if (oz || xfs_is_shutdown(mp))
741 			break;
742 		schedule();
743 	}
744 	finish_wait(&zi->zi_zone_wait, &wait);
745 	return oz;
746 }
747 
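/*
 * Allocate up to count_fsb blocks from the open zone by advancing
 * oz_allocated under oz_alloc_lock.  Returns the allocated length in bytes,
 * or 0 if the zone is full.  *sector is set to the zone start for
 * sequential write required zones (the device picks the final location via
 * zone append), or to the exact start of the allocation for conventional
 * zones.
 */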
748 static unsigned int
749 xfs_zone_alloc_blocks(
750 	struct xfs_open_zone	*oz,
751 	xfs_filblks_t		count_fsb,
752 	sector_t		*sector,
753 	bool			*is_seq)
754 {
755 	struct xfs_rtgroup	*rtg = oz->oz_rtg;
756 	struct xfs_mount	*mp = rtg_mount(rtg);
757 	xfs_rgblock_t		allocated;
758 
759 	spin_lock(&oz->oz_alloc_lock);
760 	count_fsb = min3(count_fsb, XFS_MAX_BMBT_EXTLEN,
761 		(xfs_filblks_t)rtg_blocks(rtg) - oz->oz_allocated);
762 	if (!count_fsb) {
763 		spin_unlock(&oz->oz_alloc_lock);
764 		return 0;
765 	}
766 	allocated = oz->oz_allocated;
767 	oz->oz_allocated += count_fsb;
768 	spin_unlock(&oz->oz_alloc_lock);
769 
770 	trace_xfs_zone_alloc_blocks(oz, allocated, count_fsb);
771 
772 	*sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0);
773 	*is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *sector);
774 	if (!*is_seq)
775 		*sector += XFS_FSB_TO_BB(mp, allocated);
776 	return XFS_FSB_TO_B(mp, count_fsb);
777 }
778 
779 void
780 xfs_mark_rtg_boundary(
781 	struct iomap_ioend	*ioend)
782 {
783 	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
784 	sector_t		sector = ioend->io_bio.bi_iter.bi_sector;
785 
786 	if (xfs_rtb_to_rgbno(mp, xfs_daddr_to_rtb(mp, sector)) == 0)
787 		ioend->io_flags |= IOMAP_IOEND_BOUNDARY;
788 }
789 
790 /*
791  * Cache the last zone written to for an inode so that it is considered first
792  * for subsequent writes.
793  */
794 struct xfs_zone_cache_item {
795 	struct xfs_mru_cache_elem	mru;
796 	struct xfs_open_zone		*oz;
797 };
798 
799 static inline struct xfs_zone_cache_item *
800 xfs_zone_cache_item(struct xfs_mru_cache_elem *mru)
801 {
802 	return container_of(mru, struct xfs_zone_cache_item, mru);
803 }
804 
805 static void
806 xfs_zone_cache_free_func(
807 	void				*data,
808 	struct xfs_mru_cache_elem	*mru)
809 {
810 	struct xfs_zone_cache_item	*item = xfs_zone_cache_item(mru);
811 
812 	xfs_open_zone_put(item->oz);
813 	kfree(item);
814 }
815 
816 /*
817  * Check if we have a cached last open zone available for the inode and
818  * if so, return a reference to it.
819  */
820 static struct xfs_open_zone *
821 xfs_cached_zone(
822 	struct xfs_mount		*mp,
823 	struct xfs_inode		*ip)
824 {
825 	struct xfs_mru_cache_elem	*mru;
826 	struct xfs_open_zone		*oz;
827 
828 	mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino);
829 	if (!mru)
830 		return NULL;
831 	oz = xfs_zone_cache_item(mru)->oz;
832 	if (oz) {
833 		/*
834 		 * GC only steals open zones at mount time, so no GC zones
835 		 * should end up in the cache.
836 		 */
837 		ASSERT(!oz->oz_is_gc);
838 		ASSERT(atomic_read(&oz->oz_ref) > 0);
839 		atomic_inc(&oz->oz_ref);
840 	}
841 	xfs_mru_cache_done(mp->m_zone_cache);
842 	return oz;
843 }
844 
845 /*
846  * Update the last used zone cache for a given inode.
847  *
848  * The caller must have a reference on the open zone.
849  */
850 static void
851 xfs_zone_cache_create_association(
852 	struct xfs_inode		*ip,
853 	struct xfs_open_zone		*oz)
854 {
855 	struct xfs_mount		*mp = ip->i_mount;
856 	struct xfs_zone_cache_item	*item = NULL;
857 	struct xfs_mru_cache_elem	*mru;
858 
859 	ASSERT(atomic_read(&oz->oz_ref) > 0);
860 	atomic_inc(&oz->oz_ref);
861 
862 	mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino);
863 	if (mru) {
864 		/*
865 		 * If we have an association already, update it to point to the
866 		 * new zone.
867 		 */
868 		item = xfs_zone_cache_item(mru);
869 		xfs_open_zone_put(item->oz);
870 		item->oz = oz;
871 		xfs_mru_cache_done(mp->m_zone_cache);
872 		return;
873 	}
874 
875 	item = kmalloc(sizeof(*item), GFP_KERNEL);
876 	if (!item) {
877 		xfs_open_zone_put(oz);
878 		return;
879 	}
880 	item->oz = oz;
881 	xfs_mru_cache_insert(mp->m_zone_cache, ip->i_ino, &item->mru);
882 }
883 
884 static void
885 xfs_submit_zoned_bio(
886 	struct iomap_ioend	*ioend,
887 	struct xfs_open_zone	*oz,
888 	bool			is_seq)
889 {
890 	ioend->io_bio.bi_iter.bi_sector = ioend->io_sector;
891 	ioend->io_private = oz;
892 	atomic_inc(&oz->oz_ref); /* for xfs_zoned_end_io */
893 
894 	if (is_seq) {
895 		ioend->io_bio.bi_opf &= ~REQ_OP_WRITE;
896 		ioend->io_bio.bi_opf |= REQ_OP_ZONE_APPEND;
897 	} else {
898 		xfs_mark_rtg_boundary(ioend);
899 	}
900 
901 	submit_bio(&ioend->io_bio);
902 }
903 
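/*
 * Allocate zoned space for an ioend and submit it, splitting the ioend when
 * the allocation is shorter than the I/O.  The zone is picked in this
 * order: the zone backing the previous extent in the file, the per-inode
 * zone cache, and finally xfs_select_zone(), which may block until a zone
 * becomes available.
 */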
904 void
905 xfs_zone_alloc_and_submit(
906 	struct iomap_ioend	*ioend,
907 	struct xfs_open_zone	**oz)
908 {
909 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
910 	struct xfs_mount	*mp = ip->i_mount;
911 	enum rw_hint		write_hint = xfs_inode_write_hint(ip);
912 	bool			pack_tight = xfs_zoned_pack_tight(ip);
913 	unsigned int		alloc_len;
914 	struct iomap_ioend	*split;
915 	bool			is_seq;
916 
917 	if (xfs_is_shutdown(mp))
918 		goto out_error;
919 
920 	/*
921 	 * If we don't have a cached zone in this write context, see if the
922 	 * last extent before the one we are writing to points to an active
923 	 * zone.  If so, just continue writing to it.
924 	 */
925 	if (!*oz && ioend->io_offset)
926 		*oz = xfs_last_used_zone(ioend);
927 	if (!*oz)
928 		*oz = xfs_cached_zone(mp, ip);
929 
930 	if (!*oz) {
931 select_zone:
932 		*oz = xfs_select_zone(mp, write_hint, pack_tight);
933 		if (!*oz)
934 			goto out_error;
935 
936 		xfs_zone_cache_create_association(ip, *oz);
937 	}
938 
939 	alloc_len = xfs_zone_alloc_blocks(*oz, XFS_B_TO_FSB(mp, ioend->io_size),
940 			&ioend->io_sector, &is_seq);
941 	if (!alloc_len) {
942 		xfs_open_zone_put(*oz);
943 		goto select_zone;
944 	}
945 
946 	while ((split = iomap_split_ioend(ioend, alloc_len, is_seq))) {
947 		if (IS_ERR(split))
948 			goto out_split_error;
949 		alloc_len -= split->io_bio.bi_iter.bi_size;
950 		xfs_submit_zoned_bio(split, *oz, is_seq);
951 		if (!alloc_len) {
952 			xfs_open_zone_put(*oz);
953 			goto select_zone;
954 		}
955 	}
956 
957 	xfs_submit_zoned_bio(ioend, *oz, is_seq);
958 	return;
959 
960 out_split_error:
961 	ioend->io_bio.bi_status = errno_to_blk_status(PTR_ERR(split));
962 out_error:
963 	bio_io_error(&ioend->io_bio);
964 }
965 
966 /*
967  * Wake up all threads waiting for a zoned space allocation when the file system
968  * is shut down.
969  */
970 void
971 xfs_zoned_wake_all(
972 	struct xfs_mount	*mp)
973 {
974 	/*
975 	 * Don't wake up if there is no m_zone_info.  This is complicated by the
976 	 * fact that unmount can't atomically clear m_zone_info and thus we need
977 	 * to check SB_ACTIVE for that, but mount temporarily enables SB_ACTIVE
978 	 * during log recovery so we can't entirely rely on that either.
979 	 */
980 	if ((mp->m_super->s_flags & SB_ACTIVE) && mp->m_zone_info)
981 		wake_up_all(&mp->m_zone_info->zi_zone_wait);
982 }
983 
984 /*
985  * Check if @rgbno in @rgb is a potentially valid block.  It might still be
986  * Check if @rgbno in @rtg is a potentially valid block.  It might still be
987  */
988 bool
989 xfs_zone_rgbno_is_valid(
990 	struct xfs_rtgroup	*rtg,
991 	xfs_rgnumber_t		rgbno)
992 {
993 	lockdep_assert_held(&rtg_rmap(rtg)->i_lock);
994 
995 	if (rtg->rtg_open_zone)
996 		return rgbno < rtg->rtg_open_zone->oz_allocated;
997 	return !xa_get_mark(&rtg_mount(rtg)->m_groups[XG_TYPE_RTG].xa,
998 			rtg_rgno(rtg), XFS_RTG_FREE);
999 }
1000 
1001 static void
1002 xfs_free_open_zones(
1003 	struct xfs_zone_info	*zi)
1004 {
1005 	struct xfs_open_zone	*oz;
1006 
1007 	spin_lock(&zi->zi_open_zones_lock);
1008 	while ((oz = list_first_entry_or_null(&zi->zi_open_zones,
1009 			struct xfs_open_zone, oz_entry))) {
1010 		list_del(&oz->oz_entry);
1011 		xfs_open_zone_put(oz);
1012 	}
1013 	spin_unlock(&zi->zi_open_zones_lock);
1014 }
1015 
1016 struct xfs_init_zones {
1017 	struct xfs_mount	*mp;
1018 	uint64_t		available;
1019 	uint64_t		reclaimable;
1020 };
1021 
1022 static int
1023 xfs_init_zone(
1024 	struct xfs_init_zones	*iz,
1025 	struct xfs_rtgroup	*rtg,
1026 	struct blk_zone		*zone)
1027 {
1028 	struct xfs_mount	*mp = rtg_mount(rtg);
1029 	struct xfs_zone_info	*zi = mp->m_zone_info;
1030 	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;
1031 	xfs_rgblock_t		write_pointer, highest_rgbno;
1032 	int			error;
1033 
1034 	if (zone && !xfs_zone_validate(zone, rtg, &write_pointer))
1035 		return -EFSCORRUPTED;
1036 
1037 	/*
1038 	 * For sequential write required zones we retrieved the hardware write
1039 	 * pointer above.
1040 	 *
1041 	 * For conventional zones or conventional devices we don't have that
1042 	 * luxury.  Instead query the rmap to find the highest recorded block
1043 	 * and set the write pointer to the block after that.  In case of a
1044 	 * power loss this misses blocks where the data I/O has completed but
1045 	 * was not yet recorded in the rmap, and it also rewrites blocks if the most
1046 	 * recently written ones got deleted again before unmount, but this is
1047 	 * the best we can do without hardware support.
1048 	 */
1049 	if (!zone || zone->cond == BLK_ZONE_COND_NOT_WP) {
1050 		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
1051 		highest_rgbno = xfs_rtrmap_highest_rgbno(rtg);
1052 		if (highest_rgbno == NULLRGBLOCK)
1053 			write_pointer = 0;
1054 		else
1055 			write_pointer = highest_rgbno + 1;
1056 		xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
1057 	}
1058 
1059 	/*
1060 	 * If there are no used blocks, but the zone is not in the empty state
1061 	 * yet, we lost power before the zone reset.  In that case finish the work
1062 	 * here.
1063 	 */
1064 	if (write_pointer == rtg_blocks(rtg) && used == 0) {
1065 		error = xfs_zone_gc_reset_sync(rtg);
1066 		if (error)
1067 			return error;
1068 		write_pointer = 0;
1069 	}
1070 
1071 	if (write_pointer == 0) {
1072 		/* zone is empty */
1073 		atomic_inc(&zi->zi_nr_free_zones);
1074 		xfs_group_set_mark(&rtg->rtg_group, XFS_RTG_FREE);
1075 		iz->available += rtg_blocks(rtg);
1076 	} else if (write_pointer < rtg_blocks(rtg)) {
1077 		/* zone is open */
1078 		struct xfs_open_zone *oz;
1079 
1080 		atomic_inc(&rtg_group(rtg)->xg_active_ref);
1081 		oz = xfs_init_open_zone(rtg, write_pointer, WRITE_LIFE_NOT_SET,
1082 				false);
1083 		list_add_tail(&oz->oz_entry, &zi->zi_open_zones);
1084 		zi->zi_nr_open_zones++;
1085 
1086 		iz->available += (rtg_blocks(rtg) - write_pointer);
1087 		iz->reclaimable += write_pointer - used;
1088 	} else if (used < rtg_blocks(rtg)) {
1089 		/* zone fully written, but has freed blocks */
1090 		xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
1091 		iz->reclaimable += (rtg_blocks(rtg) - used);
1092 	}
1093 
1094 	return 0;
1095 }
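/*
 * After xfs_init_zone each zone is in one of three states: write_pointer
 * == 0 (free), 0 < write_pointer < rtg_blocks() (open, with an
 * xfs_open_zone set up for it), or write_pointer == rtg_blocks() (full,
 * with any freed blocks accounted as reclaimable).
 */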
1096 
1097 static int
1098 xfs_get_zone_info_cb(
1099 	struct blk_zone		*zone,
1100 	unsigned int		idx,
1101 	void			*data)
1102 {
1103 	struct xfs_init_zones	*iz = data;
1104 	struct xfs_mount	*mp = iz->mp;
1105 	xfs_fsblock_t		zsbno = xfs_daddr_to_rtb(mp, zone->start);
1106 	xfs_rgnumber_t		rgno;
1107 	struct xfs_rtgroup	*rtg;
1108 	int			error;
1109 
1110 	if (xfs_rtb_to_rgbno(mp, zsbno) != 0) {
1111 		xfs_warn(mp, "mismatched zone start 0x%llx.", zsbno);
1112 		return -EFSCORRUPTED;
1113 	}
1114 
1115 	rgno = xfs_rtb_to_rgno(mp, zsbno);
1116 	rtg = xfs_rtgroup_grab(mp, rgno);
1117 	if (!rtg) {
1118 		xfs_warn(mp, "realtime group not found for zone %u.", rgno);
1119 		return -EFSCORRUPTED;
1120 	}
1121 	error = xfs_init_zone(iz, rtg, zone);
1122 	xfs_rtgroup_rele(rtg);
1123 	return error;
1124 }
1125 
1126 /*
1127  * Calculate the max open zone limit based on the number of backing zones
1128  * available.
1129  */
1130 static inline uint32_t
1131 xfs_max_open_zones(
1132 	struct xfs_mount	*mp)
1133 {
1134 	unsigned int		max_open, max_open_data_zones;
1135 
1136 	/*
1137 	 * We need two zones for every open data zone, one in reserve as we
1138 	 * don't reclaim open zones.  One data zone and its spare are included
1139 	 * in XFS_MIN_ZONES to support at least one user data writer.
1140 	 */
1141 	max_open_data_zones = (mp->m_sb.sb_rgcount - XFS_MIN_ZONES) / 2 + 1;
1142 	max_open = max_open_data_zones + XFS_OPEN_GC_ZONES;
1143 
1144 	/*
1145 	 * Cap the max open limit to 1/4 of the available zones.  Without this we'd
1146 	 * run out of easy reclaim targets too quickly and storage devices don't
1147 	 * handle huge numbers of concurrent write streams overly well.
1148 	 */
1149 	max_open = min(max_open, mp->m_sb.sb_rgcount / 4);
1150 
1151 	return max(XFS_MIN_OPEN_ZONES, max_open);
1152 }
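/*
 * Illustrative example with made-up constants: for sb_rgcount == 100 and
 * (hypothetically) XFS_MIN_ZONES == 10 and XFS_OPEN_GC_ZONES == 1, the data
 * zone limit is (100 - 10) / 2 + 1 = 46 and max_open becomes 47, which the
 * 1/4 cap then reduces to 100 / 4 = 25 (assuming that still exceeds
 * XFS_MIN_OPEN_ZONES).
 */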
1153 
1154 /*
1155  * Normally we use the open zone limit that the device reports.  If there is
1156  * none let the user pick one from the command line.
1157  * none, let the user pick one from the command line.
1158  * If the device doesn't report an open zone limit and there is no override,
1159  * allow to hold about a quarter of the zones open.  In theory we could allow
1160  * allow holding about a quarter of the zones open.  In theory we could allow
1161  * reclaim open zones.
1162  *
1163  * When used on conventional SSDs a lower open limit is advisable as we'll
1164  * otherwise overwhelm the FTL just as much as a conventional block allocator.
1165  *
1166  * Note: To debug the open zone management code, force max_open to 1 here.
1167  */
1168 static int
1169 xfs_calc_open_zones(
1170 	struct xfs_mount	*mp)
1171 {
1172 	struct block_device	*bdev = mp->m_rtdev_targp->bt_bdev;
1173 	unsigned int		bdev_open_zones = bdev_max_open_zones(bdev);
1174 
1175 	if (!mp->m_max_open_zones) {
1176 		if (bdev_open_zones)
1177 			mp->m_max_open_zones = bdev_open_zones;
1178 		else
1179 			mp->m_max_open_zones = xfs_max_open_zones(mp);
1180 	}
1181 
1182 	if (mp->m_max_open_zones < XFS_MIN_OPEN_ZONES) {
1183 		xfs_notice(mp, "need at least %u open zones.",
1184 			XFS_MIN_OPEN_ZONES);
1185 		return -EIO;
1186 	}
1187 
1188 	if (bdev_open_zones && bdev_open_zones < mp->m_max_open_zones) {
1189 		mp->m_max_open_zones = bdev_open_zones;
1190 		xfs_info(mp, "limiting open zones to %u due to hardware limit.",
1191 			bdev_open_zones);
1192 	}
1193 
1194 	if (mp->m_max_open_zones > xfs_max_open_zones(mp)) {
1195 		mp->m_max_open_zones = xfs_max_open_zones(mp);
1196 		xfs_info(mp,
1197 "limiting open zones to %u due to total zone count (%u)",
1198 			mp->m_max_open_zones, mp->m_sb.sb_rgcount);
1199 	}
1200 
1201 	return 0;
1202 }
1203 
1204 static unsigned long *
1205 xfs_alloc_bucket_bitmap(
1206 	struct xfs_mount	*mp)
1207 {
1208 	return kvmalloc_array(BITS_TO_LONGS(mp->m_sb.sb_rgcount),
1209 			sizeof(unsigned long), GFP_KERNEL | __GFP_ZERO);
1210 }
1211 
1212 static struct xfs_zone_info *
1213 xfs_alloc_zone_info(
1214 	struct xfs_mount	*mp)
1215 {
1216 	struct xfs_zone_info	*zi;
1217 	int			i;
1218 
1219 	zi = kzalloc(sizeof(*zi), GFP_KERNEL);
1220 	if (!zi)
1221 		return NULL;
1222 	INIT_LIST_HEAD(&zi->zi_open_zones);
1223 	INIT_LIST_HEAD(&zi->zi_reclaim_reservations);
1224 	spin_lock_init(&zi->zi_reset_list_lock);
1225 	spin_lock_init(&zi->zi_open_zones_lock);
1226 	spin_lock_init(&zi->zi_reservation_lock);
1227 	init_waitqueue_head(&zi->zi_zone_wait);
1228 	spin_lock_init(&zi->zi_used_buckets_lock);
1229 	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++) {
1230 		zi->zi_used_bucket_bitmap[i] = xfs_alloc_bucket_bitmap(mp);
1231 		if (!zi->zi_used_bucket_bitmap[i])
1232 			goto out_free_bitmaps;
1233 	}
1234 	return zi;
1235 
1236 out_free_bitmaps:
1237 	while (--i >= 0)
1238 		kvfree(zi->zi_used_bucket_bitmap[i]);
1239 	kfree(zi);
1240 	return NULL;
1241 }
1242 
1243 static void
1244 xfs_free_zone_info(
1245 	struct xfs_zone_info	*zi)
1246 {
1247 	int			i;
1248 
1249 	xfs_free_open_zones(zi);
1250 	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++)
1251 		kvfree(zi->zi_used_bucket_bitmap[i]);
1252 	kfree(zi);
1253 }
1254 
1255 int
1256 xfs_mount_zones(
1257 	struct xfs_mount	*mp)
1258 {
1259 	struct xfs_init_zones	iz = {
1260 		.mp		= mp,
1261 	};
1262 	struct xfs_buftarg	*bt = mp->m_rtdev_targp;
1263 	int			error;
1264 
1265 	if (!bt) {
1266 		xfs_notice(mp, "RT device missing.");
1267 		return -EINVAL;
1268 	}
1269 
1270 	if (!xfs_has_rtgroups(mp) || !xfs_has_rmapbt(mp)) {
1271 		xfs_notice(mp, "invalid flag combination.");
1272 		return -EFSCORRUPTED;
1273 	}
1274 	if (mp->m_sb.sb_rextsize != 1) {
1275 		xfs_notice(mp, "zoned file systems do not support rextsize.");
1276 		return -EFSCORRUPTED;
1277 	}
1278 	if (mp->m_sb.sb_rgcount < XFS_MIN_ZONES) {
1279 		xfs_notice(mp,
1280 "zoned file systems need to have at least %u zones.", XFS_MIN_ZONES);
1281 		return -EFSCORRUPTED;
1282 	}
1283 
1284 	error = xfs_calc_open_zones(mp);
1285 	if (error)
1286 		return error;
1287 
1288 	mp->m_zone_info = xfs_alloc_zone_info(mp);
1289 	if (!mp->m_zone_info)
1290 		return -ENOMEM;
1291 
1292 	xfs_info(mp, "%u zones of %u blocks size (%u max open)",
1293 		 mp->m_sb.sb_rgcount, mp->m_groups[XG_TYPE_RTG].blocks,
1294 		 mp->m_max_open_zones);
1295 	trace_xfs_zones_mount(mp);
1296 
1297 	if (bdev_is_zoned(bt->bt_bdev)) {
1298 		error = blkdev_report_zones(bt->bt_bdev,
1299 				XFS_FSB_TO_BB(mp, mp->m_sb.sb_rtstart),
1300 				mp->m_sb.sb_rgcount, xfs_get_zone_info_cb, &iz);
1301 		if (error < 0)
1302 			goto out_free_zone_info;
1303 	} else {
1304 		struct xfs_rtgroup	*rtg = NULL;
1305 
1306 		while ((rtg = xfs_rtgroup_next(mp, rtg))) {
1307 			error = xfs_init_zone(&iz, rtg, NULL);
1308 			if (error)
1309 				goto out_free_zone_info;
1310 		}
1311 	}
1312 
1313 	xfs_set_freecounter(mp, XC_FREE_RTAVAILABLE, iz.available);
1314 	xfs_set_freecounter(mp, XC_FREE_RTEXTENTS,
1315 			iz.available + iz.reclaimable);
1316 
1317 	/*
1318 	 * The user may configure GC to free up a percentage of unused blocks.
1319 	 * By default this is 0. GC will always trigger at the minimum level
1320 	 * for keeping max_open_zones available for data placement.
1321 	 */
1322 	mp->m_zonegc_low_space = 0;
1323 
1324 	error = xfs_zone_gc_mount(mp);
1325 	if (error)
1326 		goto out_free_zone_info;
1327 
1328 	/*
1329 	 * Set up an MRU cache to track the last used open zone for each inode
1330 	 * for data placement purposes.  The magic values for group count and
1331 	 * lifetime are the same as the filestreams defaults, which seems sane enough.
1332 	 */
1333 	xfs_mru_cache_create(&mp->m_zone_cache, mp,
1334 			5000, 10, xfs_zone_cache_free_func);
1335 	return 0;
1336 
1337 out_free_zone_info:
1338 	xfs_free_zone_info(mp->m_zone_info);
1339 	return error;
1340 }
1341 
1342 void
1343 xfs_unmount_zones(
1344 	struct xfs_mount	*mp)
1345 {
1346 	xfs_zone_gc_unmount(mp);
1347 	xfs_free_zone_info(mp->m_zone_info);
1348 	xfs_mru_cache_destroy(mp->m_zone_cache);
1349 }
1350