xref: /linux/fs/xfs/xfs_zone_alloc.c (revision 3f1c07fc21c68bd3bd2df9d2c9441f6485e934d9)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023-2025 Christoph Hellwig.
 * Copyright (c) 2024-2025, Western Digital Corporation or its affiliates.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_error.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_iomap.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_refcount.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_zone_alloc.h"
#include "xfs_zone_priv.h"
#include "xfs_zones.h"
#include "xfs_trace.h"
#include "xfs_mru_cache.h"

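/*
 * Free an open zone from an RCU callback once the last reference is gone.
 * Deferring the free to a grace period keeps lockless readers that look up
 * rtg->rtg_open_zone or the cached zone in the inode safe, and the rtgroup
 * reference is only dropped here as well.
 */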
static void
xfs_open_zone_free_rcu(
	struct callback_head	*cb)
{
	struct xfs_open_zone	*oz = container_of(cb, typeof(*oz), oz_rcu);

	xfs_rtgroup_rele(oz->oz_rtg);
	kfree(oz);
}

void
xfs_open_zone_put(
	struct xfs_open_zone	*oz)
{
	if (atomic_dec_and_test(&oz->oz_ref))
		call_rcu(&oz->oz_rcu, xfs_open_zone_free_rcu);
}

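/*
 * Map the used block count of a zone to one of the XFS_ZONE_USED_BUCKETS
 * buckets used to track reclaim candidates.  The index grows linearly with
 * utilization; as an illustration only, assuming 16 buckets and 65536-block
 * zones, a zone with 20000 used blocks would land in bucket
 * 16 * 20000 / 65536 = 4.
 */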
static inline uint32_t
xfs_zone_bucket(
	struct xfs_mount	*mp,
	uint32_t		used_blocks)
{
	return XFS_ZONE_USED_BUCKETS * used_blocks /
			mp->m_groups[XG_TYPE_RTG].blocks;
}

static inline void
xfs_zone_add_to_bucket(
	struct xfs_zone_info	*zi,
	xfs_rgnumber_t		rgno,
	uint32_t		to_bucket)
{
	__set_bit(rgno, zi->zi_used_bucket_bitmap[to_bucket]);
	zi->zi_used_bucket_entries[to_bucket]++;
}

static inline void
xfs_zone_remove_from_bucket(
	struct xfs_zone_info	*zi,
	xfs_rgnumber_t		rgno,
	uint32_t		from_bucket)
{
	__clear_bit(rgno, zi->zi_used_bucket_bitmap[from_bucket]);
	zi->zi_used_bucket_entries[from_bucket]--;
}

static void
xfs_zone_account_reclaimable(
	struct xfs_rtgroup	*rtg,
	uint32_t		freed)
{
	struct xfs_group	*xg = &rtg->rtg_group;
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_zone_info	*zi = mp->m_zone_info;
	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;
	xfs_rgnumber_t		rgno = rtg_rgno(rtg);
	uint32_t		from_bucket = xfs_zone_bucket(mp, used + freed);
	uint32_t		to_bucket = xfs_zone_bucket(mp, used);
	bool			was_full = (used + freed == rtg_blocks(rtg));

	/*
	 * This can be called from log recovery, where the zone_info structure
	 * hasn't been allocated yet.  Skip all work as xfs_mount_zones will
	 * add the zones to the right buckets before the file system becomes
	 * active.
	 */
	if (!zi)
		return;

	if (!used) {
		/*
		 * The zone is now empty, remove it from the bottom bucket and
		 * trigger a reset.
		 */
		trace_xfs_zone_emptied(rtg);

		spin_lock(&zi->zi_used_buckets_lock);
		if (!was_full)
			xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
		spin_unlock(&zi->zi_used_buckets_lock);

		spin_lock(&zi->zi_reset_list_lock);
		xg->xg_next_reset = zi->zi_reset_list;
		zi->zi_reset_list = xg;
		spin_unlock(&zi->zi_reset_list_lock);

		if (zi->zi_gc_thread)
			wake_up_process(zi->zi_gc_thread);
	} else if (was_full) {
		/*
		 * The zone transitioned from full, mark it as reclaimable and
		 * wake up GC, which might be waiting for zones to reclaim.
		 */
		spin_lock(&zi->zi_used_buckets_lock);
		xfs_zone_add_to_bucket(zi, rgno, to_bucket);
		spin_unlock(&zi->zi_used_buckets_lock);

		if (zi->zi_gc_thread && xfs_zoned_need_gc(mp))
			wake_up_process(zi->zi_gc_thread);
	} else if (to_bucket != from_bucket) {
		/*
		 * Move the zone to a new bucket if it dropped below the
		 * threshold.
		 */
		spin_lock(&zi->zi_used_buckets_lock);
		xfs_zone_add_to_bucket(zi, rgno, to_bucket);
		xfs_zone_remove_from_bucket(zi, rgno, from_bucket);
		spin_unlock(&zi->zi_used_buckets_lock);
	}
}

/*
 * Check if we have any zones that can be reclaimed by looking at the entry
 * counters for the zone buckets.
 */
bool
xfs_zoned_have_reclaimable(
	struct xfs_zone_info	*zi)
{
	int i;

	spin_lock(&zi->zi_used_buckets_lock);
	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++) {
		if (zi->zi_used_bucket_entries[i]) {
			spin_unlock(&zi->zi_used_buckets_lock);
			return true;
		}
	}
	spin_unlock(&zi->zi_used_buckets_lock);

	return false;
}

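/*
 * A zone is full once all of its blocks have been written.  Drop it from the
 * open zone tracking (either the GC slot or the open zones list), detach it
 * from the rtgroup, and account any blocks that were already freed again as
 * reclaimable.
 */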
static void
xfs_open_zone_mark_full(
	struct xfs_open_zone	*oz)
{
	struct xfs_rtgroup	*rtg = oz->oz_rtg;
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_zone_info	*zi = mp->m_zone_info;
	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;

	trace_xfs_zone_full(rtg);

	WRITE_ONCE(rtg->rtg_open_zone, NULL);

	spin_lock(&zi->zi_open_zones_lock);
	if (oz->oz_is_gc) {
		ASSERT(current == zi->zi_gc_thread);
		zi->zi_open_gc_zone = NULL;
	} else {
		zi->zi_nr_open_zones--;
		list_del_init(&oz->oz_entry);
	}
	spin_unlock(&zi->zi_open_zones_lock);
	xfs_open_zone_put(oz);

	wake_up_all(&zi->zi_zone_wait);
	if (used < rtg_blocks(rtg))
		xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
}

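/*
 * Record blocks that were just written and linked into an inode: bump the
 * used block count of the zone's rmap inode as well as the per-zone written
 * counter, and mark the zone full once every block in it has been written.
 */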
static void
xfs_zone_record_blocks(
	struct xfs_trans	*tp,
	struct xfs_open_zone	*oz,
	xfs_fsblock_t		fsbno,
	xfs_filblks_t		len)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_rtgroup	*rtg = oz->oz_rtg;
	struct xfs_inode	*rmapip = rtg_rmap(rtg);

	trace_xfs_zone_record_blocks(oz, xfs_rtb_to_rgbno(mp, fsbno), len);

	xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
	xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);
	rmapip->i_used_blocks += len;
	ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg));
	oz->oz_written += len;
	if (oz->oz_written == rtg_blocks(rtg))
		xfs_open_zone_mark_full(oz);
	xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
}

/*
 * Called for blocks that have been written to disk, but not actually linked to
 * an inode, which can happen when garbage collection races with user data
 * writes to a file.
 */
static void
xfs_zone_skip_blocks(
	struct xfs_open_zone	*oz,
	xfs_filblks_t		len)
{
	struct xfs_rtgroup	*rtg = oz->oz_rtg;

	trace_xfs_zone_skip_blocks(oz, 0, len);

	xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
	oz->oz_written += len;
	if (oz->oz_written == rtg_blocks(rtg))
		xfs_open_zone_mark_full(oz);
	xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);

	xfs_add_frextents(rtg_mount(rtg), len);
}

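/*
 * Remap a written extent into the data fork: unmap the old blocks covering
 * the range (freeing or unsharing them as appropriate), record the new blocks
 * against the zone, and install the new mapping.  If the old data was already
 * overwritten or moved by a racing operation, only the zone accounting for
 * the newly written blocks is updated.
 */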
static int
xfs_zoned_map_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*new,
	struct xfs_open_zone	*oz,
	xfs_fsblock_t		old_startblock)
{
	struct xfs_bmbt_irec	data;
	int			nmaps = 1;
	int			error;

	/* Grab the corresponding mapping in the data fork. */
	error = xfs_bmapi_read(ip, new->br_startoff, new->br_blockcount, &data,
			       &nmaps, 0);
	if (error)
		return error;

	/*
	 * Cap the update to the existing extent in the data fork because we can
	 * only overwrite one extent at a time.
	 */
	ASSERT(new->br_blockcount >= data.br_blockcount);
	new->br_blockcount = data.br_blockcount;

	/*
	 * If a data write raced with this GC write, keep the existing data in
	 * the data fork, mark our newly written GC extent as reclaimable, then
	 * move on to the next extent.
	 *
	 * Note that this can also happen when racing with operations that do
	 * not actually invalidate the data, but just move it to a different
	 * inode (XFS_IOC_EXCHANGE_RANGE), or to a different offset inside the
	 * inode (FALLOC_FL_COLLAPSE_RANGE / FALLOC_FL_INSERT_RANGE).  If the
	 * data was just moved around, GC fails to free the zone, but the zone
	 * becomes a GC candidate again as soon as all previous GC I/O has
	 * finished and these blocks will be moved out eventually.
	 */
	if (old_startblock != NULLFSBLOCK &&
	    old_startblock != data.br_startblock)
		goto skip;

	trace_xfs_reflink_cow_remap_from(ip, new);
	trace_xfs_reflink_cow_remap_to(ip, &data);

	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
			XFS_IEXT_REFLINK_END_COW_CNT);
	if (error)
		return error;

	if (data.br_startblock != HOLESTARTBLOCK) {
		ASSERT(data.br_startblock != DELAYSTARTBLOCK);
		ASSERT(!isnullstartblock(data.br_startblock));

		xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &data);
		if (xfs_is_reflink_inode(ip)) {
			xfs_refcount_decrease_extent(tp, true, &data);
		} else {
			error = xfs_free_extent_later(tp, data.br_startblock,
					data.br_blockcount, NULL,
					XFS_AG_RESV_NONE,
					XFS_FREE_EXTENT_REALTIME);
			if (error)
				return error;
		}
	}

	xfs_zone_record_blocks(tp, oz, new->br_startblock, new->br_blockcount);

	/* Map the new blocks into the data fork. */
	xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, new);
	return 0;

skip:
	trace_xfs_reflink_cow_remap_skip(ip, new);
	xfs_zone_skip_blocks(oz, new->br_blockcount);
	return 0;
}

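/*
 * I/O completion handler for zoned writes: remap the byte range that was
 * written at @daddr into the data fork, using one transaction per remapped
 * extent.  @old_startblock is the block the data was previously located at
 * for GC writes, or NULLFSBLOCK for regular writes.
 */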
int
xfs_zoned_end_io(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count,
	xfs_daddr_t		daddr,
	struct xfs_open_zone	*oz,
	xfs_fsblock_t		old_startblock)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	struct xfs_bmbt_irec	new = {
		.br_startoff	= XFS_B_TO_FSBT(mp, offset),
		.br_startblock	= xfs_daddr_to_rtb(mp, daddr),
		.br_state	= XFS_EXT_NORM,
	};
	unsigned int		resblks =
		XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
	struct xfs_trans	*tp;
	int			error;

	if (xfs_is_shutdown(mp))
		return -EIO;

	while (new.br_startoff < end_fsb) {
		new.br_blockcount = end_fsb - new.br_startoff;

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_RES_FDBLKS, &tp);
		if (error)
			return error;
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		error = xfs_zoned_map_extent(tp, ip, &new, oz, old_startblock);
		if (error)
			xfs_trans_cancel(tp);
		else
			error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		new.br_startoff += new.br_blockcount;
		new.br_startblock += new.br_blockcount;
		if (old_startblock != NULLFSBLOCK)
			old_startblock += new.br_blockcount;
	}

	return 0;
}

/*
 * "Free" blocks allocated in a zone.
 *
 * Just decrement the used blocks counter and report the space as freed.
 */
int
xfs_zone_free_blocks(
	struct xfs_trans	*tp,
	struct xfs_rtgroup	*rtg,
	xfs_fsblock_t		fsbno,
	xfs_filblks_t		len)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*rmapip = rtg_rmap(rtg);

	xfs_assert_ilocked(rmapip, XFS_ILOCK_EXCL);

	if (len > rmapip->i_used_blocks) {
		xfs_err(mp,
"trying to free more blocks (%lld) than used counter (%u).",
			len, rmapip->i_used_blocks);
		ASSERT(len <= rmapip->i_used_blocks);
		xfs_rtginode_mark_sick(rtg, XFS_RTGI_RMAP);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return -EFSCORRUPTED;
	}

	trace_xfs_zone_free_blocks(rtg, xfs_rtb_to_rgbno(mp, fsbno), len);

	rmapip->i_used_blocks -= len;
	/*
	 * Don't add open zones to the reclaimable buckets.  The I/O completion
	 * for writing the last block will take care of accounting for already
	 * unused blocks instead.
	 */
	if (!READ_ONCE(rtg->rtg_open_zone))
		xfs_zone_account_reclaimable(rtg, len);
	xfs_add_frextents(mp, len);
	xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
	return 0;
}

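/*
 * Scan the rtgroup xarray for a zone marked XFS_RTG_FREE in the [start, end]
 * range.  On success the mark is cleared, the free zone count is decremented,
 * and the group is returned with an active reference held.
 */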
static struct xfs_group *
xfs_find_free_zone(
	struct xfs_mount	*mp,
	unsigned long		start,
	unsigned long		end)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	XA_STATE		(xas, &mp->m_groups[XG_TYPE_RTG].xa, start);
	struct xfs_group	*xg;

	xas_lock(&xas);
	xas_for_each_marked(&xas, xg, end, XFS_RTG_FREE)
		if (atomic_inc_not_zero(&xg->xg_active_ref))
			goto found;
	xas_unlock(&xas);
	return NULL;

found:
	xas_clear_mark(&xas, XFS_RTG_FREE);
	atomic_dec(&zi->zi_nr_free_zones);
	zi->zi_free_zone_cursor = xg->xg_gno;
	xas_unlock(&xas);
	return xg;
}

static struct xfs_open_zone *
xfs_init_open_zone(
	struct xfs_rtgroup	*rtg,
	xfs_rgblock_t		write_pointer,
	enum rw_hint		write_hint,
	bool			is_gc)
{
	struct xfs_open_zone	*oz;

	oz = kzalloc(sizeof(*oz), GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&oz->oz_alloc_lock);
	atomic_set(&oz->oz_ref, 1);
	oz->oz_rtg = rtg;
	oz->oz_allocated = write_pointer;
	oz->oz_written = write_pointer;
	oz->oz_write_hint = write_hint;
	oz->oz_is_gc = is_gc;

	/*
	 * All dereferences of rtg->rtg_open_zone hold the ILOCK for the rmap
	 * inode, but we don't really want to take that here because we are
	 * under the zone_list_lock.  Ensure the pointer is only set for a fully
	 * initialized open zone structure so that a racy lookup finding it is
	 * fine.
	 */
	WRITE_ONCE(rtg->rtg_open_zone, oz);
	return oz;
}

/*
 * Find a completely free zone, open it, and return a reference.
 */
struct xfs_open_zone *
xfs_open_zone(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint,
	bool			is_gc)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	struct xfs_group	*xg;

	xg = xfs_find_free_zone(mp, zi->zi_free_zone_cursor, ULONG_MAX);
	if (!xg)
		xg = xfs_find_free_zone(mp, 0, zi->zi_free_zone_cursor);
	if (!xg)
		return NULL;

	set_current_state(TASK_RUNNING);
	return xfs_init_open_zone(to_rtg(xg), 0, write_hint, is_gc);
}

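/*
 * Try to open a brand new zone for data, observing both the open zone limit
 * (less the slots reserved for GC) and the free zone reserve that GC needs to
 * make forward progress.  Called with zi_open_zones_lock held, which is
 * dropped around the actual zone allocation.
 */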
static struct xfs_open_zone *
xfs_try_open_zone(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	struct xfs_open_zone	*oz;

	if (zi->zi_nr_open_zones >= mp->m_max_open_zones - XFS_OPEN_GC_ZONES)
		return NULL;
	if (atomic_read(&zi->zi_nr_free_zones) <
	    XFS_GC_ZONES - XFS_OPEN_GC_ZONES)
		return NULL;

	/*
	 * Increment the open zone count to reserve our slot before dropping
	 * zi_open_zones_lock.
	 */
	zi->zi_nr_open_zones++;
	spin_unlock(&zi->zi_open_zones_lock);
	oz = xfs_open_zone(mp, write_hint, false);
	spin_lock(&zi->zi_open_zones_lock);
	if (!oz) {
		zi->zi_nr_open_zones--;
		return NULL;
	}

	atomic_inc(&oz->oz_ref);
	list_add_tail(&oz->oz_entry, &zi->zi_open_zones);

	/*
	 * If this was the last free zone, other waiters might be waiting
	 * on us to write to it as well.
	 */
	wake_up_all(&zi->zi_zone_wait);

	if (xfs_zoned_need_gc(mp))
		wake_up_process(zi->zi_gc_thread);

	trace_xfs_zone_opened(oz->oz_rtg);
	return oz;
}

enum xfs_zone_alloc_score {
	/* Any open zone will do it, we're desperate */
	XFS_ZONE_ALLOC_ANY	= 0,

	/* It better fit somehow */
	XFS_ZONE_ALLOC_OK	= 1,

	/* Only reuse a zone if it fits really well. */
	XFS_ZONE_ALLOC_GOOD	= 2,
};

/*
 * Lifetime hint co-location matrix.  Fields not set default to 0
 * aka XFS_ZONE_ALLOC_ANY.
 */
static const unsigned int
xfs_zoned_hint_score[WRITE_LIFE_HINT_NR][WRITE_LIFE_HINT_NR] = {
	[WRITE_LIFE_NOT_SET]	= {
		[WRITE_LIFE_NOT_SET]	= XFS_ZONE_ALLOC_OK,
	},
	[WRITE_LIFE_NONE]	= {
		[WRITE_LIFE_NONE]	= XFS_ZONE_ALLOC_OK,
	},
	[WRITE_LIFE_SHORT]	= {
		[WRITE_LIFE_SHORT]	= XFS_ZONE_ALLOC_GOOD,
	},
	[WRITE_LIFE_MEDIUM]	= {
		[WRITE_LIFE_MEDIUM]	= XFS_ZONE_ALLOC_GOOD,
	},
	[WRITE_LIFE_LONG]	= {
		[WRITE_LIFE_LONG]	= XFS_ZONE_ALLOC_OK,
		[WRITE_LIFE_EXTREME]	= XFS_ZONE_ALLOC_OK,
	},
	[WRITE_LIFE_EXTREME]	= {
		[WRITE_LIFE_LONG]	= XFS_ZONE_ALLOC_OK,
		[WRITE_LIFE_EXTREME]	= XFS_ZONE_ALLOC_OK,
	},
};

static bool
xfs_try_use_zone(
	struct xfs_zone_info	*zi,
	enum rw_hint		file_hint,
	struct xfs_open_zone	*oz,
	unsigned int		goodness)
{
	if (oz->oz_allocated == rtg_blocks(oz->oz_rtg))
		return false;

	if (xfs_zoned_hint_score[oz->oz_write_hint][file_hint] < goodness)
		return false;

	if (!atomic_inc_not_zero(&oz->oz_ref))
		return false;

	/*
	 * If we have a hint set for the data, use that for the zone even if
	 * some data was written already without any hint set, but don't change
	 * the temperature after that as that would make little sense without
	 * tracking per-temperature class written block counts, which is
	 * probably overkill anyway.
	 */
	if (file_hint != WRITE_LIFE_NOT_SET &&
	    oz->oz_write_hint == WRITE_LIFE_NOT_SET)
		oz->oz_write_hint = file_hint;

	/*
	 * If we couldn't match by inode or lifetime we just pick the first
	 * zone with enough space above.  For that we want the least busy zone
	 * for some definition of "least" busy.  For now this simple LRU
	 * algorithm that rotates every zone to the end of the list will do it,
	 * even if it isn't exactly cache friendly.
	 */
	if (!list_is_last(&oz->oz_entry, &zi->zi_open_zones))
		list_move_tail(&oz->oz_entry, &zi->zi_open_zones);
	return true;
}

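/*
 * Walk the open zones in LRU order and take the first one that scores at
 * least @goodness against the file's lifetime hint.  The MRU variant below
 * walks the list in the opposite direction and is used to pack small closed
 * files tightly into the most recently used zones.
 */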
static struct xfs_open_zone *
xfs_select_open_zone_lru(
	struct xfs_zone_info	*zi,
	enum rw_hint		file_hint,
	unsigned int		goodness)
{
	struct xfs_open_zone	*oz;

	lockdep_assert_held(&zi->zi_open_zones_lock);

	list_for_each_entry(oz, &zi->zi_open_zones, oz_entry)
		if (xfs_try_use_zone(zi, file_hint, oz, goodness))
			return oz;

	cond_resched_lock(&zi->zi_open_zones_lock);
	return NULL;
}

static struct xfs_open_zone *
xfs_select_open_zone_mru(
	struct xfs_zone_info	*zi,
	enum rw_hint		file_hint)
{
	struct xfs_open_zone	*oz;

	lockdep_assert_held(&zi->zi_open_zones_lock);

	list_for_each_entry_reverse(oz, &zi->zi_open_zones, oz_entry)
		if (xfs_try_use_zone(zi, file_hint, oz, XFS_ZONE_ALLOC_OK))
			return oz;

	cond_resched_lock(&zi->zi_open_zones_lock);
	return NULL;
}

static inline enum rw_hint xfs_inode_write_hint(struct xfs_inode *ip)
{
	if (xfs_has_nolifetime(ip->i_mount))
		return WRITE_LIFE_NOT_SET;
	return VFS_I(ip)->i_write_hint;
}

/*
 * Try to tightly pack small files that are written back after they were closed
 * instead of opening new zones for them or spreading them to the least
 * recently used zone.  This optimizes the data layout for workloads that untar
 * or copy a lot of small files.  Right now this does not separate multiple such
 * streams.
 */
static inline bool xfs_zoned_pack_tight(struct xfs_inode *ip)
{
	struct xfs_mount *mp = ip->i_mount;
	size_t zone_capacity =
		XFS_FSB_TO_B(mp, mp->m_groups[XG_TYPE_RTG].blocks);

	/*
	 * Do not pack writes to files that already use a full zone to avoid
	 * fragmentation.
	 */
	if (i_size_read(VFS_I(ip)) >= zone_capacity)
		return false;

	return !inode_is_open_for_write(VFS_I(ip)) &&
		!(ip->i_diflags & XFS_DIFLAG_APPEND);
}

static struct xfs_open_zone *
xfs_select_zone_nowait(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint,
	bool			pack_tight)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	struct xfs_open_zone	*oz = NULL;

	if (xfs_is_shutdown(mp))
		return NULL;

	/*
	 * Try to fill up open zones with matching temperature if available.  It
	 * is better to try to co-locate data when this is favorable, so we can
	 * activate empty zones when it is statistically better to separate
	 * data.
	 */
	spin_lock(&zi->zi_open_zones_lock);
	oz = xfs_select_open_zone_lru(zi, write_hint, XFS_ZONE_ALLOC_GOOD);
	if (oz)
		goto out_unlock;

	if (pack_tight)
		oz = xfs_select_open_zone_mru(zi, write_hint);
	if (oz)
		goto out_unlock;

	/*
	 * See if we can open a new zone and use that so that data for different
	 * files is mixed as little as possible.
	 */
	oz = xfs_try_open_zone(mp, write_hint);
	if (oz)
		goto out_unlock;

	/*
	 * Try to find a zone that is an ok match to co-locate data with.
	 */
	oz = xfs_select_open_zone_lru(zi, write_hint, XFS_ZONE_ALLOC_OK);
	if (oz)
		goto out_unlock;

	/*
	 * Pick the least recently used zone, regardless of hint match.
	 */
	oz = xfs_select_open_zone_lru(zi, write_hint, XFS_ZONE_ALLOC_ANY);
out_unlock:
	spin_unlock(&zi->zi_open_zones_lock);
	return oz;
}

static struct xfs_open_zone *
xfs_select_zone(
	struct xfs_mount	*mp,
	enum rw_hint		write_hint,
	bool			pack_tight)
{
	struct xfs_zone_info	*zi = mp->m_zone_info;
	DEFINE_WAIT		(wait);
	struct xfs_open_zone	*oz;

	oz = xfs_select_zone_nowait(mp, write_hint, pack_tight);
	if (oz)
		return oz;

	for (;;) {
		prepare_to_wait(&zi->zi_zone_wait, &wait, TASK_UNINTERRUPTIBLE);
		oz = xfs_select_zone_nowait(mp, write_hint, pack_tight);
		if (oz || xfs_is_shutdown(mp))
			break;
		schedule();
	}
	finish_wait(&zi->zi_zone_wait, &wait);
	return oz;
}

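/*
 * Allocate up to @count_fsb blocks at the current allocation offset of the
 * zone.  Returns the number of bytes allocated and the start sector of the
 * I/O: for conventional zones that is the exact physical location, while for
 * sequential write required zones it is the zone start, because the actual
 * location is picked by the device when the zone append command completes.
 */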
static unsigned int
xfs_zone_alloc_blocks(
	struct xfs_open_zone	*oz,
	xfs_filblks_t		count_fsb,
	sector_t		*sector,
	bool			*is_seq)
{
	struct xfs_rtgroup	*rtg = oz->oz_rtg;
	struct xfs_mount	*mp = rtg_mount(rtg);
	xfs_rgblock_t		allocated;

	spin_lock(&oz->oz_alloc_lock);
	count_fsb = min3(count_fsb, XFS_MAX_BMBT_EXTLEN,
		(xfs_filblks_t)rtg_blocks(rtg) - oz->oz_allocated);
	if (!count_fsb) {
		spin_unlock(&oz->oz_alloc_lock);
		return 0;
	}
	allocated = oz->oz_allocated;
	oz->oz_allocated += count_fsb;
	spin_unlock(&oz->oz_alloc_lock);

	trace_xfs_zone_alloc_blocks(oz, allocated, count_fsb);

	*sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0);
	*is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *sector);
	if (!*is_seq)
		*sector += XFS_FSB_TO_BB(mp, allocated);
	return XFS_FSB_TO_B(mp, count_fsb);
}

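/*
 * Flag ioends that start at the first block of a zone so that the I/O
 * boundary is preserved and the ioend is not merged with I/O for the
 * preceding zone.
 */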
void
xfs_mark_rtg_boundary(
	struct iomap_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	sector_t		sector = ioend->io_bio.bi_iter.bi_sector;

	if (xfs_rtb_to_rgbno(mp, xfs_daddr_to_rtb(mp, sector)) == 0)
		ioend->io_flags |= IOMAP_IOEND_BOUNDARY;
}

/*
 * Check if we have a cached last open zone available for the inode and
 * if so return a reference to it.
 */
static struct xfs_open_zone *
xfs_get_cached_zone(
	struct xfs_inode	*ip)
{
	struct xfs_open_zone	*oz;

	rcu_read_lock();
	oz = VFS_I(ip)->i_private;
	if (oz) {
		/*
		 * GC only steals open zones at mount time, so no GC zones
		 * should end up in the cache.
		 */
		ASSERT(!oz->oz_is_gc);
		if (!atomic_inc_not_zero(&oz->oz_ref))
			oz = NULL;
	}
	rcu_read_unlock();

	return oz;
}

/*
 * Stash our zone in the inode so that it is reused for future allocations.
 *
 * The open_zone structure will be pinned until either the inode is freed or
 * until the cached open zone is replaced with a different one because the
 * current one was full when we tried to use it.  This means we keep any
 * open zone around forever as long as any inode that used it for the last
 * write is cached, which slightly increases the memory use of cached inodes
 * that were ever written to, but significantly simplifies the cached zone
 * lookup.  Because the open_zone is clearly marked as full when all data
 * in the underlying RTG was written, the caching is always safe.
 */
static void
xfs_set_cached_zone(
	struct xfs_inode	*ip,
	struct xfs_open_zone	*oz)
{
	struct xfs_open_zone	*old_oz;

	atomic_inc(&oz->oz_ref);
	old_oz = xchg(&VFS_I(ip)->i_private, oz);
	if (old_oz)
		xfs_open_zone_put(old_oz);
}

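/*
 * Send a zoned write bio on its way.  On sequential write required zones the
 * write is issued as a zone append, so the device reports the actual write
 * location in the bio on completion.
 */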
static void
xfs_submit_zoned_bio(
	struct iomap_ioend	*ioend,
	struct xfs_open_zone	*oz,
	bool			is_seq)
{
	ioend->io_bio.bi_iter.bi_sector = ioend->io_sector;
	ioend->io_private = oz;
	atomic_inc(&oz->oz_ref); /* for xfs_zoned_end_io */

	if (is_seq) {
		ioend->io_bio.bi_opf &= ~REQ_OP_WRITE;
		ioend->io_bio.bi_opf |= REQ_OP_ZONE_APPEND;
	} else {
		xfs_mark_rtg_boundary(ioend);
	}

	submit_bio(&ioend->io_bio);
}

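/*
 * Main entry point for zoned writeback: find an open zone (preferring the
 * zone cached in the inode), allocate space from it, and submit the ioend,
 * splitting it whenever an allocation covers only part of the remaining I/O.
 */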
void
xfs_zone_alloc_and_submit(
	struct iomap_ioend	*ioend,
	struct xfs_open_zone	**oz)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_mount	*mp = ip->i_mount;
	enum rw_hint		write_hint = xfs_inode_write_hint(ip);
	bool			pack_tight = xfs_zoned_pack_tight(ip);
	unsigned int		alloc_len;
	struct iomap_ioend	*split;
	bool			is_seq;

	if (xfs_is_shutdown(mp))
		goto out_error;

	/*
	 * If we don't have a locally cached zone in this write context, see if
	 * the inode is still associated with a zone and use that if so.
	 */
	if (!*oz)
		*oz = xfs_get_cached_zone(ip);

	if (!*oz) {
select_zone:
		*oz = xfs_select_zone(mp, write_hint, pack_tight);
		if (!*oz)
			goto out_error;
		xfs_set_cached_zone(ip, *oz);
	}

	alloc_len = xfs_zone_alloc_blocks(*oz, XFS_B_TO_FSB(mp, ioend->io_size),
			&ioend->io_sector, &is_seq);
	if (!alloc_len) {
		xfs_open_zone_put(*oz);
		goto select_zone;
	}

	while ((split = iomap_split_ioend(ioend, alloc_len, is_seq))) {
		if (IS_ERR(split))
			goto out_split_error;
		alloc_len -= split->io_bio.bi_iter.bi_size;
		xfs_submit_zoned_bio(split, *oz, is_seq);
		if (!alloc_len) {
			xfs_open_zone_put(*oz);
			goto select_zone;
		}
	}

	xfs_submit_zoned_bio(ioend, *oz, is_seq);
	return;

out_split_error:
	ioend->io_bio.bi_status = errno_to_blk_status(PTR_ERR(split));
out_error:
	bio_io_error(&ioend->io_bio);
}

/*
 * Wake up all threads waiting for a zoned space allocation when the file system
 * is shut down.
 */
void
xfs_zoned_wake_all(
	struct xfs_mount	*mp)
{
	/*
	 * Don't wake up if there is no m_zone_info.  This is complicated by the
	 * fact that unmount can't atomically clear m_zone_info and thus we need
	 * to check SB_ACTIVE for that, but mount temporarily enables SB_ACTIVE
	 * during log recovery so we can't entirely rely on that either.
	 */
	if ((mp->m_super->s_flags & SB_ACTIVE) && mp->m_zone_info)
		wake_up_all(&mp->m_zone_info->zi_zone_wait);
}

/*
 * Check if @rgbno in @rtg is a potentially valid block.  It might still be
 * unused, but that information is only found in the rmap.
 */
bool
xfs_zone_rgbno_is_valid(
	struct xfs_rtgroup	*rtg,
	xfs_rgnumber_t		rgbno)
{
	lockdep_assert_held(&rtg_rmap(rtg)->i_lock);

	if (rtg->rtg_open_zone)
		return rgbno < rtg->rtg_open_zone->oz_allocated;
	return !xa_get_mark(&rtg_mount(rtg)->m_groups[XG_TYPE_RTG].xa,
			rtg_rgno(rtg), XFS_RTG_FREE);
}

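/*
 * Drop the references held by the open zones list so that the zones
 * themselves can be freed at unmount time.
 */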
static void
xfs_free_open_zones(
	struct xfs_zone_info	*zi)
{
	struct xfs_open_zone	*oz;

	spin_lock(&zi->zi_open_zones_lock);
	while ((oz = list_first_entry_or_null(&zi->zi_open_zones,
			struct xfs_open_zone, oz_entry))) {
		list_del(&oz->oz_entry);
		xfs_open_zone_put(oz);
	}
	spin_unlock(&zi->zi_open_zones_lock);

	/*
	 * Wait for all open zones to be freed so that they drop the group
	 * references:
	 */
	rcu_barrier();
}

struct xfs_init_zones {
	struct xfs_mount	*mp;
	uint64_t		available;
	uint64_t		reclaimable;
};

static int
xfs_init_zone(
	struct xfs_init_zones	*iz,
	struct xfs_rtgroup	*rtg,
	struct blk_zone		*zone)
{
	struct xfs_mount	*mp = rtg_mount(rtg);
	struct xfs_zone_info	*zi = mp->m_zone_info;
	uint32_t		used = rtg_rmap(rtg)->i_used_blocks;
	xfs_rgblock_t		write_pointer, highest_rgbno;
	int			error;

	if (zone && !xfs_zone_validate(zone, rtg, &write_pointer))
		return -EFSCORRUPTED;

	/*
	 * For sequential write required zones we retrieved the hardware write
	 * pointer above.
	 *
	 * For conventional zones or conventional devices we don't have that
	 * luxury.  Instead query the rmap to find the highest recorded block
	 * and set the write pointer to the block after that.  In case of a
	 * power loss this misses blocks where the data I/O has completed but
	 * has not been recorded in the rmap yet, and it also rewrites blocks
	 * if the most recently written ones got deleted again before unmount,
	 * but this is the best we can do without hardware support.
	 */
	if (!zone || zone->cond == BLK_ZONE_COND_NOT_WP) {
		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
		highest_rgbno = xfs_rtrmap_highest_rgbno(rtg);
		if (highest_rgbno == NULLRGBLOCK)
			write_pointer = 0;
		else
			write_pointer = highest_rgbno + 1;
		xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
	}

	/*
	 * If there are no used blocks, but the zone is not in the empty state
	 * yet, we lost power before the zone reset.  In that case finish the
	 * work here.
	 */
	if (write_pointer == rtg_blocks(rtg) && used == 0) {
		error = xfs_zone_gc_reset_sync(rtg);
		if (error)
			return error;
		write_pointer = 0;
	}

	if (write_pointer == 0) {
		/* zone is empty */
		atomic_inc(&zi->zi_nr_free_zones);
		xfs_group_set_mark(&rtg->rtg_group, XFS_RTG_FREE);
		iz->available += rtg_blocks(rtg);
	} else if (write_pointer < rtg_blocks(rtg)) {
		/* zone is open */
		struct xfs_open_zone *oz;

		atomic_inc(&rtg_group(rtg)->xg_active_ref);
		oz = xfs_init_open_zone(rtg, write_pointer, WRITE_LIFE_NOT_SET,
				false);
		list_add_tail(&oz->oz_entry, &zi->zi_open_zones);
		zi->zi_nr_open_zones++;

		iz->available += (rtg_blocks(rtg) - write_pointer);
		iz->reclaimable += write_pointer - used;
	} else if (used < rtg_blocks(rtg)) {
		/* zone fully written, but has freed blocks */
		xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used);
		iz->reclaimable += (rtg_blocks(rtg) - used);
	}

	return 0;
}

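/*
 * Zone report callback: map the reported hardware zone to its rtgroup and
 * initialize the in-memory zone state from it.
 */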
static int
xfs_get_zone_info_cb(
	struct blk_zone		*zone,
	unsigned int		idx,
	void			*data)
{
	struct xfs_init_zones	*iz = data;
	struct xfs_mount	*mp = iz->mp;
	xfs_fsblock_t		zsbno = xfs_daddr_to_rtb(mp, zone->start);
	xfs_rgnumber_t		rgno;
	struct xfs_rtgroup	*rtg;
	int			error;

	if (xfs_rtb_to_rgbno(mp, zsbno) != 0) {
		xfs_warn(mp, "mismatched zone start 0x%llx.", zsbno);
		return -EFSCORRUPTED;
	}

	rgno = xfs_rtb_to_rgno(mp, zsbno);
	rtg = xfs_rtgroup_grab(mp, rgno);
	if (!rtg) {
		xfs_warn(mp, "realtime group not found for zone %u.", rgno);
		return -EFSCORRUPTED;
	}
	error = xfs_init_zone(iz, rtg, zone);
	xfs_rtgroup_rele(rtg);
	return error;
}

/*
 * Calculate the max open zone limit based on the number of backing zones
 * available.
 */
static inline uint32_t
xfs_max_open_zones(
	struct xfs_mount	*mp)
{
	unsigned int		max_open, max_open_data_zones;

	/*
	 * We need two zones for every open data zone, one in reserve as we
	 * don't reclaim open zones.  One data zone and its spare is included
	 * in XFS_MIN_ZONES to support at least one user data writer.
	 */
	max_open_data_zones = (mp->m_sb.sb_rgcount - XFS_MIN_ZONES) / 2 + 1;
	max_open = max_open_data_zones + XFS_OPEN_GC_ZONES;

	/*
	 * Cap the max open limit to 1/4 of available space.  Without this we'd
	 * run out of easy reclaim targets too quickly and storage devices don't
	 * handle huge numbers of concurrent write streams overly well.
	 */
	max_open = min(max_open, mp->m_sb.sb_rgcount / 4);

	return max(XFS_MIN_OPEN_ZONES, max_open);
}

/*
 * Normally we use the open zone limit that the device reports.  If there is
 * none, let the user pick one from the command line.
 *
 * If the device doesn't report an open zone limit and there is no override,
 * allow holding about a quarter of the zones open.  In theory we could allow
 * all to be open, but at that point we run into GC deadlocks because we can't
 * reclaim open zones.
 *
 * When used on conventional SSDs a lower open limit is advisable as we'll
 * otherwise overwhelm the FTL just as much as a conventional block allocator.
 *
 * Note: To debug the open zone management code, force max_open to 1 here.
 */
static int
xfs_calc_open_zones(
	struct xfs_mount	*mp)
{
	struct block_device	*bdev = mp->m_rtdev_targp->bt_bdev;
	unsigned int		bdev_open_zones = bdev_max_open_zones(bdev);

	if (!mp->m_max_open_zones) {
		if (bdev_open_zones)
			mp->m_max_open_zones = bdev_open_zones;
		else
			mp->m_max_open_zones = XFS_DEFAULT_MAX_OPEN_ZONES;
	}

	if (mp->m_max_open_zones < XFS_MIN_OPEN_ZONES) {
		xfs_notice(mp, "need at least %u open zones.",
			XFS_MIN_OPEN_ZONES);
		return -EIO;
	}

	if (bdev_open_zones && bdev_open_zones < mp->m_max_open_zones) {
		mp->m_max_open_zones = bdev_open_zones;
		xfs_info(mp, "limiting open zones to %u due to hardware limit.",
			bdev_open_zones);
	}

	if (mp->m_max_open_zones > xfs_max_open_zones(mp)) {
		mp->m_max_open_zones = xfs_max_open_zones(mp);
		xfs_info(mp,
"limiting open zones to %u due to total zone count (%u)",
			mp->m_max_open_zones, mp->m_sb.sb_rgcount);
	}

	return 0;
}

static unsigned long *
xfs_alloc_bucket_bitmap(
	struct xfs_mount	*mp)
{
	return kvmalloc_array(BITS_TO_LONGS(mp->m_sb.sb_rgcount),
			sizeof(unsigned long), GFP_KERNEL | __GFP_ZERO);
}

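/*
 * Allocate the per-mount zone state, including one bitmap with a bit per
 * rtgroup for each used-space bucket.
 */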
static struct xfs_zone_info *
xfs_alloc_zone_info(
	struct xfs_mount	*mp)
{
	struct xfs_zone_info	*zi;
	int			i;

	zi = kzalloc(sizeof(*zi), GFP_KERNEL);
	if (!zi)
		return NULL;
	INIT_LIST_HEAD(&zi->zi_open_zones);
	INIT_LIST_HEAD(&zi->zi_reclaim_reservations);
	spin_lock_init(&zi->zi_reset_list_lock);
	spin_lock_init(&zi->zi_open_zones_lock);
	spin_lock_init(&zi->zi_reservation_lock);
	init_waitqueue_head(&zi->zi_zone_wait);
	spin_lock_init(&zi->zi_used_buckets_lock);
	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++) {
		zi->zi_used_bucket_bitmap[i] = xfs_alloc_bucket_bitmap(mp);
		if (!zi->zi_used_bucket_bitmap[i])
			goto out_free_bitmaps;
	}
	return zi;

out_free_bitmaps:
	while (--i >= 0)
		kvfree(zi->zi_used_bucket_bitmap[i]);
	kfree(zi);
	return NULL;
}

static void
xfs_free_zone_info(
	struct xfs_zone_info	*zi)
{
	int			i;

	xfs_free_open_zones(zi);
	for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++)
		kvfree(zi->zi_used_bucket_bitmap[i]);
	kfree(zi);
}

int
xfs_mount_zones(
	struct xfs_mount	*mp)
{
	struct xfs_init_zones	iz = {
		.mp		= mp,
	};
	struct xfs_buftarg	*bt = mp->m_rtdev_targp;
	xfs_extlen_t		zone_blocks = mp->m_groups[XG_TYPE_RTG].blocks;
	int			error;

	if (!bt) {
		xfs_notice(mp, "RT device missing.");
		return -EINVAL;
	}

	if (!xfs_has_rtgroups(mp) || !xfs_has_rmapbt(mp)) {
		xfs_notice(mp, "invalid flag combination.");
		return -EFSCORRUPTED;
	}
	if (mp->m_sb.sb_rextsize != 1) {
		xfs_notice(mp, "zoned file systems do not support rextsize.");
		return -EFSCORRUPTED;
	}
	if (mp->m_sb.sb_rgcount < XFS_MIN_ZONES) {
		xfs_notice(mp,
"zoned file systems need to have at least %u zones.", XFS_MIN_ZONES);
		return -EFSCORRUPTED;
	}

	error = xfs_calc_open_zones(mp);
	if (error)
		return error;

	mp->m_zone_info = xfs_alloc_zone_info(mp);
	if (!mp->m_zone_info)
		return -ENOMEM;

	xfs_info(mp, "%u zones of %u blocks (%u max open zones)",
		 mp->m_sb.sb_rgcount, zone_blocks, mp->m_max_open_zones);
	trace_xfs_zones_mount(mp);

	/*
	 * The writeback code switches between inodes regularly to provide
	 * fairness.  The default lower bound is 4MiB, but for zoned file
	 * systems we want to increase that, both to reduce seeks and, more
	 * importantly, so that workloads that write files in a multiple of the
	 * zone size do not get fragmented and require garbage collection when
	 * they shouldn't.  Increase it to the zone size, capped by the max
	 * extent length.
	 *
	 * Note that because s_min_writeback_pages is a superblock field, this
	 * value also gets applied to non-zoned files on the data device if
	 * there are any.  On a typical zoned setup all data is on the RT device
	 * because using the more efficient sequential write required zones
	 * is the reason for using the zone allocator, and either the RT device
	 * and the (meta)data device are on the same block device, or the
	 * (meta)data device is on a fast SSD while the data on the RT device
	 * is on an SMR HDD.  In any combination of the above cases enforcing
	 * the higher min_writeback_pages for non-RT inodes is either a noop
	 * or beneficial.
	 */
	mp->m_super->s_min_writeback_pages =
		XFS_FSB_TO_B(mp, min(zone_blocks, XFS_MAX_BMBT_EXTLEN)) >>
			PAGE_SHIFT;

	if (bdev_is_zoned(bt->bt_bdev)) {
		error = blkdev_report_zones_cached(bt->bt_bdev,
				XFS_FSB_TO_BB(mp, mp->m_sb.sb_rtstart),
				mp->m_sb.sb_rgcount, xfs_get_zone_info_cb, &iz);
		if (error < 0)
			goto out_free_zone_info;
	} else {
		struct xfs_rtgroup	*rtg = NULL;

		while ((rtg = xfs_rtgroup_next(mp, rtg))) {
			error = xfs_init_zone(&iz, rtg, NULL);
			if (error) {
				xfs_rtgroup_rele(rtg);
				goto out_free_zone_info;
			}
		}
	}

	xfs_set_freecounter(mp, XC_FREE_RTAVAILABLE, iz.available);
	xfs_set_freecounter(mp, XC_FREE_RTEXTENTS,
			iz.available + iz.reclaimable);

	/*
	 * The user may configure GC to free up a percentage of unused blocks.
	 * By default this is 0.  GC will always trigger at the minimum level
	 * for keeping max_open_zones available for data placement.
	 */
	mp->m_zonegc_low_space = 0;

	error = xfs_zone_gc_mount(mp);
	if (error)
		goto out_free_zone_info;
	return 0;

out_free_zone_info:
	xfs_free_zone_info(mp->m_zone_info);
	return error;
}

void
xfs_unmount_zones(
	struct xfs_mount	*mp)
{
	xfs_zone_gc_unmount(mp);
	xfs_free_zone_info(mp->m_zone_info);
}