// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 * Copyright (c) 2024, Western Digital Corporation or its affiliates.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/mempool.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-mq-debugfs.h"

#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
	ZONE_COND_NAME(NOT_WP),
	ZONE_COND_NAME(EMPTY),
	ZONE_COND_NAME(IMP_OPEN),
	ZONE_COND_NAME(EXP_OPEN),
	ZONE_COND_NAME(CLOSED),
	ZONE_COND_NAME(READONLY),
	ZONE_COND_NAME(FULL),
	ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME

/*
 * Per-zone write plug.
 * @node: hlist_node structure for managing the plug using a hash table.
 * @link: To list the plug in the zone write plug error list of the disk.
 * @ref: Zone write plug reference counter. A zone write plug reference is
 *       always at least 1 when the plug is hashed in the disk plug hash table.
 *       The reference is incremented whenever a new BIO needing plugging is
 *       submitted and when a function needs to manipulate a plug. The
 *       reference count is decremented whenever a plugged BIO completes and
 *       when a function that referenced the plug returns. The initial
 *       reference is dropped whenever the zone of the zone write plug is reset
 *       or finished and when the zone becomes full (the last write BIO to the
 *       zone completes).
 * @lock: Spinlock to atomically manipulate the plug.
 * @flags: Flags indicating the plug state.
 * @zone_no: The number of the zone the plug is managing.
 * @wp_offset: The zone write pointer location relative to the start of the zone
 *             as a number of 512B sectors.
 * @bio_list: The list of BIOs that are currently plugged.
 * @bio_work: Work struct to handle issuing of plugged BIOs.
 * @rcu_head: RCU head to free zone write plugs with an RCU grace period.
 * @disk: The gendisk the plug belongs to.
 */
struct blk_zone_wplug {
	struct hlist_node	node;
	struct list_head	link;
	atomic_t		ref;
	spinlock_t		lock;
	unsigned int		flags;
	unsigned int		zone_no;
	unsigned int		wp_offset;
	struct bio_list		bio_list;
	struct work_struct	bio_work;
	struct rcu_head		rcu_head;
	struct gendisk		*disk;
};

/*
 * Zone write plug flags bits:
 *  - BLK_ZONE_WPLUG_PLUGGED: Indicates that the zone write plug is plugged,
 *    that is, that write BIOs are being throttled due to a write BIO already
 *    being executed or because the zone write plug bio list is not empty.
 *  - BLK_ZONE_WPLUG_ERROR: Indicates that a write error happened which will be
 *    recovered with a report zone to update the zone write pointer offset.
 *  - BLK_ZONE_WPLUG_UNHASHED: Indicates that the zone write plug was removed
 *    from the disk hash table and that the initial reference to the zone
 *    write plug set when the plug was first added to the hash table has been
 *    dropped. This flag is set when a zone is reset or finished, or becomes
 *    full, to prevent new references to the zone write plug from being taken
 *    for newly incoming BIOs. A zone write plug flagged with this flag will be
 *    freed once all remaining references from BIOs or functions are dropped.
 */
#define BLK_ZONE_WPLUG_PLUGGED		(1U << 0)
#define BLK_ZONE_WPLUG_ERROR		(1U << 1)
#define BLK_ZONE_WPLUG_UNHASHED		(1U << 2)

#define BLK_ZONE_WPLUG_BUSY	(BLK_ZONE_WPLUG_PLUGGED | BLK_ZONE_WPLUG_ERROR)

/**
 * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
 * @zone_cond: BLK_ZONE_COND_XXX.
 *
 * Description: Centralized block layer function to convert a
 * BLK_ZONE_COND_XXX value into its string format. Useful for debugging and
 * tracing zone conditions. For an invalid BLK_ZONE_COND_XXX, the string
 * "UNKNOWN" is returned.
 */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
{
	static const char *zone_cond_str = "UNKNOWN";

	if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
		zone_cond_str = zone_cond_name[zone_cond];

	return zone_cond_str;
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);
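
/*
 * Example (illustrative sketch, not part of the kernel tree): a report zones
 * callback can use blk_zone_cond_str() to trace the condition of the zones it
 * sees. The callback name and message format are assumptions for this example.
 *
 *	static int trace_zone_cb(struct blk_zone *zone, unsigned int idx,
 *				 void *data)
 *	{
 *		pr_debug("zone %u at sector %llu: %s\n",
 *			 idx, zone->start, blk_zone_cond_str(zone->cond));
 *		return 0;
 *	}
 */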

/**
 * blkdev_report_zones - Get zones information
 * @bdev:	Target block device
 * @sector:	Sector from which to report zones
 * @nr_zones:	Maximum number of zones to report
 * @cb:		Callback function called for each reported zone
 * @data:	Private data for the callback
 *
 * Description:
 *    Get zone information starting from the zone containing @sector for at most
 *    @nr_zones, and call @cb for each zone reported by the device.
 *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
 *    constant can be passed to @nr_zones.
 *    Returns the number of zones reported by the device, or a negative errno
 *    value in case of failure.
 *
 *    Note: The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = get_capacity(disk);

	if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!nr_zones || sector >= capacity)
		return 0;

	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
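
/*
 * Example (illustrative sketch): counting implicitly and explicitly open
 * zones with blkdev_report_zones() over all zones, honoring the note above
 * about controlling memory allocations. count_open_cb() and
 * count_open_zones() are hypothetical names used only for this example.
 *
 *	static int count_open_cb(struct blk_zone *zone, unsigned int idx,
 *				 void *data)
 *	{
 *		unsigned int *nr_open = data;
 *
 *		if (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
 *		    zone->cond == BLK_ZONE_COND_EXP_OPEN)
 *			(*nr_open)++;
 *		return 0;
 *	}
 *
 *	static int count_open_zones(struct block_device *bdev)
 *	{
 *		unsigned int nr_open = 0, noio_flag;
 *		int ret;
 *
 *		noio_flag = memalloc_noio_save();
 *		ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *					  count_open_cb, &nr_open);
 *		memalloc_noio_restore(noio_flag);
 *		return ret < 0 ? ret : nr_open;
 *	}
 */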

static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}

static int blk_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	/*
	 * For an all-zones reset, ignore conventional, empty, read-only
	 * and offline zones.
	 */
	switch (zone->cond) {
	case BLK_ZONE_COND_NOT_WP:
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_READONLY:
	case BLK_ZONE_COND_OFFLINE:
		return 0;
	default:
		set_bit(idx, (unsigned long *)data);
		return 0;
	}
}

static int blkdev_zone_reset_all_emulated(struct block_device *bdev)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = bdev_nr_sectors(bdev);
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	unsigned long *need_reset;
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;

	need_reset = blk_alloc_zone_bitmap(disk->queue->node, disk->nr_zones);
	if (!need_reset)
		return -ENOMEM;

	ret = disk->fops->report_zones(disk, 0, disk->nr_zones,
				       blk_zone_need_reset_cb, need_reset);
	if (ret < 0)
		goto out_free_need_reset;

	ret = 0;
	while (sector < capacity) {
		if (!test_bit(disk_zone_no(disk, sector), need_reset)) {
			sector += zone_sectors;
			continue;
		}

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_ZONE_RESET | REQ_SYNC,
				   GFP_KERNEL);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out_free_need_reset:
	kfree(need_reset);
	return ret;
}

static int blkdev_zone_reset_all(struct block_device *bdev)
{
	struct bio bio;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
	return submit_bio_wait(&bio);
}

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev:	Target block device
 * @op:		Operation to be performed on the zones
 * @sector:	Start sector of the first zone to operate on
 * @nr_sectors:	Number of sectors, should be at least the length of one zone and
 *		must be zone size aligned.
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
		     sector_t sector, sector_t nr_sectors)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	sector_t capacity = bdev_nr_sectors(bdev);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!bdev_is_zoned(bdev))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (end_sector <= sector || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle a possibly smaller last zone) */
	if (!bdev_is_zone_start(bdev, sector))
		return -EINVAL;

	if (!bdev_is_zone_start(bdev, nr_sectors) && end_sector != capacity)
		return -EINVAL;

	/*
	 * In the case of a zone reset operation over all zones,
	 * REQ_OP_ZONE_RESET_ALL can be used with devices supporting this
	 * command. For other devices, we emulate this command behavior by
	 * identifying the zones needing a reset.
	 */
	if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) {
		if (!blk_queue_zone_resetall(q))
			return blkdev_zone_reset_all_emulated(bdev);
		return blkdev_zone_reset_all(bdev);
	}

	while (sector < end_sector) {
		bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, GFP_KERNEL);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
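
/*
 * Example (illustrative sketch): finishing the zone containing a given
 * sector, that is, transitioning it to the full condition. The function name
 * is hypothetical; round_down() works here assuming a power-of-2 zone size,
 * as required for blk-mq zoned devices.
 *
 *	static int finish_zone(struct block_device *bdev, sector_t sector)
 *	{
 *		sector_t zone_sectors = bdev_zone_sectors(bdev);
 *
 *		return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_FINISH,
 *					round_down(sector, zone_sectors),
 *					zone_sectors);
 *	}
 */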

struct zone_report_args {
	struct blk_zone __user *zones;
};

static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
				    void *data)
{
	struct zone_report_args *args = data;

	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
		return -EFAULT;
	return 0;
}

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct zone_report_args args;
	struct blk_zone_report rep;
	int ret;

	if (!argp)
		return -EINVAL;

	if (!bdev_is_zoned(bdev))
		return -ENOTTY;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	args.zones = argp + sizeof(struct blk_zone_report);
	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
				  blkdev_copy_zone_to_user, &args);
	if (ret < 0)
		return ret;

	rep.nr_zones = ret;
	rep.flags = BLK_ZONE_REP_CAPACITY;
	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
		return -EFAULT;
	return 0;
}
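
/*
 * Example (illustrative userspace sketch): the matching BLKREPORTZONE call.
 * Userspace allocates a struct blk_zone_report immediately followed by an
 * array of struct blk_zone entries, which is exactly the layout parsed
 * above. Error handling is omitted for brevity.
 *
 *	struct blk_zone_report *rep;
 *
 *	rep = calloc(1, sizeof(*rep) + 16 * sizeof(struct blk_zone));
 *	rep->sector = 0;
 *	rep->nr_zones = 16;
 *	ioctl(fd, BLKREPORTZONE, rep);
 *	// rep->nr_zones now holds the number of zones actually reported
 */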

static int blkdev_truncate_zone_range(struct block_device *bdev,
		blk_mode_t mode, const struct blk_zone_range *zrange)
{
	loff_t start, end;

	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
		/* Out of range */
		return -EINVAL;

	start = zrange->sector << SECTOR_SHIFT;
	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;

	return truncate_bdev_range(bdev, mode, start, end);
}

/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct blk_zone_range zrange;
	enum req_op op;
	int ret;

	if (!argp)
		return -EINVAL;

	if (!bdev_is_zoned(bdev))
		return -ENOTTY;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;

		/* Invalidate the page cache, including dirty pages. */
		filemap_invalidate_lock(bdev->bd_mapping);
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
		if (ret)
			goto fail;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);

fail:
	if (cmd == BLKRESETZONE)
		filemap_invalidate_unlock(bdev->bd_mapping);

	return ret;
}
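
/*
 * Example (illustrative userspace sketch): resetting a single zone with
 * BLKRESETZONE. The file descriptor must be open for writing and the range
 * must be zone aligned. zone_start and zone_len are placeholders.
 *
 *	struct blk_zone_range zrange = {
 *		.sector		= zone_start,
 *		.nr_sectors	= zone_len,
 *	};
 *
 *	ioctl(fd, BLKRESETZONE, &zrange);
 */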

static inline bool disk_zone_is_conv(struct gendisk *disk, sector_t sector)
{
	if (!disk->conv_zones_bitmap)
		return false;
	return test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}

static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone)
{
	return zone->start + zone->len >= get_capacity(disk);
}

static bool disk_zone_is_full(struct gendisk *disk,
			      unsigned int zno, unsigned int offset_in_zone)
{
	if (zno < disk->nr_zones - 1)
		return offset_in_zone >= disk->zone_capacity;
	return offset_in_zone >= disk->last_zone_capacity;
}

static bool disk_zone_wplug_is_full(struct gendisk *disk,
				    struct blk_zone_wplug *zwplug)
{
	return disk_zone_is_full(disk, zwplug->zone_no, zwplug->wp_offset);
}

static bool disk_insert_zone_wplug(struct gendisk *disk,
				   struct blk_zone_wplug *zwplug)
{
	struct blk_zone_wplug *zwplg;
	unsigned long flags;
	unsigned int idx =
		hash_32(zwplug->zone_no, disk->zone_wplugs_hash_bits);

	/*
	 * Add the new zone write plug to the hash table, but carefully as we
	 * are racing with other submission contexts, so we may already have a
	 * zone write plug for the same zone.
	 */
	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
	hlist_for_each_entry_rcu(zwplg, &disk->zone_wplugs_hash[idx], node) {
		if (zwplg->zone_no == zwplug->zone_no) {
			spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
			return false;
		}
	}
	hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]);
	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);

	return true;
}

static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
						  sector_t sector)
{
	unsigned int zno = disk_zone_no(disk, sector);
	unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits);
	struct blk_zone_wplug *zwplug;

	rcu_read_lock();

	hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[idx], node) {
		if (zwplug->zone_no == zno &&
		    atomic_inc_not_zero(&zwplug->ref)) {
			rcu_read_unlock();
			return zwplug;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head)
{
	struct blk_zone_wplug *zwplug =
		container_of(rcu_head, struct blk_zone_wplug, rcu_head);

	mempool_free(zwplug, zwplug->disk->zone_wplugs_pool);
}

static inline void disk_put_zone_wplug(struct blk_zone_wplug *zwplug)
{
	if (atomic_dec_and_test(&zwplug->ref)) {
		WARN_ON_ONCE(!bio_list_empty(&zwplug->bio_list));
		WARN_ON_ONCE(!list_empty(&zwplug->link));
		WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_UNHASHED));

		call_rcu(&zwplug->rcu_head, disk_free_zone_wplug_rcu);
	}
}
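
/*
 * Reference pattern for the two helpers above (sketch): a successful lookup
 * with disk_get_zone_wplug() must always be paired with a matching
 * disk_put_zone_wplug() once the caller is done with the plug.
 *
 *	zwplug = disk_get_zone_wplug(disk, sector);
 *	if (zwplug) {
 *		... use the plug, typically under zwplug->lock ...
 *		disk_put_zone_wplug(zwplug);
 *	}
 */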

static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
						 struct blk_zone_wplug *zwplug)
{
	/* If the zone write plug was already removed, we are done. */
	if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED)
		return false;

	/* If the zone write plug is still busy, it cannot be removed. */
	if (zwplug->flags & BLK_ZONE_WPLUG_BUSY)
		return false;

	/*
	 * Completions of BIOs with blk_zone_write_plug_bio_endio() may
	 * happen after handling a request completion with
	 * blk_zone_write_plug_finish_request() (e.g. with split BIOs
	 * that are chained). In such a case, disk_zone_wplug_unplug_bio()
	 * should not attempt to remove the zone write plug until all BIO
	 * completions are seen. Check by looking at the zone write plug
	 * reference count, which is 2 when the plug is unused (one reference
	 * taken when the plug was allocated and another reference taken by the
	 * caller context).
	 */
	if (atomic_read(&zwplug->ref) > 2)
		return false;

	/* We can remove zone write plugs for zones that are empty or full. */
	return !zwplug->wp_offset || disk_zone_wplug_is_full(disk, zwplug);
}

static void disk_remove_zone_wplug(struct gendisk *disk,
				   struct blk_zone_wplug *zwplug)
{
	unsigned long flags;

	/* If the zone write plug was already removed, we have nothing to do. */
	if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED)
		return;

	/*
	 * Mark the zone write plug as unhashed and drop the extra reference we
	 * took when the plug was inserted in the hash table.
	 */
	zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED;
	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
	hlist_del_init_rcu(&zwplug->node);
	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
	disk_put_zone_wplug(zwplug);
}

static void blk_zone_wplug_bio_work(struct work_struct *work);

/*
 * Get a reference on the write plug for the zone containing @sector.
 * If the plug does not exist, it is allocated and hashed.
 * Return a pointer to the zone write plug with the plug spinlock held.
 */
static struct blk_zone_wplug *disk_get_and_lock_zone_wplug(struct gendisk *disk,
					sector_t sector, gfp_t gfp_mask,
					unsigned long *flags)
{
	unsigned int zno = disk_zone_no(disk, sector);
	struct blk_zone_wplug *zwplug;

again:
	zwplug = disk_get_zone_wplug(disk, sector);
	if (zwplug) {
		/*
		 * Check that a BIO completion or a zone reset or finish
		 * operation has not already removed the zone write plug from
		 * the hash table and dropped its reference count. In such a
		 * case, we need to get a new plug, so start over from the
		 * beginning.
		 */
		spin_lock_irqsave(&zwplug->lock, *flags);
		if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED) {
			spin_unlock_irqrestore(&zwplug->lock, *flags);
			disk_put_zone_wplug(zwplug);
			goto again;
		}
		return zwplug;
	}

	/*
	 * Allocate and initialize a zone write plug with an extra reference
	 * so that it is not freed when the zone write plug becomes idle without
	 * the zone being full.
	 */
	zwplug = mempool_alloc(disk->zone_wplugs_pool, gfp_mask);
	if (!zwplug)
		return NULL;

	INIT_HLIST_NODE(&zwplug->node);
	INIT_LIST_HEAD(&zwplug->link);
	atomic_set(&zwplug->ref, 2);
	spin_lock_init(&zwplug->lock);
	zwplug->flags = 0;
	zwplug->zone_no = zno;
	zwplug->wp_offset = sector & (disk->queue->limits.chunk_sectors - 1);
	bio_list_init(&zwplug->bio_list);
	INIT_WORK(&zwplug->bio_work, blk_zone_wplug_bio_work);
	zwplug->disk = disk;

	spin_lock_irqsave(&zwplug->lock, *flags);

	/*
	 * Insert the new zone write plug in the hash table. This can fail only
	 * if another context already inserted a plug. Retry from the beginning
	 * in such a case.
	 */
	if (!disk_insert_zone_wplug(disk, zwplug)) {
		spin_unlock_irqrestore(&zwplug->lock, *flags);
		mempool_free(zwplug, disk->zone_wplugs_pool);
		goto again;
	}

	return zwplug;
}
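
/*
 * Usage sketch: disk_get_and_lock_zone_wplug() returns with zwplug->lock
 * held and a reference taken, so callers must both unlock the plug and drop
 * the reference, as blk_revalidate_seq_zone() below does:
 *
 *	zwplug = disk_get_and_lock_zone_wplug(disk, sector, GFP_NOIO, &flags);
 *	if (!zwplug)
 *		return -ENOMEM;
 *	... inspect or update the plug ...
 *	spin_unlock_irqrestore(&zwplug->lock, flags);
 *	disk_put_zone_wplug(zwplug);
 */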

static inline void blk_zone_wplug_bio_io_error(struct blk_zone_wplug *zwplug,
					       struct bio *bio)
{
	struct request_queue *q = zwplug->disk->queue;

	bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING);
	bio_io_error(bio);
	disk_put_zone_wplug(zwplug);
	blk_queue_exit(q);
}

/*
 * Abort (fail) all plugged BIOs of a zone write plug.
 */
static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
{
	struct bio *bio;

	while ((bio = bio_list_pop(&zwplug->bio_list)))
		blk_zone_wplug_bio_io_error(zwplug, bio);
}

/*
 * Abort (fail) all plugged BIOs of a zone write plug that are not aligned
 * with the assumed write pointer location of the zone when the BIO will
 * be unplugged.
 */
static void disk_zone_wplug_abort_unaligned(struct gendisk *disk,
					    struct blk_zone_wplug *zwplug)
{
	unsigned int wp_offset = zwplug->wp_offset;
	struct bio_list bl = BIO_EMPTY_LIST;
	struct bio *bio;

	while ((bio = bio_list_pop(&zwplug->bio_list))) {
		if (disk_zone_is_full(disk, zwplug->zone_no, wp_offset) ||
		    (bio_op(bio) != REQ_OP_ZONE_APPEND &&
		     bio_offset_from_zone_start(bio) != wp_offset)) {
			blk_zone_wplug_bio_io_error(zwplug, bio);
			continue;
		}

		wp_offset += bio_sectors(bio);
		bio_list_add(&bl, bio);
	}

	bio_list_merge(&zwplug->bio_list, &bl);
}

static inline void disk_zone_wplug_set_error(struct gendisk *disk,
					     struct blk_zone_wplug *zwplug)
{
	unsigned long flags;

	if (zwplug->flags & BLK_ZONE_WPLUG_ERROR)
		return;

	/*
	 * At this point, we already have a reference on the zone write plug.
	 * However, since we are going to add the plug to the disk zone write
	 * plugs work list, increase its reference count. This reference will
	 * be dropped in disk_zone_wplugs_work() once the error state is
	 * handled, or in disk_zone_wplug_clear_error() if the zone is reset or
	 * finished.
	 */
	zwplug->flags |= BLK_ZONE_WPLUG_ERROR;
	atomic_inc(&zwplug->ref);

	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
	list_add_tail(&zwplug->link, &disk->zone_wplugs_err_list);
	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
}

static inline void disk_zone_wplug_clear_error(struct gendisk *disk,
					       struct blk_zone_wplug *zwplug)
{
	unsigned long flags;

	if (!(zwplug->flags & BLK_ZONE_WPLUG_ERROR))
		return;

	/*
	 * We are racing with the error handling work which drops the reference
	 * on the zone write plug after handling the error state. So remove the
	 * plug from the error list and drop its reference count only if the
	 * error handling has not yet started, that is, if the zone write plug
	 * is still listed.
	 */
	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
	if (!list_empty(&zwplug->link)) {
		list_del_init(&zwplug->link);
		zwplug->flags &= ~BLK_ZONE_WPLUG_ERROR;
		disk_put_zone_wplug(zwplug);
	}
	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
}

/*
 * Set a zone write plug write pointer offset to either 0 (zone reset case)
 * or to the zone size (zone finish case). This aborts all plugged BIOs, which
 * is fine since resetting or finishing a zone while writes are in flight is a
 * user mistake that will most likely cause all plugged BIOs to fail anyway.
 */
static void disk_zone_wplug_set_wp_offset(struct gendisk *disk,
					  struct blk_zone_wplug *zwplug,
					  unsigned int wp_offset)
{
	unsigned long flags;

	spin_lock_irqsave(&zwplug->lock, flags);

	/*
	 * Make sure that a BIO completion or another zone reset or finish
	 * operation has not already removed the plug from the hash table.
	 */
	if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED) {
		spin_unlock_irqrestore(&zwplug->lock, flags);
		return;
	}

	/* Update the zone write pointer and abort all plugged BIOs. */
	zwplug->wp_offset = wp_offset;
	disk_zone_wplug_abort(zwplug);

	/*
	 * Updating the write pointer offset puts the zone back in a good
	 * state. So clear the error flag and decrement the error count if
	 * we were in error state.
	 */
	disk_zone_wplug_clear_error(disk, zwplug);

	/*
	 * The zone write plug now has no BIO plugged: remove it from the
	 * hash table so that it cannot be seen. The plug will be freed
	 * when the last reference is dropped.
	 */
	if (disk_should_remove_zone_wplug(disk, zwplug))
		disk_remove_zone_wplug(disk, zwplug);

	spin_unlock_irqrestore(&zwplug->lock, flags);
}

static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
						  unsigned int wp_offset)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	sector_t sector = bio->bi_iter.bi_sector;
	struct blk_zone_wplug *zwplug;

	/* Conventional zones cannot be reset nor finished. */
	if (disk_zone_is_conv(disk, sector)) {
		bio_io_error(bio);
		return true;
	}

	/*
	 * If we have a zone write plug, set its write pointer offset to 0
	 * (reset case) or to the zone size (finish case). This will abort all
	 * BIOs plugged for the target zone. It is fine as resetting or
	 * finishing zones while writes are still in-flight will result in the
	 * writes failing anyway.
	 */
	zwplug = disk_get_zone_wplug(disk, sector);
	if (zwplug) {
		disk_zone_wplug_set_wp_offset(disk, zwplug, wp_offset);
		disk_put_zone_wplug(zwplug);
	}

	return false;
}

static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	struct blk_zone_wplug *zwplug;
	sector_t sector;

	/*
	 * Set the write pointer offset of all zone write plugs to 0. This will
	 * abort all plugged BIOs. It is fine as resetting zones while writes
	 * are still in-flight will result in the writes failing anyway.
	 */
	for (sector = 0; sector < get_capacity(disk);
	     sector += disk->queue->limits.chunk_sectors) {
		zwplug = disk_get_zone_wplug(disk, sector);
		if (zwplug) {
			disk_zone_wplug_set_wp_offset(disk, zwplug, 0);
			disk_put_zone_wplug(zwplug);
		}
	}

	return false;
}

static inline void blk_zone_wplug_add_bio(struct blk_zone_wplug *zwplug,
					  struct bio *bio, unsigned int nr_segs)
{
	/*
	 * Grab an extra reference on the BIO request queue usage counter.
	 * This reference will be reused to submit a request for the BIO for
	 * blk-mq devices and dropped when the BIO is failed and after
	 * it is issued in the case of BIO-based devices.
	 */
	percpu_ref_get(&bio->bi_bdev->bd_disk->queue->q_usage_counter);

	/*
	 * The BIO is being plugged and thus will have to wait for the ongoing
	 * write and for all other writes already plugged. So polling makes
	 * no sense.
	 */
	bio_clear_polled(bio);

	/*
	 * Reuse the poll cookie field to store the number of segments when
	 * split to the hardware limits.
	 */
	bio->__bi_nr_segments = nr_segs;

	/*
	 * We always receive BIOs after they are split and ready to be issued.
	 * The block layer passes the parts of a split BIO in order, and the
	 * user must also issue writes sequentially. So simply add the new BIO
	 * at the tail of the list to preserve the sequential write order.
	 */
	bio_list_add(&zwplug->bio_list, bio);
}

/*
 * Called from bio_attempt_back_merge() when a BIO was merged with a request.
 */
void blk_zone_write_plug_bio_merged(struct bio *bio)
{
	struct blk_zone_wplug *zwplug;
	unsigned long flags;

	/*
	 * If the BIO was already plugged, then we were called through
	 * blk_zone_write_plug_init_request() -> blk_attempt_bio_merge().
	 * For this case, we already hold a reference on the zone write plug for
	 * the BIO and blk_zone_write_plug_init_request() will handle the
	 * zone write pointer offset update.
	 */
	if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
		return;

	bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING);

	/*
	 * Get a reference on the zone write plug of the target zone and advance
	 * the zone write pointer offset. Given that this is a merge, we already
	 * have at least one request and one BIO referencing the zone write
	 * plug. So this should not fail.
	 */
	zwplug = disk_get_zone_wplug(bio->bi_bdev->bd_disk,
				     bio->bi_iter.bi_sector);
	if (WARN_ON_ONCE(!zwplug))
		return;

	spin_lock_irqsave(&zwplug->lock, flags);
	zwplug->wp_offset += bio_sectors(bio);
	spin_unlock_irqrestore(&zwplug->lock, flags);
}

/*
 * Attempt to merge plugged BIOs with a newly prepared request for a BIO that
 * already went through zone write plugging (either a new BIO or one that was
 * unplugged).
 */
void blk_zone_write_plug_init_request(struct request *req)
{
	sector_t req_back_sector = blk_rq_pos(req) + blk_rq_sectors(req);
	struct request_queue *q = req->q;
	struct gendisk *disk = q->disk;
	struct blk_zone_wplug *zwplug =
		disk_get_zone_wplug(disk, blk_rq_pos(req));
	unsigned long flags;
	struct bio *bio;

	if (WARN_ON_ONCE(!zwplug))
		return;

	/*
	 * Indicate that completion of this request needs to be handled with
	 * blk_zone_write_plug_finish_request(), which will drop the reference
	 * on the zone write plug we took above on entry to this function.
	 */
	req->rq_flags |= RQF_ZONE_WRITE_PLUGGING;

	if (blk_queue_nomerges(q))
		return;

	/*
	 * Walk through the list of plugged BIOs to check if they can be merged
	 * into the back of the request.
	 */
	spin_lock_irqsave(&zwplug->lock, flags);
	while (!disk_zone_wplug_is_full(disk, zwplug)) {
		bio = bio_list_peek(&zwplug->bio_list);
		if (!bio)
			break;

		if (bio->bi_iter.bi_sector != req_back_sector ||
		    !blk_rq_merge_ok(req, bio))
			break;

		WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE_ZEROES &&
			     !bio->__bi_nr_segments);

		bio_list_pop(&zwplug->bio_list);
		if (bio_attempt_back_merge(req, bio, bio->__bi_nr_segments) !=
		    BIO_MERGE_OK) {
			bio_list_add_head(&zwplug->bio_list, bio);
			break;
		}

		/*
		 * Drop the extra reference on the queue usage we got when
		 * plugging the BIO and advance the write pointer offset.
		 */
		blk_queue_exit(q);
		zwplug->wp_offset += bio_sectors(bio);

		req_back_sector += bio_sectors(bio);
	}
	spin_unlock_irqrestore(&zwplug->lock, flags);
}

/*
 * Check and prepare a BIO for submission by incrementing the write pointer
 * offset of its zone write plug and changing zone append operations into
 * regular write when zone append emulation is needed.
 */
static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
				       struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	/*
	 * Check that the user is not attempting to write to a full zone.
	 * We know such BIO will fail, and that would potentially overflow our
	 * write pointer offset beyond the end of the zone.
	 */
	if (disk_zone_wplug_is_full(disk, zwplug))
		goto err;

	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		/*
		 * Use a regular write starting at the current write pointer.
		 * Similarly to native zone append operations, do not allow
		 * merging.
		 */
		bio->bi_opf &= ~REQ_OP_MASK;
		bio->bi_opf |= REQ_OP_WRITE | REQ_NOMERGE;
		bio->bi_iter.bi_sector += zwplug->wp_offset;

		/*
		 * Remember that this BIO is in fact a zone append operation
		 * so that we can restore its operation code on completion.
		 */
		bio_set_flag(bio, BIO_EMULATES_ZONE_APPEND);
	} else {
		/*
		 * Check for non-sequential writes early because we avoid a
		 * whole lot of error handling trouble if we don't send it off
		 * to the driver.
		 */
		if (bio_offset_from_zone_start(bio) != zwplug->wp_offset)
			goto err;
	}

	/* Advance the zone write pointer offset. */
	zwplug->wp_offset += bio_sectors(bio);

	return true;

err:
	/* We detected an invalid write BIO: schedule error recovery. */
	disk_zone_wplug_set_error(disk, zwplug);
	kblockd_schedule_work(&disk->zone_wplugs_work);
	return false;
}

static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	sector_t sector = bio->bi_iter.bi_sector;
	struct blk_zone_wplug *zwplug;
	gfp_t gfp_mask = GFP_NOIO;
	unsigned long flags;

	/*
	 * BIOs must be fully contained within a zone so that we use the correct
	 * zone write plug for the entire BIO. For blk-mq devices, the block
	 * layer should already have done any splitting required to ensure this
	 * and this BIO should thus not be straddling zone boundaries. For
	 * BIO-based devices, it is the responsibility of the driver to split
	 * the bio before submitting it.
	 */
	if (WARN_ON_ONCE(bio_straddles_zones(bio))) {
		bio_io_error(bio);
		return true;
	}

	/* Conventional zones do not need write plugging. */
	if (disk_zone_is_conv(disk, sector)) {
		/* Zone append to conventional zones is not allowed. */
		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
			bio_io_error(bio);
			return true;
		}
		return false;
	}

	if (bio->bi_opf & REQ_NOWAIT)
		gfp_mask = GFP_NOWAIT;

	zwplug = disk_get_and_lock_zone_wplug(disk, sector, gfp_mask, &flags);
	if (!zwplug) {
		bio_io_error(bio);
		return true;
	}

	/* Indicate that this BIO is being handled using zone write plugging. */
	bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING);

	/*
	 * If the zone is already plugged or has a pending error, add the BIO
	 * to the plug BIO list. Otherwise, plug and let the BIO execute.
	 */
	if (zwplug->flags & BLK_ZONE_WPLUG_BUSY)
		goto plug;

	/*
	 * If an error is detected when preparing the BIO, add it to the BIO
	 * list so that error recovery can deal with it.
	 */
	if (!blk_zone_wplug_prepare_bio(zwplug, bio))
		goto plug;

	zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;

	spin_unlock_irqrestore(&zwplug->lock, flags);

	return false;

plug:
	zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
	blk_zone_wplug_add_bio(zwplug, bio, nr_segs);

	spin_unlock_irqrestore(&zwplug->lock, flags);

	return true;
}

/**
 * blk_zone_plug_bio - Handle a zone write BIO with zone write plugging
 * @bio: The BIO being submitted
 * @nr_segs: The number of physical segments of @bio
 *
 * Handle write, write zeroes and zone append operations requiring emulation
 * using zone write plugging.
 *
 * Return true whenever @bio execution needs to be delayed through the zone
 * write plug. Otherwise, return false to let the submission path process
 * @bio normally.
 */
bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
{
	struct block_device *bdev = bio->bi_bdev;

	if (!bdev->bd_disk->zone_wplugs_hash)
		return false;

	/*
	 * If the BIO already has the plugging flag set, then it was already
	 * handled through this path and this is a submission from the zone
	 * plug bio submit work.
	 */
	if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
		return false;

	/*
	 * We do not need to do anything special for empty flush BIOs, e.g.
	 * BIOs such as issued by blkdev_issue_flush(). This is because it is
	 * the responsibility of the user to first wait for the completion of
	 * write operations for flush to have any effect on the persistence of
	 * the written data.
	 */
	if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
		return false;

	/*
	 * Regular writes and write zeroes need to be handled through the target
	 * zone write plug. This includes writes with REQ_FUA | REQ_PREFLUSH
	 * which may need to go through the flush machinery depending on the
	 * target device capabilities. Plugging such writes is fine as the flush
	 * machinery operates at the request level, below the plug, and
	 * completion of the flush sequence will go through the regular BIO
	 * completion, which will handle zone write plugging.
	 * Zone append operations for devices that requested emulation must
	 * also be plugged so that these BIOs can be changed into regular
	 * write BIOs.
	 * Zone reset, reset all and finish commands need special treatment
	 * to correctly track the write pointer offset of zones. These commands
	 * are not plugged as we do not need serialization with write
	 * operations. It is the responsibility of the user to not issue reset
	 * and finish commands when write operations are in flight.
	 */
	switch (bio_op(bio)) {
	case REQ_OP_ZONE_APPEND:
		if (!bdev_emulates_zone_append(bdev))
			return false;
		fallthrough;
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_ZEROES:
		return blk_zone_wplug_handle_write(bio, nr_segs);
	case REQ_OP_ZONE_RESET:
		return blk_zone_wplug_handle_reset_or_finish(bio, 0);
	case REQ_OP_ZONE_FINISH:
		return blk_zone_wplug_handle_reset_or_finish(bio,
						bdev_zone_sectors(bdev));
	case REQ_OP_ZONE_RESET_ALL:
		return blk_zone_wplug_handle_reset_all(bio);
	default:
		return false;
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_zone_plug_bio);
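
/*
 * Usage sketch for BIO-based drivers: before issuing a BIO that targets a
 * zoned device, let zone write plugging intercept it. A true return means
 * the BIO was plugged and will be resubmitted later, so the driver must not
 * touch it further. (Device mapper uses this helper in a similar way; the
 * surrounding code here is hypothetical.)
 *
 *	if (blk_zone_plug_bio(bio, 0))
 *		return;
 *	... issue the BIO to the underlying device ...
 */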

static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk,
					      struct blk_zone_wplug *zwplug)
{
	/*
	 * Take a reference on the zone write plug and schedule the submission
	 * of the next plugged BIO. blk_zone_wplug_bio_work() will release the
	 * reference we take here.
	 */
	WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED));
	atomic_inc(&zwplug->ref);
	queue_work(disk->zone_wplugs_wq, &zwplug->bio_work);
}

static void disk_zone_wplug_unplug_bio(struct gendisk *disk,
				       struct blk_zone_wplug *zwplug)
{
	unsigned long flags;

	spin_lock_irqsave(&zwplug->lock, flags);

	/*
	 * If we had an error, schedule error recovery. The recovery work
	 * will restart submission of plugged BIOs.
	 */
	if (zwplug->flags & BLK_ZONE_WPLUG_ERROR) {
		spin_unlock_irqrestore(&zwplug->lock, flags);
		kblockd_schedule_work(&disk->zone_wplugs_work);
		return;
	}

	/* Schedule submission of the next plugged BIO if we have one. */
	if (!bio_list_empty(&zwplug->bio_list)) {
		disk_zone_wplug_schedule_bio_work(disk, zwplug);
		spin_unlock_irqrestore(&zwplug->lock, flags);
		return;
	}

	zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;

	/*
	 * If the zone is full (it was fully written or finished) or empty
	 * (it was reset), remove its zone write plug from the hash table.
	 */
	if (disk_should_remove_zone_wplug(disk, zwplug))
		disk_remove_zone_wplug(disk, zwplug);

	spin_unlock_irqrestore(&zwplug->lock, flags);
}

void blk_zone_write_plug_bio_endio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	struct blk_zone_wplug *zwplug =
		disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
	unsigned long flags;

	if (WARN_ON_ONCE(!zwplug))
		return;

	/* Make sure we do not see this BIO again by clearing the plug flag. */
	bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING);

	/*
	 * If this is a regular write emulating a zone append operation,
	 * restore the original operation code.
	 */
	if (bio_flagged(bio, BIO_EMULATES_ZONE_APPEND)) {
		bio->bi_opf &= ~REQ_OP_MASK;
		bio->bi_opf |= REQ_OP_ZONE_APPEND;
	}

	/*
	 * If the BIO failed, mark the plug as having an error to trigger
	 * recovery.
	 */
	if (bio->bi_status != BLK_STS_OK) {
		spin_lock_irqsave(&zwplug->lock, flags);
		disk_zone_wplug_set_error(disk, zwplug);
		spin_unlock_irqrestore(&zwplug->lock, flags);
	}

	/* Drop the reference we took when the BIO was issued. */
	disk_put_zone_wplug(zwplug);

	/*
	 * For BIO-based devices, blk_zone_write_plug_finish_request()
	 * is not called. So we need to schedule execution of the next
	 * plugged BIO here.
	 */
	if (bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO))
		disk_zone_wplug_unplug_bio(disk, zwplug);

	/* Drop the reference we took when entering this function. */
	disk_put_zone_wplug(zwplug);
}

void blk_zone_write_plug_finish_request(struct request *req)
{
	struct gendisk *disk = req->q->disk;
	struct blk_zone_wplug *zwplug;

	zwplug = disk_get_zone_wplug(disk, req->__sector);
	if (WARN_ON_ONCE(!zwplug))
		return;

	req->rq_flags &= ~RQF_ZONE_WRITE_PLUGGING;

	/*
	 * Drop the reference we took when the request was initialized in
	 * blk_zone_write_plug_init_request().
	 */
	disk_put_zone_wplug(zwplug);

	disk_zone_wplug_unplug_bio(disk, zwplug);

	/* Drop the reference we took when entering this function. */
	disk_put_zone_wplug(zwplug);
}

static void blk_zone_wplug_bio_work(struct work_struct *work)
{
	struct blk_zone_wplug *zwplug =
		container_of(work, struct blk_zone_wplug, bio_work);
	struct block_device *bdev;
	unsigned long flags;
	struct bio *bio;

	/*
	 * Submit the next plugged BIO. If we do not have any, clear
	 * the plugged flag.
	 */
	spin_lock_irqsave(&zwplug->lock, flags);

	bio = bio_list_pop(&zwplug->bio_list);
	if (!bio) {
		zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
		spin_unlock_irqrestore(&zwplug->lock, flags);
		goto put_zwplug;
	}

	if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
		/* Error recovery will decide what to do with the BIO. */
		bio_list_add_head(&zwplug->bio_list, bio);
		spin_unlock_irqrestore(&zwplug->lock, flags);
		goto put_zwplug;
	}

	spin_unlock_irqrestore(&zwplug->lock, flags);

	bdev = bio->bi_bdev;
	submit_bio_noacct_nocheck(bio);

	/*
	 * blk-mq devices will reuse the extra reference on the request queue
	 * usage counter we took when the BIO was plugged, but the submission
	 * path for BIO-based devices will not do that. So drop this extra
	 * reference here.
	 */
	if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO))
		blk_queue_exit(bdev->bd_disk->queue);

put_zwplug:
	/* Drop the reference we took in disk_zone_wplug_schedule_bio_work(). */
	disk_put_zone_wplug(zwplug);
}

static unsigned int blk_zone_wp_offset(struct blk_zone *zone)
{
	switch (zone->cond) {
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
	case BLK_ZONE_COND_CLOSED:
		return zone->wp - zone->start;
	case BLK_ZONE_COND_FULL:
		return zone->len;
	case BLK_ZONE_COND_EMPTY:
		return 0;
	case BLK_ZONE_COND_NOT_WP:
	case BLK_ZONE_COND_OFFLINE:
	case BLK_ZONE_COND_READONLY:
	default:
		/*
		 * Conventional, offline and read-only zones do not have a valid
		 * write pointer.
		 */
		return UINT_MAX;
	}
}
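
/*
 * Example: for an implicitly open zone with zone->start == 0x80000 and
 * zone->wp == 0x80100, blk_zone_wp_offset() returns 0x100, that is,
 * 256 sectors (128 KiB) already written from the start of the zone.
 */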

static int blk_zone_wplug_report_zone_cb(struct blk_zone *zone,
					 unsigned int idx, void *data)
{
	struct blk_zone *zonep = data;

	*zonep = *zone;
	return 0;
}

static void disk_zone_wplug_handle_error(struct gendisk *disk,
					 struct blk_zone_wplug *zwplug)
{
	sector_t zone_start_sector =
		bdev_zone_sectors(disk->part0) * zwplug->zone_no;
	unsigned int noio_flag;
	struct blk_zone zone;
	unsigned long flags;
	int ret;

	/* Get the current zone information from the device. */
	noio_flag = memalloc_noio_save();
	ret = disk->fops->report_zones(disk, zone_start_sector, 1,
				       blk_zone_wplug_report_zone_cb, &zone);
	memalloc_noio_restore(noio_flag);

	spin_lock_irqsave(&zwplug->lock, flags);

	/*
	 * A zone reset or finish may have cleared the error already. In such
	 * a case, do nothing as the report zones may have seen the "old" write
	 * pointer value before the reset/finish operation completed.
	 */
	if (!(zwplug->flags & BLK_ZONE_WPLUG_ERROR))
		goto unlock;

	zwplug->flags &= ~BLK_ZONE_WPLUG_ERROR;

	if (ret != 1) {
		/*
		 * We failed to get the zone information, meaning that something
		 * is likely really wrong with the device. Abort all remaining
		 * plugged BIOs as otherwise we could end up waiting forever on
		 * plugged BIOs to complete if there is an ongoing queue freeze.
		 */
		disk_zone_wplug_abort(zwplug);
		goto unplug;
	}

	/* Update the zone write pointer offset. */
	zwplug->wp_offset = blk_zone_wp_offset(&zone);
	disk_zone_wplug_abort_unaligned(disk, zwplug);

	/* Restart BIO submission if we still have any BIO left. */
	if (!bio_list_empty(&zwplug->bio_list)) {
		disk_zone_wplug_schedule_bio_work(disk, zwplug);
		goto unlock;
	}

unplug:
	zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
	if (disk_should_remove_zone_wplug(disk, zwplug))
		disk_remove_zone_wplug(disk, zwplug);

unlock:
	spin_unlock_irqrestore(&zwplug->lock, flags);
}

static void disk_zone_wplugs_work(struct work_struct *work)
{
	struct gendisk *disk =
		container_of(work, struct gendisk, zone_wplugs_work);
	struct blk_zone_wplug *zwplug;
	unsigned long flags;

	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);

	while (!list_empty(&disk->zone_wplugs_err_list)) {
		zwplug = list_first_entry(&disk->zone_wplugs_err_list,
					  struct blk_zone_wplug, link);
		list_del_init(&zwplug->link);
		spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);

		disk_zone_wplug_handle_error(disk, zwplug);
		disk_put_zone_wplug(zwplug);

		spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
	}

	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
}

static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk)
{
	return 1U << disk->zone_wplugs_hash_bits;
}

void disk_init_zone_resources(struct gendisk *disk)
{
	spin_lock_init(&disk->zone_wplugs_lock);
	INIT_LIST_HEAD(&disk->zone_wplugs_err_list);
	INIT_WORK(&disk->zone_wplugs_work, disk_zone_wplugs_work);
}

/*
 * For the size of a disk zone write plug hash table, use the size of the
 * zone write plug mempool, which is the maximum of the disk open zones and
 * active zones limits. But do not exceed 4KB (512 hlist head entries), that is,
 * 9 bits. For a disk that has no limits, mempool size defaults to 128.
 */
#define BLK_ZONE_WPLUG_MAX_HASH_BITS		9
#define BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE	128
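
/*
 * Example: a disk with max_open_zones == 128 gets a mempool of 128 plugs and
 * zone_wplugs_hash_bits = min(ilog2(128) + 1, 9) = 8, that is, a hash table
 * of 256 hlist heads (2KB on 64-bit). Only pool sizes of 256 plugs or more
 * hit the 9 bits (512 entries, 4KB) cap.
 */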

static int disk_alloc_zone_resources(struct gendisk *disk,
				     unsigned int pool_size)
{
	unsigned int i;

	disk->zone_wplugs_hash_bits =
		min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS);

	disk->zone_wplugs_hash =
		kcalloc(disk_zone_wplugs_hash_size(disk),
			sizeof(struct hlist_head), GFP_KERNEL);
	if (!disk->zone_wplugs_hash)
		return -ENOMEM;

	for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++)
		INIT_HLIST_HEAD(&disk->zone_wplugs_hash[i]);

	disk->zone_wplugs_pool = mempool_create_kmalloc_pool(pool_size,
						sizeof(struct blk_zone_wplug));
	if (!disk->zone_wplugs_pool)
		goto free_hash;

	disk->zone_wplugs_wq =
		alloc_workqueue("%s_zwplugs", WQ_MEM_RECLAIM | WQ_HIGHPRI,
				pool_size, disk->disk_name);
	if (!disk->zone_wplugs_wq)
		goto destroy_pool;

	return 0;

destroy_pool:
	mempool_destroy(disk->zone_wplugs_pool);
	disk->zone_wplugs_pool = NULL;
free_hash:
	kfree(disk->zone_wplugs_hash);
	disk->zone_wplugs_hash = NULL;
	disk->zone_wplugs_hash_bits = 0;
	return -ENOMEM;
}

static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
{
	struct blk_zone_wplug *zwplug;
	unsigned int i;

	if (!disk->zone_wplugs_hash)
		return;

	/* Free all the zone write plugs we have. */
	for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++) {
		while (!hlist_empty(&disk->zone_wplugs_hash[i])) {
			zwplug = hlist_entry(disk->zone_wplugs_hash[i].first,
					     struct blk_zone_wplug, node);
			atomic_inc(&zwplug->ref);
			disk_remove_zone_wplug(disk, zwplug);
			disk_put_zone_wplug(zwplug);
		}
	}

	kfree(disk->zone_wplugs_hash);
	disk->zone_wplugs_hash = NULL;
	disk->zone_wplugs_hash_bits = 0;
}

void disk_free_zone_resources(struct gendisk *disk)
{
	cancel_work_sync(&disk->zone_wplugs_work);

	if (disk->zone_wplugs_wq) {
		destroy_workqueue(disk->zone_wplugs_wq);
		disk->zone_wplugs_wq = NULL;
	}

	disk_destroy_zone_wplugs_hash_table(disk);

	/*
	 * Wait for the zone write plugs to be RCU-freed before
	 * destroying the mempool.
	 */
	rcu_barrier();

	mempool_destroy(disk->zone_wplugs_pool);
	disk->zone_wplugs_pool = NULL;

	kfree(disk->conv_zones_bitmap);
	disk->conv_zones_bitmap = NULL;
	disk->zone_capacity = 0;
	disk->last_zone_capacity = 0;
	disk->nr_zones = 0;
}

static inline bool disk_need_zone_resources(struct gendisk *disk)
{
	/*
	 * All mq zoned devices need zone resources so that the block layer
	 * can automatically handle write BIO plugging. BIO-based device drivers
	 * (e.g. DM devices) are normally responsible for handling zone write
	 * ordering and do not need zone resources, unless the driver requires
	 * zone append emulation.
	 */
	return queue_is_mq(disk->queue) ||
		queue_emulates_zone_append(disk->queue);
}

static int disk_revalidate_zone_resources(struct gendisk *disk,
					  unsigned int nr_zones)
{
	struct queue_limits *lim = &disk->queue->limits;
	unsigned int pool_size;

	if (!disk_need_zone_resources(disk))
		return 0;

	/*
	 * If the device has no limit on the maximum number of open and active
	 * zones, use BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE.
	 */
	pool_size = max(lim->max_open_zones, lim->max_active_zones);
	if (!pool_size)
		pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_zones);

	if (!disk->zone_wplugs_hash)
		return disk_alloc_zone_resources(disk, pool_size);

	return 0;
}

struct blk_revalidate_zone_args {
	struct gendisk	*disk;
	unsigned long	*conv_zones_bitmap;
	unsigned int	nr_zones;
	unsigned int	zone_capacity;
	unsigned int	last_zone_capacity;
	sector_t	sector;
};

/*
 * Update the disk zone resources information and device queue limits.
 * The disk queue is frozen when this is executed.
 */
static int disk_update_zone_resources(struct gendisk *disk,
				      struct blk_revalidate_zone_args *args)
{
	struct request_queue *q = disk->queue;
	unsigned int nr_seq_zones, nr_conv_zones = 0;
	unsigned int pool_size;
	struct queue_limits lim;

	disk->nr_zones = args->nr_zones;
	disk->zone_capacity = args->zone_capacity;
	disk->last_zone_capacity = args->last_zone_capacity;
	swap(disk->conv_zones_bitmap, args->conv_zones_bitmap);
	if (disk->conv_zones_bitmap)
		nr_conv_zones = bitmap_weight(disk->conv_zones_bitmap,
					      disk->nr_zones);
	if (nr_conv_zones >= disk->nr_zones) {
		pr_warn("%s: Invalid number of conventional zones %u / %u\n",
			disk->disk_name, nr_conv_zones, disk->nr_zones);
		return -ENODEV;
	}

	lim = queue_limits_start_update(q);

	/*
	 * Some devices can advertise zone resource limits that are larger than
	 * the number of sequential zones of the zoned block device, e.g. a
	 * small ZNS namespace. In such a case, assume that the zoned device has
	 * no zone resource limits.
	 */
	nr_seq_zones = disk->nr_zones - nr_conv_zones;
	if (lim.max_open_zones >= nr_seq_zones)
		lim.max_open_zones = 0;
	if (lim.max_active_zones >= nr_seq_zones)
		lim.max_active_zones = 0;

	if (!disk->zone_wplugs_pool)
		goto commit;

	/*
	 * If the device has no limit on the maximum number of open and active
	 * zones, set its max open zone limit to the mempool size to indicate
	 * to the user that there is a potential performance impact due to
	 * dynamic zone write plug allocation when simultaneously writing to
	 * more zones than the size of the mempool.
	 */
	pool_size = max(lim.max_open_zones, lim.max_active_zones);
	if (!pool_size)
		pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_seq_zones);

	mempool_resize(disk->zone_wplugs_pool, pool_size);

	if (!lim.max_open_zones && !lim.max_active_zones) {
		if (pool_size < nr_seq_zones)
			lim.max_open_zones = pool_size;
		else
			lim.max_open_zones = 0;
	}

commit:
	return queue_limits_commit_update(q, &lim);
}
1672 
1673 static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx,
1674 				    struct blk_revalidate_zone_args *args)
1675 {
1676 	struct gendisk *disk = args->disk;
1677 	struct request_queue *q = disk->queue;
1678 
1679 	if (zone->capacity != zone->len) {
1680 		pr_warn("%s: Invalid conventional zone capacity\n",
1681 			disk->disk_name);
1682 		return -ENODEV;
1683 	}
1684 
1685 	if (disk_zone_is_last(disk, zone))
1686 		args->last_zone_capacity = zone->capacity;
1687 
1688 	if (!disk_need_zone_resources(disk))
1689 		return 0;
1690 
1691 	if (!args->conv_zones_bitmap) {
1692 		args->conv_zones_bitmap =
1693 			blk_alloc_zone_bitmap(q->node, args->nr_zones);
1694 		if (!args->conv_zones_bitmap)
1695 			return -ENOMEM;
1696 	}
1697 
1698 	set_bit(idx, args->conv_zones_bitmap);
1699 
1700 	return 0;
1701 }
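/*
 * Note for blk_revalidate_conv_zone() above: the bitmap is allocated
 * lazily when the first conventional zone is seen, and set_bit(idx, ...)
 * records zone idx as conventional; disk_update_zone_resources() later
 * counts these bits with bitmap_weight() to derive nr_seq_zones.
 */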
1702 
1703 static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
1704 				   struct blk_revalidate_zone_args *args)
1705 {
1706 	struct gendisk *disk = args->disk;
1707 	struct blk_zone_wplug *zwplug;
1708 	unsigned int wp_offset;
1709 	unsigned long flags;
1710 
1711 	/*
1712 	 * Remember the capacity of the first sequential zone and check
1713 	 * if it is constant for all zones, ignoring the last zone as it can be
1714 	 * smaller.
1715 	 */
1716 	if (!args->zone_capacity)
1717 		args->zone_capacity = zone->capacity;
1718 	if (disk_zone_is_last(disk, zone)) {
1719 		args->last_zone_capacity = zone->capacity;
1720 	} else if (zone->capacity != args->zone_capacity) {
1721 		pr_warn("%s: Invalid variable zone capacity\n",
1722 			disk->disk_name);
1723 		return -ENODEV;
1724 	}
1725 
1726 	/*
1727 	 * We need to track the write pointer of all zones that are neither
1728 	 * empty nor full. So make sure we have a zone write plug for
1729 	 * such zones if the device has a zone write plug hash table.
1730 	 */
1731 	if (!disk->zone_wplugs_hash)
1732 		return 0;
1733 
1734 	wp_offset = blk_zone_wp_offset(zone);
1735 	if (!wp_offset || wp_offset >= zone->capacity)
1736 		return 0;
1737 
1738 	zwplug = disk_get_and_lock_zone_wplug(disk, zone->wp, GFP_NOIO, &flags);
1739 	if (!zwplug)
1740 		return -ENOMEM;
1741 	spin_unlock_irqrestore(&zwplug->lock, flags);
1742 	disk_put_zone_wplug(zwplug);
1743 
1744 	return 0;
1745 }
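/*
 * Example for blk_revalidate_seq_zone() above: a sequential zone with a
 * 1024-sector capacity and a write pointer 256 sectors into the zone is
 * partially written (0 < wp_offset < capacity), so a zone write plug is
 * allocated up front to track its write pointer. Empty (wp_offset == 0)
 * and full (wp_offset >= capacity) zones do not need a plug here.
 */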
1746 
1747 /*
1748  * Helper function to check the validity of zones of a zoned block device.
1749  */
1750 static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
1751 				  void *data)
1752 {
1753 	struct blk_revalidate_zone_args *args = data;
1754 	struct gendisk *disk = args->disk;
1755 	sector_t zone_sectors = disk->queue->limits.chunk_sectors;
1756 	int ret;
1757 
1758 	/* Check for bad zones and holes in the zone report */
1759 	if (zone->start != args->sector) {
1760 		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
1761 			disk->disk_name, args->sector, zone->start);
1762 		return -ENODEV;
1763 	}
1764 
1765 	if (zone->start >= get_capacity(disk) || !zone->len) {
1766 		pr_warn("%s: Invalid zone start %llu, length %llu\n",
1767 			disk->disk_name, zone->start, zone->len);
1768 		return -ENODEV;
1769 	}
1770 
1771 	/*
1772 	 * All zones must have the same size, with the exception of a possibly
1773 	 * smaller last zone.
1774 	 */
1775 	if (!disk_zone_is_last(disk, zone)) {
1776 		if (zone->len != zone_sectors) {
1777 			pr_warn("%s: Invalid zoned device with non constant zone size\n",
1778 				disk->disk_name);
1779 			return -ENODEV;
1780 		}
1781 	} else if (zone->len > zone_sectors) {
1782 		pr_warn("%s: Invalid zoned device with larger last zone size\n",
1783 			disk->disk_name);
1784 		return -ENODEV;
1785 	}
1786 
1787 	if (!zone->capacity || zone->capacity > zone->len) {
1788 		pr_warn("%s: Invalid zone capacity\n",
1789 			disk->disk_name);
1790 		return -ENODEV;
1791 	}
1792 
1793 	/* Check zone type */
1794 	switch (zone->type) {
1795 	case BLK_ZONE_TYPE_CONVENTIONAL:
1796 		ret = blk_revalidate_conv_zone(zone, idx, args);
1797 		break;
1798 	case BLK_ZONE_TYPE_SEQWRITE_REQ:
1799 		ret = blk_revalidate_seq_zone(zone, idx, args);
1800 		break;
1801 	case BLK_ZONE_TYPE_SEQWRITE_PREF:
1802 	default:
1803 		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
1804 			disk->disk_name, (int)zone->type, zone->start);
1805 		ret = -ENODEV;
1806 	}
1807 
1808 	if (!ret)
1809 		args->sector += zone->len;
1810 
1811 	return ret;
1812 }
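/*
 * Layout example accepted by blk_revalidate_zone_cb() above (illustrative):
 * with chunk_sectors = 524288 (256 MiB zones), every reported zone must
 * start exactly where the previous one ended, all zones except the last
 * must be 524288 sectors long, and the last zone may be shorter. A gap in
 * the report, an oversized last zone, a zero or over-length zone capacity,
 * or a SEQWRITE_PREF/unknown zone type all fail revalidation with -ENODEV.
 */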
1813 
1814 /**
1815  * blk_revalidate_disk_zones - (re)allocate and initialize zone write plugs
1816  * @disk:	Target disk
1817  *
1818  * Helper function for low-level device drivers to check, (re)allocate and
1819  * initialize resources used for managing zoned disks. This function should
1820  * normally be called by blk-mq based drivers when a zoned gendisk is probed
1821  * and when the zone configuration of the gendisk changes (e.g. after a format).
1822  * Before calling this function, the device driver must already have set the
1823  * device zone size (chunk_sector limit) and the max zone append limit.
1824  * BIO based drivers can also use this function as long as the device queue
1825  * can be safely frozen.
1826  * can be safely frozen.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
1827 int blk_revalidate_disk_zones(struct gendisk *disk)
1828 {
1829 	struct request_queue *q = disk->queue;
1830 	sector_t zone_sectors = q->limits.chunk_sectors;
1831 	sector_t capacity = get_capacity(disk);
1832 	struct blk_revalidate_zone_args args = { };
1833 	unsigned int noio_flag;
1834 	int ret = -ENOMEM;
1835 
1836 	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
1837 		return -EIO;
1838 
1839 	if (!capacity)
1840 		return -ENODEV;
1841 
1842 	/*
1843 	 * Check that the device driver indicated a valid zone size and that
1844 	 * the max zone append limit is set.
1845 	 */
1846 	if (!zone_sectors || !is_power_of_2(zone_sectors)) {
1847 		pr_warn("%s: Invalid non power of two zone size (%llu)\n",
1848 			disk->disk_name, zone_sectors);
1849 		return -ENODEV;
1850 	}
1851 
1852 	if (!queue_max_zone_append_sectors(q)) {
1853 		pr_warn("%s: Invalid 0 maximum zone append limit\n",
1854 			disk->disk_name);
1855 		return -ENODEV;
1856 	}
1857 
1858 	/*
1859 	 * Ensure that all memory allocations in this context are done as if
1860 	 * GFP_NOIO were specified.
1861 	 */
1862 	args.disk = disk;
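	/*
	 * Round up: zone_sectors is a power of two (checked above), so the
	 * shift below is equivalent to DIV_ROUND_UP(capacity, zone_sectors).
	 * E.g. a 1000-sector capacity with 256-sector zones yields
	 * (1000 + 255) >> 8 = 4 zones, the last one being smaller.
	 */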
1863 	args.nr_zones = (capacity + zone_sectors - 1) >> ilog2(zone_sectors);
1864 	noio_flag = memalloc_noio_save();
1865 	ret = disk_revalidate_zone_resources(disk, args.nr_zones);
1866 	if (ret) {
1867 		memalloc_noio_restore(noio_flag);
1868 		return ret;
1869 	}
1870 	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
1871 				       blk_revalidate_zone_cb, &args);
1872 	if (!ret) {
1873 		pr_warn("%s: No zones reported\n", disk->disk_name);
1874 		ret = -ENODEV;
1875 	}
1876 	memalloc_noio_restore(noio_flag);
1877 
1878 	/*
1879 	 * If zones were reported, make sure that the entire disk capacity
1880 	 * has been checked.
1881 	 */
1882 	if (ret > 0 && args.sector != capacity) {
1883 		pr_warn("%s: Missing zones from sector %llu\n",
1884 			disk->disk_name, args.sector);
1885 		ret = -ENODEV;
1886 	}
1887 
1888 	/*
1889 	 * Set the new disk zone parameters only once the queue is frozen and
1890 	 * all I/Os are completed.
1891 	 */
1892 	blk_mq_freeze_queue(q);
1893 	if (ret > 0)
1894 		ret = disk_update_zone_resources(disk, &args);
1895 	else
1896 		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
1897 	if (ret)
1898 		disk_free_zone_resources(disk);
1899 	blk_mq_unfreeze_queue(q);
1900 
1901 	kfree(args.conv_zones_bitmap);
1902 
1903 	return ret;
1904 }
1905 EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
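/*
 * Example caller (hypothetical driver code, not part of this file; the
 * zone append field name is an assumption based on the checks above): a
 * blk-mq driver sets the zone size and the zone append limit before
 * revalidating, along the lines of:
 *
 *	struct queue_limits lim;
 *	int ret;
 *
 *	lim = queue_limits_start_update(disk->queue);
 *	lim.chunk_sectors = zone_size_sectors;
 *	lim.max_zone_append_sectors = zone_append_max;
 *	ret = queue_limits_commit_update(disk->queue, &lim);
 *	if (!ret)
 *		ret = blk_revalidate_disk_zones(disk);
 */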
1906 
1907 #ifdef CONFIG_BLK_DEBUG_FS
1908 
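/*
 * Dump the state of all hashed zone write plugs of a disk, one plug per
 * line: zone number, flags (hex), reference count, write pointer offset
 * and number of plugged BIOs. Each plug is snapshotted under its lock,
 * so every line is self-consistent, but the listing as a whole is not
 * atomic.
 */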
1909 int queue_zone_wplugs_show(void *data, struct seq_file *m)
1910 {
1911 	struct request_queue *q = data;
1912 	struct gendisk *disk = q->disk;
1913 	struct blk_zone_wplug *zwplug;
1914 	unsigned int zwp_wp_offset, zwp_flags;
1915 	unsigned int zwp_zone_no, zwp_ref;
1916 	unsigned int zwp_bio_list_size, i;
1917 	unsigned long flags;
1918 
1919 	if (!disk->zone_wplugs_hash)
1920 		return 0;
1921 
1922 	rcu_read_lock();
1923 	for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++) {
1924 		hlist_for_each_entry_rcu(zwplug,
1925 					 &disk->zone_wplugs_hash[i], node) {
1926 			spin_lock_irqsave(&zwplug->lock, flags);
1927 			zwp_zone_no = zwplug->zone_no;
1928 			zwp_flags = zwplug->flags;
1929 			zwp_ref = atomic_read(&zwplug->ref);
1930 			zwp_wp_offset = zwplug->wp_offset;
1931 			zwp_bio_list_size = bio_list_size(&zwplug->bio_list);
1932 			spin_unlock_irqrestore(&zwplug->lock, flags);
1933 
1934 			seq_printf(m, "%u 0x%x %u %u %u\n",
1935 				   zwp_zone_no, zwp_flags, zwp_ref,
1936 				   zwp_wp_offset, zwp_bio_list_size);
1937 		}
1938 	}
1939 	rcu_read_unlock();
1940 
1941 	return 0;
1942 }
1943 
1944 #endif
1945