xref: /linux/drivers/block/null_blk/zoned.c (revision 5ddb88f22eb97218d9295e69c39e0ff7cc64e09c)
// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#undef pr_fmt
#define pr_fmt(fmt)	"null_blk: " fmt

#define NULL_ZONE_INVALID_WP	((sector_t)-1)

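/*
 * Convert a size in megabytes to a number of 512 B sectors, e.g.
 * mb_to_sects(256) = (256 << 20) >> 9 = 524288 sectors.
 */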
static inline sector_t mb_to_sects(unsigned long mb)
{
	return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}

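/*
 * Zone sizes are constrained to powers of two, so locating the zone
 * containing a sector reduces to a shift, e.g. with 524288-sector (256 MB)
 * zones, sector 1572864 falls in zone 3.
 */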
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}

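/*
 * Zone locking: when the device is memory backed, processing a command may
 * sleep (e.g. when allocating backing pages), so zones are serialized with
 * a mutex. Otherwise, command processing does not block and a spinlock
 * taken with IRQs disabled is enough.
 */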
static inline void null_init_zone_lock(struct nullb_device *dev,
				       struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_init(&zone->spinlock);
	else
		mutex_init(&zone->mutex);
}

static inline void null_lock_zone(struct nullb_device *dev,
				  struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_irq(&zone->spinlock);
	else
		mutex_lock(&zone->mutex);
}

static inline void null_unlock_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_unlock_irq(&zone->spinlock);
	else
		mutex_unlock(&zone->mutex);
}

int null_init_zoned_dev(struct nullb_device *dev,
			struct queue_limits *lim)
{
	sector_t dev_capacity_sects, zone_capacity_sects;
	struct nullb_zone *zone;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be a power of two\n");
		return -EINVAL;
	}
	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	/*
	 * If a smaller zone capacity was requested, do not allow a smaller
	 * last zone as well, since such a zone configuration does not
	 * correspond to any real zoned device.
	 */
	if (dev->zone_capacity != dev->zone_size &&
	    dev->size & (dev->zone_size - 1)) {
		pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n");
		return -EINVAL;
	}

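	/*
	 * Example sizing (illustrative): a 2000 MB device with 256 MB zones
	 * gives dev_capacity_sects = 4096000 and zone_size_sects = 524288,
	 * so nr_zones = round_up(4096000, 524288) / 524288 = 8, with the
	 * last zone covering only the remaining 425984 sectors (208 MB).
	 */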
	zone_capacity_sects = mb_to_sects(dev->zone_capacity);
	dev_capacity_sects = mb_to_sects(dev->size);
	dev->zone_size_sects = mb_to_sects(dev->zone_size);
	dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
		>> ilog2(dev->zone_size_sects);

	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;

	spin_lock_init(&dev->zone_res_lock);

	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u\n",
			dev->zone_nr_conv);
	}

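	/*
	 * The maximum zone append size must be a multiple of the logical
	 * block size and cannot exceed the zone capacity, since an append
	 * must fit entirely within a zone's writable area.
	 */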
	dev->zone_append_max_sectors =
		min(ALIGN_DOWN(dev->zone_append_max_sectors,
			       dev->blocksize >> SECTOR_SHIFT),
		    zone_capacity_sects);

	/* Max active zones must be < number of sequential zones to be enforceable */
	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_active = 0;
		pr_info("zone_max_active limit disabled, limit >= zone count\n");
	}

	/* Max open zones has to be <= max active zones */
	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
		dev->zone_max_open = dev->zone_max_active;
		pr_info("changed the maximum number of open zones to %u\n",
			dev->zone_max_open);
	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_open = 0;
		pr_info("zone_max_open limit disabled, limit >= zone count\n");
	}
	dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
	dev->imp_close_zone_no = dev->zone_nr_conv;

	for (i = 0; i < dev->zone_nr_conv; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = zone->wp = sector;
		if (zone->start + dev->zone_size_sects > dev_capacity_sects)
			zone->len = dev_capacity_sects - zone->start;
		else
			zone->len = dev->zone_size_sects;
		zone->capacity =
			min_t(sector_t, zone->len, zone_capacity_sects);
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}

	lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
	lim->chunk_sectors = dev->zone_size_sects;
	lim->max_zone_append_sectors = dev->zone_append_max_sectors;
	lim->max_open_zones = dev->zone_max_open;
	lim->max_active_zones = dev->zone_max_active;
	return 0;
}

int null_register_zoned_dev(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct gendisk *disk = nullb->disk;

	disk->nr_zones = bdev_nr_zones(disk->part0);

	pr_info("%s: using %s zone append\n",
		disk->disk_name,
		queue_emulates_zone_append(q) ? "emulated" : "native");

	return blk_revalidate_disk_zones(disk);
}

void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
	dev->zones = NULL;
}

int null_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct nullb_zone *zone;
	struct blk_zone blkz;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	memset(&blkz, 0, sizeof(struct blk_zone));
	zone = &dev->zones[first_zone];
	for (i = 0; i < nr_zones; i++, zone++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		null_lock_zone(dev, zone);
		blkz.start = zone->start;
		blkz.len = zone->len;
		blkz.wp = zone->wp;
		blkz.type = zone->type;
		blkz.cond = zone->cond;
		blkz.capacity = zone->capacity;
		null_unlock_zone(dev, zone);

		error = cb(&blkz, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}

/*
 * This is called in the case of memory backing from null_process_cmd()
 * with the target zone already locked.
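 *
 * Example: with the zone write pointer at sector 1000, an 8-sector read
 * starting at sector 996 is truncated to the 4 sectors below the write
 * pointer, i.e. 2048 bytes.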
 */
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}

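/*
 * Close an implicitly open zone to free up a zone resource. Victim zones
 * are scanned in a round-robin fashion, starting from the zone following
 * the last one closed (imp_close_zone_no), so that the same zone is not
 * repeatedly closed and reopened.
 */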
static void null_close_imp_open_zone(struct nullb_device *dev)
{
	struct nullb_zone *zone;
	unsigned int zno, i;

	zno = dev->imp_close_zone_no;
	if (zno >= dev->nr_zones)
		zno = dev->zone_nr_conv;

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[zno];
		zno++;
		if (zno >= dev->nr_zones)
			zno = dev->zone_nr_conv;

		if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
			dev->nr_zones_imp_open--;
			if (zone->wp == zone->start) {
				zone->cond = BLK_ZONE_COND_EMPTY;
			} else {
				zone->cond = BLK_ZONE_COND_CLOSED;
				dev->nr_zones_closed++;
			}
			dev->imp_close_zone_no = zno;
			return;
		}
	}
}

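/*
 * A zone counts against the active zone limit when it is implicitly open,
 * explicitly open or closed, matching the ZBC/ZNS definition of an active
 * zone.
 */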
static blk_status_t null_check_active(struct nullb_device *dev)
{
	if (!dev->zone_max_active)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
			dev->nr_zones_closed < dev->zone_max_active)
		return BLK_STS_OK;

	return BLK_STS_ZONE_ACTIVE_RESOURCE;
}

static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			null_close_imp_open_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the manage open zone resources function in the ZBC
 * standard, with the addition of max active zones support (added in the ZNS
 * standard).
 *
 * The function determines if a zone can transition to implicit open or
 * explicit open, while maintaining the max open zone (and max active zone)
 * limit(s). It may close an implicitly open zone in order to make additional
 * zone resources available.
 *
 * ZBC states that an implicitly open zone shall be closed only if there is
 * no room within the open limit. However, with the addition of an active
 * limit, it is not certain that closing an implicitly open zone will allow a
 * new zone to be opened, since we might already be at the active limit
 * capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
					      struct nullb_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}

static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct nullb_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		if (append)
			return BLK_STS_IOERR;
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	}

	null_lock_zone(dev, zone);

	/*
	 * Regular writes must be at the write pointer position. Zone append
	 * writes are automatically issued at the write pointer and the
	 * position is returned using the request sector. Note that we do not
	 * check the zone condition because for FULL, READONLY and OFFLINE
	 * zones, the sector check against the zone write pointer will always
	 * result in failing the command.
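	 *
	 * For example, an append issued while the zone write pointer is at
	 * sector 2048 is written at sector 2048, and the request sector is
	 * updated so the caller can learn the actual write position on
	 * completion.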
	 */
	if (append) {
		if (WARN_ON_ONCE(!dev->zone_append_max_sectors) ||
		    zone->wp == NULL_ZONE_INVALID_WP) {
			ret = BLK_STS_IOERR;
			goto unlock_zone;
		}
		sector = zone->wp;
		blk_mq_rq_from_pdu(cmd)->__sector = sector;
	}

	if (sector != zone->wp ||
	    zone->wp + nr_sectors > zone->start + zone->capacity) {
		ret = BLK_STS_IOERR;
		goto unlock_zone;
	}

	if (zone->cond == BLK_ZONE_COND_CLOSED ||
	    zone->cond == BLK_ZONE_COND_EMPTY) {
		if (dev->need_zone_res_mgmt) {
			spin_lock(&dev->zone_res_lock);

			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				goto unlock_zone;
			}
			if (zone->cond == BLK_ZONE_COND_CLOSED) {
				dev->nr_zones_closed--;
				dev->nr_zones_imp_open++;
			} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
				dev->nr_zones_imp_open++;
			}

			spin_unlock(&dev->zone_res_lock);
		}

		zone->cond = BLK_ZONE_COND_IMP_OPEN;
	}

	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (ret != BLK_STS_OK)
		goto unlock_zone;

	zone->wp += nr_sectors;
	if (zone->wp == zone->start + zone->capacity) {
		if (dev->need_zone_res_mgmt) {
			spin_lock(&dev->zone_res_lock);
			if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
				dev->nr_zones_exp_open--;
			else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
				dev->nr_zones_imp_open--;
			spin_unlock(&dev->zone_res_lock);
		}
		zone->cond = BLK_ZONE_COND_FULL;
	}

	ret = BLK_STS_OK;

unlock_zone:
	null_unlock_zone(dev, zone);

	return ret;
}

static blk_status_t null_open_zone(struct nullb_device *dev,
				   struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* Open operation on exp open is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_CLOSED:
		break;
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_EMPTY:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			break;
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_CLOSED:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			dev->nr_zones_closed--;
			break;
		default:
			break;
		}

		dev->nr_zones_exp_open++;

		spin_unlock(&dev->zone_res_lock);
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;

	return BLK_STS_OK;
}

static blk_status_t null_close_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* Close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_EXP_OPEN:
			dev->nr_zones_exp_open--;
			break;
		default:
			break;
		}

		if (zone->wp > zone->start)
			dev->nr_zones_closed++;

		spin_unlock(&dev->zone_res_lock);
	}

	if (zone->wp == zone->start)
		zone->cond = BLK_ZONE_COND_EMPTY;
	else
		zone->cond = BLK_ZONE_COND_CLOSED;

	return BLK_STS_OK;
}

static blk_status_t null_finish_zone(struct nullb_device *dev,
				     struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_FULL:
			/* Finish operation on full is not an error */
			spin_unlock(&dev->zone_res_lock);
			return BLK_STS_OK;
		case BLK_ZONE_COND_EMPTY:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			break;
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_EXP_OPEN:
			dev->nr_zones_exp_open--;
			break;
		case BLK_ZONE_COND_CLOSED:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			dev->nr_zones_closed--;
			break;
		default:
			spin_unlock(&dev->zone_res_lock);
			return BLK_STS_IOERR;
		}

		spin_unlock(&dev->zone_res_lock);
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

	return BLK_STS_OK;
}

static blk_status_t null_reset_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_EXP_OPEN:
			dev->nr_zones_exp_open--;
			break;
		case BLK_ZONE_COND_CLOSED:
			dev->nr_zones_closed--;
			break;
		case BLK_ZONE_COND_EMPTY:
		case BLK_ZONE_COND_FULL:
			break;
		default:
			spin_unlock(&dev->zone_res_lock);
			return BLK_STS_IOERR;
		}

		spin_unlock(&dev->zone_res_lock);
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	if (dev->memory_backed)
		return null_handle_discard(dev, zone->start, zone->len);

	return BLK_STS_OK;
}

static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no;
	struct nullb_zone *zone;
	blk_status_t ret;
	size_t i;

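	/*
	 * REQ_OP_ZONE_RESET_ALL resets every sequential zone. Empty zones are
	 * skipped since resetting them is a no-op, and read-only and offline
	 * zones must keep their condition.
	 */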
	if (op == REQ_OP_ZONE_RESET_ALL) {
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
			zone = &dev->zones[i];
			null_lock_zone(dev, zone);
			if (zone->cond != BLK_ZONE_COND_EMPTY &&
			    zone->cond != BLK_ZONE_COND_READONLY &&
			    zone->cond != BLK_ZONE_COND_OFFLINE) {
				null_reset_zone(dev, zone);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, zone);
		}
		return BLK_STS_OK;
	}

	zone_no = null_zone_no(dev, sector);
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	switch (op) {
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		ret = BLK_STS_NOTSUPP;
		break;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}

blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors)
{
	struct nullb_device *dev;
	struct nullb_zone *zone;
	blk_status_t sts;

	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		dev = cmd->nq->dev;
		zone = &dev->zones[null_zone_no(dev, sector)];
		if (zone->cond == BLK_ZONE_COND_OFFLINE)
			return BLK_STS_IOERR;

		null_lock_zone(dev, zone);
		sts = null_process_cmd(cmd, op, sector, nr_sectors);
		null_unlock_zone(dev, zone);
		return sts;
	}
}

/*
 * Set a zone in the read-only or offline condition.
 */
static void null_set_zone_cond(struct nullb_device *dev,
			       struct nullb_zone *zone, enum blk_zone_cond cond)
{
	if (WARN_ON_ONCE(cond != BLK_ZONE_COND_READONLY &&
			 cond != BLK_ZONE_COND_OFFLINE))
		return;

	null_lock_zone(dev, zone);

	/*
	 * If the read-only condition is requested for a zone that is already
	 * read-only, restore the normal empty condition. Do the same if the
	 * offline condition is requested for a zone that is already offline.
	 * Otherwise, set the requested condition on the zone, finishing it
	 * first to release its zone resources.
	 */
	if (zone->cond == cond) {
		zone->cond = BLK_ZONE_COND_EMPTY;
		zone->wp = zone->start;
		if (dev->memory_backed)
			null_handle_discard(dev, zone->start, zone->len);
	} else {
		if (zone->cond != BLK_ZONE_COND_READONLY &&
		    zone->cond != BLK_ZONE_COND_OFFLINE)
			null_finish_zone(dev, zone);
		zone->cond = cond;
		zone->wp = NULL_ZONE_INVALID_WP;
	}

	null_unlock_zone(dev, zone);
}

/*
 * Identify the zone containing the sector written to the configfs file,
 * then apply the given zone condition to it.
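 *
 * For example (assuming a configured device "nullb0"), a zone can be
 * transitioned by writing a sector number to the matching configfs
 * attribute:
 *
 *   echo 0 > /sys/kernel/config/nullb/nullb0/zone_offline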
 */
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
			size_t count, enum blk_zone_cond cond)
{
	unsigned long long sector;
	unsigned int zone_no;
	int ret;

	if (!dev->zoned) {
		pr_err("null_blk device is not zoned\n");
		return -EINVAL;
	}

	if (!dev->zones) {
		pr_err("null_blk device is not yet powered\n");
		return -EINVAL;
	}

	ret = kstrtoull(page, 0, &sector);
	if (ret < 0)
		return ret;

	zone_no = null_zone_no(dev, sector);
	if (zone_no >= dev->nr_zones) {
		pr_err("Sector out of range\n");
		return -EINVAL;
	}

	if (dev->zones[zone_no].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		pr_err("Cannot change condition of conventional zones\n");
		return -EINVAL;
	}

	null_set_zone_cond(dev, &dev->zones[zone_no], cond);

	return count;
}
798