xref: /linux/drivers/block/null_blk/zoned.c (revision 90d32e92011eaae8e70a9169b4e7acf4ca8f9d3a)
// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#undef pr_fmt
#define pr_fmt(fmt)	"null_blk: " fmt

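/*
 * Write pointer sentinel for zones in the read-only or offline condition,
 * for which the write pointer position is undefined.
 */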
#define NULL_ZONE_INVALID_WP	((sector_t)-1)

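/* Convert a size given in megabytes into a number of 512B sectors. */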
static inline sector_t mb_to_sects(unsigned long mb)
{
	return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}

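/*
 * Zone sizes are restricted to a power of two, so the zone index of a
 * sector is a simple shift by ilog2() of the zone size in sectors.
 */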
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}

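/*
 * Per-zone serialization: memory backed devices may sleep while processing
 * a command (backing pages are allocated on demand), so these use a mutex.
 * Otherwise, a spinlock with interrupts disabled is sufficient and cheaper.
 */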
static inline void null_init_zone_lock(struct nullb_device *dev,
				       struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_init(&zone->spinlock);
	else
		mutex_init(&zone->mutex);
}

static inline void null_lock_zone(struct nullb_device *dev,
				  struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_irq(&zone->spinlock);
	else
		mutex_lock(&zone->mutex);
}

static inline void null_unlock_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_unlock_irq(&zone->spinlock);
	else
		mutex_unlock(&zone->mutex);
}

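/*
 * Validate the device zone configuration and initialize the zone array and
 * queue limits. As a worked example, dev->size = 1024 (MB) with
 * dev->zone_size = 256 (MB) gives zone_size_sects = (256 * SZ_1M) >> 9 =
 * 524288 sectors and nr_zones = 4.
 */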
int null_init_zoned_dev(struct nullb_device *dev,
			struct queue_limits *lim)
{
	sector_t dev_capacity_sects, zone_capacity_sects;
	struct nullb_zone *zone;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}
	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	zone_capacity_sects = mb_to_sects(dev->zone_capacity);
	dev_capacity_sects = mb_to_sects(dev->size);
	dev->zone_size_sects = mb_to_sects(dev->zone_size);
	dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
		>> ilog2(dev->zone_size_sects);

	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;

	spin_lock_init(&dev->zone_res_lock);

	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u\n",
			dev->zone_nr_conv);
	}

	dev->zone_append_max_sectors =
		min(ALIGN_DOWN(dev->zone_append_max_sectors,
			       dev->blocksize >> SECTOR_SHIFT),
		    zone_capacity_sects);

	/*
	 * The max active zone limit must be less than the number of
	 * sequential zones for the limit to be enforceable.
	 */
	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_active = 0;
		pr_info("zone_max_active limit disabled, limit >= zone count\n");
	}

	/* Max open zones has to be <= max active zones */
	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
		dev->zone_max_open = dev->zone_max_active;
		pr_info("changed the maximum number of open zones to %u\n",
			dev->zone_max_open);
	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_open = 0;
		pr_info("zone_max_open limit disabled, limit >= zone count\n");
	}
	dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
	dev->imp_close_zone_no = dev->zone_nr_conv;

	for (i = 0; i < dev->zone_nr_conv; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = zone->wp = sector;
		if (zone->start + dev->zone_size_sects > dev_capacity_sects)
			zone->len = dev_capacity_sects - zone->start;
		else
			zone->len = dev->zone_size_sects;
		zone->capacity =
			min_t(sector_t, zone->len, zone_capacity_sects);
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}

	lim->zoned = true;
	lim->chunk_sectors = dev->zone_size_sects;
	lim->max_zone_append_sectors = dev->zone_append_max_sectors;
	lim->max_open_zones = dev->zone_max_open;
	lim->max_active_zones = dev->zone_max_active;
	return 0;
}

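/*
 * Complete the zoned device setup once the gendisk exists: advertise
 * REQ_OP_ZONE_RESET_ALL support and have the block layer revalidate the
 * zones of the disk.
 */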
int null_register_zoned_dev(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct gendisk *disk = nullb->disk;

	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	disk->nr_zones = bdev_nr_zones(disk->part0);

	pr_info("%s: using %s zone append\n",
		disk->disk_name,
		queue_emulates_zone_append(q) ? "emulated" : "native");

	return blk_revalidate_disk_zones(disk);
}

void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
	dev->zones = NULL;
}

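/*
 * Report zones to the block layer, starting from the zone containing the
 * given sector. Each zone is snapshotted under its lock before the callback
 * is invoked, and the number of zones reported is returned.
 */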
int null_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct nullb_zone *zone;
	struct blk_zone blkz;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	memset(&blkz, 0, sizeof(struct blk_zone));
	zone = &dev->zones[first_zone];
	for (i = 0; i < nr_zones; i++, zone++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		null_lock_zone(dev, zone);
		blkz.start = zone->start;
		blkz.len = zone->len;
		blkz.wp = zone->wp;
		blkz.type = zone->type;
		blkz.cond = zone->cond;
		blkz.capacity = zone->capacity;
		null_unlock_zone(dev, zone);

		error = cb(&blkz, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}

/*
 * This is called in the case of memory backing from null_process_cmd()
 * with the target zone already locked.
 */
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}

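/*
 * Close the first implicitly open zone found, scanning the sequential zones
 * round-robin from the zone following the last one closed here, so that all
 * implicitly open zones eventually get closed.
 */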
static void null_close_imp_open_zone(struct nullb_device *dev)
{
	struct nullb_zone *zone;
	unsigned int zno, i;

	zno = dev->imp_close_zone_no;
	if (zno >= dev->nr_zones)
		zno = dev->zone_nr_conv;

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[zno];
		zno++;
		if (zno >= dev->nr_zones)
			zno = dev->zone_nr_conv;

		if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
			dev->nr_zones_imp_open--;
			if (zone->wp == zone->start) {
				zone->cond = BLK_ZONE_COND_EMPTY;
			} else {
				zone->cond = BLK_ZONE_COND_CLOSED;
				dev->nr_zones_closed++;
			}
			dev->imp_close_zone_no = zno;
			return;
		}
	}
}

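/*
 * A zone is active when it is implicitly open, explicitly open or closed.
 * Check whether one more zone can become active without exceeding the
 * configured max active zone limit.
 */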
static blk_status_t null_check_active(struct nullb_device *dev)
{
	if (!dev->zone_max_active)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
			dev->nr_zones_closed < dev->zone_max_active)
		return BLK_STS_OK;

	return BLK_STS_ZONE_ACTIVE_RESOURCE;
}

static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			null_close_imp_open_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the "manage open zone resources" operation of the
 * ZBC standard, with the addition of max active zone support (added in the
 * ZNS standard).
 *
 * The function determines if a zone can transition to implicit open or
 * explicit open, while maintaining the max open zone (and max active zone)
 * limit(s). It may close an implicit open zone in order to make additional
 * zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is no
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
					      struct nullb_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}

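/*
 * Handle a regular write or a zone append. For zone append, the target
 * sector is ignored: the write is issued at the zone write pointer and the
 * effective position is returned to the caller through the request sector.
 */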
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct nullb_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		if (append)
			return BLK_STS_IOERR;
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	}

	null_lock_zone(dev, zone);

	/*
	 * Regular writes must be at the write pointer position. Zone append
	 * writes are automatically issued at the write pointer and the position
	 * returned using the request sector. Note that we do not check the zone
	 * condition because for FULL, READONLY and OFFLINE zones, the sector
	 * check against the zone write pointer will always result in failing
	 * the command.
	 */
	if (append) {
		if (WARN_ON_ONCE(!dev->zone_append_max_sectors) ||
		    zone->wp == NULL_ZONE_INVALID_WP) {
			ret = BLK_STS_IOERR;
			goto unlock_zone;
		}
		sector = zone->wp;
		blk_mq_rq_from_pdu(cmd)->__sector = sector;
	}

	if (sector != zone->wp ||
	    zone->wp + nr_sectors > zone->start + zone->capacity) {
		ret = BLK_STS_IOERR;
		goto unlock_zone;
	}

	if (zone->cond == BLK_ZONE_COND_CLOSED ||
	    zone->cond == BLK_ZONE_COND_EMPTY) {
		if (dev->need_zone_res_mgmt) {
			spin_lock(&dev->zone_res_lock);

			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				goto unlock_zone;
			}
			if (zone->cond == BLK_ZONE_COND_CLOSED) {
				dev->nr_zones_closed--;
				dev->nr_zones_imp_open++;
			} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
				dev->nr_zones_imp_open++;
			}

			spin_unlock(&dev->zone_res_lock);
		}

		zone->cond = BLK_ZONE_COND_IMP_OPEN;
	}

	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (ret != BLK_STS_OK)
		goto unlock_zone;

	zone->wp += nr_sectors;
	if (zone->wp == zone->start + zone->capacity) {
		if (dev->need_zone_res_mgmt) {
			spin_lock(&dev->zone_res_lock);
			if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
				dev->nr_zones_exp_open--;
			else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
				dev->nr_zones_imp_open--;
			spin_unlock(&dev->zone_res_lock);
		}
		zone->cond = BLK_ZONE_COND_FULL;
	}

	ret = BLK_STS_OK;

unlock_zone:
	null_unlock_zone(dev, zone);

	return ret;
}

static blk_status_t null_open_zone(struct nullb_device *dev,
				   struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* Open operation on exp open is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_CLOSED:
		break;
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_EMPTY:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			break;
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_CLOSED:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			dev->nr_zones_closed--;
			break;
		default:
			break;
		}

		dev->nr_zones_exp_open++;

		spin_unlock(&dev->zone_res_lock);
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;

	return BLK_STS_OK;
}

static blk_status_t null_close_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_EXP_OPEN:
			dev->nr_zones_exp_open--;
			break;
		default:
			break;
		}

		if (zone->wp > zone->start)
			dev->nr_zones_closed++;

		spin_unlock(&dev->zone_res_lock);
	}

	if (zone->wp == zone->start)
		zone->cond = BLK_ZONE_COND_EMPTY;
	else
		zone->cond = BLK_ZONE_COND_CLOSED;

	return BLK_STS_OK;
}

static blk_status_t null_finish_zone(struct nullb_device *dev,
				     struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_FULL:
			/* Finish operation on full is not an error */
			spin_unlock(&dev->zone_res_lock);
			return BLK_STS_OK;
		case BLK_ZONE_COND_EMPTY:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			break;
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_EXP_OPEN:
			dev->nr_zones_exp_open--;
			break;
		case BLK_ZONE_COND_CLOSED:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			dev->nr_zones_closed--;
			break;
		default:
			spin_unlock(&dev->zone_res_lock);
			return BLK_STS_IOERR;
		}

		spin_unlock(&dev->zone_res_lock);
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

	return BLK_STS_OK;
}

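/*
 * Reset a zone: return it to the empty condition with the write pointer at
 * the zone start and, for memory backed devices, discard the backing data.
 */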
static blk_status_t null_reset_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_EXP_OPEN:
			dev->nr_zones_exp_open--;
			break;
		case BLK_ZONE_COND_CLOSED:
			dev->nr_zones_closed--;
			break;
		case BLK_ZONE_COND_EMPTY:
		case BLK_ZONE_COND_FULL:
			break;
		default:
			spin_unlock(&dev->zone_res_lock);
			return BLK_STS_IOERR;
		}

		spin_unlock(&dev->zone_res_lock);
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	if (dev->memory_backed)
		return null_handle_discard(dev, zone->start, zone->len);

	return BLK_STS_OK;
}

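/*
 * Process a zone management operation. REQ_OP_ZONE_RESET_ALL resets every
 * sequential zone that is not empty, read-only or offline; the other
 * operations target the single zone containing the given sector.
 */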
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no;
	struct nullb_zone *zone;
	blk_status_t ret;
	size_t i;

	if (op == REQ_OP_ZONE_RESET_ALL) {
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
			zone = &dev->zones[i];
			null_lock_zone(dev, zone);
			if (zone->cond != BLK_ZONE_COND_EMPTY &&
			    zone->cond != BLK_ZONE_COND_READONLY &&
			    zone->cond != BLK_ZONE_COND_OFFLINE) {
				null_reset_zone(dev, zone);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, zone);
		}
		return BLK_STS_OK;
	}

	zone_no = null_zone_no(dev, sector);
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	switch (op) {
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		ret = BLK_STS_NOTSUPP;
		break;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}

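/*
 * Entry point for all commands on a zoned device: writes and zone appends
 * go through the write pointer checks, zone management operations are
 * dispatched to null_zone_mgmt(), and anything else (e.g. reads) is
 * processed as for a regular device under the target zone lock.
 */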
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors)
{
	struct nullb_device *dev;
	struct nullb_zone *zone;
	blk_status_t sts;

	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		dev = cmd->nq->dev;
		zone = &dev->zones[null_zone_no(dev, sector)];
		if (zone->cond == BLK_ZONE_COND_OFFLINE)
			return BLK_STS_IOERR;

		null_lock_zone(dev, zone);
		sts = null_process_cmd(cmd, op, sector, nr_sectors);
		null_unlock_zone(dev, zone);
		return sts;
	}
}

/*
 * Set a zone in the read-only or offline condition.
 */
static void null_set_zone_cond(struct nullb_device *dev,
			       struct nullb_zone *zone, enum blk_zone_cond cond)
{
	if (WARN_ON_ONCE(cond != BLK_ZONE_COND_READONLY &&
			 cond != BLK_ZONE_COND_OFFLINE))
		return;

	null_lock_zone(dev, zone);

	/*
	 * If the read-only condition is requested for a zone that is already
	 * read-only, restore the normal empty condition. Do the same if the
	 * offline condition is requested for an offline zone. Otherwise, set
	 * the specified condition on the zone, finishing the zone beforehand
	 * to free up zone resources.
	 */
	if (zone->cond == cond) {
		zone->cond = BLK_ZONE_COND_EMPTY;
		zone->wp = zone->start;
		if (dev->memory_backed)
			null_handle_discard(dev, zone->start, zone->len);
	} else {
		if (zone->cond != BLK_ZONE_COND_READONLY &&
		    zone->cond != BLK_ZONE_COND_OFFLINE)
			null_finish_zone(dev, zone);
		zone->cond = cond;
		zone->wp = NULL_ZONE_INVALID_WP;
	}

	null_unlock_zone(dev, zone);
}

/*
 * Identify a zone from the sector written to the configfs file, then set
 * the given zone condition on that zone.
 */
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
			size_t count, enum blk_zone_cond cond)
{
	unsigned long long sector;
	unsigned int zone_no;
	int ret;

	if (!dev->zoned) {
		pr_err("null_blk device is not zoned\n");
		return -EINVAL;
	}

	if (!dev->zones) {
		pr_err("null_blk device is not yet powered\n");
		return -EINVAL;
	}

	ret = kstrtoull(page, 0, &sector);
	if (ret < 0)
		return ret;

	zone_no = null_zone_no(dev, sector);
	if (zone_no >= dev->nr_zones) {
		pr_err("Sector out of range\n");
		return -EINVAL;
	}

	if (dev->zones[zone_no].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		pr_err("Cannot change condition of conventional zones\n");
		return -EINVAL;
	}

	null_set_zone_cond(dev, &dev->zones[zone_no], cond);

	return count;
}
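
/*
 * Usage sketch (hypothetical sector value, assuming the usual null_blk
 * configfs layout where the zone_readonly and zone_offline attributes are
 * wired to zone_cond_store()):
 *
 *   echo 524288 > /sys/kernel/config/nullb/nullb0/zone_readonly
 *
 * sets the zone containing sector 524288 to the read-only condition;
 * writing the same sector to the attribute again restores the zone to the
 * empty condition.
 */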