/* xref: /linux/drivers/block/null_blk/zoned.c (revision 36ec807b627b4c0a0a382f0ae48eac7187d14b2b) */
// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#undef pr_fmt
#define pr_fmt(fmt)	"null_blk: " fmt

#define NULL_ZONE_INVALID_WP	((sector_t)-1)

static inline sector_t mb_to_sects(unsigned long mb)
{
	return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}
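/*
 * For example, mb_to_sects(256) = (256 * 1048576) >> 9 = 524288
 * 512-byte sectors, i.e. a 256 MB zone spans 524288 sectors.
 */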

static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}
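/*
 * Zone sizes are constrained to powers of two, so the zone index is a
 * simple shift. E.g. with zone_size_sects = 524288 (256 MB zones),
 * sector 1000000 falls in zone 1000000 >> 19 = zone 1.
 */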

static inline void null_init_zone_lock(struct nullb_device *dev,
				       struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_init(&zone->spinlock);
	else
		mutex_init(&zone->mutex);
}

static inline void null_lock_zone(struct nullb_device *dev,
				  struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_irq(&zone->spinlock);
	else
		mutex_lock(&zone->mutex);
}

static inline void null_unlock_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_unlock_irq(&zone->spinlock);
	else
		mutex_unlock(&zone->mutex);
}
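/*
 * Note: memory-backed devices use a mutex, presumably because processing
 * a command against the backing store may sleep (e.g. for page
 * allocation), while the non-memory-backed path can complete commands in
 * atomic context, where only a spinlock is usable.
 */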

int null_init_zoned_dev(struct nullb_device *dev,
			struct queue_limits *lim)
{
	sector_t dev_capacity_sects, zone_capacity_sects;
	struct nullb_zone *zone;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}
	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	/*
	 * If a smaller zone capacity was requested, do not also allow a
	 * smaller last zone, as such a configuration does not correspond to
	 * any real zoned device.
	 */
	if (dev->zone_capacity != dev->zone_size &&
	    dev->size & (dev->zone_size - 1)) {
		pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n");
		return -EINVAL;
	}

	zone_capacity_sects = mb_to_sects(dev->zone_capacity);
	dev_capacity_sects = mb_to_sects(dev->size);
	dev->zone_size_sects = mb_to_sects(dev->zone_size);
	dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
		>> ilog2(dev->zone_size_sects);
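	/*
	 * For example (hypothetical values), a 10000 MB device with 256 MB
	 * zones gives round_up(20480000, 524288) >> 19 = 40 zones, the
	 * last of which is a smaller 16 MB zone.
	 */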

	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;

	spin_lock_init(&dev->zone_res_lock);

	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u\n",
			dev->zone_nr_conv);
	}

	dev->zone_append_max_sectors =
		min(ALIGN_DOWN(dev->zone_append_max_sectors,
			       dev->blocksize >> SECTOR_SHIFT),
		    zone_capacity_sects);

	/* Max active zones has to be < number of seq zones in order to be enforceable */
	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_active = 0;
		pr_info("zone_max_active limit disabled, limit >= zone count\n");
	}

	/* Max open zones has to be <= max active zones */
	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
		dev->zone_max_open = dev->zone_max_active;
		pr_info("changed the maximum number of open zones to %u\n",
			dev->zone_max_open);
	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_open = 0;
		pr_info("zone_max_open limit disabled, limit >= zone count\n");
	}
	dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
	dev->imp_close_zone_no = dev->zone_nr_conv;

	for (i = 0; i < dev->zone_nr_conv; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = sector;
		if (zone->start + dev->zone_size_sects > dev_capacity_sects)
			zone->len = dev_capacity_sects - zone->start;
		else
			zone->len = dev->zone_size_sects;
		zone->capacity =
			min_t(sector_t, zone->len, zone_capacity_sects);
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		if (dev->zone_full) {
			zone->cond = BLK_ZONE_COND_FULL;
			zone->wp = zone->start + zone->capacity;
		} else {
			zone->cond = BLK_ZONE_COND_EMPTY;
			zone->wp = zone->start;
		}

		sector += dev->zone_size_sects;
	}

	lim->features |= BLK_FEAT_ZONED;
	lim->chunk_sectors = dev->zone_size_sects;
	lim->max_zone_append_sectors = dev->zone_append_max_sectors;
	lim->max_open_zones = dev->zone_max_open;
	lim->max_active_zones = dev->zone_max_active;
	return 0;
}
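/*
 * Example setup (values are illustrative; the parameter names are the
 * null_blk module parameters):
 *
 *   modprobe null_blk zoned=1 zone_size=256 zone_capacity=192 \
 *           zone_nr_conv=4 zone_max_open=12 zone_max_active=16
 *
 * creates a zoned nullb device with 256 MB zones exposing 192 MB of
 * usable capacity each, 4 conventional zones, and open/active limits.
 */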

int null_register_zoned_dev(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct gendisk *disk = nullb->disk;

	pr_info("%s: using %s zone append\n",
		disk->disk_name,
		queue_emulates_zone_append(q) ? "emulated" : "native");

	return blk_revalidate_disk_zones(disk);
}

void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
	dev->zones = NULL;
}

int null_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct nullb_zone *zone;
	struct blk_zone blkz;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	memset(&blkz, 0, sizeof(struct blk_zone));
	zone = &dev->zones[first_zone];
	for (i = 0; i < nr_zones; i++, zone++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		null_lock_zone(dev, zone);
		blkz.start = zone->start;
		blkz.len = zone->len;
		blkz.wp = zone->wp;
		blkz.type = zone->type;
		blkz.cond = zone->cond;
		blkz.capacity = zone->capacity;
		null_unlock_zone(dev, zone);

		error = cb(&blkz, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}
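/*
 * The report path can be exercised from user space, e.g. with the
 * util-linux blkzone tool (device name is illustrative):
 *
 *   blkzone report /dev/nullb0
 */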

/*
 * This is called in the case of memory backing from null_process_cmd()
 * with the target zone already locked.
 */
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}
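/*
 * For example, if a zone's write pointer sits 8 sectors past the zone
 * start, a 16-sector read starting at the zone start is truncated to
 * 8 << 9 = 4096 valid bytes, while a read starting beyond the write
 * pointer returns 0.
 */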

static void null_close_imp_open_zone(struct nullb_device *dev)
{
	struct nullb_zone *zone;
	unsigned int zno, i;

	zno = dev->imp_close_zone_no;
	if (zno >= dev->nr_zones)
		zno = dev->zone_nr_conv;

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[zno];
		zno++;
		if (zno >= dev->nr_zones)
			zno = dev->zone_nr_conv;

		if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
			dev->nr_zones_imp_open--;
			if (zone->wp == zone->start) {
				zone->cond = BLK_ZONE_COND_EMPTY;
			} else {
				zone->cond = BLK_ZONE_COND_CLOSED;
				dev->nr_zones_closed++;
			}
			dev->imp_close_zone_no = zno;
			return;
		}
	}
}
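/*
 * Victim selection is round-robin: the scan resumes at imp_close_zone_no,
 * the zone following the last implicitly closed one, so repeated closes
 * spread across the sequential zones instead of always hitting the first
 * implicitly open zone.
 */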

static blk_status_t null_check_active(struct nullb_device *dev)
{
	if (!dev->zone_max_active)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
			dev->nr_zones_closed < dev->zone_max_active)
		return BLK_STS_OK;

	return BLK_STS_ZONE_ACTIVE_RESOURCE;
}

static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			null_close_imp_open_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the "manage open zone resources" operation of the
 * ZBC standard, with the addition of the max active zones limit introduced
 * by the ZNS standard.
 *
 * It determines if a zone can transition to the implicit open or explicit
 * open condition while maintaining the max open zone (and max active zone)
 * limit(s). It may close an implicitly open zone in order to make
 * additional zone resources available.
 *
 * ZBC states that an implicitly open zone shall be closed only if there is
 * no room within the open limit. However, with the addition of an active
 * limit, it is not certain that closing an implicitly open zone will allow
 * a new zone to be opened, since we might already be at the active limit.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
					      struct nullb_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}
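/*
 * For example, with zone_max_open = 2 and zone_max_active = 3: opening a
 * third zone while two are implicitly open succeeds by implicitly closing
 * one of them (2 open + 1 closed = 3 active), but opening a fourth zone
 * then fails with BLK_STS_ZONE_ACTIVE_RESOURCE, since closing another
 * implicitly open zone would not reduce the active count.
 */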

static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct nullb_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		if (append)
			return BLK_STS_IOERR;
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	}

	null_lock_zone(dev, zone);

	/*
	 * Regular writes must be at the write pointer position. Zone append
	 * writes are automatically issued at the write pointer and the
	 * effective write position is returned through the request sector.
	 * Note that we do not check the zone condition because for FULL,
	 * READONLY and OFFLINE zones, the sector check against the zone write
	 * pointer will always result in failing the command.
	 */
	if (append) {
		if (WARN_ON_ONCE(!dev->zone_append_max_sectors) ||
		    zone->wp == NULL_ZONE_INVALID_WP) {
			ret = BLK_STS_IOERR;
			goto unlock_zone;
		}
		sector = zone->wp;
		blk_mq_rq_from_pdu(cmd)->__sector = sector;
	}

	if (sector != zone->wp ||
	    zone->wp + nr_sectors > zone->start + zone->capacity) {
		ret = BLK_STS_IOERR;
		goto unlock_zone;
	}

	if (zone->cond == BLK_ZONE_COND_CLOSED ||
	    zone->cond == BLK_ZONE_COND_EMPTY) {
		if (dev->need_zone_res_mgmt) {
			spin_lock(&dev->zone_res_lock);

			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				goto unlock_zone;
			}
			if (zone->cond == BLK_ZONE_COND_CLOSED) {
				dev->nr_zones_closed--;
				dev->nr_zones_imp_open++;
			} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
				dev->nr_zones_imp_open++;
			}

			spin_unlock(&dev->zone_res_lock);
		}

		zone->cond = BLK_ZONE_COND_IMP_OPEN;
	}

	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (ret != BLK_STS_OK)
		goto unlock_zone;

	zone->wp += nr_sectors;
	if (zone->wp == zone->start + zone->capacity) {
		if (dev->need_zone_res_mgmt) {
			spin_lock(&dev->zone_res_lock);
			if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
				dev->nr_zones_exp_open--;
			else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
				dev->nr_zones_imp_open--;
			spin_unlock(&dev->zone_res_lock);
		}
		zone->cond = BLK_ZONE_COND_FULL;
	}

	ret = BLK_STS_OK;

unlock_zone:
	null_unlock_zone(dev, zone);

	return ret;
}
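/*
 * Example of the append semantics: if a zone's write pointer is at
 * zone->start + 8 and a REQ_OP_ZONE_APPEND of 8 sectors is issued against
 * zone->start, the data lands at zone->start + 8 and the request
 * completes with its sector set to that position; a regular write to any
 * sector other than zone->start + 8 would instead fail with
 * BLK_STS_IOERR.
 */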

static blk_status_t null_open_zone(struct nullb_device *dev,
				   struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* Open operation on exp open is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_CLOSED:
		break;
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_EMPTY:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			break;
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_CLOSED:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			dev->nr_zones_closed--;
			break;
		default:
			break;
		}

		dev->nr_zones_exp_open++;

		spin_unlock(&dev->zone_res_lock);
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;

	return BLK_STS_OK;
}

static blk_status_t null_close_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_EXP_OPEN:
			dev->nr_zones_exp_open--;
			break;
		default:
			break;
		}

		if (zone->wp > zone->start)
			dev->nr_zones_closed++;

		spin_unlock(&dev->zone_res_lock);
	}

	if (zone->wp == zone->start)
		zone->cond = BLK_ZONE_COND_EMPTY;
	else
		zone->cond = BLK_ZONE_COND_CLOSED;

	return BLK_STS_OK;
}
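/*
 * Note that closing an open zone that has not been written to (write
 * pointer still at the zone start) returns it to the empty condition
 * rather than to closed, matching the ZBC/ZNS state machines where only
 * written-to zones can be in the closed condition.
 */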

static blk_status_t null_finish_zone(struct nullb_device *dev,
				     struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_FULL:
			/* Finish operation on full is not an error */
			spin_unlock(&dev->zone_res_lock);
			return BLK_STS_OK;
		case BLK_ZONE_COND_EMPTY:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			break;
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_EXP_OPEN:
			dev->nr_zones_exp_open--;
			break;
		case BLK_ZONE_COND_CLOSED:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			dev->nr_zones_closed--;
			break;
		default:
			spin_unlock(&dev->zone_res_lock);
			return BLK_STS_IOERR;
		}

		spin_unlock(&dev->zone_res_lock);
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

	return BLK_STS_OK;
}
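/*
 * A finished zone reports its write pointer at zone->start + zone->len,
 * i.e. at the end of the zone rather than at zone->start + zone->capacity,
 * even when the zone capacity is smaller than the zone size.
 */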

static blk_status_t null_reset_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_EXP_OPEN:
			dev->nr_zones_exp_open--;
			break;
		case BLK_ZONE_COND_CLOSED:
			dev->nr_zones_closed--;
			break;
		case BLK_ZONE_COND_EMPTY:
		case BLK_ZONE_COND_FULL:
			break;
		default:
			spin_unlock(&dev->zone_res_lock);
			return BLK_STS_IOERR;
		}

		spin_unlock(&dev->zone_res_lock);
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	if (dev->memory_backed)
		return null_handle_discard(dev, zone->start, zone->len);

	return BLK_STS_OK;
}
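/*
 * Zone resets can be triggered from user space, e.g. with blkzone
 * (device name is illustrative):
 *
 *   blkzone reset /dev/nullb0
 *
 * which resets all sequential zones; the resulting REQ_OP_ZONE_RESET_ALL
 * is handled in null_zone_mgmt() below.
 */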

static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no;
	struct nullb_zone *zone;
	blk_status_t ret;
	size_t i;

	if (op == REQ_OP_ZONE_RESET_ALL) {
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
			zone = &dev->zones[i];
			null_lock_zone(dev, zone);
			if (zone->cond != BLK_ZONE_COND_EMPTY &&
			    zone->cond != BLK_ZONE_COND_READONLY &&
			    zone->cond != BLK_ZONE_COND_OFFLINE) {
				null_reset_zone(dev, zone);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, zone);
		}
		return BLK_STS_OK;
	}

	zone_no = null_zone_no(dev, sector);
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	switch (op) {
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		ret = BLK_STS_NOTSUPP;
		break;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}

blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors)
{
	struct nullb_device *dev;
	struct nullb_zone *zone;
	blk_status_t sts;

	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		dev = cmd->nq->dev;
		zone = &dev->zones[null_zone_no(dev, sector)];
		if (zone->cond == BLK_ZONE_COND_OFFLINE)
			return BLK_STS_IOERR;

		null_lock_zone(dev, zone);
		sts = null_process_cmd(cmd, op, sector, nr_sectors);
		null_unlock_zone(dev, zone);
		return sts;
	}
}

/*
 * Set a zone in the read-only or offline condition.
 */
static void null_set_zone_cond(struct nullb_device *dev,
			       struct nullb_zone *zone, enum blk_zone_cond cond)
{
	if (WARN_ON_ONCE(cond != BLK_ZONE_COND_READONLY &&
			 cond != BLK_ZONE_COND_OFFLINE))
		return;

	null_lock_zone(dev, zone);

	/*
	 * If the requested condition is the one the zone is already in, that
	 * is, read-only requested for a read-only zone or offline requested
	 * for an offline zone, restore the zone to the normal empty condition.
	 * Otherwise, set the zone to the requested condition, finishing it
	 * first to release any zone resources it may hold.
	 */
	if (zone->cond == cond) {
		zone->cond = BLK_ZONE_COND_EMPTY;
		zone->wp = zone->start;
		if (dev->memory_backed)
			null_handle_discard(dev, zone->start, zone->len);
	} else {
		if (zone->cond != BLK_ZONE_COND_READONLY &&
		    zone->cond != BLK_ZONE_COND_OFFLINE)
			null_finish_zone(dev, zone);
		zone->cond = cond;
		zone->wp = NULL_ZONE_INVALID_WP;
	}

	null_unlock_zone(dev, zone);
}

/*
 * Identify a zone from the sector written to the configfs file, then set
 * the given zone condition on that zone.
 */
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
			size_t count, enum blk_zone_cond cond)
{
	unsigned long long sector;
	unsigned int zone_no;
	int ret;

	if (!dev->zoned) {
		pr_err("null_blk device is not zoned\n");
		return -EINVAL;
	}

	if (!dev->zones) {
		pr_err("null_blk device is not yet powered\n");
		return -EINVAL;
	}

	ret = kstrtoull(page, 0, &sector);
	if (ret < 0)
		return ret;

	zone_no = null_zone_no(dev, sector);
	if (zone_no >= dev->nr_zones) {
		pr_err("Sector out of range\n");
		return -EINVAL;
	}

	if (dev->zones[zone_no].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		pr_err("Cannot change the condition of conventional zones\n");
		return -EINVAL;
	}

	null_set_zone_cond(dev, &dev->zones[zone_no], cond);

	return count;
}
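/*
 * Example (paths are illustrative; the zone_readonly and zone_offline
 * configfs attributes call this helper with the corresponding condition):
 *
 *   echo 524288 > /sys/kernel/config/nullb/nullb0/zone_readonly
 *
 * marks the zone containing sector 524288 read-only; writing the same
 * sector again restores that zone to the empty condition.
 */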