// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#undef pr_fmt
#define pr_fmt(fmt)	"null_blk: " fmt

static inline sector_t mb_to_sects(unsigned long mb)
{
	return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}
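
/*
 * Example: with the default 256 MB zone size, mb_to_sects(256)
 * returns (256 * SZ_1M) >> 9 = 524288 sectors of 512 B each.
 */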

static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}
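
/*
 * Example: with 256 MB zones, zone_size_sects = 524288 = 2^19, so
 * null_zone_no() maps sector 1048576 to zone number 1048576 >> 19 = 2.
 */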

static inline void null_lock_zone_res(struct nullb_device *dev)
{
	if (dev->need_zone_res_mgmt)
		spin_lock_irq(&dev->zone_res_lock);
}

static inline void null_unlock_zone_res(struct nullb_device *dev)
{
	if (dev->need_zone_res_mgmt)
		spin_unlock_irq(&dev->zone_res_lock);
}

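/*
 * Command processing for a memory backed device may sleep (e.g. to
 * allocate backing pages), so per-zone locking uses a mutex in that
 * case; a spinlock is sufficient otherwise.
 */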
static inline void null_init_zone_lock(struct nullb_device *dev,
				       struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_init(&zone->spinlock);
	else
		mutex_init(&zone->mutex);
}

static inline void null_lock_zone(struct nullb_device *dev,
				  struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_irq(&zone->spinlock);
	else
		mutex_lock(&zone->mutex);
}

static inline void null_unlock_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_unlock_irq(&zone->spinlock);
	else
		mutex_unlock(&zone->mutex);
}

int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
{
	sector_t dev_capacity_sects, zone_capacity_sects;
	struct nullb_zone *zone;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}
	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	zone_capacity_sects = mb_to_sects(dev->zone_capacity);
	dev_capacity_sects = mb_to_sects(dev->size);
	dev->zone_size_sects = mb_to_sects(dev->zone_size);
	dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
		>> ilog2(dev->zone_size_sects);

	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;

	spin_lock_init(&dev->zone_res_lock);

	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u\n",
			dev->zone_nr_conv);
	}

	/* Max active zones has to be < number of seq zones to be enforceable */
	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_active = 0;
		pr_info("zone_max_active limit disabled, limit >= zone count\n");
	}

	/* Max open zones has to be <= max active zones */
	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
		dev->zone_max_open = dev->zone_max_active;
		pr_info("changed the maximum number of open zones to %u\n",
			dev->zone_max_open);
	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_open = 0;
		pr_info("zone_max_open limit disabled, limit >= zone count\n");
	}
	dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
	dev->imp_close_zone_no = dev->zone_nr_conv;

	for (i = 0; i < dev->zone_nr_conv; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = zone->wp = sector;
		if (zone->start + dev->zone_size_sects > dev_capacity_sects)
			zone->len = dev_capacity_sects - zone->start;
		else
			zone->len = dev->zone_size_sects;
		zone->capacity =
			min_t(sector_t, zone->len, zone_capacity_sects);
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}

	return 0;
}
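
/*
 * Usage sketch (assuming the standard null_blk module parameters): a
 * 1 GB zoned device with 64 MB zones, four of them conventional, and
 * open/active limits on the sequential zones:
 *
 *   modprobe null_blk gb=1 zoned=1 zone_size=64 zone_nr_conv=4 \
 *           zone_max_open=8 zone_max_active=10
 *
 * This yields 16 zones: 4 conventional and 12 sequential write required.
 */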
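/*
 * Set the zoned model and the zone related queue limits for the disk,
 * then have the block layer revalidate the zone configuration when the
 * device is multi-queue.
 */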
int null_register_zoned_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;
	struct request_queue *q = nullb->q;

	disk_set_zoned(nullb->disk);
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
	blk_queue_chunk_sectors(q, dev->zone_size_sects);
	nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
	blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
	disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
	disk_set_max_active_zones(nullb->disk, dev->zone_max_active);

	if (queue_is_mq(q))
		return blk_revalidate_disk_zones(nullb->disk, NULL);

	return 0;
}

void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
	dev->zones = NULL;
}

int null_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct nullb_zone *zone;
	struct blk_zone blkz;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	memset(&blkz, 0, sizeof(struct blk_zone));
	zone = &dev->zones[first_zone];
	for (i = 0; i < nr_zones; i++, zone++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		null_lock_zone(dev, zone);
		blkz.start = zone->start;
		blkz.len = zone->len;
		blkz.wp = zone->wp;
		blkz.type = zone->type;
		blkz.cond = zone->cond;
		blkz.capacity = zone->capacity;
		null_unlock_zone(dev, zone);

		error = cb(&blkz, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}

/*
 * This is called in the case of memory backing from null_process_cmd()
 * with the target zone already locked.
 */
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}
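
/*
 * Example: if a zone's write pointer is at zone->start + 8 and a read of
 * 8 sectors (4 KB) starts at zone->start + 4, only 4 sectors are valid
 * and (8 - 4) << SECTOR_SHIFT = 2048 bytes are returned.
 */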

static blk_status_t __null_close_zone(struct nullb_device *dev,
				      struct nullb_zone *zone)
{
	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (zone->wp == zone->start) {
		zone->cond = BLK_ZONE_COND_EMPTY;
	} else {
		zone->cond = BLK_ZONE_COND_CLOSED;
		dev->nr_zones_closed++;
	}

	return BLK_STS_OK;
}

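/*
 * Close one implicitly open zone to free up an open zone resource.
 * Zones are scanned round-robin starting at imp_close_zone_no so that
 * the same zone is not always picked as the implicit close victim.
 */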
static void null_close_imp_open_zone(struct nullb_device *dev)
{
	struct nullb_zone *zone;
	unsigned int zno, i;

	zno = dev->imp_close_zone_no;
	if (zno >= dev->nr_zones)
		zno = dev->zone_nr_conv;

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[zno];
		zno++;
		if (zno >= dev->nr_zones)
			zno = dev->zone_nr_conv;

		if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
			__null_close_zone(dev, zone);
			dev->imp_close_zone_no = zno;
			return;
		}
	}
}

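/*
 * A zone is active while it is implicitly open, explicitly open or
 * closed, so all three counters contribute to the active zone limit.
 */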
static blk_status_t null_check_active(struct nullb_device *dev)
{
	if (!dev->zone_max_active)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
			dev->nr_zones_closed < dev->zone_max_active)
		return BLK_STS_OK;

	return BLK_STS_ZONE_ACTIVE_RESOURCE;
}

static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			null_close_imp_open_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the manage open zone resources function in the ZBC
 * standard, with the addition of max active zones support (added in the ZNS
 * standard).
 *
 * The function determines if a zone can transition to implicit open or
 * explicit open, while maintaining the max open zone (and max active zone)
 * limit(s). It may close an implicit open zone in order to make additional
 * zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is not
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
					      struct nullb_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}
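
/*
 * Worked example: with zone_max_open=2 and zone_max_active=4, two zones
 * implicitly open and one closed (three active zones), a write to an
 * empty zone passes null_check_active() (3 < 4) but hits the open limit;
 * null_check_open() then closes one implicitly open zone and the write
 * proceeds. With zone_max_active=3, the same write instead fails with
 * BLK_STS_ZONE_ACTIVE_RESOURCE.
 */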

static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct nullb_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		if (append)
			return BLK_STS_IOERR;
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	}

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_FULL ||
	    zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		/* Cannot write to the zone */
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	/*
	 * Regular writes must be at the write pointer position.
	 * Zone append writes are automatically issued at the write
	 * pointer and the position returned using the request or BIO
	 * sector.
	 */
	if (append) {
		sector = zone->wp;
		if (dev->queue_mode == NULL_Q_MQ)
			cmd->rq->__sector = sector;
		else
			cmd->bio->bi_iter.bi_sector = sector;
	} else if (sector != zone->wp) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->wp + nr_sectors > zone->start + zone->capacity) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->cond == BLK_ZONE_COND_CLOSED ||
	    zone->cond == BLK_ZONE_COND_EMPTY) {
		null_lock_zone_res(dev);

		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK) {
			null_unlock_zone_res(dev);
			goto unlock;
		}
		if (zone->cond == BLK_ZONE_COND_CLOSED) {
			dev->nr_zones_closed--;
			dev->nr_zones_imp_open++;
		} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
			dev->nr_zones_imp_open++;
		}

		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		null_unlock_zone_res(dev);
	}

	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (ret != BLK_STS_OK)
		goto unlock;

	zone->wp += nr_sectors;
	if (zone->wp == zone->start + zone->capacity) {
		null_lock_zone_res(dev);
		if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
			dev->nr_zones_exp_open--;
		else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
			dev->nr_zones_imp_open--;
		zone->cond = BLK_ZONE_COND_FULL;
		null_unlock_zone_res(dev);
	}

	ret = BLK_STS_OK;

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}

static blk_status_t null_open_zone(struct nullb_device *dev,
				   struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* open operation on exp open is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;
	dev->nr_zones_exp_open++;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}

static blk_status_t null_close_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);
	ret = __null_close_zone(dev, zone);
	null_unlock_zone_res(dev);

	return ret;
}

static blk_status_t null_finish_zone(struct nullb_device *dev,
				     struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* finish operation on full is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}

static blk_status_t null_reset_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		/* reset operation on empty is not an error */
		null_unlock_zone_res(dev);
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
		break;
	default:
		null_unlock_zone_res(dev);
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	null_unlock_zone_res(dev);

	if (dev->memory_backed)
		return null_handle_discard(dev, zone->start, zone->len);

	return BLK_STS_OK;
}

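/*
 * Dispatch a zone management operation. REQ_OP_ZONE_RESET_ALL resets all
 * sequential zones that are not empty, read-only or offline; the other
 * operations act on the single zone containing @sector.
 */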
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no;
	struct nullb_zone *zone;
	blk_status_t ret;
	size_t i;

	if (op == REQ_OP_ZONE_RESET_ALL) {
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
			zone = &dev->zones[i];
			null_lock_zone(dev, zone);
			if (zone->cond != BLK_ZONE_COND_EMPTY &&
			    zone->cond != BLK_ZONE_COND_READONLY &&
			    zone->cond != BLK_ZONE_COND_OFFLINE) {
				null_reset_zone(dev, zone);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, zone);
		}
		return BLK_STS_OK;
	}

	zone_no = null_zone_no(dev, sector);
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	switch (op) {
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		ret = BLK_STS_NOTSUPP;
		break;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}

blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors)
{
	struct nullb_device *dev;
	struct nullb_zone *zone;
	blk_status_t sts;

	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		dev = cmd->nq->dev;
		zone = &dev->zones[null_zone_no(dev, sector)];
		if (zone->cond == BLK_ZONE_COND_OFFLINE)
			return BLK_STS_IOERR;

		null_lock_zone(dev, zone);
		sts = null_process_cmd(cmd, op, sector, nr_sectors);
		null_unlock_zone(dev, zone);
		return sts;
	}
}

/*
 * Set a zone in the read-only or offline condition.
 */
static void null_set_zone_cond(struct nullb_device *dev,
			       struct nullb_zone *zone, enum blk_zone_cond cond)
{
	if (WARN_ON_ONCE(cond != BLK_ZONE_COND_READONLY &&
			 cond != BLK_ZONE_COND_OFFLINE))
		return;

	null_lock_zone(dev, zone);

	/*
	 * If the read-only condition is requested for a zone that is already
	 * read-only, restore the zone to the normal empty condition. Do the
	 * same if the offline condition is requested for an offline zone.
	 * Otherwise, set the specified condition on the zone, finishing it
	 * first to release the zone resources it holds.
	 */
	if (zone->cond == cond) {
		zone->cond = BLK_ZONE_COND_EMPTY;
		zone->wp = zone->start;
		if (dev->memory_backed)
			null_handle_discard(dev, zone->start, zone->len);
	} else {
		if (zone->cond != BLK_ZONE_COND_READONLY &&
		    zone->cond != BLK_ZONE_COND_OFFLINE)
			null_finish_zone(dev, zone);
		zone->cond = cond;
		zone->wp = (sector_t)-1;
	}

	null_unlock_zone(dev, zone);
}

/*
 * Identify the zone containing the sector written to the configfs file,
 * then set the specified zone condition on that zone.
 */
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
			size_t count, enum blk_zone_cond cond)
{
	unsigned long long sector;
	unsigned int zone_no;
	int ret;

	if (!dev->zoned) {
		pr_err("null_blk device is not zoned\n");
		return -EINVAL;
	}

	if (!dev->zones) {
		pr_err("null_blk device is not yet powered\n");
		return -EINVAL;
	}

	ret = kstrtoull(page, 0, &sector);
	if (ret < 0)
		return ret;

	zone_no = null_zone_no(dev, sector);
	if (zone_no >= dev->nr_zones) {
		pr_err("Sector out of range\n");
		return -EINVAL;
	}

	if (dev->zones[zone_no].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		pr_err("Cannot change condition of conventional zones\n");
		return -EINVAL;
	}

	null_set_zone_cond(dev, &dev->zones[zone_no], cond);

	return count;
}
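
/*
 * Usage sketch (assuming the zone_readonly and zone_offline configfs
 * attributes that call this helper): writing the start sector of a
 * sequential zone sets that zone read-only; writing it again restores
 * the zone to the empty condition. For a configfs device named nullb0:
 *
 *   echo 524288 > /sys/kernel/config/nullb/nullb0/zone_readonly
 */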
767