xref: /linux/drivers/block/null_blk/zoned.c (revision 5b026e34120766408e76ba19a0e33a9dc996f9f0)
// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#undef pr_fmt
#define pr_fmt(fmt)	"null_blk: " fmt

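/* Write pointer value reported for zones in the read-only or offline condition */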
#define NULL_ZONE_INVALID_WP	((sector_t)-1)

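/* Convert a size given in megabytes to a number of 512B sectors. */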
static inline sector_t mb_to_sects(unsigned long mb)
{
	return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}

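/*
 * Zone sizes are restricted to a power-of-two number of sectors, so the
 * zone index of a sector is a simple shift.
 */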
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}

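/*
 * Zone lock helpers: memory-backed devices may sleep while processing a
 * command (e.g. to allocate backing pages), so each zone is protected
 * with a mutex. Devices without memory backing use a cheaper spinlock,
 * taken with interrupts disabled.
 */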
static inline void null_init_zone_lock(struct nullb_device *dev,
				       struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_init(&zone->spinlock);
	else
		mutex_init(&zone->mutex);
}

static inline void null_lock_zone(struct nullb_device *dev,
				  struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_irq(&zone->spinlock);
	else
		mutex_lock(&zone->mutex);
}

static inline void null_unlock_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_unlock_irq(&zone->spinlock);
	else
		mutex_unlock(&zone->mutex);
}

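/*
 * Validate the zoned device configuration, allocate and initialize the
 * zone array, and set up the zoned queue limits.
 */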
int null_init_zoned_dev(struct nullb_device *dev,
			struct queue_limits *lim)
{
	sector_t dev_capacity_sects, zone_capacity_sects;
	struct nullb_zone *zone;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}
	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	/*
	 * If a zone capacity smaller than the zone size was requested, do not
	 * allow a smaller last zone as well: such a configuration does not
	 * correspond to any real zoned device.
	 */
	if (dev->zone_capacity != dev->zone_size &&
	    dev->size & (dev->zone_size - 1)) {
		pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n");
		return -EINVAL;
	}

	zone_capacity_sects = mb_to_sects(dev->zone_capacity);
	dev_capacity_sects = mb_to_sects(dev->size);
	dev->zone_size_sects = mb_to_sects(dev->zone_size);
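	/* Round up so that a smaller last zone still counts as a full zone. */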
	dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
		>> ilog2(dev->zone_size_sects);

	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;

	spin_lock_init(&dev->zone_res_lock);

	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u\n",
			dev->zone_nr_conv);
	}

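	/*
	 * Align the maximum zone append size down to the logical block size
	 * and cap it at the zone capacity.
	 */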
	dev->zone_append_max_sectors =
		min(ALIGN_DOWN(dev->zone_append_max_sectors,
			       dev->blocksize >> SECTOR_SHIFT),
		    zone_capacity_sects);

	/* Max active zones has to be < number of seq zones in order to be enforceable */
	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_active = 0;
		pr_info("zone_max_active limit disabled, limit >= zone count\n");
	}

	/* Max open zones has to be <= max active zones */
	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
		dev->zone_max_open = dev->zone_max_active;
		pr_info("changed the maximum number of open zones to %u\n",
			dev->zone_max_open);
	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_open = 0;
		pr_info("zone_max_open limit disabled, limit >= zone count\n");
	}
	dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
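	/* Start the implicit close round-robin scan at the first sequential zone. */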
	dev->imp_close_zone_no = dev->zone_nr_conv;

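	/*
	 * Conventional zones have no write pointer: report the write pointer
	 * at the end of the zone and mark the zone as not write pointer
	 * managed.
	 */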
	for (i = 0; i < dev->zone_nr_conv; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}

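	/*
	 * Initialize the sequential write required zones. The last zone may
	 * be smaller than the zone size if the device capacity is not
	 * zone-size aligned.
	 */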
	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = zone->wp = sector;
		if (zone->start + dev->zone_size_sects > dev_capacity_sects)
			zone->len = dev_capacity_sects - zone->start;
		else
			zone->len = dev->zone_size_sects;
		zone->capacity =
			min_t(sector_t, zone->len, zone_capacity_sects);
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}

	lim->features |= BLK_FEAT_ZONED | BLK_FEAT_ZONE_RESETALL;
	lim->chunk_sectors = dev->zone_size_sects;
	lim->max_zone_append_sectors = dev->zone_append_max_sectors;
	lim->max_open_zones = dev->zone_max_open;
	lim->max_active_zones = dev->zone_max_active;
	return 0;
}

int null_register_zoned_dev(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct gendisk *disk = nullb->disk;

	pr_info("%s: using %s zone append\n",
		disk->disk_name,
		queue_emulates_zone_append(q) ? "emulated" : "native");

	return blk_revalidate_disk_zones(disk);
}

void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
	dev->zones = NULL;
}

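/*
 * Implements the gendisk ->report_zones operation: report up to @nr_zones
 * zones, starting from the zone containing @sector.
 */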
int null_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct nullb_zone *zone;
	struct blk_zone blkz;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	memset(&blkz, 0, sizeof(struct blk_zone));
	zone = &dev->zones[first_zone];
	for (i = 0; i < nr_zones; i++, zone++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		null_lock_zone(dev, zone);
		blkz.start = zone->start;
		blkz.len = zone->len;
		blkz.wp = zone->wp;
		blkz.type = zone->type;
		blkz.cond = zone->cond;
		blkz.capacity = zone->capacity;
		null_unlock_zone(dev, zone);

		error = cb(&blkz, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}

/*
 * For memory-backed devices, this is called from null_process_cmd() with
 * the target zone already locked.
 */
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Reads must not extend beyond the zone write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}

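/*
 * Implicitly close the first implicitly open zone found, scanning the
 * sequential zones round-robin from the position saved in
 * dev->imp_close_zone_no. Called with dev->zone_res_lock held.
 */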
static void null_close_imp_open_zone(struct nullb_device *dev)
{
	struct nullb_zone *zone;
	unsigned int zno, i;

	zno = dev->imp_close_zone_no;
	if (zno >= dev->nr_zones)
		zno = dev->zone_nr_conv;

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[zno];
		zno++;
		if (zno >= dev->nr_zones)
			zno = dev->zone_nr_conv;

		if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
			dev->nr_zones_imp_open--;
			if (zone->wp == zone->start) {
				zone->cond = BLK_ZONE_COND_EMPTY;
			} else {
				zone->cond = BLK_ZONE_COND_CLOSED;
				dev->nr_zones_closed++;
			}
			dev->imp_close_zone_no = zno;
			return;
		}
	}
}

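/*
 * Check that one more zone can be activated without exceeding the max
 * active zones limit. Called with dev->zone_res_lock held.
 */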
static blk_status_t null_check_active(struct nullb_device *dev)
{
	if (!dev->zone_max_active)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
			dev->nr_zones_closed < dev->zone_max_active)
		return BLK_STS_OK;

	return BLK_STS_ZONE_ACTIVE_RESOURCE;
}

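/*
 * Check that one more zone can be opened without exceeding the max open
 * zones limit, implicitly closing an open zone if needed and allowed by
 * the active zones limit. Called with dev->zone_res_lock held.
 */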
static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			null_close_imp_open_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the "manage open zone resources" operation of the
 * ZBC standard, with the addition of max active zones support (added in
 * the ZNS standard).
 *
 * It determines if a zone can transition to implicit open or explicit
 * open, while maintaining the max open zone (and max active zone)
 * limit(s). It may close an implicitly open zone in order to make
 * additional zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is
 * no room within the open limit. However, with the addition of an active
 * limit, it is not certain that closing an implicit open zone will allow
 * a new zone to be opened, since we might already be at the active limit
 * capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
					      struct nullb_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}

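/*
 * Process a regular write (append == false) or a zone append
 * (append == true), advancing the zone write pointer and transitioning
 * the zone condition as needed.
 */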
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct nullb_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		if (append)
			return BLK_STS_IOERR;
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	}

	null_lock_zone(dev, zone);

	/*
	 * Regular writes must be at the write pointer position. Zone append
	 * writes are automatically issued at the write pointer and the write
	 * position is returned through the request sector. Note that we do
	 * not check the zone condition because for FULL, READONLY and OFFLINE
	 * zones, the sector check against the zone write pointer will always
	 * result in failing the command.
	 */
	if (append) {
		if (WARN_ON_ONCE(!dev->zone_append_max_sectors) ||
		    zone->wp == NULL_ZONE_INVALID_WP) {
			ret = BLK_STS_IOERR;
			goto unlock_zone;
		}
		sector = zone->wp;
		blk_mq_rq_from_pdu(cmd)->__sector = sector;
	}

	if (sector != zone->wp ||
	    zone->wp + nr_sectors > zone->start + zone->capacity) {
		ret = BLK_STS_IOERR;
		goto unlock_zone;
	}

	if (zone->cond == BLK_ZONE_COND_CLOSED ||
	    zone->cond == BLK_ZONE_COND_EMPTY) {
		if (dev->need_zone_res_mgmt) {
			spin_lock(&dev->zone_res_lock);

			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				goto unlock_zone;
			}
			if (zone->cond == BLK_ZONE_COND_CLOSED) {
				dev->nr_zones_closed--;
				dev->nr_zones_imp_open++;
			} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
				dev->nr_zones_imp_open++;
			}

			spin_unlock(&dev->zone_res_lock);
		}

		zone->cond = BLK_ZONE_COND_IMP_OPEN;
	}

	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (ret != BLK_STS_OK)
		goto unlock_zone;

	zone->wp += nr_sectors;
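	/*
	 * Reaching the zone capacity transitions the zone to FULL, which
	 * releases its open (and active) zone resources.
	 */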
	if (zone->wp == zone->start + zone->capacity) {
		if (dev->need_zone_res_mgmt) {
			spin_lock(&dev->zone_res_lock);
			if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
				dev->nr_zones_exp_open--;
			else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
				dev->nr_zones_imp_open--;
			spin_unlock(&dev->zone_res_lock);
		}
		zone->cond = BLK_ZONE_COND_FULL;
	}

	ret = BLK_STS_OK;

unlock_zone:
	null_unlock_zone(dev, zone);

	return ret;
}

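/*
 * Explicitly open a zone (REQ_OP_ZONE_OPEN), enforcing the open and
 * active zone resource limits. Called with the zone locked.
 */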
static blk_status_t null_open_zone(struct nullb_device *dev,
				   struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* Open operation on an explicitly open zone is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_CLOSED:
		break;
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_EMPTY:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			break;
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_CLOSED:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			dev->nr_zones_closed--;
			break;
		default:
			break;
		}

		dev->nr_zones_exp_open++;

		spin_unlock(&dev->zone_res_lock);
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;

	return BLK_STS_OK;
}

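/*
 * Close a zone (REQ_OP_ZONE_CLOSE): a zone with data transitions to the
 * CLOSED condition and keeps consuming an active zone resource, while an
 * empty zone returns to the EMPTY condition. Called with the zone locked.
 */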
static blk_status_t null_close_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* Close operation on a closed zone is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_EXP_OPEN:
			dev->nr_zones_exp_open--;
			break;
		default:
			break;
		}

		if (zone->wp > zone->start)
			dev->nr_zones_closed++;

		spin_unlock(&dev->zone_res_lock);
	}

	if (zone->wp == zone->start)
		zone->cond = BLK_ZONE_COND_EMPTY;
	else
		zone->cond = BLK_ZONE_COND_CLOSED;

	return BLK_STS_OK;
}

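/*
 * Finish a zone (REQ_OP_ZONE_FINISH): transition the zone to the FULL
 * condition and move its write pointer to the end of the zone. Called
 * with the zone locked.
 */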
static blk_status_t null_finish_zone(struct nullb_device *dev,
				     struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_FULL:
			/* Finish operation on a full zone is not an error */
			spin_unlock(&dev->zone_res_lock);
			return BLK_STS_OK;
		case BLK_ZONE_COND_EMPTY:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			break;
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_EXP_OPEN:
			dev->nr_zones_exp_open--;
			break;
		case BLK_ZONE_COND_CLOSED:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			dev->nr_zones_closed--;
			break;
		default:
			spin_unlock(&dev->zone_res_lock);
			return BLK_STS_IOERR;
		}

		spin_unlock(&dev->zone_res_lock);
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

	return BLK_STS_OK;
}

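/*
 * Reset a zone (REQ_OP_ZONE_RESET): return the zone to the EMPTY
 * condition, rewind its write pointer and, for memory-backed devices,
 * discard the zone data. Called with the zone locked.
 */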
static blk_status_t null_reset_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_EXP_OPEN:
			dev->nr_zones_exp_open--;
			break;
		case BLK_ZONE_COND_CLOSED:
			dev->nr_zones_closed--;
			break;
		case BLK_ZONE_COND_EMPTY:
		case BLK_ZONE_COND_FULL:
			break;
		default:
			spin_unlock(&dev->zone_res_lock);
			return BLK_STS_IOERR;
		}

		spin_unlock(&dev->zone_res_lock);
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	if (dev->memory_backed)
		return null_handle_discard(dev, zone->start, zone->len);

	return BLK_STS_OK;
}

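/*
 * Dispatch zone management operations: REQ_OP_ZONE_RESET_ALL iterates
 * over all sequential zones, while the other operations target the
 * single zone containing @sector.
 */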
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no;
	struct nullb_zone *zone;
	blk_status_t ret;
	size_t i;

	if (op == REQ_OP_ZONE_RESET_ALL) {
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
			zone = &dev->zones[i];
			null_lock_zone(dev, zone);
			if (zone->cond != BLK_ZONE_COND_EMPTY &&
			    zone->cond != BLK_ZONE_COND_READONLY &&
			    zone->cond != BLK_ZONE_COND_OFFLINE) {
				null_reset_zone(dev, zone);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, zone);
		}
		return BLK_STS_OK;
	}

	zone_no = null_zone_no(dev, sector);
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	switch (op) {
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		ret = BLK_STS_NOTSUPP;
		break;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}

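/*
 * Entry point for commands issued to a zoned null_blk device: writes and
 * zone management operations get zoned-specific handling, while other
 * operations (e.g. reads) are processed under the target zone lock.
 */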
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors)
{
	struct nullb_device *dev;
	struct nullb_zone *zone;
	blk_status_t sts;

	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		dev = cmd->nq->dev;
		zone = &dev->zones[null_zone_no(dev, sector)];
		if (zone->cond == BLK_ZONE_COND_OFFLINE)
			return BLK_STS_IOERR;

		null_lock_zone(dev, zone);
		sts = null_process_cmd(cmd, op, sector, nr_sectors);
		null_unlock_zone(dev, zone);
		return sts;
	}
}

/*
 * Set a zone in the read-only or offline condition.
 */
static void null_set_zone_cond(struct nullb_device *dev,
			       struct nullb_zone *zone, enum blk_zone_cond cond)
{
	if (WARN_ON_ONCE(cond != BLK_ZONE_COND_READONLY &&
			 cond != BLK_ZONE_COND_OFFLINE))
		return;

	null_lock_zone(dev, zone);

	/*
	 * If the read-only condition is requested for a zone that is already
	 * read-only, restore the normal empty condition. Do the same if the
	 * offline condition is requested for an offline zone. Otherwise, set
	 * the specified zone condition, first finishing the zone to release
	 * its zone resources.
	 */
	if (zone->cond == cond) {
		zone->cond = BLK_ZONE_COND_EMPTY;
		zone->wp = zone->start;
		if (dev->memory_backed)
			null_handle_discard(dev, zone->start, zone->len);
	} else {
		if (zone->cond != BLK_ZONE_COND_READONLY &&
		    zone->cond != BLK_ZONE_COND_OFFLINE)
			null_finish_zone(dev, zone);
		zone->cond = cond;
		zone->wp = NULL_ZONE_INVALID_WP;
	}

	null_unlock_zone(dev, zone);
}

/*
 * Identify the zone containing the sector value written to a configfs
 * file and set that zone to the given condition.
 */
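/*
 * Usage sketch, assuming a configfs device named nullb0 (the
 * zone_readonly and zone_offline attributes are defined in the null_blk
 * configfs code):
 *   echo 0 > /sys/kernel/config/nullb/nullb0/zone_readonly
 *   echo 0 > /sys/kernel/config/nullb/nullb0/zone_offline
 */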
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
			size_t count, enum blk_zone_cond cond)
{
	unsigned long long sector;
	unsigned int zone_no;
	int ret;

	if (!dev->zoned) {
		pr_err("null_blk device is not zoned\n");
		return -EINVAL;
	}

	if (!dev->zones) {
		pr_err("null_blk device is not yet powered\n");
		return -EINVAL;
	}

	ret = kstrtoull(page, 0, &sector);
	if (ret < 0)
		return ret;

	zone_no = null_zone_no(dev, sector);
	if (zone_no >= dev->nr_zones) {
		pr_err("Sector out of range\n");
		return -EINVAL;
	}

	if (dev->zones[zone_no].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		pr_err("Cannot change condition of conventional zones\n");
		return -EINVAL;
	}

	null_set_zone_cond(dev, &dev->zones[zone_no], cond);

	return count;
}