// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>
#include <uapi/linux/virtio_ring.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

/* The maximum number of sg elements that fit into a virtqueue */
#define VIRTIO_BLK_MAX_SG_ELEMS 32768

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define VIRTIO_BLK_INLINE_SG_CNT	0
#else
#define VIRTIO_BLK_INLINE_SG_CNT	2
#endif

static unsigned int num_request_queues;
module_param(num_request_queues, uint, 0644);
MODULE_PARM_DESC(num_request_queues,
		 "Limit the number of request queues to use for blk device. "
		 "0 for no limit. "
		 "Values > nr_cpu_ids truncated to nr_cpu_ids.");

static unsigned int poll_queues;
module_param(poll_queues, uint, 0644);
MODULE_PARM_DESC(poll_queues, "The number of dedicated virtqueues for polling I/O");

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
	/*
	 * This mutex must be held by anything that may run after
	 * virtblk_remove() sets vblk->vdev to NULL.
	 *
	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
	 * shut down before vblk->vdev is set to NULL and therefore do not need
	 * to hold this mutex.
	 */
	struct mutex vdev_mutex;
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	int io_queues[HCTX_MAX_TYPES];
	struct virtio_blk_vq *vqs;

	/* For zoned device */
	unsigned int zone_sectors;
};

struct virtblk_req {
	/* Out header */
	struct virtio_blk_outhdr out_hdr;

	/* In header */
	union {
		u8 status;

		/*
		 * The zone append command has an extended in header.
		 * The status field in zone_append_in_hdr must always
		 * be the last byte.
		 */
		struct {
			__virtio64 sector;
			u8 status;
		} zone_append;
	} in_hdr;

	size_t in_hdr_len;

	struct sg_table sg_table;
	struct scatterlist sg[];
};

static inline blk_status_t virtblk_result(u8 status)
{
	switch (status) {
	case VIRTIO_BLK_S_OK:
		return BLK_STS_OK;
	case VIRTIO_BLK_S_UNSUPP:
		return BLK_STS_NOTSUPP;
	case VIRTIO_BLK_S_ZONE_OPEN_RESOURCE:
		return BLK_STS_ZONE_OPEN_RESOURCE;
	case VIRTIO_BLK_S_ZONE_ACTIVE_RESOURCE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case VIRTIO_BLK_S_IOERR:
	case VIRTIO_BLK_S_ZONE_UNALIGNED_WP:
	default:
		return BLK_STS_IOERR;
	}
}

static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];

	return vq;
}

static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
{
	struct scatterlist out_hdr, in_hdr, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&out_hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &out_hdr;

	if (vbr->sg_table.nents) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = vbr->sg_table.sgl;
		else
			sgs[num_out + num_in++] = vbr->sg_table.sgl;
	}

	sg_init_one(&in_hdr, &vbr->in_hdr.status, vbr->in_hdr_len);
	sgs[num_out + num_in++] = &in_hdr;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

static int virtblk_setup_discard_write_zeroes_erase(struct request *req, bool unmap)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);
	unsigned short n = 0;
	struct virtio_blk_discard_write_zeroes *range;
	struct bio *bio;
	u32 flags = 0;

	if (unmap)
		flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

	/*
	 * Single max discard segment means multi-range discard isn't
	 * supported, and block layer only runs contiguity merge like
	 * normal RW request. So we can't rely on bio for retrieving
	 * each range info.
	 */
	if (queue_max_discard_segments(req->q) == 1) {
		range[0].flags = cpu_to_le32(flags);
		range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
		range[0].sector = cpu_to_le64(blk_rq_pos(req));
		n = 1;
	} else {
		__rq_for_each_bio(bio, req) {
			u64 sector = bio->bi_iter.bi_sector;
			u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

			range[n].flags = cpu_to_le32(flags);
			range[n].num_sectors = cpu_to_le32(num_sectors);
			range[n].sector = cpu_to_le64(sector);
			n++;
		}
	}

	WARN_ON_ONCE(n != segments);

	bvec_set_virt(&req->special_vec, range, sizeof(*range) * segments);
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return 0;
}

static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr)
{
	if (blk_rq_nr_phys_segments(req))
		sg_free_table_chained(&vbr->sg_table,
				      VIRTIO_BLK_INLINE_SG_CNT);
}

static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
			    struct virtblk_req *vbr)
{
	int err;

	if (!blk_rq_nr_phys_segments(req))
		return 0;

	vbr->sg_table.sgl = vbr->sg;
	err = sg_alloc_table_chained(&vbr->sg_table,
				     blk_rq_nr_phys_segments(req),
				     vbr->sg_table.sgl,
				     VIRTIO_BLK_INLINE_SG_CNT);
	if (unlikely(err))
		return -ENOMEM;

	return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
}

static void virtblk_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
		kfree(bvec_virt(&req->special_vec));
}

static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
				      struct request *req,
				      struct virtblk_req *vbr)
{
	size_t in_hdr_len = sizeof(vbr->in_hdr.status);
	bool unmap = false;
	u32 type;
	u64 sector = 0;

	if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && op_is_zone_mgmt(req_op(req)))
		return BLK_STS_NOTSUPP;

	/* Set fields for all request types */
	vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));

	switch (req_op(req)) {
	case REQ_OP_READ:
		type = VIRTIO_BLK_T_IN;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_WRITE:
		type = VIRTIO_BLK_T_OUT;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_DISCARD:
		type = VIRTIO_BLK_T_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		type = VIRTIO_BLK_T_WRITE_ZEROES;
		unmap = !(req->cmd_flags & REQ_NOUNMAP);
		break;
	case REQ_OP_SECURE_ERASE:
		type = VIRTIO_BLK_T_SECURE_ERASE;
		break;
	case REQ_OP_ZONE_OPEN:
		type = VIRTIO_BLK_T_ZONE_OPEN;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_ZONE_CLOSE:
		type = VIRTIO_BLK_T_ZONE_CLOSE;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_ZONE_FINISH:
		type = VIRTIO_BLK_T_ZONE_FINISH;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_ZONE_APPEND:
		type = VIRTIO_BLK_T_ZONE_APPEND;
		sector = blk_rq_pos(req);
		in_hdr_len = sizeof(vbr->in_hdr.zone_append);
		break;
	case REQ_OP_ZONE_RESET:
		type = VIRTIO_BLK_T_ZONE_RESET;
		sector = blk_rq_pos(req);
		break;
	case REQ_OP_ZONE_RESET_ALL:
		type = VIRTIO_BLK_T_ZONE_RESET_ALL;
		break;
	case REQ_OP_DRV_IN:
		/*
		 * Out header has already been prepared by the caller (virtblk_get_id()
		 * or virtblk_submit_zone_report()), nothing to do here.
303 */ 304 return 0; 305 default: 306 WARN_ON_ONCE(1); 307 return BLK_STS_IOERR; 308 } 309 310 /* Set fields for non-REQ_OP_DRV_IN request types */ 311 vbr->in_hdr_len = in_hdr_len; 312 vbr->out_hdr.type = cpu_to_virtio32(vdev, type); 313 vbr->out_hdr.sector = cpu_to_virtio64(vdev, sector); 314 315 if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES || 316 type == VIRTIO_BLK_T_SECURE_ERASE) { 317 if (virtblk_setup_discard_write_zeroes_erase(req, unmap)) 318 return BLK_STS_RESOURCE; 319 } 320 321 return 0; 322 } 323 324 /* 325 * The status byte is always the last byte of the virtblk request 326 * in-header. This helper fetches its value for all in-header formats 327 * that are currently defined. 328 */ 329 static inline u8 virtblk_vbr_status(struct virtblk_req *vbr) 330 { 331 return *((u8 *)&vbr->in_hdr + vbr->in_hdr_len - 1); 332 } 333 334 static inline void virtblk_request_done(struct request *req) 335 { 336 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); 337 blk_status_t status = virtblk_result(virtblk_vbr_status(vbr)); 338 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata; 339 340 virtblk_unmap_data(req, vbr); 341 virtblk_cleanup_cmd(req); 342 343 if (req_op(req) == REQ_OP_ZONE_APPEND) 344 req->__sector = virtio64_to_cpu(vblk->vdev, 345 vbr->in_hdr.zone_append.sector); 346 347 blk_mq_end_request(req, status); 348 } 349 350 static void virtblk_done(struct virtqueue *vq) 351 { 352 struct virtio_blk *vblk = vq->vdev->priv; 353 bool req_done = false; 354 int qid = vq->index; 355 struct virtblk_req *vbr; 356 unsigned long flags; 357 unsigned int len; 358 359 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); 360 do { 361 virtqueue_disable_cb(vq); 362 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { 363 struct request *req = blk_mq_rq_from_pdu(vbr); 364 365 if (likely(!blk_should_fake_timeout(req->q))) 366 blk_mq_complete_request(req); 367 req_done = true; 368 } 369 } while (!virtqueue_enable_cb(vq)); 370 371 /* In case queue is stopped waiting for more buffers. 
	 */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
	bool kick;

	spin_lock_irq(&vq->lock);
	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irq(&vq->lock);

	if (kick)
		virtqueue_notify(vq->vq);
}

static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
{
	virtblk_cleanup_cmd(req);
	switch (rc) {
	case -ENOSPC:
		return BLK_STS_DEV_RESOURCE;
	case -ENOMEM:
		return BLK_STS_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}

static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
				    struct virtio_blk *vblk,
				    struct request *req,
				    struct virtblk_req *vbr)
{
	blk_status_t status;
	int num;

	status = virtblk_setup_cmd(vblk->vdev, req, vbr);
	if (unlikely(status))
		return status;

	num = virtblk_map_data(hctx, req, vbr);
	if (unlikely(num < 0))
		return virtblk_fail_to_queue(req, -ENOMEM);
	vbr->sg_table.nents = num;

	blk_mq_start_request(req);

	return BLK_STS_OK;
}

static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	int qid = hctx->queue_num;
	bool notify = false;
	blk_status_t status;
	int err;

	status = virtblk_prep_rq(hctx, vblk, req, vbr);
	if (unlikely(status))
		return status;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		/* Don't stop the queue if -ENOMEM: we may have failed to
		 * bounce the buffer due to global resource outage.
		 */
		if (err == -ENOSPC)
			blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		virtblk_unmap_data(req, vbr);
		return virtblk_fail_to_queue(req, err);
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;
}

static bool virtblk_prep_rq_batch(struct request *req)
{
	struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
}

static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
				  struct rq_list *rqlist)
{
	struct request *req;
	unsigned long flags;
	bool kick;

	spin_lock_irqsave(&vq->lock, flags);

	while ((req = rq_list_pop(rqlist))) {
		struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
		int err;

		err = virtblk_add_req(vq->vq, vbr);
		if (err) {
			virtblk_unmap_data(req, vbr);
			virtblk_cleanup_cmd(req);
			blk_mq_requeue_request(req, true);
		}
	}

	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irqrestore(&vq->lock, flags);

	if (kick)
		virtqueue_notify(vq->vq);
}

static void virtio_queue_rqs(struct rq_list *rqlist)
{
	struct rq_list submit_list = { };
	struct rq_list requeue_list = { };
	struct virtio_blk_vq *vq = NULL;
	struct request *req;

	while ((req = rq_list_pop(rqlist))) {
		struct virtio_blk_vq *this_vq = get_virtio_blk_vq(req->mq_hctx);

		if (vq && vq != this_vq)
			virtblk_add_req_batch(vq, &submit_list);
		vq = this_vq;

		if (virtblk_prep_rq_batch(req))
			rq_list_add_tail(&submit_list, req);
		else
			rq_list_add_tail(&requeue_list, req);
	}

	if (vq)
		virtblk_add_req_batch(vq, &submit_list);
	*rqlist = requeue_list;
}

#ifdef CONFIG_BLK_DEV_ZONED
static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
					 unsigned int nr_zones,
					 size_t *buflen)
{
	struct request_queue *q = vblk->disk->queue;
	size_t bufsize;
	void *buf;

	nr_zones = min_t(unsigned int, nr_zones,
			 get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors));

	bufsize = sizeof(struct virtio_blk_zone_report) +
		nr_zones * sizeof(struct virtio_blk_zone_descriptor);
	bufsize = min_t(size_t, bufsize,
			queue_max_hw_sectors(q) << SECTOR_SHIFT);
	bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);

	while (bufsize >= sizeof(struct virtio_blk_zone_report)) {
		buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
		if (buf) {
			*buflen = bufsize;
			return buf;
		}
		bufsize >>= 1;
	}

	return NULL;
}

static int virtblk_submit_zone_report(struct virtio_blk *vblk,
				      char *report_buf, size_t report_len,
				      sector_t sector)
{
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	struct virtblk_req *vbr;
	int err;

	req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	vbr = blk_mq_rq_to_pdu(req);
	vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
	vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);

	err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL);
	if (err)
		goto out;

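	/*
	 * Issue the report request synchronously; once it completes, the
	 * device status is available in the request's in-header.
	 */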
	blk_execute_rq(req, false);
	err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
out:
	blk_mq_free_request(req);
	return err;
}

static int virtblk_parse_zone(struct virtio_blk *vblk,
			      struct virtio_blk_zone_descriptor *entry,
			      unsigned int idx, report_zones_cb cb, void *data)
{
	struct blk_zone zone = { };

	zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
	if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
		zone.len = vblk->zone_sectors;
	else
		zone.len = get_capacity(vblk->disk) - zone.start;
	zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
	zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);

	switch (entry->z_type) {
	case VIRTIO_BLK_ZT_SWR:
		zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		break;
	case VIRTIO_BLK_ZT_SWP:
		zone.type = BLK_ZONE_TYPE_SEQWRITE_PREF;
		break;
	case VIRTIO_BLK_ZT_CONV:
		zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
		break;
	default:
		dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n",
			zone.start, entry->z_type);
		return -EIO;
	}

	switch (entry->z_state) {
	case VIRTIO_BLK_ZS_EMPTY:
		zone.cond = BLK_ZONE_COND_EMPTY;
		break;
	case VIRTIO_BLK_ZS_CLOSED:
		zone.cond = BLK_ZONE_COND_CLOSED;
		break;
	case VIRTIO_BLK_ZS_FULL:
		zone.cond = BLK_ZONE_COND_FULL;
		zone.wp = zone.start + zone.len;
		break;
	case VIRTIO_BLK_ZS_EOPEN:
		zone.cond = BLK_ZONE_COND_EXP_OPEN;
		break;
	case VIRTIO_BLK_ZS_IOPEN:
		zone.cond = BLK_ZONE_COND_IMP_OPEN;
		break;
	case VIRTIO_BLK_ZS_NOT_WP:
		zone.cond = BLK_ZONE_COND_NOT_WP;
		break;
	case VIRTIO_BLK_ZS_RDONLY:
		zone.cond = BLK_ZONE_COND_READONLY;
		zone.wp = ULONG_MAX;
		break;
	case VIRTIO_BLK_ZS_OFFLINE:
		zone.cond = BLK_ZONE_COND_OFFLINE;
		zone.wp = ULONG_MAX;
		break;
	default:
		dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n",
			zone.start, entry->z_state);
		return -EIO;
	}

	/*
	 * The callback below checks the validity of the reported
	 * entry data, no need to further validate it here.
	 */
	return cb(&zone, idx, data);
}

static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
				unsigned int nr_zones, report_zones_cb cb,
				void *data)
{
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_blk_zone_report *report;
	unsigned long long nz, i;
	size_t buflen;
	unsigned int zone_idx = 0;
	int ret;

	if (WARN_ON_ONCE(!vblk->zone_sectors))
		return -EOPNOTSUPP;

	report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen);
	if (!report)
		return -ENOMEM;

	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto fail_report;
	}

	while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) {
		memset(report, 0, buflen);

		ret = virtblk_submit_zone_report(vblk, (char *)report,
						 buflen, sector);
		if (ret)
			goto fail_report;

		nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones),
			   nr_zones);
		if (!nz)
			break;

		for (i = 0; i < nz && zone_idx < nr_zones; i++) {
			ret = virtblk_parse_zone(vblk, &report->zones[i],
						 zone_idx, cb, data);
			if (ret)
				goto fail_report;

			sector = virtio64_to_cpu(vblk->vdev,
						 report->zones[i].z_start) +
				 vblk->zone_sectors;
			zone_idx++;
		}
	}

	if (zone_idx > 0)
		ret = zone_idx;
	else
		ret = -EINVAL;
fail_report:
	mutex_unlock(&vblk->vdev_mutex);
	kvfree(report);
	return ret;
}

static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
				     struct queue_limits *lim)
{
	struct virtio_device *vdev = vblk->vdev;
	u32 v, wg;

	dev_dbg(&vdev->dev, "probing host-managed zoned device\n");

	lim->features |= BLK_FEAT_ZONED;

	virtio_cread(vdev, struct virtio_blk_config,
		     zoned.max_open_zones, &v);
	lim->max_open_zones = v;
	dev_dbg(&vdev->dev, "max open zones = %u\n", v);

	virtio_cread(vdev, struct virtio_blk_config,
		     zoned.max_active_zones, &v);
	lim->max_active_zones = v;
	dev_dbg(&vdev->dev, "max active zones = %u\n", v);

	virtio_cread(vdev, struct virtio_blk_config,
		     zoned.write_granularity, &wg);
	if (!wg) {
		dev_warn(&vdev->dev, "zero write granularity reported\n");
		return -ENODEV;
	}
	lim->physical_block_size = wg;
	lim->io_min = wg;

	dev_dbg(&vdev->dev, "write granularity = %u\n", wg);

	/*
	 * virtio ZBD specification doesn't require zones to be a power of
	 * two sectors in size, but the code in this driver expects that.
	 */
	virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors,
		     &vblk->zone_sectors);
	if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) {
		dev_err(&vdev->dev,
			"zoned device with non power of two zone size %u\n",
			vblk->zone_sectors);
		return -ENODEV;
	}
	lim->chunk_sectors = vblk->zone_sectors;
	dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		dev_warn(&vblk->vdev->dev,
			 "ignoring negotiated F_DISCARD for zoned device\n");
		lim->max_hw_discard_sectors = 0;
	}

	virtio_cread(vdev, struct virtio_blk_config,
		     zoned.max_append_sectors, &v);
	if (!v) {
		dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
		return -ENODEV;
	}
	if ((v << SECTOR_SHIFT) < wg) {
		dev_err(&vdev->dev,
			"write granularity %u exceeds max_append_sectors %u limit\n",
			wg, v);
		return -ENODEV;
	}
	lim->max_hw_zone_append_sectors = v;
	dev_dbg(&vdev->dev, "max append sectors = %u\n", v);

	return 0;
}
#else
/*
 * Zoned block device support is not configured in this kernel, host-managed
 * zoned devices can't be supported.
 */
#define virtblk_report_zones NULL
static inline int virtblk_read_zoned_limits(struct virtio_blk *vblk,
					    struct queue_limits *lim)
{
	dev_err(&vblk->vdev->dev,
		"virtio_blk: zoned devices are not supported");
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BLK_DEV_ZONED */

/* return id (s/n) string for *disk to *id_str */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	struct virtblk_req *vbr;
	int err;

	req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	vbr = blk_mq_rq_to_pdu(req);
	vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
	vbr->out_hdr.sector = 0;

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	blk_execute_rq(req, false);
	err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
out:
	blk_mq_free_request(req);
	return err;
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto out;
	}

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

static void virtblk_free_disk(struct gendisk *disk)
{
	struct virtio_blk *vblk = disk->private_data;

	ida_free(&vd_index_ida, vblk->index);

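	/*
	 * The last reference to the disk is gone, so no code path that takes
	 * vdev_mutex can still be running; it is safe to tear the mutex down
	 * and free the per-device structure.
	 */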
	mutex_destroy(&vblk->vdev_mutex);
	kfree(vblk);
}

static const struct block_device_operations virtblk_fops = {
	.owner		= THIS_MODULE,
	.getgeo		= virtblk_getgeo,
	.free_disk	= virtblk_free_disk,
	.report_zones	= virtblk_report_zones,
};

static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}

static ssize_t serial_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	unsigned long long nblocks;
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
		   vblk->disk->disk_name,
		   resize ?
"new size: " : "", 934 nblocks, 935 queue_logical_block_size(q), 936 cap_str_10, 937 cap_str_2); 938 939 set_capacity_and_notify(vblk->disk, capacity); 940 } 941 942 static void virtblk_config_changed_work(struct work_struct *work) 943 { 944 struct virtio_blk *vblk = 945 container_of(work, struct virtio_blk, config_work); 946 947 virtblk_update_capacity(vblk, true); 948 } 949 950 static void virtblk_config_changed(struct virtio_device *vdev) 951 { 952 struct virtio_blk *vblk = vdev->priv; 953 954 queue_work(virtblk_wq, &vblk->config_work); 955 } 956 957 static int init_vq(struct virtio_blk *vblk) 958 { 959 int err; 960 unsigned short i; 961 struct virtqueue_info *vqs_info; 962 struct virtqueue **vqs; 963 unsigned short num_vqs; 964 unsigned short num_poll_vqs; 965 struct virtio_device *vdev = vblk->vdev; 966 struct irq_affinity desc = { 0, }; 967 968 err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ, 969 struct virtio_blk_config, num_queues, 970 &num_vqs); 971 if (err) 972 num_vqs = 1; 973 974 if (!err && !num_vqs) { 975 dev_err(&vdev->dev, "MQ advertised but zero queues reported\n"); 976 return -EINVAL; 977 } 978 979 num_vqs = min_t(unsigned int, 980 min_not_zero(num_request_queues, nr_cpu_ids), 981 num_vqs); 982 983 num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1); 984 985 vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs; 986 vblk->io_queues[HCTX_TYPE_READ] = 0; 987 vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs; 988 989 dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n", 990 vblk->io_queues[HCTX_TYPE_DEFAULT], 991 vblk->io_queues[HCTX_TYPE_READ], 992 vblk->io_queues[HCTX_TYPE_POLL]); 993 994 vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL); 995 if (!vblk->vqs) 996 return -ENOMEM; 997 998 vqs_info = kcalloc(num_vqs, sizeof(*vqs_info), GFP_KERNEL); 999 vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL); 1000 if (!vqs_info || !vqs) { 1001 err = -ENOMEM; 1002 goto out; 1003 } 1004 1005 for (i = 0; i < num_vqs - num_poll_vqs; i++) { 1006 vqs_info[i].callback = virtblk_done; 1007 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%u", i); 1008 vqs_info[i].name = vblk->vqs[i].name; 1009 } 1010 1011 for (; i < num_vqs; i++) { 1012 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%u", i); 1013 vqs_info[i].name = vblk->vqs[i].name; 1014 } 1015 1016 /* Discover virtqueues and write information to configuration. */ 1017 err = virtio_find_vqs(vdev, num_vqs, vqs, vqs_info, &desc); 1018 if (err) 1019 goto out; 1020 1021 for (i = 0; i < num_vqs; i++) { 1022 spin_lock_init(&vblk->vqs[i].lock); 1023 vblk->vqs[i].vq = vqs[i]; 1024 } 1025 vblk->num_vqs = num_vqs; 1026 1027 out: 1028 kfree(vqs); 1029 kfree(vqs_info); 1030 if (err) 1031 kfree(vblk->vqs); 1032 return err; 1033 } 1034 1035 /* 1036 * Legacy naming scheme used for virtio devices. We are stuck with it for 1037 * virtio blk but don't ever use it for any new driver. 
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}

static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	struct queue_limits lim;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	i = sysfs_match_string(virtblk_cache_types, buf);
	if (i < 0)
		return i;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);

	lim = queue_limits_start_update(disk->queue);
	if (virtblk_get_cache_mode(vdev))
		lim.features |= BLK_FEAT_WRITE_CACHE;
	else
		lim.features &= ~BLK_FEAT_WRITE_CACHE;
	i = queue_limits_commit_update_frozen(disk->queue, &lim);
	if (i)
		return i;
	return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_cache_type.attr,
	NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;

	if (a == &dev_attr_cache_type.attr &&
	    !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		return S_IRUGO;

	return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
	.attrs = virtblk_attrs,
	.is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
	&virtblk_attr_group,
	NULL,
};

static void virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;
	int i, qoff;

	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map =
			&set->map[i];

		map->nr_queues = vblk->io_queues[i];
		map->queue_offset = qoff;
		qoff += map->nr_queues;

		if (map->nr_queues == 0)
			continue;

		/*
		 * Regular queues have interrupts and hence CPU affinity is
		 * defined by the core virtio code, but polling queues have
		 * no interrupts so we let the block layer assign CPU affinity.
		 */
		if (i == HCTX_TYPE_POLL)
			blk_mq_map_queues(&set->map[i]);
		else
			blk_mq_map_hw_queues(&set->map[i],
					     &vblk->vdev->dev, 0);
	}
}

static void virtblk_complete_batch(struct io_comp_batch *iob)
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
		virtblk_cleanup_cmd(req);
	}
	blk_mq_end_request_batch(iob);
}

static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;
	int found = 0;

	spin_lock_irqsave(&vq->lock, flags);

	while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
		struct request *req = blk_mq_rq_from_pdu(vbr);

		found++;
		if (!blk_mq_complete_request_remote(req) &&
		    !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
					 virtblk_complete_batch))
			virtblk_request_done(req);
	}

	if (found)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);

	spin_unlock_irqrestore(&vq->lock, flags);

	return found;
}

static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq	= virtio_queue_rq,
	.queue_rqs	= virtio_queue_rqs,
	.commit_rqs	= virtio_commit_rqs,
	.complete	= virtblk_request_done,
	.map_queues	= virtblk_map_queues,
	.poll		= virtblk_poll,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

static int virtblk_read_limits(struct virtio_blk *vblk,
			       struct queue_limits *lim)
{
	struct virtio_device *vdev = vblk->vdev;
	u32 v, max_size, sg_elems, opt_io_size;
	u32 max_discard_segs = 0;
	u32 discard_granularity = 0;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;
	size_t max_dma_size;
	int err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* Prevent integer overflows and honor max vq size */
	sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);

	/* We can handle whatever the host told us to handle. */
	lim->max_segments = sg_elems;

	/* No real sector limit. */
	lim->max_hw_sectors = UINT_MAX;

	max_dma_size = virtio_max_dma_size(vdev);
	max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;

	/* Host can optionally specify maximum segment size and number of
	 * segments.
	 */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		max_size = min(max_size, v);

	lim->max_segment_size = max_size;

	/* Host can optionally specify the block size of the device */
	virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
			     struct virtio_blk_config, blk_size,
			     &lim->logical_block_size);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		lim->physical_block_size =
			lim->logical_block_size * (1 << physical_block_exp);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		lim->alignment_offset =
			lim->logical_block_size * alignment_offset;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		lim->io_min = lim->logical_block_size * min_io_size;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		lim->io_opt = lim->logical_block_size * opt_io_size;

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     discard_sector_alignment, &discard_granularity);

		virtio_cread(vdev, struct virtio_blk_config,
			     max_discard_sectors, &v);
		lim->max_hw_discard_sectors = v ? v : UINT_MAX;

		virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
			     &max_discard_segs);
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     max_write_zeroes_sectors, &v);
		lim->max_write_zeroes_sectors = v ? v : UINT_MAX;
	}

	/* The discard and secure erase limits are combined since the Linux
	 * block layer uses the same limit for both commands.
	 *
	 * If both VIRTIO_BLK_F_SECURE_ERASE and VIRTIO_BLK_F_DISCARD features
	 * are negotiated, we will use the minimum between the limits.
	 *
	 * discard sector alignment is set to the minimum between
	 * discard_sector_alignment and secure_erase_sector_alignment.
	 *
	 * max discard segments is set to the minimum between max_discard_seg
	 * and max_secure_erase_seg.
	 */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {

		virtio_cread(vdev, struct virtio_blk_config,
			     secure_erase_sector_alignment, &v);

		/* secure_erase_sector_alignment should not be zero, the device
		 * should set a valid number of sectors.
		 */
		if (!v) {
			dev_err(&vdev->dev,
				"virtio_blk: secure_erase_sector_alignment can't be 0\n");
			return -EINVAL;
		}

		discard_granularity = min_not_zero(discard_granularity, v);

		virtio_cread(vdev, struct virtio_blk_config,
			     max_secure_erase_sectors, &v);

		/* max_secure_erase_sectors should not be zero, the device
		 * should set a valid number of sectors.
		 */
		if (!v) {
			dev_err(&vdev->dev,
				"virtio_blk: max_secure_erase_sectors can't be 0\n");
			return -EINVAL;
		}

		lim->max_secure_erase_sectors = v;

		virtio_cread(vdev, struct virtio_blk_config,
			     max_secure_erase_seg, &v);

		/* max_secure_erase_seg should not be zero, the device should
		 * set a valid number of segments.
		 */
		if (!v) {
			dev_err(&vdev->dev,
				"virtio_blk: max_secure_erase_seg can't be 0\n");
			return -EINVAL;
		}

		max_discard_segs = min_not_zero(max_discard_segs, v);
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD) ||
	    virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
		/* max_discard_seg and discard_granularity will be 0 only
		 * if max_discard_seg and discard_sector_alignment fields in the virtio
		 * config are 0 and VIRTIO_BLK_F_SECURE_ERASE feature is not negotiated.
		 * In this case, we use default values.
		 */
		if (!max_discard_segs)
			max_discard_segs = sg_elems;

		lim->max_discard_segments =
			min(max_discard_segs, MAX_DISCARD_SEGMENTS);

		if (discard_granularity)
			lim->discard_granularity =
				discard_granularity << SECTOR_SHIFT;
		else
			lim->discard_granularity = lim->logical_block_size;
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
		u8 model;

		virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
		switch (model) {
		case VIRTIO_BLK_Z_NONE:
		case VIRTIO_BLK_Z_HA:
			/* treat host-aware devices as non-zoned */
			return 0;
		case VIRTIO_BLK_Z_HM:
			err = virtblk_read_zoned_limits(vblk, lim);
			if (err)
				return err;
			break;
		default:
			dev_err(&vdev->dev, "unsupported zone model %d\n", model);
			return -EINVAL;
		}
	}

	return 0;
}

static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct queue_limits lim = {
		.features		= BLK_FEAT_ROTATIONAL,
		.logical_block_size	= SECTOR_SIZE,
	};
	int err, index;
	unsigned int queue_depth;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_alloc_range(&vd_index_ida, 0,
			      minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	mutex_init(&vblk->vdev_mutex);

	vblk->vdev = vdev;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		queue_depth = vblk->vqs[0].vq->num_free;
		/* ...
		   but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			queue_depth /= 2;
	} else {
		queue_depth = virtblk_queue_depth;
	}

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;
	vblk->tag_set.nr_maps = 1;
	if (vblk->io_queues[HCTX_TYPE_POLL])
		vblk->tag_set.nr_maps = 3;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_free_vq;

	err = virtblk_read_limits(vblk, &lim);
	if (err)
		goto out_free_tags;

	if (virtblk_get_cache_mode(vdev))
		lim.features |= BLK_FEAT_WRITE_CACHE;

	vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, &lim, vblk);
	if (IS_ERR(vblk->disk)) {
		err = PTR_ERR(vblk->disk);
		goto out_free_tags;
	}

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->minors = 1 << PART_BITS;
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->index = index;

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	virtblk_update_capacity(vblk, false);
	virtio_device_ready(vdev);

	/*
	 * All steps that follow use the VQs therefore they need to be
	 * placed after the virtio_device_ready() call above.
	 */
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    (lim.features & BLK_FEAT_ZONED)) {
		err = blk_revalidate_disk_zones(vblk->disk);
		if (err)
			goto out_cleanup_disk;
	}

	err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
	if (err)
		goto out_cleanup_disk;

	return 0;

out_cleanup_disk:
	put_disk(vblk->disk);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_free(&vd_index_ida, index);
out:
	return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_mq_free_tag_set(&vblk->tag_set);

	mutex_lock(&vblk->vdev_mutex);

	/* Stop all the virtqueues. */
	virtio_reset_device(vdev);

	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
	vblk->vdev = NULL;

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	mutex_unlock(&vblk->vdev_mutex);

	put_disk(vblk->disk);
}

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	struct request_queue *q = vblk->disk->queue;

	/* Ensure no requests in virtqueues before deleting vqs.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue_nowait(q);
	blk_mq_unfreeze_queue(q);

	/* Ensure we don't receive any more interrupts */
	virtio_reset_device(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);
	blk_mq_unquiesce_queue(vblk->disk->queue);

	return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
	VIRTIO_BLK_F_SECURE_ERASE,
};

static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
	VIRTIO_BLK_F_SECURE_ERASE, VIRTIO_BLK_F_ZONED,
};

static struct virtio_driver virtio_blk = {
	.feature_table			= features,
	.feature_table_size		= ARRAY_SIZE(features),
	.feature_table_legacy		= features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
	.driver.name			= KBUILD_MODNAME,
	.id_table			= id_table,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze				= virtblk_freeze,
	.restore			= virtblk_restore,
#endif
};

static int __init virtio_blk_init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

static void __exit virtio_blk_fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
module_init(virtio_blk_init);
module_exit(virtio_blk_fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");