Lines matching defs:vblk

Each match below is a source line number followed by the matching line; together the matches trace how the struct virtio_blk pointer (vblk) is defined and used across the virtio-blk driver.
58 * virtblk_remove() sets vblk->vdev to NULL.
61 * shut down before vblk->vdev is set to NULL and therefore do not need
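The two comment fragments above (source lines 58 and 61) state the locking rule for vblk->vdev: any path that can still run after hot-unplug must take vdev_mutex and re-check the pointer before touching the device. A minimal sketch of that rule, using only the field names visible in this listing (virtblk_example_op is a hypothetical caller, not a driver function):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/virtio.h>

struct virtio_blk {
        /*
         * Must be held by anything that may run after virtblk_remove()
         * sets vblk->vdev to NULL (the rule at source lines 58-61).
         */
        struct mutex vdev_mutex;
        struct virtio_device *vdev;
        /* ... remaining fields elided ... */
};

static int virtblk_example_op(struct virtio_blk *vblk)
{
        int ret = 0;

        mutex_lock(&vblk->vdev_mutex);
        if (!vblk->vdev) {
                /* Device was hot-unplugged; nothing left to talk to. */
                ret = -ENXIO;
                goto out;
        }
        /* vblk->vdev may be dereferenced safely here. */
out:
        mutex_unlock(&vblk->vdev_mutex);
        return ret;
}

The same check-under-mutex shape recurs below in the report_zones (line 674), getgeo (line 837), and remove (line 1567) matches.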
133 struct virtio_blk *vblk = hctx->queue->queuedata;
134 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
338 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
344 req->__sector = virtio64_to_cpu(vblk->vdev,
352 struct virtio_blk *vblk = vq->vdev->priv;
359 spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
362 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
373 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
374 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
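Source lines 352-374 above are the completion side: finished requests are reaped from the virtqueue under the per-queue lock, then any hardware queue that was stopped on a full ring is restarted. A condensed sketch, assuming the usual virtqueue and blk-mq APIs (the real handler also re-enables callbacks in a retry loop):

static void virtblk_done_sketch(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;       /* line 352 */
        int qid = vq->index;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL)
                blk_mq_complete_request(blk_mq_rq_from_pdu(vbr));
        /* Completions freed ring slots, so stopped queues may run again. */
        blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

The poll path matched at lines 1199-1220 drains the ring the same way, minus the interrupt context.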
379 struct virtio_blk *vblk = hctx->queue->queuedata;
380 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
405 struct virtio_blk *vblk,
412 status = virtblk_setup_cmd(vblk->vdev, req, vbr);
429 struct virtio_blk *vblk = hctx->queue->queuedata;
438 status = virtblk_prep_rq(hctx, vblk, req, vbr);
442 spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
443 err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
445 virtqueue_kick(vblk->vqs[qid].vq);
451 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
456 if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
458 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
461 virtqueue_notify(vblk->vqs[qid].vq);
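Source lines 429-461 are the submission side. Two details are worth noting: a full ring stops the hardware queue instead of failing the request, and the potentially expensive virtqueue_notify() happens after the per-queue lock is dropped. A condensed sketch with error handling reduced to the full-ring case:

static blk_status_t virtblk_queue_rq_sketch(struct blk_mq_hw_ctx *hctx,
                                            const struct blk_mq_queue_data *bd)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(bd->rq);
        int qid = hctx->queue_num;
        unsigned long flags;
        bool notify = false;
        int err;

        /* Request setup (virtblk_prep_rq(), line 438) elided. */
        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        err = virtblk_add_req(vblk->vqs[qid].vq, vbr);  /* line 443 */
        if (err) {
                /* Ring full: stop the queue until completions free slots. */
                blk_mq_stop_hw_queue(hctx);
                virtqueue_kick(vblk->vqs[qid].vq);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                return BLK_STS_DEV_RESOURCE;
        }
        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
                notify = true;
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);    /* outside the lock */
        return BLK_STS_OK;
}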
467 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
470 return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
527 static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
531 struct request_queue *q = vblk->disk->queue;
536 get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors));
556 static int virtblk_submit_zone_report(struct virtio_blk *vblk,
560 struct request_queue *q = vblk->disk->queue;
571 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
572 vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);
585 static int virtblk_parse_zone(struct virtio_blk *vblk,
591 zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
592 if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
593 zone.len = vblk->zone_sectors;
595 zone.len = get_capacity(vblk->disk) - zone.start;
596 zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
597 zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);
610 dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n",
644 dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n",
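Source lines 585-644 translate one device-reported zone descriptor into a struct blk_zone. The interesting case is the final zone, which may be shorter than zone_sectors and is clamped to the disk capacity. A sketch of the translation, assuming the uapi struct virtio_blk_zone_descriptor layout; type and condition validation is elided:

static int virtblk_parse_zone_sketch(struct virtio_blk *vblk,
                                     struct virtio_blk_zone_descriptor *entry,
                                     unsigned int idx,
                                     report_zones_cb cb, void *data)
{
        struct blk_zone zone = { };

        zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
        /* The last zone may be a runt; clamp it to the disk capacity. */
        if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
                zone.len = vblk->zone_sectors;
        else
                zone.len = get_capacity(vblk->disk) - zone.start;
        zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
        zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);
        /*
         * Invalid type/condition values are rejected with dev_err(),
         * as at source lines 610 and 644.
         */
        return cb(&zone, idx, data);
}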
660 struct virtio_blk *vblk = disk->private_data;
667 if (WARN_ON_ONCE(!vblk->zone_sectors))
670 report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen);
674 mutex_lock(&vblk->vdev_mutex);
676 if (!vblk->vdev) {
681 while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) {
684 ret = virtblk_submit_zone_report(vblk, (char *)report,
689 nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones),
695 ret = virtblk_parse_zone(vblk, &report->zones[i],
700 sector = virtio64_to_cpu(vblk->vdev,
702 vblk->zone_sectors;
712 mutex_unlock(&vblk->vdev_mutex);
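Source lines 660-712 drive the whole report: one report buffer is reused across device round-trips, and everything runs under vdev_mutex so a concurrent hot-unplug cannot pull vblk->vdev away mid-report. A condensed sketch built from the helpers matched above (buffer-free and error paths are shortened):

static int virtblk_report_zones_sketch(struct gendisk *disk, sector_t sector,
                                       unsigned int nr_zones,
                                       report_zones_cb cb, void *data)
{
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_blk_zone_report *report;
        unsigned int zone_idx = 0, i;
        size_t buflen;
        u64 nz;
        int ret;

        report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen);
        if (!report)
                return -ENOMEM;

        mutex_lock(&vblk->vdev_mutex);
        if (!vblk->vdev) {
                ret = -ENXIO;   /* hot-unplugged underneath us */
                goto out;
        }
        while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) {
                memset(report, 0, buflen);
                ret = virtblk_submit_zone_report(vblk, (char *)report,
                                                 buflen, sector);
                if (ret)
                        goto out;
                nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones),
                           nr_zones);
                if (!nz)
                        break;
                for (i = 0; i < nz && zone_idx < nr_zones; i++) {
                        ret = virtblk_parse_zone(vblk, &report->zones[i],
                                                 zone_idx, cb, data);
                        if (ret)
                                goto out;
                        zone_idx++;
                }
                /* Resume after the last zone returned in this batch. */
                sector = virtio64_to_cpu(vblk->vdev,
                                         report->zones[nz - 1].z_start) +
                         vblk->zone_sectors;
        }
        ret = zone_idx > 0 ? zone_idx : -EINVAL;
out:
        mutex_unlock(&vblk->vdev_mutex);
        kvfree(report);
        return ret;
}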
717 static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
720 struct virtio_device *vdev = vblk->vdev;
753 &vblk->zone_sectors);
754 if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) {
757 vblk->zone_sectors);
760 lim->chunk_sectors = vblk->zone_sectors;
761 dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);
764 dev_warn(&vblk->vdev->dev,
792 static inline int virtblk_read_zoned_limits(struct virtio_blk *vblk,
795 dev_err(&vblk->vdev->dev,
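Source lines 717-764 read the zoned limits at probe time; the key invariant is that zone_sectors must be a nonzero power of two before it becomes the queue's chunk_sectors, and the stub at lines 792-795 refuses a zoned device when CONFIG_BLK_DEV_ZONED is off. A fragment-style sketch of the validation (error message wording is illustrative):

/* Inside virtblk_read_zoned_limits(), condensed from lines 753-761. */
virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors,
             &vblk->zone_sectors);
if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) {
        dev_err(&vdev->dev, "invalid zone size %u\n", vblk->zone_sectors);
        return -ENODEV;
}
lim->chunk_sectors = vblk->zone_sectors;
dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);

The power-of-two requirement lets a zone index be derived by shifting, which is what the ilog2(vblk->zone_sectors) at source line 536 relies on.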
805 struct virtio_blk *vblk = disk->private_data;
806 struct request_queue *q = vblk->disk->queue;
817 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
834 struct virtio_blk *vblk = bd->bd_disk->private_data;
837 mutex_lock(&vblk->vdev_mutex);
839 if (!vblk->vdev) {
845 if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
846 virtio_cread(vblk->vdev, struct virtio_blk_config,
848 virtio_cread(vblk->vdev, struct virtio_blk_config,
850 virtio_cread(vblk->vdev, struct virtio_blk_config,
859 mutex_unlock(&vblk->vdev_mutex);
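Source lines 834-859 are the getgeo() callback, and they apply the vdev_mutex rule literally: take the mutex, re-check vblk->vdev, then read geometry from virtio config space only if the device advertises VIRTIO_BLK_F_GEOMETRY. Condensed sketch; the fallback geometry values are illustrative:

static int virtblk_getgeo_sketch(struct block_device *bd,
                                 struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;
        int ret = 0;

        mutex_lock(&vblk->vdev_mutex);
        if (!vblk->vdev) {
                ret = -ENXIO;
                goto out;
        }
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                /* The device supplies CHS values via config space. */
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        } else {
                /* Fabricate a plausible geometry from the capacity. */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
out:
        mutex_unlock(&vblk->vdev_mutex);
        return ret;
}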
865 struct virtio_blk *vblk = disk->private_data;
867 ida_free(&vd_index_ida, vblk->index);
868 mutex_destroy(&vblk->vdev_mutex);
869 kfree(vblk);
912 static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
914 struct virtio_device *vdev = vblk->vdev;
915 struct request_queue *q = vblk->disk->queue;
932 vblk->disk->disk_name,
939 set_capacity_and_notify(vblk->disk, capacity);
944 struct virtio_blk *vblk =
947 virtblk_update_capacity(vblk, true);
952 struct virtio_blk *vblk = vdev->priv;
954 queue_work(virtblk_wq, &vblk->config_work);
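Source lines 912-954 handle capacity changes. The config-change callback (line 952) runs in a context that must not block, so it only queues work; the worker then reads the new capacity and calls set_capacity_and_notify() (line 939). Sketch of the handoff, with virtblk_wq as matched at line 954:

static void virtblk_config_changed_work_sketch(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);

        virtblk_update_capacity(vblk, true);    /* resize = true, line 947 */
}

static void virtblk_config_changed_sketch(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Cannot block here: defer the config-space read to a workqueue. */
        queue_work(virtblk_wq, &vblk->config_work);
}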
957 static int init_vq(struct virtio_blk *vblk)
965 struct virtio_device *vdev = vblk->vdev;
985 vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;
986 vblk->io_queues[HCTX_TYPE_READ] = 0;
987 vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;
990 vblk->io_queues[HCTX_TYPE_DEFAULT],
991 vblk->io_queues[HCTX_TYPE_READ],
992 vblk->io_queues[HCTX_TYPE_POLL]);
994 vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
995 if (!vblk->vqs)
1007 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%u", i);
1008 vqs_info[i].name = vblk->vqs[i].name;
1012 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%u", i);
1013 vqs_info[i].name = vblk->vqs[i].name;
1022 spin_lock_init(&vblk->vqs[i].lock);
1023 vblk->vqs[i].vq = vqs[i];
1025 vblk->num_vqs = num_vqs;
1031 kfree(vblk->vqs);
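Source lines 957-1031 are init_vq(): the virtqueues are split between default and poll sets (lines 985-992), the per-vq array is allocated, each vq gets a "req.N" or "req_poll.N" name, and every vq gets its own spinlock, which is what the per-qid locking in the I/O paths above depends on. Condensed fragment; virtqueue discovery itself is elided:

/* Inside init_vq(), condensed from lines 994-1025. */
vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
        return -ENOMEM;

for (i = 0; i < num_vqs - num_poll_vqs; i++)
        snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%u", i);
for (; i < num_vqs; i++)
        snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%u", i);

/* ... after virtio_find_vqs() has populated vqs[] ... */
for (i = 0; i < num_vqs; i++) {
        spin_lock_init(&vblk->vqs[i].lock);
        vblk->vqs[i].vq = vqs[i];
}
vblk->num_vqs = num_vqs;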
1091 struct virtio_blk *vblk = disk->private_data;
1092 struct virtio_device *vdev = vblk->vdev;
1096 BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
1118 struct virtio_blk *vblk = disk->private_data;
1119 u8 writeback = virtblk_get_cache_mode(vblk->vdev);
1138 struct virtio_blk *vblk = disk->private_data;
1139 struct virtio_device *vdev = vblk->vdev;
1160 struct virtio_blk *vblk = set->driver_data;
1166 map->nr_queues = vblk->io_queues[i];
1182 &vblk->vdev->dev, 0);
1199 struct virtio_blk *vblk = hctx->queue->queuedata;
1220 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
1239 static int virtblk_read_limits(struct virtio_blk *vblk,
1242 struct virtio_device *vdev = vblk->vdev;
1418 err = virtblk_read_zoned_limits(vblk, lim);
1433 struct virtio_blk *vblk;
1453 vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
1454 if (!vblk) {
1459 mutex_init(&vblk->vdev_mutex);
1461 vblk->vdev = vdev;
1463 INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
1465 err = init_vq(vblk);
1471 queue_depth = vblk->vqs[0].vq->num_free;
1479 memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
1480 vblk->tag_set.ops = &virtio_mq_ops;
1481 vblk->tag_set.queue_depth = queue_depth;
1482 vblk->tag_set.numa_node = NUMA_NO_NODE;
1483 vblk->tag_set.cmd_size =
1486 vblk->tag_set.driver_data = vblk;
1487 vblk->tag_set.nr_hw_queues = vblk->num_vqs;
1488 vblk->tag_set.nr_maps = 1;
1489 if (vblk->io_queues[HCTX_TYPE_POLL])
1490 vblk->tag_set.nr_maps = 3;
1492 err = blk_mq_alloc_tag_set(&vblk->tag_set);
1496 err = virtblk_read_limits(vblk, &lim);
1503 vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, &lim, vblk);
1504 if (IS_ERR(vblk->disk)) {
1505 err = PTR_ERR(vblk->disk);
1509 virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
1511 vblk->disk->major = major;
1512 vblk->disk->first_minor = index_to_minor(index);
1513 vblk->disk->minors = 1 << PART_BITS;
1514 vblk->disk->private_data = vblk;
1515 vblk->disk->fops = &virtblk_fops;
1516 vblk->index = index;
1520 set_disk_ro(vblk->disk, 1);
1522 virtblk_update_capacity(vblk, false);
1531 err = blk_revalidate_disk_zones(vblk->disk);
1536 err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
1543 put_disk(vblk->disk);
1545 blk_mq_free_tag_set(&vblk->tag_set);
1548 kfree(vblk->vqs);
1550 kfree(vblk);
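Source lines 1433-1550 are the probe path. The ordering is the point: vblk and vdev_mutex come first, then the virtqueues (whose first ring sizes the tag set, line 1471), then the tag set, limits, and disk, and device_add_disk() comes last so a visible disk is always fully wired up; failures unwind in reverse (lines 1543-1550). Heavily condensed sketch with the error checks elided:

/* Probe ordering, condensed from lines 1433-1550. */
vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
mutex_init(&vblk->vdev_mutex);
vblk->vdev = vdev;
INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

err = init_vq(vblk);                            /* virtqueues first */
queue_depth = vblk->vqs[0].vq->num_free;        /* size tags from the ring */

vblk->tag_set.queue_depth = queue_depth;
vblk->tag_set.nr_hw_queues = vblk->num_vqs;
vblk->tag_set.nr_maps = vblk->io_queues[HCTX_TYPE_POLL] ? 3 : 1;
err = blk_mq_alloc_tag_set(&vblk->tag_set);

err = virtblk_read_limits(vblk, &lim);          /* before the disk exists */
vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, &lim, vblk);

virtblk_update_capacity(vblk, false);           /* resize = false at probe */
err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
/* Unwind on failure: put_disk -> free tag set -> del_vqs + kfree(vqs) -> kfree(vblk). */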
1559 struct virtio_blk *vblk = vdev->priv;
1562 flush_work(&vblk->config_work);
1564 del_gendisk(vblk->disk);
1565 blk_mq_free_tag_set(&vblk->tag_set);
1567 mutex_lock(&vblk->vdev_mutex);
1572 /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
1573 vblk->vdev = NULL;
1576 kfree(vblk->vqs);
1578 mutex_unlock(&vblk->vdev_mutex);
1580 put_disk(vblk->disk);
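Source lines 1559-1580 are the teardown, and the ordering enforces the rule from lines 58-61: everything that could touch the device is stopped before vblk->vdev is cleared under vdev_mutex, and the struct itself is only freed once the last disk reference drops (virtblk_free_disk(), lines 865-869). Condensed sketch:

static void virtblk_remove_sketch(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Make sure no config-change work is still running. */
        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);
        blk_mq_free_tag_set(&vblk->tag_set);

        mutex_lock(&vblk->vdev_mutex);
        virtio_reset_device(vdev);      /* stop all virtqueue processing */
        /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
        vblk->vdev = NULL;
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
        mutex_unlock(&vblk->vdev_mutex);

        put_disk(vblk->disk);   /* last ref frees vblk via free_disk */
}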
1585 struct virtio_blk *vblk = vdev->priv;
1586 struct request_queue *q = vblk->disk->queue;
1598 flush_work(&vblk->config_work);
1601 kfree(vblk->vqs);
1608 struct virtio_blk *vblk = vdev->priv;
1616 blk_mq_unquiesce_queue(vblk->disk->queue);
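Source lines 1585-1616 close the listing with suspend/resume: freeze quiesces the queue, flushes the config work, and frees the virtqueues; restore rebuilds them via init_vq() and unquiesces. A rough sketch, hedged because only fragments of these functions appear in the matches:

static int virtblk_freeze_sketch(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        blk_mq_quiesce_queue(vblk->disk->queue);
        virtio_reset_device(vdev);              /* no more vq interrupts */
        flush_work(&vblk->config_work);         /* line 1598 */
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);                       /* line 1601 */
        return 0;
}

static int virtblk_restore_sketch(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret = init_vq(vblk);

        if (ret)
                return ret;
        virtio_device_ready(vdev);
        blk_mq_unquiesce_queue(vblk->disk->queue);      /* line 1616 */
        return 0;
}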