// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - modern (virtio 1.0) device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include <linux/delay.h>
#include <linux/virtio_pci_admin.h>
#define VIRTIO_PCI_NO_LEGACY
#define VIRTIO_RING_NO_LEGACY
#include "virtio_pci_common.h"

#define VIRTIO_AVQ_SGS_MAX	4

static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_get_features(&vp_dev->mdev);
}

static int vp_avq_index(struct virtio_device *vdev, u16 *index, u16 *num)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	*num = 0;
	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return 0;

	*num = vp_modern_avq_num(&vp_dev->mdev);
	if (!(*num))
		return -EINVAL;
	*index = vp_modern_avq_index(&vp_dev->mdev);
	return 0;
}

void vp_modern_avq_done(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
	struct virtio_admin_cmd *cmd;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&admin_vq->lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((cmd = virtqueue_get_buf(vq, &len))) {
			cmd->result_sg_size = len;
			complete(&cmd->completion);
		}
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&admin_vq->lock, flags);
}

static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
				    u16 opcode,
				    struct scatterlist **sgs,
				    unsigned int out_num,
				    unsigned int in_num,
				    struct virtio_admin_cmd *cmd)
{
	struct virtqueue *vq;
	unsigned long flags;
	int ret;

	vq = admin_vq->info->vq;
	if (!vq)
		return -EIO;

	if (opcode != VIRTIO_ADMIN_CMD_LIST_QUERY &&
	    opcode != VIRTIO_ADMIN_CMD_LIST_USE &&
	    !((1ULL << opcode) & admin_vq->supported_cmds))
		return -EOPNOTSUPP;

	init_completion(&cmd->completion);

again:
	if (virtqueue_is_broken(vq))
		return -EIO;

	spin_lock_irqsave(&admin_vq->lock, flags);
	ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			spin_unlock_irqrestore(&admin_vq->lock, flags);
			cpu_relax();
			goto again;
		}
		goto unlock_err;
	}
	if (!virtqueue_kick(vq))
		goto unlock_err;
	spin_unlock_irqrestore(&admin_vq->lock, flags);

	wait_for_completion(&cmd->completion);

	return cmd->ret;

unlock_err:
	spin_unlock_irqrestore(&admin_vq->lock, flags);
	return -EIO;
}

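/*
 * vp_modern_admin_cmd_exec - Issue an admin command and wait for completion
 * @vdev: PF virtio device owning the admin virtqueue
 * @cmd: Admin command; on success cmd->result_sg_size holds the used length
 *       the device reported for the command
 *
 * Builds the descriptor chain (command header, optional data, status,
 * optional result), submits it to the admin virtqueue and sleeps until
 * vp_modern_avq_done() completes the command. Returns 0 on success, or a
 * negative error if submission failed or the device returned a status
 * other than VIRTIO_ADMIN_STATUS_OK.
 */
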
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
			     struct virtio_admin_cmd *cmd)
{
	struct scatterlist *sgs[VIRTIO_AVQ_SGS_MAX], hdr, stat;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_admin_cmd_status *va_status;
	unsigned int out_num = 0, in_num = 0;
	struct virtio_admin_cmd_hdr *va_hdr;
	u16 status;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return -EOPNOTSUPP;

	va_status = kzalloc(sizeof(*va_status), GFP_KERNEL);
	if (!va_status)
		return -ENOMEM;

	va_hdr = kzalloc(sizeof(*va_hdr), GFP_KERNEL);
	if (!va_hdr) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	va_hdr->opcode = cmd->opcode;
	va_hdr->group_type = cmd->group_type;
	va_hdr->group_member_id = cmd->group_member_id;

	/* Add header */
	sg_init_one(&hdr, va_hdr, sizeof(*va_hdr));
	sgs[out_num] = &hdr;
	out_num++;

	if (cmd->data_sg) {
		sgs[out_num] = cmd->data_sg;
		out_num++;
	}

	/* Add return status */
	sg_init_one(&stat, va_status, sizeof(*va_status));
	sgs[out_num + in_num] = &stat;
	in_num++;

	if (cmd->result_sg) {
		sgs[out_num + in_num] = cmd->result_sg;
		in_num++;
	}

	ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq,
				       le16_to_cpu(cmd->opcode),
				       sgs, out_num, in_num, cmd);
	if (ret) {
		dev_err(&vdev->dev,
			"Failed to execute command on admin vq: %d\n", ret);
		goto err_cmd_exec;
	}

	status = le16_to_cpu(va_status->status);
	if (status != VIRTIO_ADMIN_STATUS_OK) {
		dev_err(&vdev->dev,
			"admin command error: status(%#x) qualifier(%#x)\n",
			status, le16_to_cpu(va_status->status_qualifier));
		ret = -status;
	}

err_cmd_exec:
	kfree(va_hdr);
err_alloc:
	kfree(va_status);
	return ret;
}

static void virtio_pci_admin_cmd_list_init(struct virtio_device *virtio_dev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
	struct virtio_admin_cmd cmd = {};
	struct scatterlist result_sg;
	struct scatterlist data_sg;
	__le64 *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	sg_init_one(&result_sg, data, sizeof(*data));
	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_QUERY);
	cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
	cmd.result_sg = &result_sg;

	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
	if (ret)
		goto end;

	*data &= cpu_to_le64(VIRTIO_ADMIN_CMD_BITMAP);
	sg_init_one(&data_sg, data, sizeof(*data));
	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_USE);
	cmd.data_sg = &data_sg;
	cmd.result_sg = NULL;

	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
	if (ret)
		goto end;

	vp_dev->admin_vq.supported_cmds = le64_to_cpu(*data);
end:
	kfree(data);
}

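/*
 * Negotiate the VIRTIO_DEV_PARTS_CAP capability with the device: query the
 * device's GET/SET resource object limits, clamp both to their common
 * minimum, and report the clamped values back via
 * VIRTIO_ADMIN_CMD_DRIVER_CAP_SET so that device and driver agree on how
 * many device parts objects may exist at a time.
 */
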
static void
virtio_pci_admin_cmd_dev_parts_objects_enable(struct virtio_device *virtio_dev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
	struct virtio_admin_cmd_cap_get_data *get_data;
	struct virtio_admin_cmd_cap_set_data *set_data;
	struct virtio_dev_parts_cap *result;
	struct virtio_admin_cmd cmd = {};
	struct scatterlist result_sg;
	struct scatterlist data_sg;
	u8 resource_objects_limit;
	u16 set_data_size;
	int ret;

	get_data = kzalloc(sizeof(*get_data), GFP_KERNEL);
	if (!get_data)
		return;

	result = kzalloc(sizeof(*result), GFP_KERNEL);
	if (!result)
		goto end;

	get_data->id = cpu_to_le16(VIRTIO_DEV_PARTS_CAP);
	sg_init_one(&data_sg, get_data, sizeof(*get_data));
	sg_init_one(&result_sg, result, sizeof(*result));
	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET);
	cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
	cmd.data_sg = &data_sg;
	cmd.result_sg = &result_sg;
	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
	if (ret)
		goto err_get;

	set_data_size = sizeof(*set_data) + sizeof(*result);
	set_data = kzalloc(set_data_size, GFP_KERNEL);
	if (!set_data)
		goto err_get;

	set_data->id = cpu_to_le16(VIRTIO_DEV_PARTS_CAP);

	/* Set the limit to the minimum value between the GET and SET values
	 * supported by the device. Since the obj_id for VIRTIO_DEV_PARTS_CAP
	 * is a globally unique value per PF, there is no possibility of
	 * overlap between GET and SET operations.
	 */
	resource_objects_limit = min(result->get_parts_resource_objects_limit,
				     result->set_parts_resource_objects_limit);
	result->get_parts_resource_objects_limit = resource_objects_limit;
	result->set_parts_resource_objects_limit = resource_objects_limit;
	memcpy(set_data->cap_specific_data, result, sizeof(*result));
	sg_init_one(&data_sg, set_data, set_data_size);
	cmd.data_sg = &data_sg;
	cmd.result_sg = NULL;
	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DRIVER_CAP_SET);
	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
	if (ret)
		goto err_set;

	/* Initialize the IDA used to manage the dev parts objects */
	ida_init(&vp_dev->admin_vq.dev_parts_ida);
	vp_dev->admin_vq.max_dev_parts_objects = resource_objects_limit;

err_set:
	kfree(set_data);
err_get:
	kfree(result);
end:
	kfree(get_data);
}

static void virtio_pci_admin_cmd_cap_init(struct virtio_device *virtio_dev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
	struct virtio_admin_cmd_query_cap_id_result *data;
	struct virtio_admin_cmd cmd = {};
	struct scatterlist result_sg;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	sg_init_one(&result_sg, data, sizeof(*data));
	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY);
	cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
	cmd.result_sg = &result_sg;

	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
	if (ret)
		goto end;

	/* Max number of caps fits into a single u64 */
	BUILD_BUG_ON(sizeof(data->supported_caps) > sizeof(u64));

	vp_dev->admin_vq.supported_caps = le64_to_cpu(data->supported_caps[0]);

	if (!(vp_dev->admin_vq.supported_caps & (1 << VIRTIO_DEV_PARTS_CAP)))
		goto end;

	virtio_pci_admin_cmd_dev_parts_objects_enable(virtio_dev);
end:
	kfree(data);
}

static void vp_modern_avq_activate(struct virtio_device *vdev)
{
	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return;

	virtio_pci_admin_cmd_list_init(vdev);
	virtio_pci_admin_cmd_cap_init(vdev);
}

static void vp_modern_avq_cleanup(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_admin_cmd *cmd;
	struct virtqueue *vq;

	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return;

	vq = vp_dev->admin_vq.info->vq;
	if (!vq)
		return;

	while ((cmd = virtqueue_detach_unused_buf(vq))) {
		cmd->ret = -EIO;
		complete(&cmd->completion);
	}
}

static void vp_transport_features(struct virtio_device *vdev, u64 features)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
	    pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);

	if (features & BIT_ULL(VIRTIO_F_RING_RESET))
		__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);

	if (features & BIT_ULL(VIRTIO_F_ADMIN_VQ))
		__virtio_set_bit(vdev, VIRTIO_F_ADMIN_VQ);
}

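/*
 * Transport features that add fields to struct virtio_pci_modern_common_cfg
 * (queue_notify_data, queue_reset, admin_queue_num) may only be accepted if
 * the device's common config capability is actually large enough to contain
 * the corresponding field; otherwise accesses would fall beyond the mapped
 * region. The helpers below enforce this at feature finalization time.
 */
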
static int __vp_check_common_size_one_feature(struct virtio_device *vdev, u32 fbit,
					      u32 offset, const char *fname)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!__virtio_test_bit(vdev, fbit))
		return 0;

	if (likely(vp_dev->mdev.common_len >= offset))
		return 0;

	dev_err(&vdev->dev,
		"virtio: common cfg size(%zu) does not match the feature %s\n",
		vp_dev->mdev.common_len, fname);

	return -EINVAL;
}

#define vp_check_common_size_one_feature(vdev, fbit, field) \
	__vp_check_common_size_one_feature(vdev, fbit, \
		offsetofend(struct virtio_pci_modern_common_cfg, field), #fbit)

static int vp_check_common_size(struct virtio_device *vdev)
{
	if (vp_check_common_size_one_feature(vdev, VIRTIO_F_NOTIF_CONFIG_DATA, queue_notify_data))
		return -EINVAL;

	if (vp_check_common_size_one_feature(vdev, VIRTIO_F_RING_RESET, queue_reset))
		return -EINVAL;

	if (vp_check_common_size_one_feature(vdev, VIRTIO_F_ADMIN_VQ, admin_queue_num))
		return -EINVAL;

	return 0;
}

/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u64 features = vdev->features;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Give virtio_pci a chance to accept features. */
	vp_transport_features(vdev, features);

	if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses modern interface "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	if (vp_check_common_size(vdev))
		return -EINVAL;

	vp_modern_set_features(&vp_dev->mdev, vdev->features);

	return 0;
}

/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned int offset,
		   void *buf, unsigned int len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	void __iomem *device = mdev->device;
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > mdev->device_len);

	switch (len) {
	case 1:
		b = ioread8(device + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(ioread16(device + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(ioread32(device + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		l = cpu_to_le32(ioread32(device + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(device + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

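/*
 * Note on 64-bit fields: the modern transport has no atomic 64-bit config
 * accessors, so 8-byte values are transferred as two 32-bit operations
 * (low dword at @offset first, then the high dword). The virtio core
 * guards against torn multi-access reads by re-checking the config
 * generation (vp_generation()) around them.
 */
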
/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned int offset,
		   const void *buf, unsigned int len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	void __iomem *device = mdev->device;
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > mdev->device_len);

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		iowrite8(b, device + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		iowrite16(le16_to_cpu(w), device + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

static u32 vp_generation(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_generation(&vp_dev->mdev);
}

/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_get_status(&vp_dev->mdev);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	vp_modern_set_status(&vp_dev->mdev, status);
	if (status & VIRTIO_CONFIG_S_DRIVER_OK)
		vp_modern_avq_activate(vdev);
}

static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	/* 0 status means a reset. */
	vp_modern_set_status(mdev, 0);
	/* After writing 0 to device_status, the driver MUST wait for a read of
	 * device_status to return 0 before reinitializing the device.
	 * This will flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any.
	 */
	while (vp_modern_get_status(mdev))
		msleep(1);

	vp_modern_avq_cleanup(vdev);

	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}

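/*
 * Program a virtqueue into the device's common config: ring size, the
 * descriptor/avail/used ring addresses and, if requested, the MSI-X vector.
 * Used both at initial setup and when re-enabling a queue after a ring
 * reset. Returns -EBUSY if the device refuses the requested MSI-X vector.
 */
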
static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	unsigned long index;

	index = vq->index;

	/* activate the queue */
	vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
	vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
				virtqueue_get_avail_addr(vq),
				virtqueue_get_used_addr(vq));

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR)
			return -EBUSY;
	}

	return 0;
}

static int vp_modern_disable_vq_and_reset(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct virtio_pci_vq_info *info;
	unsigned long flags;

	if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
		return -ENOENT;

	vp_modern_set_queue_reset(mdev, vq->index);

	info = vp_dev->vqs[vq->index];

	/* delete vq from irq handler */
	spin_lock_irqsave(&vp_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	INIT_LIST_HEAD(&info->node);

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	__virtqueue_break(vq);
#endif

	/* For the case where vq has an exclusive irq, call synchronize_irq() to
	 * wait for completion.
	 *
	 * note: We can't use disable_irq() since it conflicts with the affinity
	 * managed IRQ that is used by some drivers.
	 */
	if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));

	vq->reset = true;

	return 0;
}

static int vp_modern_enable_vq_after_reset(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct virtio_pci_vq_info *info;
	unsigned long flags, index;
	int err;

	if (!vq->reset)
		return -EBUSY;

	index = vq->index;
	info = vp_dev->vqs[index];

	if (vp_modern_get_queue_reset(mdev, index))
		return -EBUSY;

	if (vp_modern_get_queue_enable(mdev, index))
		return -EBUSY;

	err = vp_active_vq(vq, info->msix_vector);
	if (err)
		return err;

	if (vq->callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	__virtqueue_unbreak(vq);
#endif

	vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
	vq->reset = false;

	return 0;
}

static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	return vp_modern_config_vector(&vp_dev->mdev, vector);
}

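/*
 * When VIRTIO_F_NOTIFICATION_DATA is negotiated, a kick carries extra state
 * in a single 32-bit write built by vring_notification_data(): the vq index
 * in the low 16 bits and ring position information (next avail index for
 * split rings; offset and wrap counter for packed rings) in the high bits.
 */
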
static bool vp_notify_with_data(struct virtqueue *vq)
{
	u32 data = vring_notification_data(vq);

	iowrite32(data, (void __iomem *)vq->priv);

	return true;
}

static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned int index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  bool ctx,
				  u16 msix_vec)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	bool (*notify)(struct virtqueue *vq);
	struct virtqueue *vq;
	bool is_avq;
	u16 num;
	int err;

	if (__virtio_test_bit(&vp_dev->vdev, VIRTIO_F_NOTIFICATION_DATA))
		notify = vp_notify_with_data;
	else
		notify = vp_notify;

	is_avq = vp_is_avq(&vp_dev->vdev, index);
	if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
		return ERR_PTR(-EINVAL);

	num = vp_modern_get_queue_size(mdev, index);
	/* Check if queue is either not available or already active. */
	if (!num || vp_modern_get_queue_enable(mdev, index))
		return ERR_PTR(-ENOENT);

	info->msix_vector = msix_vec;

	/* create the vring */
	vq = vring_create_virtqueue(index, num,
				    SMP_CACHE_BYTES, &vp_dev->vdev,
				    true, true, ctx,
				    notify, callback, name);
	if (!vq)
		return ERR_PTR(-ENOMEM);

	vq->num_max = num;

	err = vp_active_vq(vq, msix_vec);
	if (err)
		goto err;

	vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL);
	if (!vq->priv) {
		err = -ENOMEM;
		goto err;
	}

	return vq;

err:
	vring_del_virtqueue(vq);
	return ERR_PTR(err);
}

static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
			      struct virtqueue *vqs[],
			      struct virtqueue_info vqs_info[],
			      struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq;
	int rc = vp_find_vqs(vdev, nvqs, vqs, vqs_info, desc);

	if (rc)
		return rc;

	/* Select and activate all queues. Has to be done last: once we do
	 * this, there's no way to go back except reset.
	 */
	list_for_each_entry(vq, &vdev->vqs, list)
		vp_modern_set_queue_enable(&vp_dev->mdev, vq->index, true);

	return 0;
}

static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	if (vp_dev->msix_enabled)
		vp_modern_queue_vector(mdev, vq->index,
				       VIRTIO_MSI_NO_VECTOR);

	if (!mdev->notify_base)
		pci_iounmap(mdev->pci_dev, (void __force __iomem *)vq->priv);

	vring_del_virtqueue(vq);
}

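/*
 * Scan the PCI vendor capability list for a VIRTIO_PCI_CAP_SHARED_MEMORY_CFG
 * entry matching @required_id. Shared memory capabilities use struct
 * virtio_pci_cap64, which extends struct virtio_pci_cap with offset_hi and
 * length_hi words so that regions can exceed 4GB. Returns the capability's
 * config space position, or 0 if none is found.
 */
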
static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
				   u8 *bar, u64 *offset, u64 *len)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, cap_len, id, res_bar;
		u32 tmp32;
		u64 res_offset, res_length;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type), &type);
		if (type != VIRTIO_PCI_CAP_SHARED_MEMORY_CFG)
			continue;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cap_len), &cap_len);
		if (cap_len != sizeof(struct virtio_pci_cap64)) {
			dev_err(&dev->dev, "%s: shm cap with bad size offset:"
				" %d size: %d\n", __func__, pos, cap_len);
			continue;
		}

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 id), &id);
		if (id != required_id)
			continue;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar), &res_bar);
		if (res_bar >= PCI_STD_NUM_BARS)
			continue;

		/* Type and ID match, and the BAR value isn't reserved.
		 * Looks good.
		 */

		/* Read the lower 32bit of length and offset */
		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
							  offset), &tmp32);
		res_offset = tmp32;
		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
							  length), &tmp32);
		res_length = tmp32;

		/* and now the top half */
		pci_read_config_dword(dev,
				      pos + offsetof(struct virtio_pci_cap64,
						     offset_hi), &tmp32);
		res_offset |= ((u64)tmp32) << 32;
		pci_read_config_dword(dev,
				      pos + offsetof(struct virtio_pci_cap64,
						     length_hi), &tmp32);
		res_length |= ((u64)tmp32) << 32;

		*bar = res_bar;
		*offset = res_offset;
		*len = res_length;

		return pos;
	}
	return 0;
}

static bool vp_get_shm_region(struct virtio_device *vdev,
			      struct virtio_shm_region *region, u8 id)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	u8 bar;
	u64 offset, len;
	phys_addr_t phys_addr;
	size_t bar_len;

	if (!virtio_pci_find_shm_cap(pci_dev, id, &bar, &offset, &len))
		return false;

	phys_addr = pci_resource_start(pci_dev, bar);
	bar_len = pci_resource_len(pci_dev, bar);

	if ((offset + len) < offset) {
		dev_err(&pci_dev->dev, "%s: cap offset+len overflow detected\n",
			__func__);
		return false;
	}

	if (offset + len > bar_len) {
		dev_err(&pci_dev->dev, "%s: bar shorter than cap offset+len\n",
			__func__);
		return false;
	}

	region->len = len;
	region->addr = (u64) phys_addr + offset;

	return true;
}

/*
 * virtio_pci_admin_has_dev_parts - Checks whether the device parts
 * functionality is supported
 * @pdev: VF pci_dev
 *
 * Returns true if the device parts functionality is supported.
 */
bool virtio_pci_admin_has_dev_parts(struct pci_dev *pdev)
{
	struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
	struct virtio_pci_device *vp_dev;

	if (!virtio_dev)
		return false;

	if (!virtio_has_feature(virtio_dev, VIRTIO_F_ADMIN_VQ))
		return false;

	vp_dev = to_vp_device(virtio_dev);

	if ((vp_dev->admin_vq.supported_cmds & VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP) !=
	    VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP)
		return false;

	return vp_dev->admin_vq.max_dev_parts_objects;
}
EXPORT_SYMBOL_GPL(virtio_pci_admin_has_dev_parts);

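/*
 * For the SR-IOV group type, the virtio spec numbers group members from 1
 * to the number of VFs, while pci_iov_vf_id() returns a 0-based VF index;
 * hence the "vf_id + 1" used as group_member_id in the commands below.
 */
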
/*
 * virtio_pci_admin_mode_set - Sets the mode of a member device
 * @pdev: VF pci_dev
 * @flags: device mode flags
 *
 * Note: caller must serialize access for the given device.
 * Returns 0 on success, or negative on failure.
 */
int virtio_pci_admin_mode_set(struct pci_dev *pdev, u8 flags)
{
	struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
	struct virtio_admin_cmd_dev_mode_set_data *data;
	struct virtio_admin_cmd cmd = {};
	struct scatterlist data_sg;
	int vf_id;
	int ret;

	if (!virtio_dev)
		return -ENODEV;

	vf_id = pci_iov_vf_id(pdev);
	if (vf_id < 0)
		return vf_id;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->flags = flags;
	sg_init_one(&data_sg, data, sizeof(*data));
	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_MODE_SET);
	cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
	cmd.group_member_id = cpu_to_le64(vf_id + 1);
	cmd.data_sg = &data_sg;
	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);

	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(virtio_pci_admin_mode_set);

/*
 * virtio_pci_admin_obj_create - Creates an object of a given type and
 * operation type, bounded by the maximum number of such objects the
 * device supports
 * @pdev: VF pci_dev
 * @obj_type: Object type
 * @operation_type: Operation type
 * @obj_id: Output unique object id
 *
 * Note: caller must serialize access for the given device.
 * Returns 0 on success, or negative on failure.
 */
int virtio_pci_admin_obj_create(struct pci_dev *pdev, u16 obj_type, u8 operation_type,
				u32 *obj_id)
{
	struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
	u16 data_size = sizeof(struct virtio_admin_cmd_resource_obj_create_data);
	struct virtio_admin_cmd_resource_obj_create_data *obj_create_data;
	struct virtio_resource_obj_dev_parts obj_dev_parts = {};
	struct virtio_pci_admin_vq *avq;
	struct virtio_admin_cmd cmd = {};
	struct scatterlist data_sg;
	void *data;
	int id = -1;
	int vf_id;
	int ret;

	if (!virtio_dev)
		return -ENODEV;

	vf_id = pci_iov_vf_id(pdev);
	if (vf_id < 0)
		return vf_id;

	if (obj_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS)
		return -EOPNOTSUPP;

	if (operation_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET &&
	    operation_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_SET)
		return -EINVAL;

	avq = &to_vp_device(virtio_dev)->admin_vq;
	if (!avq->max_dev_parts_objects)
		return -EOPNOTSUPP;

	id = ida_alloc_range(&avq->dev_parts_ida, 0,
			     avq->max_dev_parts_objects - 1, GFP_KERNEL);
	if (id < 0)
		return id;

	*obj_id = id;
	data_size += sizeof(obj_dev_parts);
	data = kzalloc(data_size, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto end;
	}

	obj_create_data = data;
	obj_create_data->hdr.type = cpu_to_le16(obj_type);
	obj_create_data->hdr.id = cpu_to_le32(*obj_id);
	obj_dev_parts.type = operation_type;
	memcpy(obj_create_data->resource_obj_specific_data, &obj_dev_parts,
	       sizeof(obj_dev_parts));
	sg_init_one(&data_sg, data, data_size);
	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE);
	cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
	cmd.group_member_id = cpu_to_le64(vf_id + 1);
	cmd.data_sg = &data_sg;
	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);

	kfree(data);
end:
	if (ret)
		ida_free(&avq->dev_parts_ida, id);

	return ret;
}
EXPORT_SYMBOL_GPL(virtio_pci_admin_obj_create);

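/*
 * Object ids are allocated from the per-PF dev_parts_ida, bounded by the
 * max_dev_parts_objects limit negotiated in
 * virtio_pci_admin_cmd_dev_parts_objects_enable(); an id is returned to the
 * pool when the object is destroyed or when creation fails.
 */
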
/*
 * virtio_pci_admin_obj_destroy - Destroys an object of a given type and id
 * @pdev: VF pci_dev
 * @obj_type: Object type
 * @id: Object id
 *
 * Note: caller must serialize access for the given device.
 * Returns 0 on success, or negative on failure.
 */
int virtio_pci_admin_obj_destroy(struct pci_dev *pdev, u16 obj_type, u32 id)
{
	struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
	struct virtio_admin_cmd_resource_obj_cmd_hdr *data;
	struct virtio_pci_device *vp_dev;
	struct virtio_admin_cmd cmd = {};
	struct scatterlist data_sg;
	int vf_id;
	int ret;

	if (!virtio_dev)
		return -ENODEV;

	vf_id = pci_iov_vf_id(pdev);
	if (vf_id < 0)
		return vf_id;

	if (obj_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS)
		return -EINVAL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->type = cpu_to_le16(obj_type);
	data->id = cpu_to_le32(id);
	sg_init_one(&data_sg, data, sizeof(*data));
	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY);
	cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
	cmd.group_member_id = cpu_to_le64(vf_id + 1);
	cmd.data_sg = &data_sg;
	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
	if (!ret) {
		vp_dev = to_vp_device(virtio_dev);
		ida_free(&vp_dev->admin_vq.dev_parts_ida, id);
	}

	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(virtio_pci_admin_obj_destroy);

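/*
 * Typical device-parts save flow (an illustrative sketch only, with error
 * handling elided and the variable names hypothetical; not lifted from an
 * in-tree caller):
 *
 *	u32 obj_id, part_size, out_size;
 *	struct scatterlist sg;
 *	void *buf;
 *
 *	virtio_pci_admin_obj_create(vf_pdev, VIRTIO_RESOURCE_OBJ_DEV_PARTS,
 *				    VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET,
 *				    &obj_id);
 *	virtio_pci_admin_dev_parts_metadata_get(vf_pdev,
 *			VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id,
 *			VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE,
 *			&part_size);
 *	buf = kzalloc(part_size, GFP_KERNEL);
 *	sg_init_one(&sg, buf, part_size);
 *	virtio_pci_admin_dev_parts_get(vf_pdev,
 *			VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id,
 *			VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL, &sg,
 *			&out_size);
 *	virtio_pci_admin_obj_destroy(vf_pdev, VIRTIO_RESOURCE_OBJ_DEV_PARTS,
 *				     obj_id);
 */
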
/*
 * virtio_pci_admin_dev_parts_metadata_get - Gets metadata of the device
 * parts object identified by the attributes below
 * @pdev: VF pci_dev
 * @obj_type: Object type
 * @id: Object id
 * @metadata_type: Metadata type
 * @out: Upon success holds the queried metadata value (the device parts size)
 *
 * Note: caller must serialize access for the given device.
 * Returns 0 on success, or negative on failure.
 */
int virtio_pci_admin_dev_parts_metadata_get(struct pci_dev *pdev, u16 obj_type,
					    u32 id, u8 metadata_type, u32 *out)
{
	struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
	struct virtio_admin_cmd_dev_parts_metadata_result *result;
	struct virtio_admin_cmd_dev_parts_metadata_data *data;
	struct scatterlist data_sg, result_sg;
	struct virtio_admin_cmd cmd = {};
	int vf_id;
	int ret;

	if (!virtio_dev)
		return -ENODEV;

	if (metadata_type != VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE)
		return -EOPNOTSUPP;

	vf_id = pci_iov_vf_id(pdev);
	if (vf_id < 0)
		return vf_id;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	result = kzalloc(sizeof(*result), GFP_KERNEL);
	if (!result) {
		ret = -ENOMEM;
		goto end;
	}

	data->hdr.type = cpu_to_le16(obj_type);
	data->hdr.id = cpu_to_le32(id);
	data->type = metadata_type;
	sg_init_one(&data_sg, data, sizeof(*data));
	sg_init_one(&result_sg, result, sizeof(*result));
	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET);
	cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
	cmd.group_member_id = cpu_to_le64(vf_id + 1);
	cmd.data_sg = &data_sg;
	cmd.result_sg = &result_sg;
	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
	if (!ret)
		*out = le32_to_cpu(result->parts_size.size);

	kfree(result);
end:
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_metadata_get);

/*
 * virtio_pci_admin_dev_parts_get - Gets the device parts identified by the
 * attributes below
 * @pdev: VF pci_dev
 * @obj_type: Object type
 * @id: Object id
 * @get_type: Get type
 * @res_sg: Upon success holds the output result data
 * @res_size: Upon success holds the output result size
 *
 * Note: caller must serialize access for the given device.
 * Returns 0 on success, or negative on failure.
 */
int virtio_pci_admin_dev_parts_get(struct pci_dev *pdev, u16 obj_type, u32 id,
				   u8 get_type, struct scatterlist *res_sg,
				   u32 *res_size)
{
	struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
	struct virtio_admin_cmd_dev_parts_get_data *data;
	struct scatterlist data_sg;
	struct virtio_admin_cmd cmd = {};
	int vf_id;
	int ret;

	if (!virtio_dev)
		return -ENODEV;

	if (get_type != VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL)
		return -EOPNOTSUPP;

	vf_id = pci_iov_vf_id(pdev);
	if (vf_id < 0)
		return vf_id;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->hdr.type = cpu_to_le16(obj_type);
	data->hdr.id = cpu_to_le32(id);
	data->type = get_type;
	sg_init_one(&data_sg, data, sizeof(*data));
	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_GET);
	cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
	cmd.group_member_id = cpu_to_le64(vf_id + 1);
	cmd.data_sg = &data_sg;
	cmd.result_sg = res_sg;
	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
	if (!ret)
		*res_size = cmd.result_sg_size;

	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_get);

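/*
 * Note: the *res_size reported by virtio_pci_admin_dev_parts_get() comes
 * from cmd.result_sg_size, which vp_modern_avq_done() takes from the used
 * length the device reports upon completing the command.
 */
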
/*
 * virtio_pci_admin_dev_parts_set - Sets the device parts identified by the
 * attributes below
 * @pdev: VF pci_dev
 * @data_sg: The device parts data; its layout follows
 *           struct virtio_admin_cmd_dev_parts_set_data
 *
 * Note: caller must serialize access for the given device.
 * Returns 0 on success, or negative on failure.
 */
int virtio_pci_admin_dev_parts_set(struct pci_dev *pdev, struct scatterlist *data_sg)
{
	struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
	struct virtio_admin_cmd cmd = {};
	int vf_id;

	if (!virtio_dev)
		return -ENODEV;

	vf_id = pci_iov_vf_id(pdev);
	if (vf_id < 0)
		return vf_id;

	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_SET);
	cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
	cmd.group_member_id = cpu_to_le64(vf_id + 1);
	cmd.data_sg = data_sg;
	return vp_modern_admin_cmd_exec(virtio_dev, &cmd);
}
EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_set);

static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
	.get		= NULL,
	.set		= NULL,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.synchronize_cbs = vp_synchronize_vectors,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
	.get_shm_region  = vp_get_shm_region,
	.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
	.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
};

static const struct virtio_config_ops virtio_pci_config_ops = {
	.get		= vp_get,
	.set		= vp_set,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.synchronize_cbs = vp_synchronize_vectors,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
	.get_shm_region  = vp_get_shm_region,
	.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
	.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
};

/* the PCI probing function */
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int err;

	mdev->pci_dev = pci_dev;

	err = vp_modern_probe(mdev);
	if (err)
		return err;

	if (mdev->device)
		vp_dev->vdev.config = &virtio_pci_config_ops;
	else
		vp_dev->vdev.config = &virtio_pci_config_nodev_ops;

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;
	vp_dev->avq_index = vp_avq_index;
	vp_dev->isr = mdev->isr;
	vp_dev->vdev.id = mdev->id;

	spin_lock_init(&vp_dev->admin_vq.lock);
	return 0;
}

void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	vp_modern_remove(mdev);
}