1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * vDPA bridge driver for modern virtio-pci device 4 * 5 * Copyright (c) 2020, Red Hat Inc. All rights reserved. 6 * Author: Jason Wang <jasowang@redhat.com> 7 * 8 * Based on virtio_pci_modern.c. 9 */ 10 11 #include <linux/interrupt.h> 12 #include <linux/module.h> 13 #include <linux/pci.h> 14 #include <linux/vdpa.h> 15 #include <linux/virtio.h> 16 #include <linux/virtio_config.h> 17 #include <linux/virtio_ring.h> 18 #include <linux/virtio_pci.h> 19 #include <linux/virtio_pci_modern.h> 20 #include <uapi/linux/vdpa.h> 21 22 #define VP_VDPA_QUEUE_MAX 256 23 #define VP_VDPA_DRIVER_NAME "vp_vdpa" 24 #define VP_VDPA_NAME_SIZE 256 25 26 struct vp_vring { 27 void __iomem *notify; 28 char msix_name[VP_VDPA_NAME_SIZE]; 29 struct vdpa_callback cb; 30 resource_size_t notify_pa; 31 int irq; 32 }; 33 34 struct vp_vdpa { 35 struct vdpa_device vdpa; 36 struct virtio_pci_modern_device *mdev; 37 struct vp_vring *vring; 38 struct vdpa_callback config_cb; 39 u64 device_features; 40 char msix_name[VP_VDPA_NAME_SIZE]; 41 int config_irq; 42 int queues; 43 int vectors; 44 }; 45 46 struct vp_vdpa_mgmtdev { 47 struct vdpa_mgmt_dev mgtdev; 48 struct virtio_pci_modern_device *mdev; 49 struct vp_vdpa *vp_vdpa; 50 }; 51 52 static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa) 53 { 54 return container_of(vdpa, struct vp_vdpa, vdpa); 55 } 56 57 static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa) 58 { 59 struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); 60 61 return vp_vdpa->mdev; 62 } 63 64 static struct virtio_pci_modern_device *vp_vdpa_to_mdev(struct vp_vdpa *vp_vdpa) 65 { 66 return vp_vdpa->mdev; 67 } 68 69 static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa) 70 { 71 struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); 72 73 return vp_vdpa->device_features; 74 } 75 76 static int vp_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features) 77 { 78 struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); 79 80 vp_modern_set_features(mdev, features); 81 82 return 0; 83 } 84 85 static u64 vp_vdpa_get_driver_features(struct vdpa_device *vdpa) 86 { 87 struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); 88 89 return vp_modern_get_driver_features(mdev); 90 } 91 92 static u8 vp_vdpa_get_status(struct vdpa_device *vdpa) 93 { 94 struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); 95 96 return vp_modern_get_status(mdev); 97 } 98 99 static int vp_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx) 100 { 101 struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); 102 int irq = vp_vdpa->vring[idx].irq; 103 104 if (irq == VIRTIO_MSI_NO_VECTOR) 105 return -EINVAL; 106 107 return irq; 108 } 109 110 static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa) 111 { 112 struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa); 113 struct pci_dev *pdev = mdev->pci_dev; 114 int i; 115 116 for (i = 0; i < vp_vdpa->queues; i++) { 117 if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) { 118 vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR); 119 devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq, 120 &vp_vdpa->vring[i]); 121 vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR; 122 } 123 } 124 125 if (vp_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) { 126 vp_modern_config_vector(mdev, VIRTIO_MSI_NO_VECTOR); 127 devm_free_irq(&pdev->dev, vp_vdpa->config_irq, vp_vdpa); 128 vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR; 129 } 130 131 if (vp_vdpa->vectors) { 132 pci_free_irq_vectors(pdev); 133 vp_vdpa->vectors = 0; 134 } 135 } 136 137 static irqreturn_t vp_vdpa_vq_handler(int irq, void *arg) 138 { 139 struct vp_vring *vring = arg; 140 141 if (vring->cb.callback) 142 return vring->cb.callback(vring->cb.private); 143 144 return IRQ_HANDLED; 145 } 146 147 static irqreturn_t vp_vdpa_config_handler(int irq, void *arg) 148 { 149 struct vp_vdpa *vp_vdpa = arg; 150 151 if (vp_vdpa->config_cb.callback) 152 return vp_vdpa->config_cb.callback(vp_vdpa->config_cb.private); 153 154 return IRQ_HANDLED; 155 } 156 157 static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa) 158 { 159 struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa); 160 struct pci_dev *pdev = mdev->pci_dev; 161 int i, ret, irq; 162 int queues = vp_vdpa->queues; 163 int vectors = 1; 164 int msix_vec = 0; 165 166 for (i = 0; i < queues; i++) { 167 if (vp_vdpa->vring[i].cb.callback) 168 vectors++; 169 } 170 171 ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX); 172 if (ret != vectors) { 173 dev_err(&pdev->dev, 174 "vp_vdpa: fail to allocate irq vectors want %d but %d\n", 175 vectors, ret); 176 return ret; 177 } 178 179 vp_vdpa->vectors = vectors; 180 181 for (i = 0; i < queues; i++) { 182 if (!vp_vdpa->vring[i].cb.callback) 183 continue; 184 185 snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE, 186 "vp-vdpa[%s]-%d\n", pci_name(pdev), i); 187 irq = pci_irq_vector(pdev, msix_vec); 188 ret = devm_request_irq(&pdev->dev, irq, 189 vp_vdpa_vq_handler, 190 0, vp_vdpa->vring[i].msix_name, 191 &vp_vdpa->vring[i]); 192 if (ret) { 193 dev_err(&pdev->dev, 194 "vp_vdpa: fail to request irq for vq %d\n", i); 195 goto err; 196 } 197 vp_modern_queue_vector(mdev, i, msix_vec); 198 vp_vdpa->vring[i].irq = irq; 199 msix_vec++; 200 } 201 202 snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config\n", 203 pci_name(pdev)); 204 irq = pci_irq_vector(pdev, msix_vec); 205 ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0, 206 vp_vdpa->msix_name, vp_vdpa); 207 if (ret) { 208 dev_err(&pdev->dev, 209 "vp_vdpa: fail to request irq for config: %d\n", ret); 210 goto err; 211 } 212 vp_modern_config_vector(mdev, msix_vec); 213 vp_vdpa->config_irq = irq; 214 215 return 0; 216 err: 217 vp_vdpa_free_irq(vp_vdpa); 218 return ret; 219 } 220 221 static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status) 222 { 223 struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); 224 struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa); 225 u8 s = vp_vdpa_get_status(vdpa); 226 227 if (status & VIRTIO_CONFIG_S_DRIVER_OK && 228 !(s & VIRTIO_CONFIG_S_DRIVER_OK)) { 229 if (vp_vdpa_request_irq(vp_vdpa)) { 230 WARN_ON(1); 231 return; 232 } 233 } 234 235 vp_modern_set_status(mdev, status); 236 } 237 238 static int vp_vdpa_reset(struct vdpa_device *vdpa) 239 { 240 struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); 241 struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa); 242 u8 s = vp_vdpa_get_status(vdpa); 243 244 vp_modern_set_status(mdev, 0); 245 246 if (s & VIRTIO_CONFIG_S_DRIVER_OK) 247 vp_vdpa_free_irq(vp_vdpa); 248 249 return 0; 250 } 251 252 static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa) 253 { 254 return VP_VDPA_QUEUE_MAX; 255 } 256 257 static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid, 258 struct vdpa_vq_state *state) 259 { 260 /* Note that this is not supported by virtio specification, so 261 * we return -EOPNOTSUPP here. This means we can't support live 262 * migration, vhost device start/stop. 263 */ 264 return -EOPNOTSUPP; 265 } 266 267 static int vp_vdpa_set_vq_state_split(struct vdpa_device *vdpa, 268 const struct vdpa_vq_state *state) 269 { 270 const struct vdpa_vq_state_split *split = &state->split; 271 272 if (split->avail_index == 0) 273 return 0; 274 275 return -EOPNOTSUPP; 276 } 277 278 static int vp_vdpa_set_vq_state_packed(struct vdpa_device *vdpa, 279 const struct vdpa_vq_state *state) 280 { 281 const struct vdpa_vq_state_packed *packed = &state->packed; 282 283 if (packed->last_avail_counter == 1 && 284 packed->last_avail_idx == 0 && 285 packed->last_used_counter == 1 && 286 packed->last_used_idx == 0) 287 return 0; 288 289 return -EOPNOTSUPP; 290 } 291 292 static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid, 293 const struct vdpa_vq_state *state) 294 { 295 struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); 296 297 /* Note that this is not supported by virtio specification. 298 * But if the state is by chance equal to the device initial 299 * state, we can let it go. 300 */ 301 if ((vp_modern_get_status(mdev) & VIRTIO_CONFIG_S_FEATURES_OK) && 302 !vp_modern_get_queue_enable(mdev, qid)) { 303 if (vp_modern_get_driver_features(mdev) & 304 BIT_ULL(VIRTIO_F_RING_PACKED)) 305 return vp_vdpa_set_vq_state_packed(vdpa, state); 306 else 307 return vp_vdpa_set_vq_state_split(vdpa, state); 308 } 309 310 return -EOPNOTSUPP; 311 } 312 313 static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid, 314 struct vdpa_callback *cb) 315 { 316 struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); 317 318 vp_vdpa->vring[qid].cb = *cb; 319 } 320 321 static void vp_vdpa_set_vq_ready(struct vdpa_device *vdpa, 322 u16 qid, bool ready) 323 { 324 struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); 325 326 vp_modern_set_queue_enable(mdev, qid, ready); 327 } 328 329 static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid) 330 { 331 struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); 332 333 return vp_modern_get_queue_enable(mdev, qid); 334 } 335 336 static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid, 337 u32 num) 338 { 339 struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); 340 341 vp_modern_set_queue_size(mdev, qid, num); 342 } 343 344 static u16 vp_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 qid) 345 { 346 struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); 347 348 return vp_modern_get_queue_size(mdev, qid); 349 } 350 351 static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid, 352 u64 desc_area, u64 driver_area, 353 u64 device_area) 354 { 355 struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); 356 357 vp_modern_queue_address(mdev, qid, desc_area, 358 driver_area, device_area); 359 360 return 0; 361 } 362 363 static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid) 364 { 365 struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); 366 367 vp_iowrite16(qid, vp_vdpa->vring[qid].notify); 368 } 369 370 static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa) 371 { 372 struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); 373 374 return vp_modern_generation(mdev); 375 } 376 377 static u32 vp_vdpa_get_device_id(struct vdpa_device *vdpa) 378 { 379 struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); 380 381 return mdev->id.device; 382 } 383 384 static u32 vp_vdpa_get_vendor_id(struct vdpa_device *vdpa) 385 { 386 struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); 387 388 return mdev->id.vendor; 389 } 390 391 static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa) 392 { 393 return PAGE_SIZE; 394 } 395 396 static size_t vp_vdpa_get_config_size(struct vdpa_device *vdpa) 397 { 398 struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa); 399 400 return mdev->device_len; 401 } 402 403 static void vp_vdpa_get_config(struct vdpa_device *vdpa, 404 unsigned int offset, 405 void *buf, unsigned int len) 406 { 407 struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); 408 struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa); 409 u8 old, new; 410 u8 *p; 411 int i; 412 413 do { 414 old = vp_ioread8(&mdev->common->config_generation); 415 p = buf; 416 for (i = 0; i < len; i++) 417 *p++ = vp_ioread8(mdev->device + offset + i); 418 419 new = vp_ioread8(&mdev->common->config_generation); 420 } while (old != new); 421 } 422 423 static void vp_vdpa_set_config(struct vdpa_device *vdpa, 424 unsigned int offset, const void *buf, 425 unsigned int len) 426 { 427 struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); 428 struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa); 429 const u8 *p = buf; 430 int i; 431 432 for (i = 0; i < len; i++) 433 vp_iowrite8(*p++, mdev->device + offset + i); 434 } 435 436 static void vp_vdpa_set_config_cb(struct vdpa_device *vdpa, 437 struct vdpa_callback *cb) 438 { 439 struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); 440 441 vp_vdpa->config_cb = *cb; 442 } 443 444 static struct vdpa_notification_area 445 vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid) 446 { 447 struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa); 448 struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa); 449 struct vdpa_notification_area notify; 450 451 notify.addr = vp_vdpa->vring[qid].notify_pa; 452 notify.size = mdev->notify_offset_multiplier; 453 454 return notify; 455 } 456 457 static const struct vdpa_config_ops vp_vdpa_ops = { 458 .get_device_features = vp_vdpa_get_device_features, 459 .set_driver_features = vp_vdpa_set_driver_features, 460 .get_driver_features = vp_vdpa_get_driver_features, 461 .get_status = vp_vdpa_get_status, 462 .set_status = vp_vdpa_set_status, 463 .reset = vp_vdpa_reset, 464 .get_vq_num_max = vp_vdpa_get_vq_num_max, 465 .get_vq_state = vp_vdpa_get_vq_state, 466 .get_vq_notification = vp_vdpa_get_vq_notification, 467 .set_vq_state = vp_vdpa_set_vq_state, 468 .set_vq_cb = vp_vdpa_set_vq_cb, 469 .set_vq_ready = vp_vdpa_set_vq_ready, 470 .get_vq_ready = vp_vdpa_get_vq_ready, 471 .set_vq_num = vp_vdpa_set_vq_num, 472 .get_vq_size = vp_vdpa_get_vq_size, 473 .set_vq_address = vp_vdpa_set_vq_address, 474 .kick_vq = vp_vdpa_kick_vq, 475 .get_generation = vp_vdpa_get_generation, 476 .get_device_id = vp_vdpa_get_device_id, 477 .get_vendor_id = vp_vdpa_get_vendor_id, 478 .get_vq_align = vp_vdpa_get_vq_align, 479 .get_config_size = vp_vdpa_get_config_size, 480 .get_config = vp_vdpa_get_config, 481 .set_config = vp_vdpa_set_config, 482 .set_config_cb = vp_vdpa_set_config_cb, 483 .get_vq_irq = vp_vdpa_get_vq_irq, 484 }; 485 486 static void vp_vdpa_free_irq_vectors(void *data) 487 { 488 pci_free_irq_vectors(data); 489 } 490 491 static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name, 492 const struct vdpa_dev_set_config *add_config) 493 { 494 struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = 495 container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev); 496 497 struct virtio_pci_modern_device *mdev = vp_vdpa_mgtdev->mdev; 498 struct pci_dev *pdev = mdev->pci_dev; 499 struct device *dev = &pdev->dev; 500 struct vp_vdpa *vp_vdpa = NULL; 501 u64 device_features; 502 int ret, i; 503 504 vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa, 505 dev, &vp_vdpa_ops, 1, 1, name, false); 506 507 if (IS_ERR(vp_vdpa)) { 508 dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n"); 509 return PTR_ERR(vp_vdpa); 510 } 511 512 vp_vdpa_mgtdev->vp_vdpa = vp_vdpa; 513 514 vp_vdpa->vdpa.dma_dev = &pdev->dev; 515 vp_vdpa->queues = vp_modern_get_num_queues(mdev); 516 vp_vdpa->mdev = mdev; 517 518 device_features = vp_modern_get_features(mdev); 519 if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) { 520 if (add_config->device_features & ~device_features) { 521 ret = -EINVAL; 522 dev_err(&pdev->dev, "Try to provision features " 523 "that are not supported by the device: " 524 "device_features 0x%llx provisioned 0x%llx\n", 525 device_features, add_config->device_features); 526 goto err; 527 } 528 device_features = add_config->device_features; 529 } 530 vp_vdpa->device_features = device_features; 531 532 ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev); 533 if (ret) { 534 dev_err(&pdev->dev, 535 "Failed for adding devres for freeing irq vectors\n"); 536 goto err; 537 } 538 539 vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues, 540 sizeof(*vp_vdpa->vring), 541 GFP_KERNEL); 542 if (!vp_vdpa->vring) { 543 ret = -ENOMEM; 544 dev_err(&pdev->dev, "Fail to allocate virtqueues\n"); 545 goto err; 546 } 547 548 for (i = 0; i < vp_vdpa->queues; i++) { 549 vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR; 550 vp_vdpa->vring[i].notify = 551 vp_modern_map_vq_notify(mdev, i, 552 &vp_vdpa->vring[i].notify_pa); 553 if (!vp_vdpa->vring[i].notify) { 554 ret = -EINVAL; 555 dev_warn(&pdev->dev, "Fail to map vq notify %d\n", i); 556 goto err; 557 } 558 } 559 vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR; 560 561 vp_vdpa->vdpa.mdev = &vp_vdpa_mgtdev->mgtdev; 562 ret = _vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues); 563 if (ret) { 564 dev_err(&pdev->dev, "Failed to register to vdpa bus\n"); 565 goto err; 566 } 567 568 return 0; 569 570 err: 571 put_device(&vp_vdpa->vdpa.dev); 572 return ret; 573 } 574 575 static void vp_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, 576 struct vdpa_device *dev) 577 { 578 struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = 579 container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev); 580 581 struct vp_vdpa *vp_vdpa = vp_vdpa_mgtdev->vp_vdpa; 582 583 _vdpa_unregister_device(&vp_vdpa->vdpa); 584 vp_vdpa_mgtdev->vp_vdpa = NULL; 585 } 586 587 static const struct vdpa_mgmtdev_ops vp_vdpa_mdev_ops = { 588 .dev_add = vp_vdpa_dev_add, 589 .dev_del = vp_vdpa_dev_del, 590 }; 591 592 static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id) 593 { 594 struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = NULL; 595 struct vdpa_mgmt_dev *mgtdev; 596 struct device *dev = &pdev->dev; 597 struct virtio_pci_modern_device *mdev = NULL; 598 struct virtio_device_id *mdev_id = NULL; 599 int err; 600 601 vp_vdpa_mgtdev = kzalloc(sizeof(*vp_vdpa_mgtdev), GFP_KERNEL); 602 if (!vp_vdpa_mgtdev) 603 return -ENOMEM; 604 605 mgtdev = &vp_vdpa_mgtdev->mgtdev; 606 mgtdev->ops = &vp_vdpa_mdev_ops; 607 mgtdev->device = dev; 608 609 mdev = kzalloc(sizeof(struct virtio_pci_modern_device), GFP_KERNEL); 610 if (!mdev) { 611 err = -ENOMEM; 612 goto mdev_err; 613 } 614 615 mdev_id = kzalloc(sizeof(struct virtio_device_id), GFP_KERNEL); 616 if (!mdev_id) { 617 err = -ENOMEM; 618 goto mdev_id_err; 619 } 620 621 vp_vdpa_mgtdev->mdev = mdev; 622 mdev->pci_dev = pdev; 623 624 err = pcim_enable_device(pdev); 625 if (err) { 626 goto probe_err; 627 } 628 629 err = vp_modern_probe(mdev); 630 if (err) { 631 dev_err(&pdev->dev, "Failed to probe modern PCI device\n"); 632 goto probe_err; 633 } 634 635 mdev_id->device = mdev->id.device; 636 mdev_id->vendor = mdev->id.vendor; 637 mgtdev->id_table = mdev_id; 638 mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev); 639 mgtdev->supported_features = vp_modern_get_features(mdev); 640 mgtdev->config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES); 641 pci_set_master(pdev); 642 pci_set_drvdata(pdev, vp_vdpa_mgtdev); 643 644 err = vdpa_mgmtdev_register(mgtdev); 645 if (err) { 646 dev_err(&pdev->dev, "Failed to register vdpa mgmtdev device\n"); 647 goto register_err; 648 } 649 650 return 0; 651 652 register_err: 653 vp_modern_remove(vp_vdpa_mgtdev->mdev); 654 probe_err: 655 kfree(mdev_id); 656 mdev_id_err: 657 kfree(mdev); 658 mdev_err: 659 kfree(vp_vdpa_mgtdev); 660 return err; 661 } 662 663 static void vp_vdpa_remove(struct pci_dev *pdev) 664 { 665 struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = pci_get_drvdata(pdev); 666 struct virtio_pci_modern_device *mdev = NULL; 667 668 mdev = vp_vdpa_mgtdev->mdev; 669 vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev); 670 vp_modern_remove(mdev); 671 kfree(vp_vdpa_mgtdev->mgtdev.id_table); 672 kfree(mdev); 673 kfree(vp_vdpa_mgtdev); 674 } 675 676 static struct pci_driver vp_vdpa_driver = { 677 .name = "vp-vdpa", 678 .id_table = NULL, /* only dynamic ids */ 679 .probe = vp_vdpa_probe, 680 .remove = vp_vdpa_remove, 681 }; 682 683 module_pci_driver(vp_vdpa_driver); 684 685 MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>"); 686 MODULE_DESCRIPTION("vp-vdpa"); 687 MODULE_LICENSE("GPL"); 688 MODULE_VERSION("1"); 689