// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bridge driver for modern virtio-pci device
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 * Based on virtio_pci_modern.c.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_modern.h>
#include <uapi/linux/vdpa.h>

#define VP_VDPA_QUEUE_MAX 256
#define VP_VDPA_DRIVER_NAME "vp_vdpa"
#define VP_VDPA_NAME_SIZE 256

struct vp_vring {
	void __iomem *notify;
	char msix_name[VP_VDPA_NAME_SIZE];
	struct vdpa_callback cb;
	resource_size_t notify_pa;
	int irq;
};

struct vp_vdpa {
	struct vdpa_device vdpa;
	struct virtio_pci_modern_device *mdev;
	struct vp_vring *vring;
	struct vdpa_callback config_cb;
	u64 device_features;
	char msix_name[VP_VDPA_NAME_SIZE];
	int config_irq;
	int queues;
	int vectors;
};

struct vp_vdpa_mgmtdev {
	struct vdpa_mgmt_dev mgtdev;
	struct virtio_pci_modern_device *mdev;
	struct vp_vdpa *vp_vdpa;
};

static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vp_vdpa, vdpa);
}

static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	return vp_vdpa->mdev;
}

static struct virtio_pci_modern_device *vp_vdpa_to_mdev(struct vp_vdpa *vp_vdpa)
{
	return vp_vdpa->mdev;
}

static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	return vp_vdpa->device_features;
}

static int vp_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_features(mdev, features);

	return 0;
}

static u64 vp_vdpa_get_driver_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_driver_features(mdev);
}

static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_status(mdev);
}

static int vp_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	int irq = vp_vdpa->vring[idx].irq;

	if (irq == VIRTIO_MSI_NO_VECTOR)
		return -EINVAL;

	return irq;
}

static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	struct pci_dev *pdev = mdev->pci_dev;
	int i;

	for (i = 0; i < vp_vdpa->queues; i++) {
		if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
			vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR);
			devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq,
				      &vp_vdpa->vring[i]);
			vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		}
	}

	if (vp_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
		vp_modern_config_vector(mdev, VIRTIO_MSI_NO_VECTOR);
		devm_free_irq(&pdev->dev, vp_vdpa->config_irq, vp_vdpa);
		vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
	}

	if (vp_vdpa->vectors) {
		pci_free_irq_vectors(pdev);
		vp_vdpa->vectors = 0;
	}
}

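/*
 * Interrupt handling: one MSI-X vector is allocated for each virtqueue
 * that has a callback registered, plus one vector for config change
 * notifications. The handlers below simply forward the interrupt to the
 * corresponding vDPA callback.
 */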
static irqreturn_t vp_vdpa_vq_handler(int irq, void *arg)
{
	struct vp_vring *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
{
	struct vp_vdpa *vp_vdpa = arg;

	if (vp_vdpa->config_cb.callback)
		return vp_vdpa->config_cb.callback(vp_vdpa->config_cb.private);

	return IRQ_HANDLED;
}

static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
{
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	struct pci_dev *pdev = mdev->pci_dev;
	int i, ret, irq;
	int queues = vp_vdpa->queues;
	int vectors = 1;
	int msix_vec = 0;

	for (i = 0; i < queues; i++) {
		if (vp_vdpa->vring[i].cb.callback)
			vectors++;
	}

	ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
	if (ret != vectors) {
		dev_err(&pdev->dev,
			"vp_vdpa: fail to allocate irq vectors want %d but %d\n",
			vectors, ret);
		return ret;
	}

	vp_vdpa->vectors = vectors;

	for (i = 0; i < queues; i++) {
		if (!vp_vdpa->vring[i].cb.callback)
			continue;

		snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE,
			 "vp-vdpa[%s]-%d\n", pci_name(pdev), i);
		irq = pci_irq_vector(pdev, msix_vec);
		ret = devm_request_irq(&pdev->dev, irq,
				       vp_vdpa_vq_handler,
				       0, vp_vdpa->vring[i].msix_name,
				       &vp_vdpa->vring[i]);
		if (ret) {
			dev_err(&pdev->dev,
				"vp_vdpa: fail to request irq for vq %d\n", i);
			goto err;
		}
		vp_modern_queue_vector(mdev, i, msix_vec);
		vp_vdpa->vring[i].irq = irq;
		msix_vec++;
	}

	snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config\n",
		 pci_name(pdev));
	irq = pci_irq_vector(pdev, msix_vec);
	ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0,
			       vp_vdpa->msix_name, vp_vdpa);
	if (ret) {
		dev_err(&pdev->dev,
			"vp_vdpa: fail to request irq for config: %d\n", ret);
		goto err;
	}
	vp_modern_config_vector(mdev, msix_vec);
	vp_vdpa->config_irq = irq;

	return 0;
err:
	vp_vdpa_free_irq(vp_vdpa);
	return ret;
}

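/*
 * Interrupts are requested when the driver status transitions to
 * DRIVER_OK (the virtqueue callbacks are known by then) and released
 * again in vp_vdpa_reset() once DRIVER_OK had been reached.
 */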
static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	u8 s = vp_vdpa_get_status(vdpa);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
	    !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
		if (vp_vdpa_request_irq(vp_vdpa)) {
			WARN_ON(1);
			return;
		}
	}

	vp_modern_set_status(mdev, status);
}

static int vp_vdpa_reset(struct vdpa_device *vdpa)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	u8 s = vp_vdpa_get_status(vdpa);

	vp_modern_set_status(mdev, 0);

	if (s & VIRTIO_CONFIG_S_DRIVER_OK)
		vp_vdpa_free_irq(vp_vdpa);

	return 0;
}

static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VP_VDPA_QUEUE_MAX;
}

static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
				struct vdpa_vq_state *state)
{
	/* Note that this is not supported by virtio specification, so
	 * we return -EOPNOTSUPP here. This means we can't support live
	 * migration, vhost device start/stop.
	 */
	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state_split(struct vdpa_device *vdpa,
				      const struct vdpa_vq_state *state)
{
	const struct vdpa_vq_state_split *split = &state->split;

	if (split->avail_index == 0)
		return 0;

	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state_packed(struct vdpa_device *vdpa,
				       const struct vdpa_vq_state *state)
{
	const struct vdpa_vq_state_packed *packed = &state->packed;

	if (packed->last_avail_counter == 1 &&
	    packed->last_avail_idx == 0 &&
	    packed->last_used_counter == 1 &&
	    packed->last_used_idx == 0)
		return 0;

	return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
				const struct vdpa_vq_state *state)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	/* Note that this is not supported by virtio specification.
	 * But if the state is by chance equal to the device initial
	 * state, we can let it go.
	 */
	if ((vp_modern_get_status(mdev) & VIRTIO_CONFIG_S_FEATURES_OK) &&
	    !vp_modern_get_queue_enable(mdev, qid)) {
		if (vp_modern_get_driver_features(mdev) &
		    BIT_ULL(VIRTIO_F_RING_PACKED))
			return vp_vdpa_set_vq_state_packed(vdpa, state);
		else
			return vp_vdpa_set_vq_state_split(vdpa, state);
	}

	return -EOPNOTSUPP;
}

static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
			      struct vdpa_callback *cb)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_vdpa->vring[qid].cb = *cb;
}

static void vp_vdpa_set_vq_ready(struct vdpa_device *vdpa,
				 u16 qid, bool ready)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_queue_enable(mdev, qid, ready);
}

static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_queue_enable(mdev, qid);
}

static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
			       u32 num)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_set_queue_size(mdev, qid, num);
}

static u16 vp_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 qid)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_get_queue_size(mdev, qid);
}

static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	vp_modern_queue_address(mdev, qid, desc_area,
				driver_area, device_area);

	return 0;
}

static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
}

static void vp_vdpa_kick_vq_with_data(struct vdpa_device *vdpa, u32 data)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	u16 qid = data & 0xFFFF;

	vp_iowrite32(data, vp_vdpa->vring[qid].notify);
}

static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return vp_modern_generation(mdev);
}

static u32 vp_vdpa_get_device_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->id.device;
}

static u32 vp_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->id.vendor;
}

static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
	return PAGE_SIZE;
}

static size_t vp_vdpa_get_config_size(struct vdpa_device *vdpa)
{
	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

	return mdev->device_len;
}

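/*
 * Device config space accessors. Reads are retried until the config
 * generation counter is stable, so a concurrent device-side update
 * cannot be observed as a torn read.
 */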
static void vp_vdpa_get_config(struct vdpa_device *vdpa,
			       unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	u8 old, new;
	u8 *p;
	int i;

	do {
		old = vp_ioread8(&mdev->common->config_generation);
		p = buf;
		for (i = 0; i < len; i++)
			*p++ = vp_ioread8(mdev->device + offset + i);

		new = vp_ioread8(&mdev->common->config_generation);
	} while (old != new);
}

static void vp_vdpa_set_config(struct vdpa_device *vdpa,
			       unsigned int offset, const void *buf,
			       unsigned int len)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	const u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		vp_iowrite8(*p++, mdev->device + offset + i);
}

static void vp_vdpa_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

	vp_vdpa->config_cb = *cb;
}

static struct vdpa_notification_area
vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
{
	struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
	struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
	struct vdpa_notification_area notify;

	notify.addr = vp_vdpa->vring[qid].notify_pa;
	notify.size = mdev->notify_offset_multiplier;

	return notify;
}

static const struct vdpa_config_ops vp_vdpa_ops = {
	.get_device_features = vp_vdpa_get_device_features,
	.set_driver_features = vp_vdpa_set_driver_features,
	.get_driver_features = vp_vdpa_get_driver_features,
	.get_status = vp_vdpa_get_status,
	.set_status = vp_vdpa_set_status,
	.reset = vp_vdpa_reset,
	.get_vq_num_max = vp_vdpa_get_vq_num_max,
	.get_vq_state = vp_vdpa_get_vq_state,
	.get_vq_notification = vp_vdpa_get_vq_notification,
	.set_vq_state = vp_vdpa_set_vq_state,
	.set_vq_cb = vp_vdpa_set_vq_cb,
	.set_vq_ready = vp_vdpa_set_vq_ready,
	.get_vq_ready = vp_vdpa_get_vq_ready,
	.set_vq_num = vp_vdpa_set_vq_num,
	.get_vq_size = vp_vdpa_get_vq_size,
	.set_vq_address = vp_vdpa_set_vq_address,
	.kick_vq = vp_vdpa_kick_vq,
	.kick_vq_with_data = vp_vdpa_kick_vq_with_data,
	.get_generation = vp_vdpa_get_generation,
	.get_device_id = vp_vdpa_get_device_id,
	.get_vendor_id = vp_vdpa_get_vendor_id,
	.get_vq_align = vp_vdpa_get_vq_align,
	.get_config_size = vp_vdpa_get_config_size,
	.get_config = vp_vdpa_get_config,
	.set_config = vp_vdpa_set_config,
	.set_config_cb = vp_vdpa_set_config_cb,
	.get_vq_irq = vp_vdpa_get_vq_irq,
};

static void vp_vdpa_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

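/*
 * Management device callbacks: dev_add allocates the vp_vdpa instance,
 * maps the per-virtqueue notification areas and registers the device on
 * the vdpa bus; dev_del unregisters it again.
 */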
static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
			   const struct vdpa_dev_set_config *add_config)
{
	struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev =
		container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev);

	struct virtio_pci_modern_device *mdev = vp_vdpa_mgtdev->mdev;
	struct pci_dev *pdev = mdev->pci_dev;
	struct device *dev = &pdev->dev;
	struct vp_vdpa *vp_vdpa = NULL;
	u64 device_features;
	int ret, i;

	vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
				    dev, &vp_vdpa_ops, 1, 1, name, false);

	if (IS_ERR(vp_vdpa)) {
		dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
		return PTR_ERR(vp_vdpa);
	}

	vp_vdpa_mgtdev->vp_vdpa = vp_vdpa;

	vp_vdpa->vdpa.dma_dev = &pdev->dev;
	vp_vdpa->queues = vp_modern_get_num_queues(mdev);
	vp_vdpa->mdev = mdev;

	device_features = vp_modern_get_features(mdev);
	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (add_config->device_features & ~device_features) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "Try to provision features "
				"that are not supported by the device: "
				"device_features 0x%llx provisioned 0x%llx\n",
				device_features, add_config->device_features);
			goto err;
		}
		device_features = add_config->device_features;
	}
	vp_vdpa->device_features = device_features;

	ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed for adding devres for freeing irq vectors\n");
		goto err;
	}

	vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,
				      sizeof(*vp_vdpa->vring),
				      GFP_KERNEL);
	if (!vp_vdpa->vring) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Fail to allocate virtqueues\n");
		goto err;
	}

	for (i = 0; i < vp_vdpa->queues; i++) {
		vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		vp_vdpa->vring[i].notify =
			vp_modern_map_vq_notify(mdev, i,
						&vp_vdpa->vring[i].notify_pa);
		if (!vp_vdpa->vring[i].notify) {
			ret = -EINVAL;
			dev_warn(&pdev->dev, "Fail to map vq notify %d\n", i);
			goto err;
		}
	}
	vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;

	vp_vdpa->vdpa.mdev = &vp_vdpa_mgtdev->mgtdev;
	ret = _vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
		goto err;
	}

	return 0;

err:
	put_device(&vp_vdpa->vdpa.dev);
	return ret;
}

static void vp_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev,
			    struct vdpa_device *dev)
{
	struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev =
		container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev);

	struct vp_vdpa *vp_vdpa = vp_vdpa_mgtdev->vp_vdpa;

	_vdpa_unregister_device(&vp_vdpa->vdpa);
	vp_vdpa_mgtdev->vp_vdpa = NULL;
}

static const struct vdpa_mgmtdev_ops vp_vdpa_mdev_ops = {
	.dev_add = vp_vdpa_dev_add,
	.dev_del = vp_vdpa_dev_del,
};

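/*
 * PCI probe only sets up the modern virtio-pci layout and registers a
 * vdpa management device; the vdpa device itself is created on demand
 * through vp_vdpa_dev_add().
 */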
static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = NULL;
	struct vdpa_mgmt_dev *mgtdev;
	struct device *dev = &pdev->dev;
	struct virtio_pci_modern_device *mdev = NULL;
	struct virtio_device_id *mdev_id = NULL;
	int err;

	vp_vdpa_mgtdev = kzalloc(sizeof(*vp_vdpa_mgtdev), GFP_KERNEL);
	if (!vp_vdpa_mgtdev)
		return -ENOMEM;

	mgtdev = &vp_vdpa_mgtdev->mgtdev;
	mgtdev->ops = &vp_vdpa_mdev_ops;
	mgtdev->device = dev;

	mdev = kzalloc(sizeof(struct virtio_pci_modern_device), GFP_KERNEL);
	if (!mdev) {
		err = -ENOMEM;
		goto mdev_err;
	}

	/*
	 * id_table should be a null terminated array, so allocate one additional
	 * entry here, see vdpa_mgmtdev_get_classes().
	 */
	mdev_id = kcalloc(2, sizeof(struct virtio_device_id), GFP_KERNEL);
	if (!mdev_id) {
		err = -ENOMEM;
		goto mdev_id_err;
	}

	vp_vdpa_mgtdev->mdev = mdev;
	mdev->pci_dev = pdev;

	err = pcim_enable_device(pdev);
	if (err) {
		goto probe_err;
	}

	err = vp_modern_probe(mdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
		goto probe_err;
	}

	mdev_id[0].device = mdev->id.device;
	mdev_id[0].vendor = mdev->id.vendor;
	mgtdev->id_table = mdev_id;
	mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev);
	mgtdev->supported_features = vp_modern_get_features(mdev);
	mgtdev->config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES);
	pci_set_master(pdev);
	pci_set_drvdata(pdev, vp_vdpa_mgtdev);

	err = vdpa_mgmtdev_register(mgtdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register vdpa mgmtdev device\n");
		goto register_err;
	}

	return 0;

register_err:
	vp_modern_remove(vp_vdpa_mgtdev->mdev);
probe_err:
	kfree(mdev_id);
mdev_id_err:
	kfree(mdev);
mdev_err:
	kfree(vp_vdpa_mgtdev);
	return err;
}

static void vp_vdpa_remove(struct pci_dev *pdev)
{
	struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = pci_get_drvdata(pdev);
	struct virtio_pci_modern_device *mdev = NULL;

	mdev = vp_vdpa_mgtdev->mdev;
	vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
	vp_modern_remove(mdev);
	kfree(vp_vdpa_mgtdev->mgtdev.id_table);
	kfree(mdev);
	kfree(vp_vdpa_mgtdev);
}

static struct pci_driver vp_vdpa_driver = {
	.name = "vp-vdpa",
	.id_table = NULL, /* only dynamic ids */
	.probe = vp_vdpa_probe,
	.remove = vp_vdpa_remove,
};

module_pci_driver(vp_vdpa_driver);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_DESCRIPTION("vp-vdpa");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");