// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2024 Marvell. */

#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/iommu.h>
#include "octep_vdpa.h"

#define OCTEP_VDPA_DRIVER_NAME "octep_vdpa"

struct octep_pf {
	u8 __iomem *base[PCI_STD_NUM_BARS];
	struct pci_dev *pdev;
	struct resource res;
	u64 vf_base;
	int enabled_vfs;
	u32 vf_stride;
	u16 vf_devid;
};

struct octep_vdpa {
	struct vdpa_device vdpa;
	struct octep_hw *oct_hw;
	struct pci_dev *pdev;
};

struct octep_vdpa_mgmt_dev {
	struct vdpa_mgmt_dev mdev;
	struct octep_hw oct_hw;
	struct pci_dev *pdev;
	/* Work entry to handle device setup */
	struct work_struct setup_task;
	/* Device status */
	atomic_t status;
};

static struct octep_hw *vdpa_to_octep_hw(struct vdpa_device *vdpa_dev)
{
	struct octep_vdpa *oct_vdpa;

	oct_vdpa = container_of(vdpa_dev, struct octep_vdpa, vdpa);

	return oct_vdpa->oct_hw;
}

static irqreturn_t octep_vdpa_intr_handler(int irq, void *data)
{
	struct octep_hw *oct_hw = data;
	int i;

	for (i = 0; i < oct_hw->nr_vring; i++) {
		if (oct_hw->vqs[i].cb.callback && ioread32(oct_hw->vqs[i].cb_notify_addr)) {
			/* Acknowledge the per queue notification to the device */
			iowrite32(0, oct_hw->vqs[i].cb_notify_addr);
			oct_hw->vqs[i].cb.callback(oct_hw->vqs[i].cb.private);
		}
	}

	return IRQ_HANDLED;
}

static void octep_free_irqs(struct octep_hw *oct_hw)
{
	struct pci_dev *pdev = oct_hw->pdev;

	if (oct_hw->irq != -1) {
		devm_free_irq(&pdev->dev, oct_hw->irq, oct_hw);
		oct_hw->irq = -1;
	}
	pci_free_irq_vectors(pdev);
}
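
/*
 * Interrupt setup: the hardware provisions a single MSI-X vector per VF, so
 * one interrupt is requested for all virtqueues and the handler above polls
 * each ring's cb_notify_addr to find the queue(s) that need servicing.
 */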
81 */ 82 ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); 83 if (ret < 0) { 84 dev_err(&pdev->dev, "Failed to alloc msix vector"); 85 return ret; 86 } 87 88 snprintf(oct_hw->vqs->msix_name, sizeof(oct_hw->vqs->msix_name), 89 OCTEP_VDPA_DRIVER_NAME "-vf-%d", pci_iov_vf_id(pdev)); 90 91 irq = pci_irq_vector(pdev, 0); 92 ret = devm_request_irq(&pdev->dev, irq, octep_vdpa_intr_handler, 0, 93 oct_hw->vqs->msix_name, oct_hw); 94 if (ret) { 95 dev_err(&pdev->dev, "Failed to register interrupt handler\n"); 96 goto free_irq_vec; 97 } 98 oct_hw->irq = irq; 99 100 return 0; 101 102 free_irq_vec: 103 pci_free_irq_vectors(pdev); 104 return ret; 105 } 106 107 static u64 octep_vdpa_get_device_features(struct vdpa_device *vdpa_dev) 108 { 109 struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev); 110 111 return oct_hw->features; 112 } 113 114 static int octep_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features) 115 { 116 struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev); 117 int ret; 118 119 pr_debug("Driver Features: %llx\n", features); 120 121 ret = octep_verify_features(features); 122 if (ret) { 123 dev_warn(&oct_hw->pdev->dev, 124 "Must negotiate minimum features 0x%llx for this device", 125 BIT_ULL(VIRTIO_F_VERSION_1) | BIT_ULL(VIRTIO_F_NOTIFICATION_DATA) | 126 BIT_ULL(VIRTIO_F_RING_PACKED)); 127 return ret; 128 } 129 octep_hw_set_drv_features(oct_hw, features); 130 131 return 0; 132 } 133 134 static u64 octep_vdpa_get_driver_features(struct vdpa_device *vdpa_dev) 135 { 136 struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev); 137 138 return octep_hw_get_drv_features(oct_hw); 139 } 140 141 static u8 octep_vdpa_get_status(struct vdpa_device *vdpa_dev) 142 { 143 struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev); 144 145 return octep_hw_get_status(oct_hw); 146 } 147 148 static void octep_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status) 149 { 150 struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev); 151 u8 status_old; 152 153 status_old = octep_hw_get_status(oct_hw); 154 155 if (status_old == status) 156 return; 157 158 if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && 159 !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) { 160 if (octep_request_irqs(oct_hw)) 161 status = status_old | VIRTIO_CONFIG_S_FAILED; 162 } 163 octep_hw_set_status(oct_hw, status); 164 } 165 166 static int octep_vdpa_reset(struct vdpa_device *vdpa_dev) 167 { 168 struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev); 169 u8 status = octep_hw_get_status(oct_hw); 170 u16 qid; 171 172 if (status == 0) 173 return 0; 174 175 for (qid = 0; qid < oct_hw->nr_vring; qid++) { 176 oct_hw->vqs[qid].cb.callback = NULL; 177 oct_hw->vqs[qid].cb.private = NULL; 178 oct_hw->config_cb.callback = NULL; 179 oct_hw->config_cb.private = NULL; 180 } 181 octep_hw_reset(oct_hw); 182 183 if (status & VIRTIO_CONFIG_S_DRIVER_OK) 184 octep_free_irqs(oct_hw); 185 186 return 0; 187 } 188 189 static u16 octep_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev) 190 { 191 struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev); 192 193 return octep_get_vq_size(oct_hw); 194 } 195 196 static int octep_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid, 197 struct vdpa_vq_state *state) 198 { 199 struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev); 200 201 return octep_get_vq_state(oct_hw, qid, state); 202 } 203 204 static int octep_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid, 205 const struct vdpa_vq_state *state) 206 { 207 struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev); 208 209 return octep_set_vq_state(oct_hw, qid, state); 210 } 
static void octep_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid, struct vdpa_callback *cb)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	oct_hw->vqs[qid].cb = *cb;
}

static void octep_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	octep_set_vq_ready(oct_hw, qid, ready);
}

static bool octep_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_get_vq_ready(oct_hw, qid);
}

static void octep_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	octep_set_vq_num(oct_hw, qid, num);
}

static int octep_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid, u64 desc_area,
				     u64 driver_area, u64 device_area)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	pr_debug("qid[%d]: desc_area: %llx\n", qid, desc_area);
	pr_debug("qid[%d]: driver_area: %llx\n", qid, driver_area);
	pr_debug("qid[%d]: device_area: %llx\n\n", qid, device_area);

	return octep_set_vq_address(oct_hw, qid, desc_area, driver_area, device_area);
}

static void octep_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	/* Not supported */
}

static void octep_vdpa_kick_vq_with_data(struct vdpa_device *vdpa_dev, u32 data)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	u16 idx = data & 0xFFFF;

	vp_iowrite32(data, oct_hw->vqs[idx].notify_addr);
}

static u32 octep_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return vp_ioread8(&oct_hw->common_cfg->config_generation);
}

static u32 octep_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	return VIRTIO_ID_NET;
}

static u32 octep_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	return PCI_VENDOR_ID_CAVIUM;
}

static u32 octep_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return PAGE_SIZE;
}

static size_t octep_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return oct_hw->config_size;
}

static void octep_vdpa_get_config(struct vdpa_device *vdpa_dev, unsigned int offset, void *buf,
				  unsigned int len)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	octep_read_dev_config(oct_hw, offset, buf, len);
}

static void octep_vdpa_set_config(struct vdpa_device *vdpa_dev, unsigned int offset,
				  const void *buf, unsigned int len)
{
	/* Not supported */
}

static void octep_vdpa_set_config_cb(struct vdpa_device *vdpa_dev, struct vdpa_callback *cb)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	oct_hw->config_cb.callback = cb->callback;
	oct_hw->config_cb.private = cb->private;
}

static struct vdpa_notification_area octep_get_vq_notification(struct vdpa_device *vdpa_dev,
							       u16 idx)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	struct vdpa_notification_area area;

	area.addr = oct_hw->vqs[idx].notify_pa;
	area.size = PAGE_SIZE;

	return area;
}
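
/*
 * vdpa_config_ops table handed to the vDPA core for every device created
 * through this driver. Note that .kick_vq is a no-op: the device requires
 * VIRTIO_F_NOTIFICATION_DATA, so notifications arrive via
 * .kick_vq_with_data, which writes the 32-bit notification data to the
 * queue's notify register. Devices are typically instantiated from user
 * space through the vDPA management interface, for example (assuming the
 * iproute2 vdpa tool and a VF at 0000:01:00.1, both placeholders):
 *
 *   vdpa dev add name vdpa0 mgmtdev pci/0000:01:00.1
 */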
static struct vdpa_config_ops octep_vdpa_ops = {
	.get_device_features = octep_vdpa_get_device_features,
	.set_driver_features = octep_vdpa_set_driver_features,
	.get_driver_features = octep_vdpa_get_driver_features,
	.get_status = octep_vdpa_get_status,
	.set_status = octep_vdpa_set_status,
	.reset = octep_vdpa_reset,
	.get_vq_num_max = octep_vdpa_get_vq_num_max,
	.get_vq_state = octep_vdpa_get_vq_state,
	.set_vq_state = octep_vdpa_set_vq_state,
	.set_vq_cb = octep_vdpa_set_vq_cb,
	.set_vq_ready = octep_vdpa_set_vq_ready,
	.get_vq_ready = octep_vdpa_get_vq_ready,
	.set_vq_num = octep_vdpa_set_vq_num,
	.set_vq_address = octep_vdpa_set_vq_address,
	.get_vq_irq = NULL,
	.kick_vq = octep_vdpa_kick_vq,
	.kick_vq_with_data = octep_vdpa_kick_vq_with_data,
	.get_generation = octep_vdpa_get_generation,
	.get_device_id = octep_vdpa_get_device_id,
	.get_vendor_id = octep_vdpa_get_vendor_id,
	.get_vq_align = octep_vdpa_get_vq_align,
	.get_config_size = octep_vdpa_get_config_size,
	.get_config = octep_vdpa_get_config,
	.set_config = octep_vdpa_set_config,
	.set_config_cb = octep_vdpa_set_config_cb,
	.get_vq_notification = octep_get_vq_notification,
};

static int octep_iomap_region(struct pci_dev *pdev, u8 __iomem **tbl, u8 bar)
{
	int ret;

	ret = pci_request_region(pdev, bar, OCTEP_VDPA_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request BAR:%u region\n", bar);
		return ret;
	}

	tbl[bar] = pci_iomap(pdev, bar, pci_resource_len(pdev, bar));
	if (!tbl[bar]) {
		dev_err(&pdev->dev, "Failed to iomap BAR:%u\n", bar);
		pci_release_region(pdev, bar);
		ret = -ENOMEM;
	}

	return ret;
}

static void octep_iounmap_region(struct pci_dev *pdev, u8 __iomem **tbl, u8 bar)
{
	pci_iounmap(pdev, tbl[bar]);
	pci_release_region(pdev, bar);
}

static void octep_vdpa_pf_bar_shrink(struct octep_pf *octpf)
{
	struct pci_dev *pf_dev = octpf->pdev;
	struct resource *res = pf_dev->resource + PCI_STD_RESOURCES + 4;
	struct pci_bus_region bus_region;

	octpf->res.start = res->start;
	octpf->res.end = res->end;
	octpf->vf_base = res->start;

	bus_region.start = res->start;
	bus_region.end = res->start - 1;

	pcibios_bus_to_resource(pf_dev->bus, res, &bus_region);
}

static void octep_vdpa_pf_bar_expand(struct octep_pf *octpf)
{
	struct pci_dev *pf_dev = octpf->pdev;
	struct resource *res = pf_dev->resource + PCI_STD_RESOURCES + 4;
	struct pci_bus_region bus_region;

	bus_region.start = octpf->res.start;
	bus_region.end = octpf->res.end;

	pcibios_bus_to_resource(pf_dev->bus, res, &bus_region);
}

static void octep_vdpa_remove_pf(struct pci_dev *pdev)
{
	struct octep_pf *octpf = pci_get_drvdata(pdev);

	pci_disable_sriov(pdev);

	if (octpf->base[OCTEP_HW_CAPS_BAR])
		octep_iounmap_region(pdev, octpf->base, OCTEP_HW_CAPS_BAR);

	if (octpf->base[OCTEP_HW_MBOX_BAR])
		octep_iounmap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);

	octep_vdpa_pf_bar_expand(octpf);
}

static void octep_vdpa_vf_bar_shrink(struct pci_dev *pdev)
{
	struct resource *vf_res = pdev->resource + PCI_STD_RESOURCES + 4;

	memset(vf_res, 0, sizeof(*vf_res));
}
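
/*
 * VF teardown. The device status is flipped to UNINIT and the setup task is
 * flushed before anything is unregistered, so the deferred setup work cannot
 * race with removal; the management device is only unregistered if setup
 * actually reached the READY state.
 */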
static void octep_vdpa_remove_vf(struct pci_dev *pdev)
{
	struct octep_vdpa_mgmt_dev *mgmt_dev = pci_get_drvdata(pdev);
	struct octep_hw *oct_hw;
	int status;

	oct_hw = &mgmt_dev->oct_hw;
	status = atomic_read(&mgmt_dev->status);
	atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_UNINIT);

	cancel_work_sync(&mgmt_dev->setup_task);
	if (status == OCTEP_VDPA_DEV_STATUS_READY)
		vdpa_mgmtdev_unregister(&mgmt_dev->mdev);

	if (oct_hw->base[OCTEP_HW_CAPS_BAR])
		octep_iounmap_region(pdev, oct_hw->base, OCTEP_HW_CAPS_BAR);

	if (oct_hw->base[OCTEP_HW_MBOX_BAR])
		octep_iounmap_region(pdev, oct_hw->base, OCTEP_HW_MBOX_BAR);

	octep_vdpa_vf_bar_shrink(pdev);
}

static void octep_vdpa_remove(struct pci_dev *pdev)
{
	if (pdev->is_virtfn)
		octep_vdpa_remove_vf(pdev);
	else
		octep_vdpa_remove_pf(pdev);
}

static int octep_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			      const struct vdpa_dev_set_config *config)
{
	struct octep_vdpa_mgmt_dev *mgmt_dev = container_of(mdev, struct octep_vdpa_mgmt_dev, mdev);
	struct octep_hw *oct_hw = &mgmt_dev->oct_hw;
	struct pci_dev *pdev = oct_hw->pdev;
	struct vdpa_device *vdpa_dev;
	struct octep_vdpa *oct_vdpa;
	u64 device_features;
	int ret;

	oct_vdpa = vdpa_alloc_device(struct octep_vdpa, vdpa, &pdev->dev, &octep_vdpa_ops, 1, 1,
				     NULL, false);
	if (IS_ERR(oct_vdpa)) {
		dev_err(&pdev->dev, "Failed to allocate vDPA structure for octep vdpa device\n");
		return PTR_ERR(oct_vdpa);
	}

	oct_vdpa->pdev = pdev;
	oct_vdpa->vdpa.dma_dev = &pdev->dev;
	oct_vdpa->vdpa.mdev = mdev;
	oct_vdpa->oct_hw = oct_hw;
	vdpa_dev = &oct_vdpa->vdpa;

	device_features = oct_hw->features;
	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (config->device_features & ~device_features) {
			dev_err(&pdev->dev, "The provisioned features 0x%llx are not supported by this device with features 0x%llx\n",
				config->device_features, device_features);
			ret = -EINVAL;
			goto vdpa_dev_put;
		}
		device_features &= config->device_features;
	}

	oct_hw->features = device_features;
	dev_info(&pdev->dev, "vDPA management device features: %llx\n", device_features);

	ret = octep_verify_features(device_features);
	if (ret) {
		dev_warn(mdev->device,
			 "Must provision minimum features 0x%llx for this device\n",
			 BIT_ULL(VIRTIO_F_VERSION_1) | BIT_ULL(VIRTIO_F_ACCESS_PLATFORM) |
			 BIT_ULL(VIRTIO_F_NOTIFICATION_DATA) | BIT_ULL(VIRTIO_F_RING_PACKED));
		goto vdpa_dev_put;
	}
	if (name)
		ret = dev_set_name(&vdpa_dev->dev, "%s", name);
	else
		ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);

	ret = _vdpa_register_device(&oct_vdpa->vdpa, oct_hw->nr_vring);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register to vDPA bus\n");
		goto vdpa_dev_put;
	}
	return 0;

vdpa_dev_put:
	put_device(&oct_vdpa->vdpa.dev);
	return ret;
}

static void octep_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *vdpa_dev)
{
	_vdpa_unregister_device(vdpa_dev);
}

static const struct vdpa_mgmtdev_ops octep_vdpa_mgmt_dev_ops = {
	.dev_add = octep_vdpa_dev_add,
	.dev_del = octep_vdpa_dev_del
};

static bool get_device_ready_status(u8 __iomem *addr)
{
	u64 signature = readq(addr + OCTEP_VF_MBOX_DATA(0));

	if (signature == OCTEP_DEV_READY_SIGNATURE) {
		writeq(0, addr + OCTEP_VF_MBOX_DATA(0));
		return true;
	}

	return false;
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
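
/*
 * Deferred VF initialization. The PF signals readiness by writing
 * OCTEP_DEV_READY_SIGNATURE into the VF mailbox (see octep_sriov_enable()),
 * so the task below polls the mailbox BAR for up to 5 seconds, then maps the
 * capabilities BAR, reads the device capabilities and registers the vDPA
 * management device.
 */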
static void octep_vdpa_setup_task(struct work_struct *work)
{
	struct octep_vdpa_mgmt_dev *mgmt_dev = container_of(work, struct octep_vdpa_mgmt_dev,
							    setup_task);
	struct pci_dev *pdev = mgmt_dev->pdev;
	struct device *dev = &pdev->dev;
	struct octep_hw *oct_hw;
	unsigned long timeout;
	int ret;

	oct_hw = &mgmt_dev->oct_hw;

	atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_WAIT_FOR_BAR_INIT);

	/* Wait for a maximum of 5 sec */
	timeout = jiffies + msecs_to_jiffies(5000);
	while (!time_after(jiffies, timeout)) {
		if (get_device_ready_status(oct_hw->base[OCTEP_HW_MBOX_BAR])) {
			atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_INIT);
			break;
		}

		if (atomic_read(&mgmt_dev->status) >= OCTEP_VDPA_DEV_STATUS_READY) {
			dev_info(dev, "Stopping vDPA setup task.\n");
			return;
		}

		usleep_range(1000, 1500);
	}

	if (atomic_read(&mgmt_dev->status) != OCTEP_VDPA_DEV_STATUS_INIT) {
		dev_err(dev, "BAR initialization timed out\n");
		return;
	}

	ret = octep_iomap_region(pdev, oct_hw->base, OCTEP_HW_CAPS_BAR);
	if (ret)
		return;

	ret = octep_hw_caps_read(oct_hw, pdev);
	if (ret < 0)
		goto unmap_region;

	mgmt_dev->mdev.ops = &octep_vdpa_mgmt_dev_ops;
	mgmt_dev->mdev.id_table = id_table;
	mgmt_dev->mdev.max_supported_vqs = oct_hw->nr_vring;
	mgmt_dev->mdev.supported_features = oct_hw->features;
	mgmt_dev->mdev.config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES);
	mgmt_dev->mdev.device = dev;

	ret = vdpa_mgmtdev_register(&mgmt_dev->mdev);
	if (ret) {
		dev_err(dev, "Failed to register vdpa management interface\n");
		goto unmap_region;
	}

	atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_READY);

	return;

unmap_region:
	octep_iounmap_region(pdev, oct_hw->base, OCTEP_HW_CAPS_BAR);
	oct_hw->base[OCTEP_HW_CAPS_BAR] = NULL;
}

static int octep_vdpa_probe_vf(struct pci_dev *pdev)
{
	struct octep_vdpa_mgmt_dev *mgmt_dev;
	struct device *dev = &pdev->dev;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Failed to enable device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "No usable DMA configuration\n");
		return ret;
	}
	pci_set_master(pdev);

	mgmt_dev = devm_kzalloc(dev, sizeof(struct octep_vdpa_mgmt_dev), GFP_KERNEL);
	if (!mgmt_dev)
		return -ENOMEM;

	ret = octep_iomap_region(pdev, mgmt_dev->oct_hw.base, OCTEP_HW_MBOX_BAR);
	if (ret)
		return ret;

	mgmt_dev->pdev = pdev;
	pci_set_drvdata(pdev, mgmt_dev);

	atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_ALLOC);
	INIT_WORK(&mgmt_dev->setup_task, octep_vdpa_setup_task);
	schedule_work(&mgmt_dev->setup_task);
	dev_info(&pdev->dev, "octep vdpa mgmt device setup task is queued\n");

	return 0;
}
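
/*
 * Each VF is handed a slice of the PF BAR4 space that was reserved by
 * octep_vdpa_pf_bar_shrink(): VF "idx" gets the range
 * [vf_base + idx * vf_stride, vf_base + (idx + 1) * vf_stride - 1].
 */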
static void octep_vdpa_assign_barspace(struct pci_dev *vf_dev, struct pci_dev *pf_dev, u8 idx)
{
	struct resource *vf_res = vf_dev->resource + PCI_STD_RESOURCES + 4;
	struct resource *pf_res = pf_dev->resource + PCI_STD_RESOURCES + 4;
	struct octep_pf *pf = pci_get_drvdata(pf_dev);
	struct pci_bus_region bus_region;

	vf_res->name = pci_name(vf_dev);
	vf_res->flags = pf_res->flags;
	vf_res->parent = (pf_dev->resource + PCI_STD_RESOURCES)->parent;

	bus_region.start = pf->vf_base + idx * pf->vf_stride;
	bus_region.end = bus_region.start + pf->vf_stride - 1;
	pcibios_bus_to_resource(vf_dev->bus, vf_res, &bus_region);
}

static int octep_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct octep_pf *pf = pci_get_drvdata(pdev);
	u8 __iomem *addr = pf->base[OCTEP_HW_MBOX_BAR];
	struct pci_dev *vf_pdev = NULL;
	bool done = false;
	int index = 0;
	int ret, i;

	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret)
		return ret;

	pf->enabled_vfs = num_vfs;

	while ((vf_pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, vf_pdev))) {
		if (vf_pdev->device != pf->vf_devid)
			continue;

		octep_vdpa_assign_barspace(vf_pdev, pdev, index);
		if (++index == num_vfs) {
			done = true;
			break;
		}
	}

	if (done) {
		for (i = 0; i < pf->enabled_vfs; i++)
			writeq(OCTEP_DEV_READY_SIGNATURE, addr + OCTEP_PF_MBOX_DATA(i));
	}

	return num_vfs;
}

static int octep_sriov_disable(struct pci_dev *pdev)
{
	struct octep_pf *pf = pci_get_drvdata(pdev);

	if (!pci_num_vf(pdev))
		return 0;

	pci_disable_sriov(pdev);
	pf->enabled_vfs = 0;

	return 0;
}

static int octep_vdpa_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs > 0)
		return octep_sriov_enable(pdev, num_vfs);
	else
		return octep_sriov_disable(pdev);
}

static u16 octep_get_vf_devid(struct pci_dev *pdev)
{
	u16 did;

	switch (pdev->device) {
	case OCTEP_VDPA_DEVID_CN106K_PF:
		did = OCTEP_VDPA_DEVID_CN106K_VF;
		break;
	case OCTEP_VDPA_DEVID_CN105K_PF:
		did = OCTEP_VDPA_DEVID_CN105K_VF;
		break;
	case OCTEP_VDPA_DEVID_CN103K_PF:
		did = OCTEP_VDPA_DEVID_CN103K_VF;
		break;
	default:
		did = 0xFFFF;
		break;
	}

	return did;
}

static int octep_vdpa_pf_setup(struct octep_pf *octpf)
{
	u8 __iomem *addr = octpf->base[OCTEP_HW_MBOX_BAR];
	struct pci_dev *pdev = octpf->pdev;
	int totalvfs;
	size_t len;
	u64 val;

	totalvfs = pci_sriov_get_totalvfs(pdev);
	if (unlikely(!totalvfs)) {
		dev_info(&pdev->dev, "Total VFs are %d in PF sriov configuration\n", totalvfs);
		return 0;
	}

	addr = octpf->base[OCTEP_HW_MBOX_BAR];
	val = readq(addr + OCTEP_EPF_RINFO(0));
	if (val == 0) {
		dev_err(&pdev->dev, "Invalid device configuration\n");
		return -EINVAL;
	}

	if (OCTEP_EPF_RINFO_RPVF(val) != BIT_ULL(0)) {
		val &= ~GENMASK_ULL(35, 32);
		val |= BIT_ULL(32);
		writeq(val, addr + OCTEP_EPF_RINFO(0));
	}

	len = pci_resource_len(pdev, OCTEP_HW_CAPS_BAR);

	octpf->vf_stride = len / totalvfs;
	octpf->vf_devid = octep_get_vf_devid(pdev);

	octep_vdpa_pf_bar_shrink(octpf);

	return 0;
}
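
/*
 * PF probe path: enable the device, map the mailbox BAR and carve up the
 * capabilities BAR for the VFs. vDPA devices themselves are only created on
 * the VFs, which are enabled later through the standard SR-IOV sysfs
 * interface, for example (PF address is a placeholder):
 *
 *   echo 2 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 */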
static int octep_vdpa_probe_pf(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct octep_pf *octpf;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Failed to enable device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "No usable DMA configuration\n");
		return ret;
	}
	octpf = devm_kzalloc(dev, sizeof(*octpf), GFP_KERNEL);
	if (!octpf)
		return -ENOMEM;

	ret = octep_iomap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
	if (ret)
		return ret;

	pci_set_master(pdev);
	pci_set_drvdata(pdev, octpf);
	octpf->pdev = pdev;

	ret = octep_vdpa_pf_setup(octpf);
	if (ret)
		goto unmap_region;

	return 0;

unmap_region:
	octep_iounmap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
	return ret;
}

static int octep_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (pdev->is_virtfn)
		return octep_vdpa_probe_vf(pdev);
	else
		return octep_vdpa_probe_pf(pdev);
}

static struct pci_device_id octep_pci_vdpa_map[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN106K_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN106K_VF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN105K_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN105K_VF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN103K_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN103K_VF) },
	{ 0 },
};

static struct pci_driver octep_pci_vdpa = {
	.name = OCTEP_VDPA_DRIVER_NAME,
	.id_table = octep_pci_vdpa_map,
	.probe = octep_vdpa_probe,
	.remove = octep_vdpa_remove,
	.sriov_configure = octep_vdpa_sriov_configure
};

module_pci_driver(octep_pci_vdpa);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell Octeon PCIe endpoint vDPA driver");
MODULE_LICENSE("GPL");