// SPDX-License-Identifier: GPL-2.0-only
/*
 * SolidRun DPU driver for control plane
 *
 * Copyright (C) 2022-2023 SolidRun
 *
 * Author: Alvaro Karsz <alvaro.karsz@solid-run.com>
 *
 */
#include <linux/iopoll.h>

#include "snet_vdpa.h"

/* SNET DPU device ID */
#define SNET_DEVICE_ID 0x1000
/* SNET signature */
#define SNET_SIGNATURE 0xD0D06363
/* Max. config version that we can work with */
#define SNET_CFG_VERSION 0x2
/* Queue align */
#define SNET_QUEUE_ALIGNMENT PAGE_SIZE
/* Kick value to notify that new data is available */
#define SNET_KICK_VAL 0x1
#define SNET_CONFIG_OFF 0x0
/* How long we are willing to wait for a SNET device */
#define SNET_DETECT_TIMEOUT 5000000
/* How long should we wait for the DPU to read our config */
#define SNET_READ_CFG_TIMEOUT 3000000
/* Size of configs written to the DPU */
#define SNET_GENERAL_CFG_LEN 36
#define SNET_GENERAL_CFG_VQ_LEN 40

static struct snet *vdpa_to_snet(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct snet, vdpa);
}

static irqreturn_t snet_cfg_irq_hndlr(int irq, void *data)
{
	struct snet *snet = data;
	/* Call callback if any */
	if (snet->cb.callback)
		return snet->cb.callback(snet->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t snet_vq_irq_hndlr(int irq, void *data)
{
	struct snet_vq *vq = data;
	/* Call callback if any */
	if (vq->cb.callback)
		return vq->cb.callback(vq->cb.private);

	return IRQ_HANDLED;
}

static void snet_free_irqs(struct snet *snet)
{
	struct psnet *psnet = snet->psnet;
	struct pci_dev *pdev;
	u32 i;

	/* Which device allocated the IRQs? */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
		pdev = snet->pdev->physfn;
	else
		pdev = snet->pdev;

	/* Free config's IRQ */
	if (snet->cfg_irq != -1) {
		devm_free_irq(&pdev->dev, snet->cfg_irq, snet);
		snet->cfg_irq = -1;
	}
	/* Free VQ IRQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		if (snet->vqs[i] && snet->vqs[i]->irq != -1) {
			devm_free_irq(&pdev->dev, snet->vqs[i]->irq, snet->vqs[i]);
			snet->vqs[i]->irq = -1;
		}
	}

	/* IRQ vectors are freed when the PCI remove callback is called */
}

static int snet_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_area,
			       u64 driver_area, u64 device_area)
{
	struct snet *snet = vdpa_to_snet(vdev);
	/* Save the received parameters in the vqueue struct */
	snet->vqs[idx]->desc_area = desc_area;
	snet->vqs[idx]->driver_area = driver_area;
	snet->vqs[idx]->device_area = device_area;

	return 0;
}

static void snet_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
{
	struct snet *snet = vdpa_to_snet(vdev);
	/* Save num in the vqueue */
	snet->vqs[idx]->num = num;
}

static void snet_kick_vq(struct vdpa_device *vdev, u16 idx)
{
	struct snet *snet = vdpa_to_snet(vdev);
	/* Not ready - ignore */
	if (!snet->vqs[idx]->ready)
		return;

	iowrite32(SNET_KICK_VAL, snet->vqs[idx]->kick_ptr);
}

static void snet_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->vqs[idx]->cb.callback = cb->callback;
	snet->vqs[idx]->cb.private = cb->private;
}

static void snet_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->vqs[idx]->ready = ready;
}

static bool snet_get_vq_ready(struct vdpa_device *vdev, u16 idx)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->vqs[idx]->ready;
}

static bool snet_vq_state_is_initial(struct snet *snet, const struct vdpa_vq_state *state)
{
	if (SNET_HAS_FEATURE(snet, VIRTIO_F_RING_PACKED)) {
		const struct vdpa_vq_state_packed *p = &state->packed;

		if (p->last_avail_counter == 1 && p->last_used_counter == 1 &&
		    p->last_avail_idx == 0 && p->last_used_idx == 0)
			return true;
	} else {
		const struct vdpa_vq_state_split *s = &state->split;

		if (s->avail_index == 0)
			return true;
	}

	return false;
}

static int snet_set_vq_state(struct vdpa_device *vdev, u16 idx, const struct vdpa_vq_state *state)
{
	struct snet *snet = vdpa_to_snet(vdev);

	/* We can set any state for config version 2+ */
	if (SNET_CFG_VER(snet, 2)) {
		memcpy(&snet->vqs[idx]->vq_state, state, sizeof(*state));
		return 0;
	}

	/* Older config - we can't set the VQ state.
	 * Return 0 only if this is the initial state we use in the DPU.
	 */
	if (snet_vq_state_is_initial(snet, state))
		return 0;

	return -EOPNOTSUPP;
}

static int snet_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa_vq_state *state)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet_read_vq_state(snet, idx, state);
}

static int snet_get_vq_irq(struct vdpa_device *vdev, u16 idx)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->vqs[idx]->irq;
}

static u32 snet_get_vq_align(struct vdpa_device *vdev)
{
	return (u32)SNET_QUEUE_ALIGNMENT;
}

static int snet_reset_dev(struct snet *snet)
{
	struct pci_dev *pdev = snet->pdev;
	int ret = 0;
	u32 i;

	/* If status is 0, nothing to do */
	if (!snet->status)
		return 0;

	/* If DPU started, destroy it */
	if (snet->status & VIRTIO_CONFIG_S_DRIVER_OK)
		ret = snet_destroy_dev(snet);

	/* Clear VQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		if (!snet->vqs[i])
			continue;
		snet->vqs[i]->cb.callback = NULL;
		snet->vqs[i]->cb.private = NULL;
		snet->vqs[i]->desc_area = 0;
		snet->vqs[i]->device_area = 0;
		snet->vqs[i]->driver_area = 0;
		snet->vqs[i]->ready = false;
	}

	/* Clear config callback */
	snet->cb.callback = NULL;
	snet->cb.private = NULL;
	/* Free IRQs */
	snet_free_irqs(snet);
	/* Reset status */
	snet->status = 0;
	snet->dpu_ready = false;

	if (ret)
		SNET_WARN(pdev, "Incomplete reset to SNET[%u] device, err: %d\n", snet->sid, ret);
	else
		SNET_DBG(pdev, "Reset SNET[%u] device\n", snet->sid);

	return 0;
}

static int snet_reset(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet_reset_dev(snet);
}

static size_t snet_get_config_size(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return (size_t)snet->cfg->cfg_size;
}

static u64 snet_get_features(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->cfg->features;
}

static int snet_set_drv_features(struct vdpa_device *vdev, u64 features)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->negotiated_features = snet->cfg->features & features;
	return 0;
}

static u64 snet_get_drv_features(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->negotiated_features;
}

static u16 snet_get_vq_num_max(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return (u16)snet->cfg->vq_size;
}

static void snet_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->cb.callback = cb->callback;
	snet->cb.private = cb->private;
}

static u32 snet_get_device_id(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->cfg->virtio_id;
}

static u32 snet_get_vendor_id(struct vdpa_device *vdev)
{
	return (u32)PCI_VENDOR_ID_SOLIDRUN;
}

static u8 snet_get_status(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->status;
}

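/*
 * Illustration only: one possible C view of the blob that snet_write_conf()
 * below assembles field by field with snet_write32()/snet_write64(). These
 * structs are not used anywhere in the driver and the field names are not
 * taken from a device specification; they merely mirror the write order and
 * the SNET_GENERAL_CFG_LEN / SNET_GENERAL_CFG_VQ_LEN sizes. The driver writes
 * the fields individually rather than copying a struct, so no endianness or
 * padding assumptions are imposed on the wire format here.
 */
struct snet_host_cfg_hdr_sketch {
	u32 magic;		/* SNET_SIGNATURE, written last as a "ready" flag */
	u32 cfg_ver;		/* negotiated config version */
	u32 sid;		/* SNET device serial ID */
	u32 vq_num;		/* number of virtqueues */
	u32 cfg_irq_idx;	/* MSI-X index used for config-change interrupts */
	u64 features;		/* negotiated virtio features */
	u8  rsvd[8];
} __packed;			/* 36 bytes == SNET_GENERAL_CFG_LEN */

struct snet_host_cfg_vq_sketch {
	u32 sid_and_size;	/* VQ serial ID (high 16 bits) | queue size (low 16 bits) */
	u32 irq_idx;		/* MSI-X index used for this VQ */
	u64 desc_area;
	u64 device_area;
	u64 driver_area;
	u32 vq_state;		/* only written for config version 2+ */
	u32 rsvd;
} __packed;			/* 40 bytes == SNET_GENERAL_CFG_VQ_LEN */
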
static bool snet_write_conf(struct snet *snet)
{
	u32 off, i, tmp;
	int ret;

	/* No need to write the config twice */
	if (snet->dpu_ready)
		return true;

	/* Snet data:
	 *
	 * General data: SNET_GENERAL_CFG_LEN bytes long
	 *  0x0            0x4       0x8        0xC                0x10      0x14       0x1C   0x24
	 * | MAGIC NUMBER | CFG VER | SNET SID | NUMBER OF QUEUES | IRQ IDX | FEATURES | RSVD |
	 *
	 * For every VQ: SNET_GENERAL_CFG_VQ_LEN bytes long
	 *  0x0                     0x4         0x8
	 * | VQ SID AND QUEUE SIZE | IRQ Index |
	 * |           DESC AREA               |
	 * |           DEVICE AREA             |
	 * |           DRIVER AREA             |
	 * | VQ STATE (CFG 2+)     | RSVD      |
	 *
	 * The magic number should be written last; it is the DPU's indication that the
	 * data is ready.
	 */

	/* Init offset */
	off = snet->psnet->cfg.host_cfg_off;

	/* Ignore magic number for now */
	off += 4;
	snet_write32(snet, off, snet->psnet->negotiated_cfg_ver);
	off += 4;
	snet_write32(snet, off, snet->sid);
	off += 4;
	snet_write32(snet, off, snet->cfg->vq_num);
	off += 4;
	snet_write32(snet, off, snet->cfg_irq_idx);
	off += 4;
	snet_write64(snet, off, snet->negotiated_features);
	off += 8;
	/* Ignore reserved */
	off += 8;
	/* Write VQs */
	for (i = 0 ; i < snet->cfg->vq_num ; i++) {
		tmp = (i << 16) | (snet->vqs[i]->num & 0xFFFF);
		snet_write32(snet, off, tmp);
		off += 4;
		snet_write32(snet, off, snet->vqs[i]->irq_idx);
		off += 4;
		snet_write64(snet, off, snet->vqs[i]->desc_area);
		off += 8;
		snet_write64(snet, off, snet->vqs[i]->device_area);
		off += 8;
		snet_write64(snet, off, snet->vqs[i]->driver_area);
		off += 8;
		/* Write the VQ state if the config version is 2+ */
		if (SNET_CFG_VER(snet, 2))
			snet_write32(snet, off, *(u32 *)&snet->vqs[i]->vq_state);
		off += 4;

		/* Ignore reserved */
		off += 4;
	}

	/* Write magic number - data is ready */
	snet_write32(snet, snet->psnet->cfg.host_cfg_off, SNET_SIGNATURE);

	/* The DPU will ACK the config by clearing the signature */
	ret = readx_poll_timeout(ioread32, snet->bar + snet->psnet->cfg.host_cfg_off,
				 tmp, !tmp, 10, SNET_READ_CFG_TIMEOUT);
	if (ret) {
		SNET_ERR(snet->pdev, "Timeout waiting for the DPU to read the config\n");
		return false;
	}

	/* Set DPU flag */
	snet->dpu_ready = true;

	return true;
}

static int snet_request_irqs(struct pci_dev *pdev, struct snet *snet)
{
	int ret, i, irq;

	/* Request config IRQ */
	irq = pci_irq_vector(pdev, snet->cfg_irq_idx);
	ret = devm_request_irq(&pdev->dev, irq, snet_cfg_irq_hndlr, 0,
			       snet->cfg_irq_name, snet);
	if (ret) {
		SNET_ERR(pdev, "Failed to request IRQ\n");
		return ret;
	}
	snet->cfg_irq = irq;

	/* Request IRQ for every VQ */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		irq = pci_irq_vector(pdev, snet->vqs[i]->irq_idx);
		ret = devm_request_irq(&pdev->dev, irq, snet_vq_irq_hndlr, 0,
				       snet->vqs[i]->irq_name, snet->vqs[i]);
		if (ret) {
			SNET_ERR(pdev, "Failed to request IRQ\n");
			return ret;
		}
		snet->vqs[i]->irq = irq;
	}
	return 0;
}

static void snet_set_status(struct vdpa_device *vdev, u8 status)
{
	struct snet *snet = vdpa_to_snet(vdev);
	struct psnet *psnet = snet->psnet;
	struct pci_dev *pdev = snet->pdev;
	int ret;
	bool pf_irqs;

	if (status == snet->status)
		return;

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(snet->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		/* Request IRQs */
		pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);
		ret = snet_request_irqs(pf_irqs ? pdev->physfn : pdev, snet);
		if (ret)
			goto set_err;

		/* Write config to the DPU */
		if (snet_write_conf(snet)) {
			SNET_INFO(pdev, "Create SNET[%u] device\n", snet->sid);
		} else {
			snet_free_irqs(snet);
			goto set_err;
		}
	}

	/* Save the new status */
	snet->status = status;
	return;

set_err:
	snet->status |= VIRTIO_CONFIG_S_FAILED;
}

static void snet_get_config(struct vdpa_device *vdev, unsigned int offset,
			    void *buf, unsigned int len)
{
	struct snet *snet = vdpa_to_snet(vdev);
	void __iomem *cfg_ptr = snet->cfg->virtio_cfg + offset;
	u8 *buf_ptr = buf;
	u32 i;

	/* Check for offset error */
	if (offset + len > snet->cfg->cfg_size)
		return;

	/* Read into the buffer */
	for (i = 0; i < len; i++)
		*buf_ptr++ = ioread8(cfg_ptr + i);
}

static void snet_set_config(struct vdpa_device *vdev, unsigned int offset,
			    const void *buf, unsigned int len)
{
	struct snet *snet = vdpa_to_snet(vdev);
	void __iomem *cfg_ptr = snet->cfg->virtio_cfg + offset;
	const u8 *buf_ptr = buf;
	u32 i;

	/* Check for offset error */
	if (offset + len > snet->cfg->cfg_size)
		return;

	/* Write into the PCI BAR */
	for (i = 0; i < len; i++)
		iowrite8(*buf_ptr++, cfg_ptr + i);
}

static const struct vdpa_config_ops snet_config_ops = {
	.set_vq_address = snet_set_vq_address,
	.set_vq_num = snet_set_vq_num,
	.kick_vq = snet_kick_vq,
	.set_vq_cb = snet_set_vq_cb,
	.set_vq_ready = snet_set_vq_ready,
	.get_vq_ready = snet_get_vq_ready,
	.set_vq_state = snet_set_vq_state,
	.get_vq_state = snet_get_vq_state,
	.get_vq_irq = snet_get_vq_irq,
	.get_vq_align = snet_get_vq_align,
	.reset = snet_reset,
	.get_config_size = snet_get_config_size,
	.get_device_features = snet_get_features,
	.set_driver_features = snet_set_drv_features,
	.get_driver_features = snet_get_drv_features,
	.get_vq_num_min = snet_get_vq_num_max,
	.get_vq_num_max = snet_get_vq_num_max,
	.set_config_cb = snet_set_config_cb,
	.get_device_id = snet_get_device_id,
	.get_vendor_id = snet_get_vendor_id,
	.get_status = snet_get_status,
	.set_status = snet_set_status,
	.get_config = snet_get_config,
	.set_config = snet_set_config,
};

static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet)
{
	char name[50];
	int ret, i, mask = 0;
	/* We don't know which BAR will be used to communicate,
	 * so we map every BAR with len > 0.
	 * Later, we will discover the BAR and unmap all other BARs.
	 */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (pci_resource_len(pdev, i))
			mask |= (1 << i);
	}

	/* No BAR can be used */
	if (!mask) {
		SNET_ERR(pdev, "Failed to find a PCI BAR\n");
		return -ENODEV;
	}

	snprintf(name, sizeof(name), "psnet[%s]-bars", pci_name(pdev));
	ret = pcim_iomap_regions(pdev, mask, name);
	if (ret) {
		SNET_ERR(pdev, "Failed to request and map PCI BARs\n");
		return ret;
	}

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (mask & (1 << i))
			psnet->bars[i] = pcim_iomap_table(pdev)[i];
	}

	return 0;
}

static int snet_open_vf_bar(struct pci_dev *pdev, struct snet *snet)
{
	char name[50];
	int ret;

	snprintf(name, sizeof(name), "snet[%s]-bar", pci_name(pdev));
	/* Request and map BAR */
	ret = pcim_iomap_regions(pdev, BIT(snet->psnet->cfg.vf_bar), name);
	if (ret) {
		SNET_ERR(pdev, "Failed to request and map PCI BAR for a VF\n");
		return ret;
	}

	snet->bar = pcim_iomap_table(pdev)[snet->psnet->cfg.vf_bar];

	return 0;
}

static void snet_free_cfg(struct snet_cfg *cfg)
{
	u32 i;

	if (!cfg->devs)
		return;

	/* Free devices */
	for (i = 0; i < cfg->devices_num; i++) {
		if (!cfg->devs[i])
			break;

		kfree(cfg->devs[i]);
	}
	/* Free pointers to devices */
	kfree(cfg->devs);
}

/* Detect which BAR is used for communication with the device. */
static int psnet_detect_bar(struct psnet *psnet, u32 off)
{
	unsigned long exit_time;
	int i;

	exit_time = jiffies + usecs_to_jiffies(SNET_DETECT_TIMEOUT);

	/* The SNET DPU will write SNET's signature when the config is ready. */
	while (time_before(jiffies, exit_time)) {
		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
			/* Is this BAR mapped? */
			if (!psnet->bars[i])
				continue;

			if (ioread32(psnet->bars[i] + off) == SNET_SIGNATURE)
				return i;
		}
		usleep_range(1000, 10000);
	}

	return -ENODEV;
}

static void psnet_unmap_unused_bars(struct pci_dev *pdev, struct psnet *psnet)
{
	int i, mask = 0;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (psnet->bars[i] && i != psnet->barno)
			mask |= (1 << i);
	}

	if (mask)
		pcim_iounmap_regions(pdev, mask);
}

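/*
 * A sketch of the config area as parsed by psnet_read_cfg() below. Offsets
 * are relative to SNET_CONFIG_OFF in the detected BAR, all fields are 4 bytes
 * unless noted otherwise, and the reserved-area sizes come from the rsvd
 * fields declared in snet_vdpa.h:
 *
 * | key | cfg_size | cfg_ver | vf_num | vf_bar | host_cfg_off | max_size_host_cfg |
 * | virtio_cfg_off | kick_off | hwmon_off | ctrl_off | flags | rsvd | devices_num |
 *
 * followed by devices_num per-device entries:
 *
 * | virtio_id | vq_num | vq_size | vfid | features (8B) | rsvd | cfg_size |
 */
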
/* Read SNET config from PCI BAR */
static int psnet_read_cfg(struct pci_dev *pdev, struct psnet *psnet)
{
	struct snet_cfg *cfg = &psnet->cfg;
	u32 i, off;
	int barno;

	/* Move to where the config starts */
	off = SNET_CONFIG_OFF;

	/* Find BAR used for communication */
	barno = psnet_detect_bar(psnet, off);
	if (barno < 0) {
		SNET_ERR(pdev, "SNET config is not ready.\n");
		return barno;
	}

	/* Save used BAR number and unmap all other BARs */
	psnet->barno = barno;
	SNET_DBG(pdev, "Using BAR number %d\n", barno);

	psnet_unmap_unused_bars(pdev, psnet);

	/* Load config from BAR */
	cfg->key = psnet_read32(psnet, off);
	off += 4;
	cfg->cfg_size = psnet_read32(psnet, off);
	off += 4;
	cfg->cfg_ver = psnet_read32(psnet, off);
	off += 4;
	/* The negotiated config version is the lower one between this driver's config
	 * and the DPU's.
	 */
	psnet->negotiated_cfg_ver = min_t(u32, cfg->cfg_ver, SNET_CFG_VERSION);
	SNET_DBG(pdev, "SNET config version %u\n", psnet->negotiated_cfg_ver);

	cfg->vf_num = psnet_read32(psnet, off);
	off += 4;
	cfg->vf_bar = psnet_read32(psnet, off);
	off += 4;
	cfg->host_cfg_off = psnet_read32(psnet, off);
	off += 4;
	cfg->max_size_host_cfg = psnet_read32(psnet, off);
	off += 4;
	cfg->virtio_cfg_off = psnet_read32(psnet, off);
	off += 4;
	cfg->kick_off = psnet_read32(psnet, off);
	off += 4;
	cfg->hwmon_off = psnet_read32(psnet, off);
	off += 4;
	cfg->ctrl_off = psnet_read32(psnet, off);
	off += 4;
	cfg->flags = psnet_read32(psnet, off);
	off += 4;
	/* Ignore reserved */
	off += sizeof(cfg->rsvd);

	cfg->devices_num = psnet_read32(psnet, off);
	off += 4;
	/* Allocate memory to hold pointers to the devices */
	cfg->devs = kcalloc(cfg->devices_num, sizeof(void *), GFP_KERNEL);
	if (!cfg->devs)
		return -ENOMEM;

	/* Load device configuration from BAR */
	for (i = 0; i < cfg->devices_num; i++) {
		cfg->devs[i] = kzalloc(sizeof(*cfg->devs[i]), GFP_KERNEL);
		if (!cfg->devs[i]) {
			snet_free_cfg(cfg);
			return -ENOMEM;
		}
		/* Read device config */
		cfg->devs[i]->virtio_id = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vq_num = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vq_size = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vfid = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->features = psnet_read64(psnet, off);
		off += 8;
		/* Ignore reserved */
		off += sizeof(cfg->devs[i]->rsvd);

		cfg->devs[i]->cfg_size = psnet_read32(psnet, off);
		off += 4;

		/* Is the config written to the DPU going to be too big? */
		if (SNET_GENERAL_CFG_LEN + SNET_GENERAL_CFG_VQ_LEN * cfg->devs[i]->vq_num >
		    cfg->max_size_host_cfg) {
			SNET_ERR(pdev, "Failed to read SNET config, the config is too big\n");
			snet_free_cfg(cfg);
			return -EINVAL;
		}
	}
	return 0;
}

static int psnet_alloc_irq_vector(struct pci_dev *pdev, struct psnet *psnet)
{
	int ret = 0;
	u32 i, irq_num = 0;

	/* Let's count how many IRQs we need, 1 for every VQ + 1 for config change */
	for (i = 0; i < psnet->cfg.devices_num; i++)
		irq_num += psnet->cfg.devs[i]->vq_num + 1;

	ret = pci_alloc_irq_vectors(pdev, irq_num, irq_num, PCI_IRQ_MSIX);
	if (ret != irq_num) {
		SNET_ERR(pdev, "Failed to allocate IRQ vectors\n");
		return ret;
	}
	SNET_DBG(pdev, "Allocated %u IRQ vectors from physical function\n", irq_num);

	return 0;
}

static int snet_alloc_irq_vector(struct pci_dev *pdev, struct snet_dev_cfg *snet_cfg)
{
	int ret = 0;
	u32 irq_num;

	/* We want 1 IRQ for every VQ + 1 for config change events */
	irq_num = snet_cfg->vq_num + 1;

	ret = pci_alloc_irq_vectors(pdev, irq_num, irq_num, PCI_IRQ_MSIX);
	if (ret <= 0) {
		SNET_ERR(pdev, "Failed to allocate IRQ vectors\n");
		return ret;
	}

	return 0;
}

static void snet_free_vqs(struct snet *snet)
{
	u32 i;

	if (!snet->vqs)
		return;

	for (i = 0 ; i < snet->cfg->vq_num ; i++) {
		if (!snet->vqs[i])
			break;

		kfree(snet->vqs[i]);
	}
	kfree(snet->vqs);
}

static int snet_build_vqs(struct snet *snet)
{
	u32 i;
	/* Allocate the VQ pointers array */
	snet->vqs = kcalloc(snet->cfg->vq_num, sizeof(void *), GFP_KERNEL);
	if (!snet->vqs)
		return -ENOMEM;

	/* Allocate the VQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		snet->vqs[i] = kzalloc(sizeof(*snet->vqs[i]), GFP_KERNEL);
		if (!snet->vqs[i]) {
			snet_free_vqs(snet);
			return -ENOMEM;
		}
		/* Reset IRQ num */
		snet->vqs[i]->irq = -1;
		/* VQ serial ID */
		snet->vqs[i]->sid = i;
		/* Kick address - every VQ gets 4B */
		snet->vqs[i]->kick_ptr = snet->bar + snet->psnet->cfg.kick_off +
					 snet->vqs[i]->sid * 4;
		/* Clear kick address for this VQ */
		iowrite32(0, snet->vqs[i]->kick_ptr);
	}
	return 0;
}

static int psnet_get_next_irq_num(struct psnet *psnet)
{
	int irq;

	spin_lock(&psnet->lock);
	irq = psnet->next_irq++;
	spin_unlock(&psnet->lock);

	return irq;
}

static void snet_reserve_irq_idx(struct pci_dev *pdev, struct snet *snet)
{
	struct psnet *psnet = snet->psnet;
	int i;

	/* One IRQ for every VQ, and one for config changes */
	snet->cfg_irq_idx = psnet_get_next_irq_num(psnet);
	snprintf(snet->cfg_irq_name, SNET_NAME_SIZE, "snet[%s]-cfg[%d]",
		 pci_name(pdev), snet->cfg_irq_idx);

	for (i = 0; i < snet->cfg->vq_num; i++) {
		/* Get next free IRQ ID */
		snet->vqs[i]->irq_idx = psnet_get_next_irq_num(psnet);
		/* Write IRQ name */
		snprintf(snet->vqs[i]->irq_name, SNET_NAME_SIZE, "snet[%s]-vq[%d]",
			 pci_name(pdev), snet->vqs[i]->irq_idx);
	}
}

/* Find a device config based on the virtual function id */
static struct snet_dev_cfg *snet_find_dev_cfg(struct snet_cfg *cfg, u32 vfid)
{
	u32 i;

	for (i = 0; i < cfg->devices_num; i++) {
		if (cfg->devs[i]->vfid == vfid)
			return cfg->devs[i];
	}
	/* Oops, no config found */
	return NULL;
}

/* Probe function for a physical PCI function */
static int snet_vdpa_probe_pf(struct pci_dev *pdev)
{
	struct psnet *psnet;
	int ret = 0;
	bool pf_irqs = false;

	ret = pcim_enable_device(pdev);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable PCI device\n");
		return ret;
	}

	/* Allocate a PCI physical function device */
	psnet = kzalloc(sizeof(*psnet), GFP_KERNEL);
	if (!psnet)
		return -ENOMEM;

	/* Init PSNET spinlock */
	spin_lock_init(&psnet->lock);

	pci_set_master(pdev);
	pci_set_drvdata(pdev, psnet);

	/* Open SNET MAIN BAR */
	ret = psnet_open_pf_bar(pdev, psnet);
	if (ret)
		goto free_psnet;

	/* Try to read SNET's config from PCI BAR */
	ret = psnet_read_cfg(pdev, psnet);
	if (ret)
		goto free_psnet;

	/* If SNET_CFG_FLAG_IRQ_PF flag is set, we should use
	 * PF MSI-X vectors
	 */
	pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);

	if (pf_irqs) {
		ret = psnet_alloc_irq_vector(pdev, psnet);
		if (ret)
			goto free_cfg;
	}

	SNET_DBG(pdev, "Enable %u virtual functions\n", psnet->cfg.vf_num);
	ret = pci_enable_sriov(pdev, psnet->cfg.vf_num);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable SR-IOV\n");
		goto free_irq;
	}

	/* Create HW monitor device */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_HWMON)) {
#if IS_ENABLED(CONFIG_HWMON)
		psnet_create_hwmon(pdev);
#else
		SNET_WARN(pdev, "Can't start HWMON, CONFIG_HWMON is not enabled\n");
#endif
	}

	return 0;

free_irq:
	if (pf_irqs)
		pci_free_irq_vectors(pdev);
free_cfg:
	snet_free_cfg(&psnet->cfg);
free_psnet:
	kfree(psnet);
	return ret;
}

/* Probe function for a virtual PCI function */
static int snet_vdpa_probe_vf(struct pci_dev *pdev)
{
	struct pci_dev *pdev_pf = pdev->physfn;
	struct psnet *psnet = pci_get_drvdata(pdev_pf);
	struct snet_dev_cfg *dev_cfg;
	struct snet *snet;
	u32 vfid;
	int ret;
	bool pf_irqs = false;

	/* Get the virtual function id.
	 * (the DPU counts the VFs from 1)
	 */
	ret = pci_iov_vf_id(pdev);
	if (ret < 0) {
		SNET_ERR(pdev, "Failed to find a VF id\n");
		return ret;
	}
	vfid = ret + 1;

	/* Find the snet_dev_cfg based on vfid */
	dev_cfg = snet_find_dev_cfg(&psnet->cfg, vfid);
	if (!dev_cfg) {
		SNET_WARN(pdev, "Failed to find a VF config\n");
		return -ENODEV;
	}

	/* Which PCI device should allocate the IRQs?
	 * If the SNET_CFG_FLAG_IRQ_PF flag is set, the PF device allocates the IRQs.
	 */
	pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);

	ret = pcim_enable_device(pdev);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable PCI VF device\n");
		return ret;
	}

	/* Request MSI-X IRQ vectors */
	if (!pf_irqs) {
		ret = snet_alloc_irq_vector(pdev, dev_cfg);
		if (ret)
			return ret;
	}

	/* Allocate vdpa device */
	snet = vdpa_alloc_device(struct snet, vdpa, &pdev->dev, &snet_config_ops, 1, 1, NULL,
				 false);
	if (!snet) {
		SNET_ERR(pdev, "Failed to allocate a vdpa device\n");
		ret = -ENOMEM;
		goto free_irqs;
	}

	/* Init control mutex and spinlock */
	mutex_init(&snet->ctrl_lock);
	spin_lock_init(&snet->ctrl_spinlock);

	/* Save PCI device pointers */
	snet->pdev = pdev;
	snet->psnet = psnet;
	snet->cfg = dev_cfg;
	snet->dpu_ready = false;
	snet->sid = vfid;
	/* Reset IRQ value */
	snet->cfg_irq = -1;

	ret = snet_open_vf_bar(pdev, snet);
	if (ret)
		goto put_device;

	/* Create a VirtIO config pointer */
	snet->cfg->virtio_cfg = snet->bar + snet->psnet->cfg.virtio_cfg_off;

	/* Clear control registers */
	snet_ctrl_clear(snet);

	pci_set_master(pdev);
	pci_set_drvdata(pdev, snet);

	ret = snet_build_vqs(snet);
	if (ret)
		goto put_device;

	/* Reserve IRQ indexes.
	 * The IRQs may be requested and freed multiple times,
	 * but the indexes won't change.
	 */
	snet_reserve_irq_idx(pf_irqs ? pdev_pf : pdev, snet);

	/* Set DMA device */
	snet->vdpa.dma_dev = &pdev->dev;

	/* Register vDPA device */
	ret = vdpa_register_device(&snet->vdpa, snet->cfg->vq_num);
	if (ret) {
		SNET_ERR(pdev, "Failed to register vdpa device\n");
		goto free_vqs;
	}

	return 0;

free_vqs:
	snet_free_vqs(snet);
put_device:
	put_device(&snet->vdpa.dev);
free_irqs:
	if (!pf_irqs)
		pci_free_irq_vectors(pdev);
	return ret;
}

static int snet_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (pdev->is_virtfn)
		return snet_vdpa_probe_vf(pdev);
	else
		return snet_vdpa_probe_pf(pdev);
}

static void snet_vdpa_remove_pf(struct pci_dev *pdev)
{
	struct psnet *psnet = pci_get_drvdata(pdev);

	pci_disable_sriov(pdev);
	/* If IRQs are allocated from the PF, we should free the IRQs */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
		pci_free_irq_vectors(pdev);

	snet_free_cfg(&psnet->cfg);
	kfree(psnet);
}

static void snet_vdpa_remove_vf(struct pci_dev *pdev)
{
	struct snet *snet = pci_get_drvdata(pdev);
	struct psnet *psnet = snet->psnet;

	vdpa_unregister_device(&snet->vdpa);
	snet_free_vqs(snet);
	/* If IRQs are allocated from the VF, we should free the IRQs */
	if (!PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
		pci_free_irq_vectors(pdev);
}

static void snet_vdpa_remove(struct pci_dev *pdev)
{
	if (pdev->is_virtfn)
		snet_vdpa_remove_vf(pdev);
	else
		snet_vdpa_remove_pf(pdev);
}

static struct pci_device_id snet_driver_pci_ids[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_SOLIDRUN, SNET_DEVICE_ID,
			 PCI_VENDOR_ID_SOLIDRUN, SNET_DEVICE_ID) },
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, snet_driver_pci_ids);

static struct pci_driver snet_vdpa_driver = {
	.name = "snet-vdpa-driver",
	.id_table = snet_driver_pci_ids,
	.probe = snet_vdpa_probe,
	.remove = snet_vdpa_remove,
};

module_pci_driver(snet_vdpa_driver);

MODULE_AUTHOR("Alvaro Karsz <alvaro.karsz@solid-run.com>");
MODULE_DESCRIPTION("SolidRun vDPA driver");
MODULE_LICENSE("GPL v2");