// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>

#include "octep_config.h"
#include "octep_main.h"
#include "octep_ctrl_net.h"
#include "octep_pfvf_mbox.h"

#define OCTEP_INTR_POLL_TIME_MSECS	100
struct workqueue_struct *octep_wq;

/* Supported Devices */
static const struct pci_device_id octep_pci_id_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN98_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KA_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KA_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KB_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KB_PF)},
	{0, },
};
MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl);

MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>");
MODULE_DESCRIPTION(OCTEP_DRV_STRING);
MODULE_LICENSE("GPL");

/**
 * octep_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info.
 *
 * @oct: Octeon device private data structure.
 *
 * Allocate resources to hold per Tx/Rx queue interrupt info.
 * This is the information passed to the interrupt handler, from which NAPI
 * poll is scheduled; it includes quick access to the private data of the
 * Tx/Rx queue corresponding to the interrupt being handled.
 *
 * Return: 0, on successful allocation of resources for all queue interrupts.
 *         -1, if failed to allocate any resource.
 */
static int octep_alloc_ioq_vectors(struct octep_device *oct)
{
	int i;
	struct octep_ioq_vector *ioq_vector;

	for (i = 0; i < oct->num_oqs; i++) {
		oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
		if (!oct->ioq_vector[i])
			goto free_ioq_vector;

		ioq_vector = oct->ioq_vector[i];
		ioq_vector->iq = oct->iq[i];
		ioq_vector->oq = oct->oq[i];
		ioq_vector->octep_dev = oct;
	}

	dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
	return 0;

free_ioq_vector:
	while (i) {
		i--;
		vfree(oct->ioq_vector[i]);
		oct->ioq_vector[i] = NULL;
	}
	return -1;
}

/**
 * octep_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_free_ioq_vectors(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		if (oct->ioq_vector[i]) {
			vfree(oct->ioq_vector[i]);
			oct->ioq_vector[i] = NULL;
		}
	}
	netdev_info(oct->netdev, "Freed IOQ Vectors\n");
}

/**
 * octep_enable_msix_range() - enable MSI-X interrupts.
 *
 * @oct: Octeon device private data structure.
 *
 * Allocate and enable all MSI-X interrupts (queue and non-queue interrupts)
 * for the Octeon device.
 *
 * Return: 0, on successfully enabling all MSI-X interrupts.
 *         -1, if failed to enable any MSI-X interrupt.
 */
static int octep_enable_msix_range(struct octep_device *oct)
{
	int num_msix, msix_allocated;
	int i;

	/* Generic interrupts apart from input/output queues */
	num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf);
	oct->msix_entries = kcalloc(num_msix,
				    sizeof(struct msix_entry), GFP_KERNEL);
	if (!oct->msix_entries)
		goto msix_alloc_err;

	for (i = 0; i < num_msix; i++)
		oct->msix_entries[i].entry = i;

	msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
					       num_msix, num_msix);
	if (msix_allocated != num_msix) {
		dev_err(&oct->pdev->dev,
			"Failed to enable %d msix irqs; got only %d\n",
			num_msix, msix_allocated);
		goto enable_msix_err;
	}
	oct->num_irqs = msix_allocated;
	dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");

	return 0;

enable_msix_err:
	if (msix_allocated > 0)
		pci_disable_msix(oct->pdev);
	kfree(oct->msix_entries);
	oct->msix_entries = NULL;
msix_alloc_err:
	return -1;
}

/**
 * octep_disable_msix() - disable MSI-X interrupts.
 *
 * @oct: Octeon device private data structure.
 *
 * Disable MSI-X on the Octeon device.
 */
static void octep_disable_msix(struct octep_device *oct)
{
	pci_disable_msix(oct->pdev);
	kfree(oct->msix_entries);
	oct->msix_entries = NULL;
	dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
}

/**
 * octep_mbox_intr_handler() - common handler for PF-VF mailbox interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for PF-VF mailbox interrupts.
 */
static irqreturn_t octep_mbox_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.mbox_intr_handler(oct);
}

/**
 * octep_oei_intr_handler() - common handler for output endpoint interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for all output endpoint interrupts.
 */
static irqreturn_t octep_oei_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.oei_intr_handler(oct);
}

/**
 * octep_ire_intr_handler() - common handler for input ring error interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for input ring error interrupts.
 */
static irqreturn_t octep_ire_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.ire_intr_handler(oct);
}

/**
 * octep_ore_intr_handler() - common handler for output ring error interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for output ring error interrupts.
 */
static irqreturn_t octep_ore_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.ore_intr_handler(oct);
}

/**
 * octep_vfire_intr_handler() - common handler for VF input ring error interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for VF input ring error interrupts.
 */
static irqreturn_t octep_vfire_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.vfire_intr_handler(oct);
}

/**
 * octep_vfore_intr_handler() - common handler for VF output ring error interrupts.
240 * 241 * @irq: Interrupt number. 242 * @data: interrupt data. 243 * 244 * this is common handler for vf output ring error interrupts. 245 */ 246 static irqreturn_t octep_vfore_intr_handler(int irq, void *data) 247 { 248 struct octep_device *oct = data; 249 250 return oct->hw_ops.vfore_intr_handler(oct); 251 } 252 253 /** 254 * octep_dma_intr_handler() - common handler for dpi dma related interrupts. 255 * 256 * @irq: Interrupt number. 257 * @data: interrupt data. 258 * 259 * this is common handler for dpi dma related interrupts. 260 */ 261 static irqreturn_t octep_dma_intr_handler(int irq, void *data) 262 { 263 struct octep_device *oct = data; 264 265 return oct->hw_ops.dma_intr_handler(oct); 266 } 267 268 /** 269 * octep_dma_vf_intr_handler() - common handler for dpi dma transaction error interrupts for VFs. 270 * 271 * @irq: Interrupt number. 272 * @data: interrupt data. 273 * 274 * this is common handler for dpi dma transaction error interrupts for VFs. 275 */ 276 static irqreturn_t octep_dma_vf_intr_handler(int irq, void *data) 277 { 278 struct octep_device *oct = data; 279 280 return oct->hw_ops.dma_vf_intr_handler(oct); 281 } 282 283 /** 284 * octep_pp_vf_intr_handler() - common handler for pp transaction error interrupts for VFs. 285 * 286 * @irq: Interrupt number. 287 * @data: interrupt data. 288 * 289 * this is common handler for pp transaction error interrupts for VFs. 290 */ 291 static irqreturn_t octep_pp_vf_intr_handler(int irq, void *data) 292 { 293 struct octep_device *oct = data; 294 295 return oct->hw_ops.pp_vf_intr_handler(oct); 296 } 297 298 /** 299 * octep_misc_intr_handler() - common handler for mac related interrupts. 300 * 301 * @irq: Interrupt number. 302 * @data: interrupt data. 303 * 304 * this is common handler for mac related interrupts. 305 */ 306 static irqreturn_t octep_misc_intr_handler(int irq, void *data) 307 { 308 struct octep_device *oct = data; 309 310 return oct->hw_ops.misc_intr_handler(oct); 311 } 312 313 /** 314 * octep_rsvd_intr_handler() - common handler for reserved interrupts (future use). 315 * 316 * @irq: Interrupt number. 317 * @data: interrupt data. 318 * 319 * this is common handler for all reserved interrupts. 320 */ 321 static irqreturn_t octep_rsvd_intr_handler(int irq, void *data) 322 { 323 struct octep_device *oct = data; 324 325 return oct->hw_ops.rsvd_intr_handler(oct); 326 } 327 328 /** 329 * octep_ioq_intr_handler() - handler for all Tx/Rx queue interrupts. 330 * 331 * @irq: Interrupt number. 332 * @data: interrupt data contains pointers to Tx/Rx queue private data 333 * and correspong NAPI context. 334 * 335 * this is common handler for all non-queue (generic) interrupts. 336 */ 337 static irqreturn_t octep_ioq_intr_handler(int irq, void *data) 338 { 339 struct octep_ioq_vector *ioq_vector = data; 340 struct octep_device *oct = ioq_vector->octep_dev; 341 342 return oct->hw_ops.ioq_intr_handler(ioq_vector); 343 } 344 345 /** 346 * octep_request_irqs() - Register interrupt handlers. 347 * 348 * @oct: Octeon device private data structure. 349 * 350 * Register handlers for all queue and non-queue interrupts. 351 * 352 * Return: 0, on successful registration of all interrupt handlers. 353 * -1, on any error. 
 */
static int octep_request_irqs(struct octep_device *oct)
{
	struct net_device *netdev = oct->netdev;
	struct octep_ioq_vector *ioq_vector;
	struct msix_entry *msix_entry;
	char **non_ioq_msix_names;
	int num_non_ioq_msix;
	int ret, i, j;

	num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf);
	non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf);

	oct->non_ioq_irq_names = kcalloc(num_non_ioq_msix,
					 OCTEP_MSIX_NAME_SIZE, GFP_KERNEL);
	if (!oct->non_ioq_irq_names)
		goto alloc_err;

	/* First few MSI-X interrupts are non-queue interrupts */
	for (i = 0; i < num_non_ioq_msix; i++) {
		char *irq_name;

		irq_name = &oct->non_ioq_irq_names[i * OCTEP_MSIX_NAME_SIZE];
		msix_entry = &oct->msix_entries[i];

		snprintf(irq_name, OCTEP_MSIX_NAME_SIZE,
			 "%s-%s", netdev->name, non_ioq_msix_names[i]);
		if (!strncmp(non_ioq_msix_names[i], "epf_mbox_rint", strlen("epf_mbox_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_mbox_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_oei_rint",
				    strlen("epf_oei_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_oei_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_ire_rint",
				    strlen("epf_ire_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_ire_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_ore_rint",
				    strlen("epf_ore_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_ore_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_vfire_rint",
				    strlen("epf_vfire_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_vfire_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_vfore_rint",
				    strlen("epf_vfore_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_vfore_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_dma_rint",
				    strlen("epf_dma_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_dma_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_dma_vf_rint",
				    strlen("epf_dma_vf_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_dma_vf_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_pp_vf_rint",
				    strlen("epf_pp_vf_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_pp_vf_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_misc_rint",
				    strlen("epf_misc_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_misc_intr_handler, 0,
					  irq_name, oct);
		} else {
			ret = request_irq(msix_entry->vector,
					  octep_rsvd_intr_handler, 0,
					  irq_name, oct);
		}

		if (ret) {
			netdev_err(netdev,
				   "request_irq failed for %s; err=%d",
				   irq_name, ret);
			goto non_ioq_irq_err;
		}
	}

	/* Request IRQs for Tx/Rx queues */
	for (j = 0; j < oct->num_oqs; j++) {
		ioq_vector = oct->ioq_vector[j];
		msix_entry = &oct->msix_entries[j + num_non_ioq_msix];

		snprintf(ioq_vector->name, sizeof(ioq_vector->name),
			 "%s-q%d", netdev->name, j);
		ret = request_irq(msix_entry->vector,
				  octep_ioq_intr_handler, 0,
				  ioq_vector->name, ioq_vector);
		if (ret) {
			netdev_err(netdev,
				   "request_irq failed for Q-%d; err=%d",
				   j, ret);
			goto ioq_irq_err;
		}

		cpumask_set_cpu(j % num_online_cpus(),
				&ioq_vector->affinity_mask);
		irq_set_affinity_hint(msix_entry->vector,
				      &ioq_vector->affinity_mask);
	}

	return 0;
ioq_irq_err:
	while (j) {
		--j;
		ioq_vector = oct->ioq_vector[j];
		msix_entry = &oct->msix_entries[j + num_non_ioq_msix];

		irq_set_affinity_hint(msix_entry->vector, NULL);
		free_irq(msix_entry->vector, ioq_vector);
	}
non_ioq_irq_err:
	while (i) {
		--i;
		free_irq(oct->msix_entries[i].vector, oct);
	}
	kfree(oct->non_ioq_irq_names);
	oct->non_ioq_irq_names = NULL;
alloc_err:
	return -1;
}

/**
 * octep_free_irqs() - free all registered interrupts.
 *
 * @oct: Octeon device private data structure.
 *
 * Free all queue and non-queue interrupts of the Octeon device.
 */
static void octep_free_irqs(struct octep_device *oct)
{
	int i;

	/* First few MSI-X interrupts are non-queue interrupts; free them */
	for (i = 0; i < CFG_GET_NON_IOQ_MSIX(oct->conf); i++)
		free_irq(oct->msix_entries[i].vector, oct);
	kfree(oct->non_ioq_irq_names);

	/* Free IRQs for Input/Output (Tx/Rx) queues */
	for (i = CFG_GET_NON_IOQ_MSIX(oct->conf); i < oct->num_irqs; i++) {
		irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
		free_irq(oct->msix_entries[i].vector,
			 oct->ioq_vector[i - CFG_GET_NON_IOQ_MSIX(oct->conf)]);
	}
	netdev_info(oct->netdev, "IRQs freed\n");
}

/**
 * octep_setup_irqs() - setup interrupts for the Octeon device.
 *
 * @oct: Octeon device private data structure.
 *
 * Allocate data structures to hold per interrupt information, allocate/enable
 * MSI-X interrupts and register interrupt handlers.
 *
 * Return: 0, on successful allocation and registration of all interrupts.
 *         -1, on any error.
 */
static int octep_setup_irqs(struct octep_device *oct)
{
	if (octep_alloc_ioq_vectors(oct))
		goto ioq_vector_err;

	if (octep_enable_msix_range(oct))
		goto enable_msix_err;

	if (octep_request_irqs(oct))
		goto request_irq_err;

	return 0;

request_irq_err:
	octep_disable_msix(oct);
enable_msix_err:
	octep_free_ioq_vectors(oct);
ioq_vector_err:
	return -1;
}

/**
 * octep_clean_irqs() - free all interrupts and their resources.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_clean_irqs(struct octep_device *oct)
{
	octep_free_irqs(oct);
	octep_disable_msix(oct);
	octep_free_ioq_vectors(oct);
}

/**
 * octep_enable_ioq_irq() - Enable MSI-X interrupt of a Tx/Rx queue.
 *
 * @iq: Octeon Tx queue data structure.
 * @oq: Octeon Rx queue data structure.
 */
static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
{
	u32 pkts_pend = oq->pkts_pending;

	netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
	if (iq->pkts_processed) {
		writel(iq->pkts_processed, iq->inst_cnt_reg);
		iq->pkt_in_done -= iq->pkts_processed;
		iq->pkts_processed = 0;
	}
	if (oq->last_pkt_count - pkts_pend) {
		writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
		oq->last_pkt_count = pkts_pend;
	}

	/* Flush the previous writes before writing to the RESEND bit */
	wmb();
	writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
	writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
}

/**
 * octep_napi_poll() - NAPI poll function for Tx/Rx.
 *
 * @napi: pointer to napi context.
 * @budget: max number of packets to be processed in single invocation.
 */
static int octep_napi_poll(struct napi_struct *napi, int budget)
{
	struct octep_ioq_vector *ioq_vector =
		container_of(napi, struct octep_ioq_vector, napi);
	u32 tx_pending, rx_done;

	tx_pending = octep_iq_process_completions(ioq_vector->iq, budget);
	rx_done = octep_oq_process_rx(ioq_vector->oq, budget);

	/* need more polling if tx completion processing is still pending or
	 * processed at least 'budget' number of rx packets.
	 */
	if (tx_pending || rx_done >= budget)
		return budget;

	napi_complete(napi);
	octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
	return rx_done;
}

/**
 * octep_napi_add() - Add NAPI poll for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_napi_add(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
		netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi,
			       octep_napi_poll);
		oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
	}
}

/**
 * octep_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_napi_delete(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
		netif_napi_del(&oct->ioq_vector[i]->napi);
		oct->oq[i]->napi = NULL;
	}
}

/**
 * octep_napi_enable() - enable NAPI for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_napi_enable(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
		napi_enable(&oct->ioq_vector[i]->napi);
	}
}

/**
 * octep_napi_disable() - disable NAPI for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_napi_disable(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
		napi_disable(&oct->ioq_vector[i]->napi);
	}
}

static void octep_link_up(struct net_device *netdev)
{
	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);
}

/**
 * octep_open() - start the octeon network device.
 *
 * @netdev: pointer to kernel network device.
683 * 684 * setup Tx/Rx queues, interrupts and enable hardware operation of Tx/Rx queues 685 * and interrupts.. 686 * 687 * Return: 0, on successfully setting up device and bring it up. 688 * -1, on any error. 689 */ 690 static int octep_open(struct net_device *netdev) 691 { 692 struct octep_device *oct = netdev_priv(netdev); 693 int err, ret; 694 695 netdev_info(netdev, "Starting netdev ...\n"); 696 netif_carrier_off(netdev); 697 698 oct->hw_ops.reset_io_queues(oct); 699 700 if (octep_setup_iqs(oct)) 701 goto setup_iq_err; 702 if (octep_setup_oqs(oct)) 703 goto setup_oq_err; 704 if (octep_setup_irqs(oct)) 705 goto setup_irq_err; 706 707 err = netif_set_real_num_tx_queues(netdev, oct->num_oqs); 708 if (err) 709 goto set_queues_err; 710 err = netif_set_real_num_rx_queues(netdev, oct->num_iqs); 711 if (err) 712 goto set_queues_err; 713 714 octep_napi_add(oct); 715 octep_napi_enable(oct); 716 717 oct->link_info.admin_up = 1; 718 octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, true, 719 false); 720 octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, true, 721 false); 722 oct->poll_non_ioq_intr = false; 723 724 /* Enable the input and output queues for this Octeon device */ 725 oct->hw_ops.enable_io_queues(oct); 726 727 /* Enable Octeon device interrupts */ 728 oct->hw_ops.enable_interrupts(oct); 729 730 octep_oq_dbell_init(oct); 731 732 ret = octep_ctrl_net_get_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID); 733 if (ret > 0) 734 octep_link_up(netdev); 735 736 return 0; 737 738 set_queues_err: 739 octep_clean_irqs(oct); 740 setup_irq_err: 741 octep_free_oqs(oct); 742 setup_oq_err: 743 octep_free_iqs(oct); 744 setup_iq_err: 745 return -1; 746 } 747 748 /** 749 * octep_stop() - stop the octeon network device. 750 * 751 * @netdev: pointer to kernel network device. 752 * 753 * stop the device Tx/Rx operations, bring down the link and 754 * free up all resources allocated for Tx/Rx queues and interrupts. 755 */ 756 static int octep_stop(struct net_device *netdev) 757 { 758 struct octep_device *oct = netdev_priv(netdev); 759 760 netdev_info(netdev, "Stopping the device ...\n"); 761 762 octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, false, 763 false); 764 octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, false, 765 false); 766 767 /* Stop Tx from stack */ 768 netif_tx_stop_all_queues(netdev); 769 netif_carrier_off(netdev); 770 netif_tx_disable(netdev); 771 772 oct->link_info.admin_up = 0; 773 oct->link_info.oper_up = 0; 774 775 oct->hw_ops.disable_interrupts(oct); 776 octep_napi_disable(oct); 777 octep_napi_delete(oct); 778 779 octep_clean_irqs(oct); 780 octep_clean_iqs(oct); 781 782 oct->hw_ops.disable_io_queues(oct); 783 oct->hw_ops.reset_io_queues(oct); 784 octep_free_oqs(oct); 785 octep_free_iqs(oct); 786 787 oct->poll_non_ioq_intr = true; 788 queue_delayed_work(octep_wq, &oct->intr_poll_task, 789 msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS)); 790 791 netdev_info(netdev, "Device stopped !!\n"); 792 return 0; 793 } 794 795 /** 796 * octep_iq_full_check() - check if a Tx queue is full. 797 * 798 * @iq: Octeon Tx queue data structure. 799 * 800 * Return: 0, if the Tx queue is not full. 801 * 1, if the Tx queue is full. 
 */
static inline int octep_iq_full_check(struct octep_iq *iq)
{
	if (likely((IQ_INSTR_SPACE(iq)) >
		   OCTEP_WAKE_QUEUE_THRESHOLD))
		return 0;

	/* Stop the queue if unable to send */
	netif_stop_subqueue(iq->netdev, iq->q_no);

	/* Allow for pending updates in write index
	 * from iq_process_completion in other cpus
	 * to reflect, in case queue gets free
	 * entries.
	 */
	smp_mb();

	/* check again and restart the queue, in case NAPI has just freed
	 * enough Tx ring entries.
	 */
	if (unlikely(IQ_INSTR_SPACE(iq) >
		     OCTEP_WAKE_QUEUE_THRESHOLD)) {
		netif_start_subqueue(iq->netdev, iq->q_no);
		iq->stats->restart_cnt++;
		return 0;
	}

	return 1;
}

/**
 * octep_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
 *
 * @skb: packet skbuff pointer.
 * @netdev: kernel network device.
 *
 * Return: NETDEV_TX_BUSY, if Tx Queue is full.
 *         NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
 */
static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct octep_device *oct = netdev_priv(netdev);
	netdev_features_t feat = netdev->features;
	struct octep_tx_sglist_desc *sglist;
	struct octep_tx_buffer *tx_buffer;
	struct octep_tx_desc_hw *hw_desc;
	struct skb_shared_info *shinfo;
	struct octep_instr_hdr *ih;
	struct octep_iq *iq;
	skb_frag_t *frag;
	u16 nr_frags, si;
	int xmit_more;
	u16 q_no, wi;

	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	q_no = skb_get_queue_mapping(skb);
	if (q_no >= oct->num_iqs) {
		netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
		q_no = q_no % oct->num_iqs;
	}

	iq = oct->iq[q_no];

	shinfo = skb_shinfo(skb);
	nr_frags = shinfo->nr_frags;

	wi = iq->host_write_index;
	hw_desc = &iq->desc_ring[wi];
	hw_desc->ih64 = 0;

	tx_buffer = iq->buff_info + wi;
	tx_buffer->skb = skb;

	ih = &hw_desc->ih;
	ih->pkind = oct->conf->fw_info.pkind;
	ih->fsz = oct->conf->fw_info.fsz;
	ih->tlen = skb->len + ih->fsz;

	if (!nr_frags) {
		tx_buffer->gather = 0;
		tx_buffer->dma = dma_map_single(iq->dev, skb->data,
						skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(iq->dev, tx_buffer->dma))
			goto dma_map_err;
		hw_desc->dptr = tx_buffer->dma;
	} else {
		/* Scatter/Gather */
		dma_addr_t dma;
		u16 len;

		sglist = tx_buffer->sglist;

		ih->gsz = nr_frags + 1;
		ih->gather = 1;
		tx_buffer->gather = 1;

		len = skb_headlen(skb);
		dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(iq->dev, dma))
			goto dma_map_err;

		/* Each sglist descriptor holds up to four buffers:
		 * dma_ptr[] slots are filled in ascending order while the
		 * matching len[] slots are filled in reverse order.
		 */
		memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT);
		sglist[0].len[3] = len;
		sglist[0].dma_ptr[0] = dma;

		si = 1; /* entry 0 is main skb, mapped above */
		frag = &shinfo->frags[0];
		while (nr_frags--) {
			len = skb_frag_size(frag);
			dma = skb_frag_dma_map(iq->dev, frag, 0,
					       len, DMA_TO_DEVICE);
			if (dma_mapping_error(iq->dev, dma))
				goto dma_map_sg_err;

			sglist[si >> 2].len[3 - (si & 3)] = len;
			sglist[si >> 2].dma_ptr[si & 3] = dma;

			frag++;
			si++;
		}
		hw_desc->dptr = tx_buffer->sglist_dma;
	}

	if (oct->conf->fw_info.tx_ol_flags) {
		if ((feat & (NETIF_F_TSO)) && (skb_is_gso(skb))) {
			hw_desc->txm.ol_flags = OCTEP_TX_OFFLOAD_CKSUM;
			hw_desc->txm.ol_flags |= OCTEP_TX_OFFLOAD_TSO;
			hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size;
			hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs;
		} else if (feat & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
			hw_desc->txm.ol_flags = OCTEP_TX_OFFLOAD_CKSUM;
		}
		/* due to ESR txm will be swapped by hw */
		hw_desc->txm64[0] = (__force u64)cpu_to_be64(hw_desc->txm64[0]);
	}

	xmit_more = netdev_xmit_more();

	__netdev_tx_sent_queue(iq->netdev_q, skb->len, xmit_more);

	skb_tx_timestamp(skb);
	iq->fill_cnt++;
	wi++;
	iq->host_write_index = wi & iq->ring_size_mask;

	/* octep_iq_full_check() stops the queue and returns non-zero if the
	 * queue has become full after inserting the current packet; in that
	 * case, go ahead and ring the doorbell.
	 */
	if (!octep_iq_full_check(iq) && xmit_more &&
	    iq->fill_cnt < iq->fill_threshold)
		return NETDEV_TX_OK;

	/* Flush the hw descriptor before writing to doorbell */
	wmb();
	/* Ring Doorbell to notify the NIC of new packets */
	writel(iq->fill_cnt, iq->doorbell_reg);
	iq->stats->instr_posted += iq->fill_cnt;
	iq->fill_cnt = 0;
	return NETDEV_TX_OK;

dma_map_sg_err:
	if (si > 0) {
		dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
				 sglist[0].len[3], DMA_TO_DEVICE);
		sglist[0].len[3] = 0;
	}
	while (si > 1) {
		dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
			       sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
		sglist[si >> 2].len[3 - (si & 3)] = 0;
		si--;
	}
	tx_buffer->gather = 0;
dma_map_err:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * octep_get_stats64() - Get Octeon network device statistics.
 *
 * @netdev: kernel network device.
 * @stats: pointer to stats structure to be filled in.
 */
static void octep_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct octep_device *oct = netdev_priv(netdev);
	u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
	int q;

	tx_packets = 0;
	tx_bytes = 0;
	rx_packets = 0;
	rx_bytes = 0;
	for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
		tx_packets += oct->stats_iq[q].instr_completed;
		tx_bytes += oct->stats_iq[q].bytes_sent;
		rx_packets += oct->stats_oq[q].packets;
		rx_bytes += oct->stats_oq[q].bytes;
	}
	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;
	stats->rx_bytes = rx_bytes;
}

/**
 * octep_tx_timeout_task - work queue task to handle Tx queue timeout.
 *
 * @work: pointer to Tx queue timeout work_struct
 *
 * Stop and start the device so that it frees up all queue resources
 * and restarts the queues; that potentially clears a Tx queue timeout
 * condition.
 **/
static void octep_tx_timeout_task(struct work_struct *work)
{
	struct octep_device *oct = container_of(work, struct octep_device,
						tx_timeout_task);
	struct net_device *netdev = oct->netdev;

	rtnl_lock();
	if (netif_running(netdev)) {
		octep_stop(netdev);
		octep_open(netdev);
	}
	rtnl_unlock();
}

/**
 * octep_tx_timeout() - Handle Tx Queue timeout.
 *
 * @netdev: pointer to kernel network device.
 * @txqueue: Timed out Tx queue number.
 *
 * Schedule work to handle the Tx queue timeout.
 */
static void octep_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct octep_device *oct = netdev_priv(netdev);

	queue_work(octep_wq, &oct->tx_timeout_task);
}

static int octep_set_mac(struct net_device *netdev, void *p)
{
	struct octep_device *oct = netdev_priv(netdev);
	struct sockaddr *addr = (struct sockaddr *)p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = octep_ctrl_net_set_mac_addr(oct, OCTEP_CTRL_NET_INVALID_VFID,
					  addr->sa_data, true);
	if (err)
		return err;

	memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
	eth_hw_addr_set(netdev, addr->sa_data);

	return 0;
}

static int octep_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octep_device *oct = netdev_priv(netdev);
	struct octep_iface_link_info *link_info;
	int err = 0;

	link_info = &oct->link_info;
	if (link_info->mtu == new_mtu)
		return 0;

	err = octep_ctrl_net_set_mtu(oct, OCTEP_CTRL_NET_INVALID_VFID, new_mtu,
				     true);
	if (!err) {
		oct->link_info.mtu = new_mtu;
		WRITE_ONCE(netdev->mtu, new_mtu);
	}

	return err;
}

static int octep_set_features(struct net_device *dev, netdev_features_t features)
{
	struct octep_ctrl_net_offloads offloads = { 0 };
	struct octep_device *oct = netdev_priv(dev);
	int err;

	/* We only support features received from firmware */
	if ((features & dev->hw_features) != features)
		return -EINVAL;

	if (features & NETIF_F_TSO)
		offloads.tx_offloads |= OCTEP_TX_OFFLOAD_TSO;

	if (features & NETIF_F_TSO6)
		offloads.tx_offloads |= OCTEP_TX_OFFLOAD_TSO;

	if (features & NETIF_F_IP_CSUM)
		offloads.tx_offloads |= OCTEP_TX_OFFLOAD_CKSUM;

	if (features & NETIF_F_IPV6_CSUM)
		offloads.tx_offloads |= OCTEP_TX_OFFLOAD_CKSUM;

	if (features & NETIF_F_RXCSUM)
		offloads.rx_offloads |= OCTEP_RX_OFFLOAD_CKSUM;

	err = octep_ctrl_net_set_offloads(oct,
					  OCTEP_CTRL_NET_INVALID_VFID,
					  &offloads,
					  true);
	if (!err)
		dev->features = features;

	return err;
}

static bool octep_is_vf_valid(struct octep_device *oct, int vf)
{
	if (vf >= CFG_GET_ACTIVE_VFS(oct->conf)) {
		netdev_err(oct->netdev, "Invalid VF ID %d\n", vf);
		return false;
	}

	return true;
}

static int octep_get_vf_config(struct net_device *dev, int vf,
			       struct ifla_vf_info *ivi)
{
	struct octep_device *oct = netdev_priv(dev);

	if (!octep_is_vf_valid(oct, vf))
		return -EINVAL;

	ivi->vf = vf;
	ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr);
	ivi->spoofchk = true;
	ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	ivi->trusted = false;

	return 0;
}

static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct octep_device *oct = netdev_priv(dev);
	int err;

	if (!octep_is_vf_valid(oct, vf))
		return -EINVAL;

	if (!is_valid_ether_addr(mac)) {
		dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac);
		return -EADDRNOTAVAIL;
	}

	dev_dbg(&oct->pdev->dev, "set vf-%d mac to %pM\n", vf, mac);
	ether_addr_copy(oct->vf_info[vf].mac_addr, mac);
	oct->vf_info[vf].flags |= OCTEON_PFVF_FLAG_MAC_SET_BY_PF;

	err = octep_ctrl_net_set_mac_addr(oct, vf, mac, true);
	if (err)
		dev_err(&oct->pdev->dev,
			"Set VF%d MAC address failed via host control Mbox\n",
			vf);

	return err;
}

static const struct net_device_ops octep_netdev_ops = {
	.ndo_open            = octep_open,
	.ndo_stop            = octep_stop,
	.ndo_start_xmit      = octep_start_xmit,
	.ndo_get_stats64     = octep_get_stats64,
	.ndo_tx_timeout      = octep_tx_timeout,
	.ndo_set_mac_address = octep_set_mac,
	.ndo_change_mtu      = octep_change_mtu,
	.ndo_set_features    = octep_set_features,
	.ndo_get_vf_config   = octep_get_vf_config,
	.ndo_set_vf_mac      = octep_set_vf_mac
};

/**
 * octep_intr_poll_task - work queue task to process non-ioq interrupts.
 *
 * @work: pointer to the interrupt poll work_struct
 *
 * Process non-ioq interrupts to handle the control mailbox and PF-VF mailbox.
 **/
static void octep_intr_poll_task(struct work_struct *work)
{
	struct octep_device *oct = container_of(work, struct octep_device,
						intr_poll_task.work);

	if (!oct->poll_non_ioq_intr) {
		dev_info(&oct->pdev->dev, "Interrupt poll task stopped.\n");
		return;
	}

	oct->hw_ops.poll_non_ioq_interrupts(oct);
	queue_delayed_work(octep_wq, &oct->intr_poll_task,
			   msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
}

/**
 * octep_hb_timeout_task - work queue task to check firmware heartbeat.
 *
 * @work: pointer to hb work_struct
 *
 * Check the heartbeat miss count. Uninitialize the oct device if the miss
 * count exceeds the configured max heartbeat miss count.
 **/
static void octep_hb_timeout_task(struct work_struct *work)
{
	struct octep_device *oct = container_of(work, struct octep_device,
						hb_task.work);

	int miss_cnt;

	miss_cnt = atomic_inc_return(&oct->hb_miss_cnt);
	if (miss_cnt < oct->conf->fw_info.hb_miss_count) {
		queue_delayed_work(octep_wq, &oct->hb_task,
				   msecs_to_jiffies(oct->conf->fw_info.hb_interval));
		return;
	}

	dev_err(&oct->pdev->dev, "Missed %u heartbeats. Uninitializing\n",
		miss_cnt);
	rtnl_lock();
	if (netif_running(oct->netdev))
		dev_close(oct->netdev);
	rtnl_unlock();
}

/**
 * octep_ctrl_mbox_task - work queue task to handle ctrl mbox messages.
 *
 * @work: pointer to ctrl mbox work_struct
 *
 * Poll ctrl mbox message queue and handle control messages from firmware.
 **/
static void octep_ctrl_mbox_task(struct work_struct *work)
{
	struct octep_device *oct = container_of(work, struct octep_device,
						ctrl_mbox_task);

	octep_ctrl_net_recv_fw_messages(oct);
}

static const char *octep_devid_to_str(struct octep_device *oct)
{
	switch (oct->chip_id) {
	case OCTEP_PCI_DEVICE_ID_CN98_PF:
		return "CN98XX";
	case OCTEP_PCI_DEVICE_ID_CN93_PF:
		return "CN93XX";
	case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
		return "CNF95N";
	case OCTEP_PCI_DEVICE_ID_CN10KA_PF:
		return "CN10KA";
	case OCTEP_PCI_DEVICE_ID_CNF10KA_PF:
		return "CNF10KA";
	case OCTEP_PCI_DEVICE_ID_CNF10KB_PF:
		return "CNF10KB";
	case OCTEP_PCI_DEVICE_ID_CN10KB_PF:
		return "CN10KB";
	default:
		return "Unsupported";
	}
}

/**
 * octep_device_setup() - Setup Octeon Device.
 *
 * @oct: Octeon device private data structure.
 *
 * Setup Octeon device hardware operations, configuration, etc ...
 */
int octep_device_setup(struct octep_device *oct)
{
	struct pci_dev *pdev = oct->pdev;
	int i, ret;

	/* allocate memory for oct->conf */
	oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
	if (!oct->conf)
		return -ENOMEM;

	/* Map BAR regions */
	for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
		oct->mmio[i].hw_addr =
			ioremap(pci_resource_start(oct->pdev, i * 2),
				pci_resource_len(oct->pdev, i * 2));
		if (!oct->mmio[i].hw_addr)
			goto unmap_prev;

		oct->mmio[i].mapped = 1;
	}

	oct->chip_id = pdev->device;
	oct->rev_id = pdev->revision;
	dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);

	switch (oct->chip_id) {
	case OCTEP_PCI_DEVICE_ID_CN98_PF:
	case OCTEP_PCI_DEVICE_ID_CN93_PF:
	case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
		dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n",
			 octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct),
			 OCTEP_MINOR_REV(oct));
		octep_device_setup_cn93_pf(oct);
		break;
	case OCTEP_PCI_DEVICE_ID_CNF10KA_PF:
	case OCTEP_PCI_DEVICE_ID_CN10KA_PF:
	case OCTEP_PCI_DEVICE_ID_CNF10KB_PF:
	case OCTEP_PCI_DEVICE_ID_CN10KB_PF:
		dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n",
			 octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct), OCTEP_MINOR_REV(oct));
		octep_device_setup_cnxk_pf(oct);
		break;
	default:
		dev_err(&pdev->dev,
			"%s: unsupported device\n", __func__);
		goto unsupported_dev;
	}

	ret = octep_ctrl_net_init(oct);
	if (ret)
		return ret;

	INIT_WORK(&oct->tx_timeout_task, octep_tx_timeout_task);
	INIT_WORK(&oct->ctrl_mbox_task, octep_ctrl_mbox_task);
	INIT_DELAYED_WORK(&oct->intr_poll_task, octep_intr_poll_task);
	oct->poll_non_ioq_intr = true;
	queue_delayed_work(octep_wq, &oct->intr_poll_task,
			   msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));

	atomic_set(&oct->hb_miss_cnt, 0);
	INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task);

	return 0;

unsupported_dev:
	i = OCTEP_MMIO_REGIONS;
unmap_prev:
	while (i--)
		iounmap(oct->mmio[i].hw_addr);

	kfree(oct->conf);
	return -1;
}

/**
 * octep_device_cleanup() - Cleanup Octeon Device.
 *
 * @oct: Octeon device private data structure.
 *
 * Cleanup Octeon device allocated resources.
 */
static void octep_device_cleanup(struct octep_device *oct)
{
	int i;

	oct->poll_non_ioq_intr = false;
	cancel_delayed_work_sync(&oct->intr_poll_task);
	cancel_work_sync(&oct->ctrl_mbox_task);

	dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");

	for (i = 0; i < OCTEP_MAX_VF; i++) {
		vfree(oct->mbox[i]);
		oct->mbox[i] = NULL;
	}

	octep_delete_pfvf_mbox(oct);
	octep_ctrl_net_uninit(oct);
	cancel_delayed_work_sync(&oct->hb_task);

	oct->hw_ops.soft_reset(oct);
	for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
		if (oct->mmio[i].mapped)
			iounmap(oct->mmio[i].hw_addr);
	}

	kfree(oct->conf);
	oct->conf = NULL;
}

static bool get_fw_ready_status(struct pci_dev *pdev)
{
	u32 pos = 0;
	u16 vsec_id;
	u8 status;

	while ((pos = pci_find_next_ext_capability(pdev, pos,
						   PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_word(pdev, pos + 4, &vsec_id);
#define FW_STATUS_VSEC_ID  0xA3
		if (vsec_id != FW_STATUS_VSEC_ID)
			continue;

		pci_read_config_byte(pdev, (pos + 8), &status);
		dev_info(&pdev->dev, "Firmware ready status = %u\n", status);
#define FW_STATUS_READY    1ULL
		return status == FW_STATUS_READY;
	}
	return false;
}

/**
 * octep_probe() - Octeon PCI device probe handler.
 *
 * @pdev: PCI device structure.
 * @ent: entry in Octeon PCI device ID table.
 *
 * Initializes and enables the Octeon PCI device for network operations.
 * Initializes Octeon private data structure and registers a network device.
 */
static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct octep_device *octep_dev = NULL;
	struct net_device *netdev;
	int max_rx_pktlen;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		return err;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
		goto err_dma_mask;
	}

	err = pci_request_mem_regions(pdev, OCTEP_DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
		goto err_pci_regions;
	}

	pci_set_master(pdev);

	if (!get_fw_ready_status(pdev)) {
		dev_notice(&pdev->dev, "Firmware not ready; defer probe.\n");
		err = -EPROBE_DEFER;
		goto err_alloc_netdev;
	}

	netdev = alloc_etherdev_mq(sizeof(struct octep_device),
				   OCTEP_MAX_QUEUES);
	if (!netdev) {
		dev_err(&pdev->dev, "Failed to allocate netdev\n");
		err = -ENOMEM;
		goto err_alloc_netdev;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);

	octep_dev = netdev_priv(netdev);
	octep_dev->netdev = netdev;
	octep_dev->pdev = pdev;
	octep_dev->dev = &pdev->dev;
	pci_set_drvdata(pdev, octep_dev);

	err = octep_device_setup(octep_dev);
	if (err) {
		dev_err(&pdev->dev, "Device setup failed\n");
		goto err_octep_config;
	}

	err = octep_setup_pfvf_mbox(octep_dev);
	if (err) {
		dev_err(&pdev->dev, "PF-VF mailbox setup failed\n");
		goto register_dev_err;
	}

	err = octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
				      &octep_dev->conf->fw_info);
	if (err) {
		dev_err(&pdev->dev, "Failed to get firmware info\n");
		goto register_dev_err;
	}
	dev_info(&octep_dev->pdev->dev, "Heartbeat interval %u msecs Heartbeat miss count %u\n",
		 octep_dev->conf->fw_info.hb_interval,
		 octep_dev->conf->fw_info.hb_miss_count);
	queue_delayed_work(octep_wq, &octep_dev->hb_task,
			   msecs_to_jiffies(octep_dev->conf->fw_info.hb_interval));

	netdev->netdev_ops = &octep_netdev_ops;
	octep_set_ethtool_ops(netdev);
	netif_carrier_off(netdev);

	netdev->hw_features = NETIF_F_SG;
	if (OCTEP_TX_IP_CSUM(octep_dev->conf->fw_info.tx_ol_flags))
		netdev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);

	if (OCTEP_RX_IP_CSUM(octep_dev->conf->fw_info.rx_ol_flags))
		netdev->hw_features |= NETIF_F_RXCSUM;

	max_rx_pktlen = octep_ctrl_net_get_mtu(octep_dev, OCTEP_CTRL_NET_INVALID_VFID);
	if (max_rx_pktlen < 0) {
		dev_err(&octep_dev->pdev->dev,
			"Failed to get max receive packet size; err = %d\n", max_rx_pktlen);
		err = max_rx_pktlen;
		goto register_dev_err;
	}
	netdev->min_mtu = OCTEP_MIN_MTU;
	netdev->max_mtu = max_rx_pktlen - (ETH_HLEN + ETH_FCS_LEN);
	netdev->mtu = OCTEP_DEFAULT_MTU;

	if (OCTEP_TX_TSO(octep_dev->conf->fw_info.tx_ol_flags)) {
		netdev->hw_features |= NETIF_F_TSO;
		netif_set_tso_max_size(netdev, netdev->max_mtu);
	}

	netdev->features |= netdev->hw_features;
	err = octep_ctrl_net_get_mac_addr(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
					  octep_dev->mac_addr);
	if (err) {
		dev_err(&pdev->dev, "Failed to get mac address\n");
		goto register_dev_err;
	}
	eth_hw_addr_set(netdev, octep_dev->mac_addr);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register netdev\n");
		goto register_dev_err;
	}
	dev_info(&pdev->dev, "Device probe successful\n");
	return 0;

register_dev_err:
	octep_device_cleanup(octep_dev);
err_octep_config:
	free_netdev(netdev);
err_alloc_netdev:
	pci_release_mem_regions(pdev);
err_pci_regions:
err_dma_mask:
	pci_disable_device(pdev);
	return err;
}

static int octep_sriov_disable(struct octep_device *oct)
{
	struct pci_dev *pdev = oct->pdev;

	if (pci_vfs_assigned(oct->pdev)) {
		dev_warn(&pdev->dev, "Can't disable SRIOV while VFs are assigned\n");
		return -EPERM;
	}

	pci_disable_sriov(pdev);
	CFG_GET_ACTIVE_VFS(oct->conf) = 0;

	return 0;
}

/**
 * octep_remove() - Remove Octeon PCI device from driver control.
 *
 * @pdev: PCI device structure of the Octeon device.
 *
 * Cleanup all resources allocated for the Octeon device.
 * Unregister the network device and disable the PCI device.
 */
static void octep_remove(struct pci_dev *pdev)
{
	struct octep_device *oct = pci_get_drvdata(pdev);
	struct net_device *netdev;

	if (!oct)
		return;

	netdev = oct->netdev;
	octep_sriov_disable(oct);
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	cancel_work_sync(&oct->tx_timeout_task);
	octep_device_cleanup(oct);
	pci_release_mem_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}

static int octep_sriov_enable(struct octep_device *oct, int num_vfs)
{
	struct pci_dev *pdev = oct->pdev;
	int err;

	CFG_GET_ACTIVE_VFS(oct->conf) = num_vfs;
	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SRIOV err=%d\n", err);
		CFG_GET_ACTIVE_VFS(oct->conf) = 0;
		return err;
	}

	return num_vfs;
}

static int octep_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct octep_device *oct = pci_get_drvdata(pdev);
	int max_nvfs;

	if (num_vfs == 0)
		return octep_sriov_disable(oct);

	max_nvfs = CFG_GET_MAX_VFS(oct->conf);

	if (num_vfs > max_nvfs) {
		dev_err(&pdev->dev, "Invalid VF count Max supported VFs = %d\n",
			max_nvfs);
		return -EINVAL;
	}

	return octep_sriov_enable(oct, num_vfs);
}

static struct pci_driver octep_driver = {
	.name = OCTEP_DRV_NAME,
	.id_table = octep_pci_id_tbl,
	.probe = octep_probe,
	.remove = octep_remove,
	.sriov_configure = octep_sriov_configure,
};

/**
 * octep_init_module() - Module initialization.
 *
 * Create common resources for the driver and register the PCI driver.
 */
static int __init octep_init_module(void)
{
	int ret;

	pr_info("%s: Loading %s ...\n", OCTEP_DRV_NAME, OCTEP_DRV_STRING);

	/* work queue for all deferred tasks */
	octep_wq = create_singlethread_workqueue(OCTEP_DRV_NAME);
	if (!octep_wq) {
		pr_err("%s: Failed to create common workqueue\n",
		       OCTEP_DRV_NAME);
		return -ENOMEM;
	}

	ret = pci_register_driver(&octep_driver);
	if (ret < 0) {
		pr_err("%s: Failed to register PCI driver; err=%d\n",
		       OCTEP_DRV_NAME, ret);
		destroy_workqueue(octep_wq);
		return ret;
	}

	pr_info("%s: Loaded successfully !\n", OCTEP_DRV_NAME);

	return ret;
}

/**
 * octep_exit_module() - Module exit routine.
 *
 * Unregister the driver from the PCI subsystem and clean up common resources.
 */
static void __exit octep_exit_module(void)
{
	pr_info("%s: Unloading ...\n", OCTEP_DRV_NAME);

	pci_unregister_driver(&octep_driver);
	destroy_workqueue(octep_wq);

	pr_info("%s: Unloading complete\n", OCTEP_DRV_NAME);
}

module_init(octep_init_module);
module_exit(octep_exit_module);