// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>

#include "octep_config.h"
#include "octep_main.h"
#include "octep_ctrl_net.h"
#include "octep_pfvf_mbox.h"

#define OCTEP_INTR_POLL_TIME_MSECS	100
struct workqueue_struct *octep_wq;

/* Supported Devices */
static const struct pci_device_id octep_pci_id_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN98_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KA_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KA_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KB_PF)},
	{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KB_PF)},
	{0, },
};
MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl);

MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>");
MODULE_DESCRIPTION(OCTEP_DRV_STRING);
MODULE_LICENSE("GPL");

/**
 * octep_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info.
 *
 * @oct: Octeon device private data structure.
 *
 * Allocate resources to hold per Tx/Rx queue interrupt info.
 * This is the information passed to the interrupt handler, from which NAPI
 * poll is scheduled; it includes quick access to the private data of the
 * Tx/Rx queue corresponding to the interrupt being handled.
 *
 * Return: 0, on successful allocation of resources for all queue interrupts.
 *         -1, if failed to allocate any resource.
 */
static int octep_alloc_ioq_vectors(struct octep_device *oct)
{
	int i;
	struct octep_ioq_vector *ioq_vector;

	for (i = 0; i < oct->num_oqs; i++) {
		oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
		if (!oct->ioq_vector[i])
			goto free_ioq_vector;

		ioq_vector = oct->ioq_vector[i];
		ioq_vector->iq = oct->iq[i];
		ioq_vector->oq = oct->oq[i];
		ioq_vector->octep_dev = oct;
	}

	dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
	return 0;

free_ioq_vector:
	while (i) {
		i--;
		vfree(oct->ioq_vector[i]);
		oct->ioq_vector[i] = NULL;
	}
	return -1;
}

/**
 * octep_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_free_ioq_vectors(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		if (oct->ioq_vector[i]) {
			vfree(oct->ioq_vector[i]);
			oct->ioq_vector[i] = NULL;
		}
	}
	netdev_info(oct->netdev, "Freed IOQ Vectors\n");
}

/**
 * octep_enable_msix_range() - enable MSI-x interrupts.
 *
 * @oct: Octeon device private data structure.
 *
 * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts)
 * for the Octeon device.
 *
 * Return: 0, on successfully enabling all MSI-x interrupts.
 *         -1, if failed to enable any MSI-x interrupt.
 */
static int octep_enable_msix_range(struct octep_device *oct)
{
	int num_msix, msix_allocated;
	int i;

	/* One MSI-X vector per IOQ plus the generic (non-queue) interrupts */
	num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf);
	oct->msix_entries = kcalloc(num_msix,
				    sizeof(struct msix_entry), GFP_KERNEL);
	if (!oct->msix_entries)
		goto msix_alloc_err;

	for (i = 0; i < num_msix; i++)
		oct->msix_entries[i].entry = i;

	msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
					       num_msix, num_msix);
	if (msix_allocated != num_msix) {
		dev_err(&oct->pdev->dev,
			"Failed to enable %d msix irqs; got only %d\n",
			num_msix, msix_allocated);
		goto enable_msix_err;
	}
	oct->num_irqs = msix_allocated;
	dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");

	return 0;

enable_msix_err:
	if (msix_allocated > 0)
		pci_disable_msix(oct->pdev);
	kfree(oct->msix_entries);
	oct->msix_entries = NULL;
msix_alloc_err:
	return -1;
}

/**
 * octep_disable_msix() - disable MSI-x interrupts.
 *
 * @oct: Octeon device private data structure.
 *
 * Disable MSI-x on the Octeon device.
 */
static void octep_disable_msix(struct octep_device *oct)
{
	pci_disable_msix(oct->pdev);
	kfree(oct->msix_entries);
	oct->msix_entries = NULL;
	dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
}
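
/* The non-queue interrupt handlers below (mailbox, endpoint, ring error,
 * DMA and misc) simply forward to the corresponding chip-specific hw_ops
 * callback; no interrupt decoding is done at this level.
 */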

/**
 * octep_mbox_intr_handler() - common handler for pfvf mbox interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for pfvf mbox interrupts.
 */
static irqreturn_t octep_mbox_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.mbox_intr_handler(oct);
}

/**
 * octep_oei_intr_handler() - common handler for output endpoint interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for all output endpoint interrupts.
 */
static irqreturn_t octep_oei_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.oei_intr_handler(oct);
}

/**
 * octep_ire_intr_handler() - common handler for input ring error interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for input ring error interrupts.
 */
static irqreturn_t octep_ire_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.ire_intr_handler(oct);
}

/**
 * octep_ore_intr_handler() - common handler for output ring error interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for output ring error interrupts.
 */
static irqreturn_t octep_ore_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.ore_intr_handler(oct);
}

/**
 * octep_vfire_intr_handler() - common handler for vf input ring error interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for vf input ring error interrupts.
 */
static irqreturn_t octep_vfire_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.vfire_intr_handler(oct);
}

/**
 * octep_vfore_intr_handler() - common handler for vf output ring error interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for vf output ring error interrupts.
 */
static irqreturn_t octep_vfore_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.vfore_intr_handler(oct);
}

/**
 * octep_dma_intr_handler() - common handler for dpi dma related interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for dpi dma related interrupts.
 */
static irqreturn_t octep_dma_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.dma_intr_handler(oct);
}

/**
 * octep_dma_vf_intr_handler() - common handler for dpi dma transaction error interrupts for VFs.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for dpi dma transaction error interrupts for VFs.
 */
static irqreturn_t octep_dma_vf_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.dma_vf_intr_handler(oct);
}

/**
 * octep_pp_vf_intr_handler() - common handler for pp transaction error interrupts for VFs.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for pp transaction error interrupts for VFs.
 */
static irqreturn_t octep_pp_vf_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.pp_vf_intr_handler(oct);
}

/**
 * octep_misc_intr_handler() - common handler for mac related interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for mac related interrupts.
 */
static irqreturn_t octep_misc_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.misc_intr_handler(oct);
}

/**
 * octep_rsvd_intr_handler() - common handler for reserved interrupts (future use).
 *
 * @irq: Interrupt number.
 * @data: interrupt data.
 *
 * This is the common handler for all reserved interrupts.
 */
static irqreturn_t octep_rsvd_intr_handler(int irq, void *data)
{
	struct octep_device *oct = data;

	return oct->hw_ops.rsvd_intr_handler(oct);
}

/**
 * octep_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
 *
 * @irq: Interrupt number.
 * @data: interrupt data contains pointers to Tx/Rx queue private data
 *        and corresponding NAPI context.
 *
 * This is the common handler for all Tx/Rx queue interrupts.
 */
static irqreturn_t octep_ioq_intr_handler(int irq, void *data)
{
	struct octep_ioq_vector *ioq_vector = data;
	struct octep_device *oct = ioq_vector->octep_dev;

	return oct->hw_ops.ioq_intr_handler(ioq_vector);
}

/**
 * octep_request_irqs() - Register interrupt handlers.
 *
 * @oct: Octeon device private data structure.
 *
 * Register handlers for all queue and non-queue interrupts.
 *
 * Return: 0, on successful registration of all interrupt handlers.
 *         -1, on any error.
 */
static int octep_request_irqs(struct octep_device *oct)
{
	struct net_device *netdev = oct->netdev;
	struct octep_ioq_vector *ioq_vector;
	struct msix_entry *msix_entry;
	char **non_ioq_msix_names;
	int num_non_ioq_msix;
	int ret, i, j;

	num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf);
	non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf);

	oct->non_ioq_irq_names = kcalloc(num_non_ioq_msix,
					 OCTEP_MSIX_NAME_SIZE, GFP_KERNEL);
	if (!oct->non_ioq_irq_names)
		goto alloc_err;

	/* First few MSI-X interrupts are non-queue interrupts */
	for (i = 0; i < num_non_ioq_msix; i++) {
		char *irq_name;

		irq_name = &oct->non_ioq_irq_names[i * OCTEP_MSIX_NAME_SIZE];
		msix_entry = &oct->msix_entries[i];

		snprintf(irq_name, OCTEP_MSIX_NAME_SIZE,
			 "%s-%s", netdev->name, non_ioq_msix_names[i]);
		if (!strncmp(non_ioq_msix_names[i], "epf_mbox_rint", strlen("epf_mbox_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_mbox_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_oei_rint",
				    strlen("epf_oei_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_oei_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_ire_rint",
				    strlen("epf_ire_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_ire_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_ore_rint",
				    strlen("epf_ore_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_ore_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_vfire_rint",
				    strlen("epf_vfire_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_vfire_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_vfore_rint",
				    strlen("epf_vfore_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_vfore_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_dma_rint",
				    strlen("epf_dma_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_dma_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_dma_vf_rint",
				    strlen("epf_dma_vf_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_dma_vf_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_pp_vf_rint",
				    strlen("epf_pp_vf_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_pp_vf_intr_handler, 0,
					  irq_name, oct);
		} else if (!strncmp(non_ioq_msix_names[i], "epf_misc_rint",
				    strlen("epf_misc_rint"))) {
			ret = request_irq(msix_entry->vector,
					  octep_misc_intr_handler, 0,
					  irq_name, oct);
		} else {
			ret = request_irq(msix_entry->vector,
					  octep_rsvd_intr_handler, 0,
					  irq_name, oct);
		}

		if (ret) {
			netdev_err(netdev,
				   "request_irq failed for %s; err=%d",
				   irq_name, ret);
			goto non_ioq_irq_err;
		}
	}

	/* Request IRQs for Tx/Rx queues */
	for (j = 0; j < oct->num_oqs; j++) {
		ioq_vector = oct->ioq_vector[j];
		msix_entry = &oct->msix_entries[j + num_non_ioq_msix];

		snprintf(ioq_vector->name, sizeof(ioq_vector->name),
			 "%s-q%d", netdev->name, j);
		ret = request_irq(msix_entry->vector,
				  octep_ioq_intr_handler, 0,
				  ioq_vector->name, ioq_vector);
		if (ret) {
			netdev_err(netdev,
				   "request_irq failed for Q-%d; err=%d",
				   j, ret);
			goto ioq_irq_err;
		}

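		/* Spread queue vectors across online CPUs and publish the
		 * preferred mapping through the IRQ affinity hint.
		 */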
		cpumask_set_cpu(j % num_online_cpus(),
				&ioq_vector->affinity_mask);
		irq_set_affinity_hint(msix_entry->vector,
				      &ioq_vector->affinity_mask);
	}

	return 0;
ioq_irq_err:
	while (j) {
		--j;
		ioq_vector = oct->ioq_vector[j];
		msix_entry = &oct->msix_entries[j + num_non_ioq_msix];

		irq_set_affinity_hint(msix_entry->vector, NULL);
		free_irq(msix_entry->vector, ioq_vector);
	}
non_ioq_irq_err:
	while (i) {
		--i;
		free_irq(oct->msix_entries[i].vector, oct);
	}
	kfree(oct->non_ioq_irq_names);
	oct->non_ioq_irq_names = NULL;
alloc_err:
	return -1;
}

/**
 * octep_free_irqs() - free all registered interrupts.
 *
 * @oct: Octeon device private data structure.
 *
 * Free all queue and non-queue interrupts of the Octeon device.
 */
static void octep_free_irqs(struct octep_device *oct)
{
	int i;

	/* First few MSI-X interrupts are non queue interrupts; free them */
	for (i = 0; i < CFG_GET_NON_IOQ_MSIX(oct->conf); i++)
		free_irq(oct->msix_entries[i].vector, oct);
	kfree(oct->non_ioq_irq_names);

	/* Free IRQs for Input/Output (Tx/Rx) queues */
	for (i = CFG_GET_NON_IOQ_MSIX(oct->conf); i < oct->num_irqs; i++) {
		irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
		free_irq(oct->msix_entries[i].vector,
			 oct->ioq_vector[i - CFG_GET_NON_IOQ_MSIX(oct->conf)]);
	}
	netdev_info(oct->netdev, "IRQs freed\n");
}

/**
 * octep_setup_irqs() - setup interrupts for the Octeon device.
 *
 * @oct: Octeon device private data structure.
 *
 * Allocate data structures to hold per interrupt information, allocate/enable
 * MSI-x interrupts and register interrupt handlers.
 *
 * Return: 0, on successful allocation and registration of all interrupts.
 *         -1, on any error.
 */
static int octep_setup_irqs(struct octep_device *oct)
{
	if (octep_alloc_ioq_vectors(oct))
		goto ioq_vector_err;

	if (octep_enable_msix_range(oct))
		goto enable_msix_err;

	if (octep_request_irqs(oct))
		goto request_irq_err;

	return 0;

request_irq_err:
	octep_disable_msix(oct);
enable_msix_err:
	octep_free_ioq_vectors(oct);
ioq_vector_err:
	return -1;
}

/**
 * octep_clean_irqs() - free all interrupts and their resources.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_clean_irqs(struct octep_device *oct)
{
	octep_free_irqs(oct);
	octep_disable_msix(oct);
	octep_free_ioq_vectors(oct);
}

/**
 * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
 *
 * @iq: Octeon Tx queue data structure.
 * @oq: Octeon Rx queue data structure.
 */
static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
{
	u32 pkts_pend = oq->pkts_pending;

	netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
	if (iq->pkts_processed) {
		writel(iq->pkts_processed, iq->inst_cnt_reg);
		iq->pkt_in_done -= iq->pkts_processed;
		iq->pkts_processed = 0;
	}
	if (oq->last_pkt_count - pkts_pend) {
		writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
		oq->last_pkt_count = pkts_pend;
	}

	/* Flush the previous writes before writing to RESEND bit */
	wmb();
	writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
	writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
}

/**
 * octep_napi_poll() - NAPI poll function for Tx/Rx.
 *
 * @napi: pointer to napi context.
 * @budget: max number of packets to be processed in single invocation.
 */
static int octep_napi_poll(struct napi_struct *napi, int budget)
{
	struct octep_ioq_vector *ioq_vector =
		container_of(napi, struct octep_ioq_vector, napi);
	u32 tx_pending, rx_done;

	tx_pending = octep_iq_process_completions(ioq_vector->iq, budget);
	rx_done = octep_oq_process_rx(ioq_vector->oq, budget);

	/* need more polling if tx completion processing is still pending or
	 * processed at least 'budget' number of rx packets.
	 */
	if (tx_pending || rx_done >= budget)
		return budget;

	napi_complete(napi);
	octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
	return rx_done;
}

/**
 * octep_napi_add() - Add NAPI poll for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_napi_add(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
		netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi,
			       octep_napi_poll);
		oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
	}
}

/**
 * octep_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_napi_delete(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
		netif_napi_del(&oct->ioq_vector[i]->napi);
		oct->oq[i]->napi = NULL;
	}
}

/**
 * octep_napi_enable() - enable NAPI for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_napi_enable(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
		napi_enable(&oct->ioq_vector[i]->napi);
	}
}

/**
 * octep_napi_disable() - disable NAPI for all Tx/Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
static void octep_napi_disable(struct octep_device *oct)
{
	int i;

	for (i = 0; i < oct->num_oqs; i++) {
		netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
		napi_disable(&oct->ioq_vector[i]->napi);
	}
}

static void octep_link_up(struct net_device *netdev)
{
	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);
}

/**
 * octep_open() - start the octeon network device.
 *
 * @netdev: pointer to kernel network device.
 *
 * Set up Tx/Rx queues and interrupts, and enable hardware operation of the
 * Tx/Rx queues and interrupts.
 *
 * Return: 0, on successfully setting up the device and bringing it up.
 *         -1, on any error.
 */
static int octep_open(struct net_device *netdev)
{
	struct octep_device *oct = netdev_priv(netdev);
	int err, ret;

	netdev_info(netdev, "Starting netdev ...\n");
	netif_carrier_off(netdev);

	oct->hw_ops.reset_io_queues(oct);

	if (octep_setup_iqs(oct))
		goto setup_iq_err;
	if (octep_setup_oqs(oct))
		goto setup_oq_err;
	if (octep_setup_irqs(oct))
		goto setup_irq_err;

	err = netif_set_real_num_tx_queues(netdev, oct->num_oqs);
	if (err)
		goto set_queues_err;
	err = netif_set_real_num_rx_queues(netdev, oct->num_iqs);
	if (err)
		goto set_queues_err;

	octep_napi_add(oct);
	octep_napi_enable(oct);

	oct->link_info.admin_up = 1;
	octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, true,
				    false);
	octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, true,
				       false);
	oct->poll_non_ioq_intr = false;

	/* Enable the input and output queues for this Octeon device */
	oct->hw_ops.enable_io_queues(oct);

	/* Enable Octeon device interrupts */
	oct->hw_ops.enable_interrupts(oct);

	octep_oq_dbell_init(oct);

	ret = octep_ctrl_net_get_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID);
	if (ret > 0)
		octep_link_up(netdev);

	return 0;

set_queues_err:
	octep_clean_irqs(oct);
setup_irq_err:
	octep_free_oqs(oct);
setup_oq_err:
	octep_free_iqs(oct);
setup_iq_err:
	return -1;
}

/**
 * octep_stop() - stop the octeon network device.
 *
 * @netdev: pointer to kernel network device.
 *
 * Stop the device Tx/Rx operations, bring down the link and
 * free up all resources allocated for Tx/Rx queues and interrupts.
 *
 * Return: 0, always.
 */
static int octep_stop(struct net_device *netdev)
{
	struct octep_device *oct = netdev_priv(netdev);

	netdev_info(netdev, "Stopping the device ...\n");

	octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, false,
				       false);
	octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, false,
				    false);

	/* Stop Tx from stack */
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	oct->link_info.admin_up = 0;
	oct->link_info.oper_up = 0;

	oct->hw_ops.disable_interrupts(oct);
	octep_napi_disable(oct);
	octep_napi_delete(oct);

	octep_clean_irqs(oct);
	octep_clean_iqs(oct);

	oct->hw_ops.disable_io_queues(oct);
	oct->hw_ops.reset_io_queues(oct);
	octep_free_oqs(oct);
	octep_free_iqs(oct);

	oct->poll_non_ioq_intr = true;
	queue_delayed_work(octep_wq, &oct->intr_poll_task,
			   msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));

	netdev_info(netdev, "Device stopped !!\n");
	return 0;
}

/**
 * octep_iq_full_check() - check if a Tx queue is full.
 *
 * @iq: Octeon Tx queue data structure.
 *
 * Return: 0, if the Tx queue is not full.
 *         1, if the Tx queue is full.
 */
static inline int octep_iq_full_check(struct octep_iq *iq)
{
	if (likely((IQ_INSTR_SPACE(iq)) >
		   OCTEP_WAKE_QUEUE_THRESHOLD))
		return 0;

	/* Stop the queue if unable to send */
	netif_stop_subqueue(iq->netdev, iq->q_no);

	/* Allow for pending updates in write index
	 * from iq_process_completion in other cpus
	 * to reflect, in case queue gets free
	 * entries.
	 */
	smp_mb();

	/* check again and restart the queue, in case NAPI has just freed
	 * enough Tx ring entries.
	 */
	if (unlikely(IQ_INSTR_SPACE(iq) >
		     OCTEP_WAKE_QUEUE_THRESHOLD)) {
		netif_start_subqueue(iq->netdev, iq->q_no);
		iq->stats.restart_cnt++;
		return 0;
	}

	return 1;
}

/**
 * octep_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
 *
 * @skb: packet skbuff pointer.
 * @netdev: kernel network device.
 *
 * Return: NETDEV_TX_BUSY, if Tx Queue is full.
 *         NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
 */
static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct octep_device *oct = netdev_priv(netdev);
	netdev_features_t feat = netdev->features;
	struct octep_tx_sglist_desc *sglist;
	struct octep_tx_buffer *tx_buffer;
	struct octep_tx_desc_hw *hw_desc;
	struct skb_shared_info *shinfo;
	struct octep_instr_hdr *ih;
	struct octep_iq *iq;
	skb_frag_t *frag;
	u16 nr_frags, si;
	int xmit_more;
	u16 q_no, wi;

	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	q_no = skb_get_queue_mapping(skb);
	if (q_no >= oct->num_iqs) {
		netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
		q_no = q_no % oct->num_iqs;
	}

	iq = oct->iq[q_no];

	shinfo = skb_shinfo(skb);
	nr_frags = shinfo->nr_frags;

	wi = iq->host_write_index;
	hw_desc = &iq->desc_ring[wi];
	hw_desc->ih64 = 0;

	tx_buffer = iq->buff_info + wi;
	tx_buffer->skb = skb;

	ih = &hw_desc->ih;
	ih->pkind = oct->conf->fw_info.pkind;
	ih->fsz = oct->conf->fw_info.fsz;
	ih->tlen = skb->len + ih->fsz;

	if (!nr_frags) {
		tx_buffer->gather = 0;
		tx_buffer->dma = dma_map_single(iq->dev, skb->data,
						skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(iq->dev, tx_buffer->dma))
			goto dma_map_err;
		hw_desc->dptr = tx_buffer->dma;
	} else {
		/* Scatter/Gather */
		dma_addr_t dma;
		u16 len;

		sglist = tx_buffer->sglist;

		ih->gsz = nr_frags + 1;
		ih->gather = 1;
		tx_buffer->gather = 1;

		len = skb_headlen(skb);
		dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(iq->dev, dma))
			goto dma_map_err;

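		/* Each sglist descriptor holds four lengths and four DMA
		 * addresses: buffer 'si' uses descriptor (si >> 2), with its
		 * length stored in slot (3 - (si & 3)) and its address in
		 * slot (si & 3).
		 */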
		memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT);
		sglist[0].len[3] = len;
		sglist[0].dma_ptr[0] = dma;

		si = 1; /* entry 0 is main skb, mapped above */
		frag = &shinfo->frags[0];
		while (nr_frags--) {
			len = skb_frag_size(frag);
			dma = skb_frag_dma_map(iq->dev, frag, 0,
					       len, DMA_TO_DEVICE);
			if (dma_mapping_error(iq->dev, dma))
				goto dma_map_sg_err;

			sglist[si >> 2].len[3 - (si & 3)] = len;
			sglist[si >> 2].dma_ptr[si & 3] = dma;

			frag++;
			si++;
		}
		hw_desc->dptr = tx_buffer->sglist_dma;
	}

	if (oct->conf->fw_info.tx_ol_flags) {
		if ((feat & (NETIF_F_TSO)) && (skb_is_gso(skb))) {
			hw_desc->txm.ol_flags = OCTEP_TX_OFFLOAD_CKSUM;
			hw_desc->txm.ol_flags |= OCTEP_TX_OFFLOAD_TSO;
			hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size;
			hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs;
		} else if (feat & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
			hw_desc->txm.ol_flags = OCTEP_TX_OFFLOAD_CKSUM;
		}
		/* due to ESR txm will be swapped by hw */
		hw_desc->txm64[0] = (__force u64)cpu_to_be64(hw_desc->txm64[0]);
	}

	xmit_more = netdev_xmit_more();

	__netdev_tx_sent_queue(iq->netdev_q, skb->len, xmit_more);

	skb_tx_timestamp(skb);
	iq->fill_cnt++;
	wi++;
	iq->host_write_index = wi & iq->ring_size_mask;

	/* octep_iq_full_check() stops the queue and returns non-zero if the
	 * queue has become full after adding the current packet; in that
	 * case, ring the doorbell now instead of deferring it for xmit_more
	 * batching.
	 */
	if (!octep_iq_full_check(iq) && xmit_more &&
	    iq->fill_cnt < iq->fill_threshold)
		return NETDEV_TX_OK;

	/* Flush the hw descriptor before writing to doorbell */
	wmb();
	/* Ring Doorbell to notify the NIC of new packets */
	writel(iq->fill_cnt, iq->doorbell_reg);
	iq->stats.instr_posted += iq->fill_cnt;
	iq->fill_cnt = 0;
	return NETDEV_TX_OK;

dma_map_sg_err:
	if (si > 0) {
		dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
				 sglist[0].len[3], DMA_TO_DEVICE);
		sglist[0].len[3] = 0;
	}
	while (si > 1) {
		dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
			       sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
		sglist[si >> 2].len[3 - (si & 3)] = 0;
		si--;
	}
	tx_buffer->gather = 0;
dma_map_err:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * octep_get_stats64() - Get Octeon network device statistics.
 *
 * @netdev: kernel network device.
 * @stats: pointer to stats structure to be filled in.
 */
static void octep_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
	struct octep_device *oct = netdev_priv(netdev);
	int q;

	if (netif_running(netdev))
		octep_ctrl_net_get_if_stats(oct,
					    OCTEP_CTRL_NET_INVALID_VFID,
					    &oct->iface_rx_stats,
					    &oct->iface_tx_stats);

	tx_packets = 0;
	tx_bytes = 0;
	rx_packets = 0;
	rx_bytes = 0;
	for (q = 0; q < oct->num_oqs; q++) {
		struct octep_iq *iq = oct->iq[q];
		struct octep_oq *oq = oct->oq[q];

		tx_packets += iq->stats.instr_completed;
		tx_bytes += iq->stats.bytes_sent;
		rx_packets += oq->stats.packets;
		rx_bytes += oq->stats.bytes;
	}
	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;
	stats->rx_bytes = rx_bytes;
	stats->multicast = oct->iface_rx_stats.mcast_pkts;
	stats->rx_errors = oct->iface_rx_stats.err_pkts;
	stats->collisions = oct->iface_tx_stats.xscol;
	stats->tx_fifo_errors = oct->iface_tx_stats.undflw;
}

/**
 * octep_tx_timeout_task - work queue task to handle Tx queue timeout.
 *
 * @work: pointer to Tx queue timeout work_struct
 *
 * Stop and start the device so that it frees up all queue resources
 * and restarts the queues; this potentially clears a Tx queue timeout
 * condition.
 **/
static void octep_tx_timeout_task(struct work_struct *work)
{
	struct octep_device *oct = container_of(work, struct octep_device,
						tx_timeout_task);
	struct net_device *netdev = oct->netdev;

	rtnl_lock();
	if (netif_running(netdev)) {
		octep_stop(netdev);
		octep_open(netdev);
	}
	rtnl_unlock();
}

/**
 * octep_tx_timeout() - Handle Tx Queue timeout.
 *
 * @netdev: pointer to kernel network device.
 * @txqueue: Timed out Tx queue number.
 *
 * Schedule a work to handle Tx queue timeout.
 */
static void octep_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct octep_device *oct = netdev_priv(netdev);

	queue_work(octep_wq, &oct->tx_timeout_task);
}

static int octep_set_mac(struct net_device *netdev, void *p)
{
	struct octep_device *oct = netdev_priv(netdev);
	struct sockaddr *addr = (struct sockaddr *)p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = octep_ctrl_net_set_mac_addr(oct, OCTEP_CTRL_NET_INVALID_VFID,
					  addr->sa_data, true);
	if (err)
		return err;

	memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
	eth_hw_addr_set(netdev, addr->sa_data);

	return 0;
}

static int octep_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octep_device *oct = netdev_priv(netdev);
	struct octep_iface_link_info *link_info;
	int err = 0;

	link_info = &oct->link_info;
	if (link_info->mtu == new_mtu)
		return 0;

	err = octep_ctrl_net_set_mtu(oct, OCTEP_CTRL_NET_INVALID_VFID, new_mtu,
				     true);
	if (!err) {
		oct->link_info.mtu = new_mtu;
		WRITE_ONCE(netdev->mtu, new_mtu);
	}

	return err;
}

static int octep_set_features(struct net_device *dev, netdev_features_t features)
{
	struct octep_ctrl_net_offloads offloads = { 0 };
	struct octep_device *oct = netdev_priv(dev);
	int err;

	/* We only support features received from firmware */
	if ((features & dev->hw_features) != features)
		return -EINVAL;

	if (features & NETIF_F_TSO)
		offloads.tx_offloads |= OCTEP_TX_OFFLOAD_TSO;

	if (features & NETIF_F_TSO6)
		offloads.tx_offloads |= OCTEP_TX_OFFLOAD_TSO;

	if (features & NETIF_F_IP_CSUM)
		offloads.tx_offloads |= OCTEP_TX_OFFLOAD_CKSUM;

	if (features & NETIF_F_IPV6_CSUM)
		offloads.tx_offloads |= OCTEP_TX_OFFLOAD_CKSUM;

	if (features & NETIF_F_RXCSUM)
		offloads.rx_offloads |= OCTEP_RX_OFFLOAD_CKSUM;

	err = octep_ctrl_net_set_offloads(oct,
					  OCTEP_CTRL_NET_INVALID_VFID,
					  &offloads,
					  true);
	if (!err)
		dev->features = features;

	return err;
}

static const struct net_device_ops octep_netdev_ops = {
	.ndo_open = octep_open,
	.ndo_stop = octep_stop,
	.ndo_start_xmit = octep_start_xmit,
	.ndo_get_stats64 = octep_get_stats64,
	.ndo_tx_timeout = octep_tx_timeout,
	.ndo_set_mac_address = octep_set_mac,
	.ndo_change_mtu = octep_change_mtu,
	.ndo_set_features = octep_set_features,
};

/**
 * octep_intr_poll_task - work queue task to process non-ioq interrupts.
 *
 * @work: pointer to mbox work_struct
 *
 * Process non-ioq interrupts to handle control mailbox, pfvf mailbox.
 **/
static void octep_intr_poll_task(struct work_struct *work)
{
	struct octep_device *oct = container_of(work, struct octep_device,
						intr_poll_task.work);

	if (!oct->poll_non_ioq_intr) {
		dev_info(&oct->pdev->dev, "Interrupt poll task stopped.\n");
		return;
	}

	oct->hw_ops.poll_non_ioq_interrupts(oct);
	queue_delayed_work(octep_wq, &oct->intr_poll_task,
			   msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
}

/**
 * octep_hb_timeout_task - work queue task to check firmware heartbeat.
 *
 * @work: pointer to hb work_struct
 *
 * Check for heartbeat miss count. Uninitialize oct device if miss count
 * exceeds configured max heartbeat miss count.
 **/
static void octep_hb_timeout_task(struct work_struct *work)
{
	struct octep_device *oct = container_of(work, struct octep_device,
						hb_task.work);
	int miss_cnt;

	miss_cnt = atomic_inc_return(&oct->hb_miss_cnt);
	if (miss_cnt < oct->conf->fw_info.hb_miss_count) {
		queue_delayed_work(octep_wq, &oct->hb_task,
				   msecs_to_jiffies(oct->conf->fw_info.hb_interval));
		return;
	}

	dev_err(&oct->pdev->dev, "Missed %u heartbeats. Uninitializing\n",
		miss_cnt);
	rtnl_lock();
	if (netif_running(oct->netdev))
		octep_stop(oct->netdev);
	rtnl_unlock();
}

/**
 * octep_ctrl_mbox_task - work queue task to handle ctrl mbox messages.
 *
 * @work: pointer to ctrl mbox work_struct
 *
 * Poll ctrl mbox message queue and handle control messages from firmware.
 **/
static void octep_ctrl_mbox_task(struct work_struct *work)
{
	struct octep_device *oct = container_of(work, struct octep_device,
						ctrl_mbox_task);

	octep_ctrl_net_recv_fw_messages(oct);
}

static const char *octep_devid_to_str(struct octep_device *oct)
{
	switch (oct->chip_id) {
	case OCTEP_PCI_DEVICE_ID_CN98_PF:
		return "CN98XX";
	case OCTEP_PCI_DEVICE_ID_CN93_PF:
		return "CN93XX";
	case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
		return "CNF95N";
	case OCTEP_PCI_DEVICE_ID_CN10KA_PF:
		return "CN10KA";
	case OCTEP_PCI_DEVICE_ID_CNF10KA_PF:
		return "CNF10KA";
	case OCTEP_PCI_DEVICE_ID_CNF10KB_PF:
		return "CNF10KB";
	case OCTEP_PCI_DEVICE_ID_CN10KB_PF:
		return "CN10KB";
	default:
		return "Unsupported";
	}
}

/**
 * octep_device_setup() - Setup Octeon Device.
 *
 * @oct: Octeon device private data structure.
 *
 * Setup Octeon device hardware operations, configuration, etc ...
 *
 * Return: 0, on success.
 *         -1 or negative error code, on failure.
 */
int octep_device_setup(struct octep_device *oct)
{
	struct pci_dev *pdev = oct->pdev;
	int i, ret;

	/* allocate memory for oct->conf */
	oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
	if (!oct->conf)
		return -ENOMEM;

	/* Map BAR regions */
	for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
		oct->mmio[i].hw_addr =
			ioremap(pci_resource_start(oct->pdev, i * 2),
				pci_resource_len(oct->pdev, i * 2));
		if (!oct->mmio[i].hw_addr)
			goto unmap_prev;

		oct->mmio[i].mapped = 1;
	}

	oct->chip_id = pdev->device;
	oct->rev_id = pdev->revision;
	dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);

	switch (oct->chip_id) {
	case OCTEP_PCI_DEVICE_ID_CN98_PF:
	case OCTEP_PCI_DEVICE_ID_CN93_PF:
	case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
		dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n",
			 octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct),
			 OCTEP_MINOR_REV(oct));
		octep_device_setup_cn93_pf(oct);
		break;
	case OCTEP_PCI_DEVICE_ID_CNF10KA_PF:
	case OCTEP_PCI_DEVICE_ID_CN10KA_PF:
	case OCTEP_PCI_DEVICE_ID_CNF10KB_PF:
	case OCTEP_PCI_DEVICE_ID_CN10KB_PF:
		dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n",
			 octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct),
			 OCTEP_MINOR_REV(oct));
		octep_device_setup_cnxk_pf(oct);
		break;
	default:
		dev_err(&pdev->dev,
			"%s: unsupported device\n", __func__);
		goto unsupported_dev;
	}

	ret = octep_ctrl_net_init(oct);
	if (ret)
		return ret;

	INIT_WORK(&oct->tx_timeout_task, octep_tx_timeout_task);
	INIT_WORK(&oct->ctrl_mbox_task, octep_ctrl_mbox_task);
	INIT_DELAYED_WORK(&oct->intr_poll_task, octep_intr_poll_task);
	oct->poll_non_ioq_intr = true;
	queue_delayed_work(octep_wq, &oct->intr_poll_task,
			   msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));

	atomic_set(&oct->hb_miss_cnt, 0);
	INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task);

	return 0;

unsupported_dev:
	i = OCTEP_MMIO_REGIONS;
unmap_prev:
	while (i--)
		iounmap(oct->mmio[i].hw_addr);

	kfree(oct->conf);
	return -1;
}

/**
 * octep_device_cleanup() - Cleanup Octeon Device.
 *
 * @oct: Octeon device private data structure.
 *
 * Cleanup Octeon device allocated resources.
 */
static void octep_device_cleanup(struct octep_device *oct)
{
	int i;

	oct->poll_non_ioq_intr = false;
	cancel_delayed_work_sync(&oct->intr_poll_task);
	cancel_work_sync(&oct->ctrl_mbox_task);

	dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");

	for (i = 0; i < OCTEP_MAX_VF; i++) {
		vfree(oct->mbox[i]);
		oct->mbox[i] = NULL;
	}

	octep_delete_pfvf_mbox(oct);
	octep_ctrl_net_uninit(oct);
	cancel_delayed_work_sync(&oct->hb_task);

	oct->hw_ops.soft_reset(oct);
	for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
		if (oct->mmio[i].mapped)
			iounmap(oct->mmio[i].hw_addr);
	}

	kfree(oct->conf);
	oct->conf = NULL;
}

static bool get_fw_ready_status(struct pci_dev *pdev)
{
	u32 pos = 0;
	u16 vsec_id;
	u8 status;

	while ((pos = pci_find_next_ext_capability(pdev, pos,
						   PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_word(pdev, pos + 4, &vsec_id);
#define FW_STATUS_VSEC_ID  0xA3
		if (vsec_id != FW_STATUS_VSEC_ID)
			continue;

		pci_read_config_byte(pdev, (pos + 8), &status);
		dev_info(&pdev->dev, "Firmware ready status = %u\n", status);
#define FW_STATUS_READY 1ULL
		return status == FW_STATUS_READY;
	}
	return false;
}

/**
 * octep_probe() - Octeon PCI device probe handler.
 *
 * @pdev: PCI device structure.
 * @ent: entry in Octeon PCI device ID table.
 *
 * Initializes and enables the Octeon PCI device for network operations.
 * Initializes Octeon private data structure and registers a network device.
 *
 * Return: 0, on successful initialization of the device.
 *         Negative error code, on failure.
 */
static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct octep_device *octep_dev = NULL;
	struct net_device *netdev;
	int max_rx_pktlen;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		return err;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
		goto err_dma_mask;
	}

	err = pci_request_mem_regions(pdev, OCTEP_DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
		goto err_pci_regions;
	}

	pci_set_master(pdev);

	if (!get_fw_ready_status(pdev)) {
		dev_notice(&pdev->dev, "Firmware not ready; defer probe.\n");
		err = -EPROBE_DEFER;
		goto err_alloc_netdev;
	}

	netdev = alloc_etherdev_mq(sizeof(struct octep_device),
				   OCTEP_MAX_QUEUES);
	if (!netdev) {
		dev_err(&pdev->dev, "Failed to allocate netdev\n");
		err = -ENOMEM;
		goto err_alloc_netdev;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);

	octep_dev = netdev_priv(netdev);
	octep_dev->netdev = netdev;
	octep_dev->pdev = pdev;
	octep_dev->dev = &pdev->dev;
	pci_set_drvdata(pdev, octep_dev);

	err = octep_device_setup(octep_dev);
	if (err) {
		dev_err(&pdev->dev, "Device setup failed\n");
		goto err_octep_config;
	}

	err = octep_setup_pfvf_mbox(octep_dev);
	if (err) {
		dev_err(&pdev->dev, "PF-VF mailbox setup failed\n");
		goto register_dev_err;
	}

	err = octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
				      &octep_dev->conf->fw_info);
	if (err) {
		dev_err(&pdev->dev, "Failed to get firmware info\n");
		goto register_dev_err;
	}
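	/* Start monitoring the firmware heartbeat at the interval reported
	 * by the firmware; octep_hb_timeout_task() stops the network device
	 * if the configured miss count is exceeded.
	 */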
	dev_info(&octep_dev->pdev->dev, "Heartbeat interval %u msecs Heartbeat miss count %u\n",
		 octep_dev->conf->fw_info.hb_interval,
		 octep_dev->conf->fw_info.hb_miss_count);
	queue_delayed_work(octep_wq, &octep_dev->hb_task,
			   msecs_to_jiffies(octep_dev->conf->fw_info.hb_interval));

	netdev->netdev_ops = &octep_netdev_ops;
	octep_set_ethtool_ops(netdev);
	netif_carrier_off(netdev);

	netdev->hw_features = NETIF_F_SG;
	if (OCTEP_TX_IP_CSUM(octep_dev->conf->fw_info.tx_ol_flags))
		netdev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);

	if (OCTEP_RX_IP_CSUM(octep_dev->conf->fw_info.rx_ol_flags))
		netdev->hw_features |= NETIF_F_RXCSUM;

	max_rx_pktlen = octep_ctrl_net_get_mtu(octep_dev, OCTEP_CTRL_NET_INVALID_VFID);
	if (max_rx_pktlen < 0) {
		dev_err(&octep_dev->pdev->dev,
			"Failed to get max receive packet size; err = %d\n", max_rx_pktlen);
		err = max_rx_pktlen;
		goto register_dev_err;
	}
	netdev->min_mtu = OCTEP_MIN_MTU;
	netdev->max_mtu = max_rx_pktlen - (ETH_HLEN + ETH_FCS_LEN);
	netdev->mtu = OCTEP_DEFAULT_MTU;

	if (OCTEP_TX_TSO(octep_dev->conf->fw_info.tx_ol_flags)) {
		netdev->hw_features |= NETIF_F_TSO;
		netif_set_tso_max_size(netdev, netdev->max_mtu);
	}

	netdev->features |= netdev->hw_features;
	err = octep_ctrl_net_get_mac_addr(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
					  octep_dev->mac_addr);
	if (err) {
		dev_err(&pdev->dev, "Failed to get mac address\n");
		goto register_dev_err;
	}
	eth_hw_addr_set(netdev, octep_dev->mac_addr);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register netdev\n");
		goto register_dev_err;
	}
	dev_info(&pdev->dev, "Device probe successful\n");
	return 0;

register_dev_err:
	octep_device_cleanup(octep_dev);
err_octep_config:
	free_netdev(netdev);
err_alloc_netdev:
	pci_release_mem_regions(pdev);
err_pci_regions:
err_dma_mask:
	pci_disable_device(pdev);
	return err;
}

static int octep_sriov_disable(struct octep_device *oct)
{
	struct pci_dev *pdev = oct->pdev;

	if (pci_vfs_assigned(oct->pdev)) {
		dev_warn(&pdev->dev, "Can't disable SRIOV while VFs are assigned\n");
		return -EPERM;
	}

	pci_disable_sriov(pdev);
	CFG_GET_ACTIVE_VFS(oct->conf) = 0;

	return 0;
}

/**
 * octep_remove() - Remove Octeon PCI device from driver control.
 *
 * @pdev: PCI device structure of the Octeon device.
 *
 * Cleanup all resources allocated for the Octeon device.
 * Unregister the network device and disable the PCI device.
 */
static void octep_remove(struct pci_dev *pdev)
{
	struct octep_device *oct = pci_get_drvdata(pdev);
	struct net_device *netdev;

	if (!oct)
		return;

	netdev = oct->netdev;
	octep_sriov_disable(oct);
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	cancel_work_sync(&oct->tx_timeout_task);
	octep_device_cleanup(oct);
	pci_release_mem_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}

static int octep_sriov_enable(struct octep_device *oct, int num_vfs)
{
	struct pci_dev *pdev = oct->pdev;
	int err;

	CFG_GET_ACTIVE_VFS(oct->conf) = num_vfs;
	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SRIOV err=%d\n", err);
		CFG_GET_ACTIVE_VFS(oct->conf) = 0;
		return err;
	}

	return num_vfs;
}

static int octep_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct octep_device *oct = pci_get_drvdata(pdev);
	int max_nvfs;

	if (num_vfs == 0)
		return octep_sriov_disable(oct);

	max_nvfs = CFG_GET_MAX_VFS(oct->conf);

	if (num_vfs > max_nvfs) {
		dev_err(&pdev->dev, "Invalid VF count Max supported VFs = %d\n",
			max_nvfs);
		return -EINVAL;
	}

	return octep_sriov_enable(oct, num_vfs);
}

static struct pci_driver octep_driver = {
	.name = OCTEP_DRV_NAME,
	.id_table = octep_pci_id_tbl,
	.probe = octep_probe,
	.remove = octep_remove,
	.sriov_configure = octep_sriov_configure,
};

/**
 * octep_init_module() - Module initialization.
 *
 * Create common resources for the driver and register the PCI driver.
 */
static int __init octep_init_module(void)
{
	int ret;

	pr_info("%s: Loading %s ...\n", OCTEP_DRV_NAME, OCTEP_DRV_STRING);

	/* work queue for all deferred tasks */
	octep_wq = create_singlethread_workqueue(OCTEP_DRV_NAME);
	if (!octep_wq) {
		pr_err("%s: Failed to create common workqueue\n",
		       OCTEP_DRV_NAME);
		return -ENOMEM;
	}

	ret = pci_register_driver(&octep_driver);
	if (ret < 0) {
		pr_err("%s: Failed to register PCI driver; err=%d\n",
		       OCTEP_DRV_NAME, ret);
		destroy_workqueue(octep_wq);
		return ret;
	}

	pr_info("%s: Loaded successfully !\n", OCTEP_DRV_NAME);

	return ret;
}

/**
 * octep_exit_module() - Module exit routine.
 *
 * Unregister the driver from the PCI subsystem and clean up common resources.
 */
static void __exit octep_exit_module(void)
{
	pr_info("%s: Unloading ...\n", OCTEP_DRV_NAME);

	pci_unregister_driver(&octep_driver);
	destroy_workqueue(octep_wq);

	pr_info("%s: Unloading complete\n", OCTEP_DRV_NAME);
}

module_init(octep_init_module);
module_exit(octep_exit_module);