/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_common.c
 * Netronome network device driver: Common functions between PF and VF
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 *          Chris Telfer <chris.telfer@netronome.com>
 */

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/page_ref.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/ktime.h>

#include <net/switchdev.h>
#include <net/vxlan.h>

#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_net_sriov.h"
#include "nfp_port.h"

/**
 * nfp_net_get_fw_version() - Read and parse the FW version
 * @fw_ver:   Output fw_version structure to read to
 * @ctrl_bar: Mapped address of the control BAR
 */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
			    void __iomem *ctrl_bar)
{
	u32 reg;

	reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
	put_unaligned_le32(reg, fw_ver);
}

static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
{
	return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
				    dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
				    dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}
static void
nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
	dma_sync_single_for_device(dp->dev, dma_addr,
				   dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
				   dp->rx_dma_dir);
}

static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
	dma_unmap_single_attrs(dp->dev, dma_addr,
			       dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
			       dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
				    unsigned int len)
{
	dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
				len, dp->rx_dma_dir);
}

/* Firmware reconfig
 *
 * Firmware reconfig may take a while so we have two versions of it -
 * synchronous and asynchronous (posted).  All synchronous callers are holding
 * RTNL so we don't have to worry about serializing them.
 */
static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
{
	nn_writel(nn, NFP_NET_CFG_UPDATE, update);
	/* ensure update is written before pinging HW */
	nn_pci_flush(nn);
	nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
}

/* Pass 0 as update to run posted reconfigs. */
static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
{
	update |= nn->reconfig_posted;
	nn->reconfig_posted = 0;

	nfp_net_reconfig_start(nn, update);

	nn->reconfig_timer_active = true;
	mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
}

static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
{
	u32 reg;

	reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
	if (reg == 0)
		return true;
	if (reg & NFP_NET_CFG_UPDATE_ERR) {
		nn_err(nn, "Reconfig error: 0x%08x\n", reg);
		return true;
	} else if (last_check) {
		nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
		return true;
	}

	return false;
}

static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
{
	bool timed_out = false;

	/* Poll update field, waiting for NFP to ack the config */
	while (!nfp_net_reconfig_check_done(nn, timed_out)) {
		msleep(1);
		timed_out = time_is_before_eq_jiffies(deadline);
	}

	if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
		return -EIO;

	return timed_out ? -EIO : 0;
}

static void nfp_net_reconfig_timer(struct timer_list *t)
{
	struct nfp_net *nn = from_timer(nn, t, reconfig_timer);

	spin_lock_bh(&nn->reconfig_lock);

	nn->reconfig_timer_active = false;

	/* If sync caller is present it will take over from us */
	if (nn->reconfig_sync_present)
		goto done;

	/* Read reconfig status and report errors */
	nfp_net_reconfig_check_done(nn, true);

	if (nn->reconfig_posted)
		nfp_net_reconfig_start_async(nn, 0);
done:
	spin_unlock_bh(&nn->reconfig_lock);
}
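/* Illustrative sketch (not a quote of real callers; UPDATE bits are the ones
 * from nfp_net_ctrl.h): because posted requests are OR-ed into
 * nn->reconfig_posted, updates posted while a previous reconfig is still in
 * flight collapse into a single BAR write once the machinery goes idle, e.g.:
 *
 *	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_RSS);
 *	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_RING);
 *	// timer/idle check fires -> one reconfig with both bits set
 *
 * See nfp_net_reconfig_post() below.
 */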
/**
 * nfp_net_reconfig_post() - Post async reconfig request
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Record FW reconfiguration request.  Reconfiguration will be kicked off
 * whenever reconfiguration machinery is idle.  Multiple requests can be
 * merged together!
 */
static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
{
	spin_lock_bh(&nn->reconfig_lock);

	/* Sync caller will kick off async reconf when it's done, just post */
	if (nn->reconfig_sync_present) {
		nn->reconfig_posted |= update;
		goto done;
	}

	/* Opportunistically check if the previous command is done */
	if (!nn->reconfig_timer_active ||
	    nfp_net_reconfig_check_done(nn, false))
		nfp_net_reconfig_start_async(nn, update);
	else
		nn->reconfig_posted |= update;
done:
	spin_unlock_bh(&nn->reconfig_lock);
}

static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
{
	bool cancelled_timer = false;
	u32 pre_posted_requests;

	spin_lock_bh(&nn->reconfig_lock);

	nn->reconfig_sync_present = true;

	if (nn->reconfig_timer_active) {
		nn->reconfig_timer_active = false;
		cancelled_timer = true;
	}
	pre_posted_requests = nn->reconfig_posted;
	nn->reconfig_posted = 0;

	spin_unlock_bh(&nn->reconfig_lock);

	if (cancelled_timer) {
		del_timer_sync(&nn->reconfig_timer);
		nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
	}

	/* Run the posted reconfigs which were issued before we started */
	if (pre_posted_requests) {
		nfp_net_reconfig_start(nn, pre_posted_requests);
		nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
	}
}

static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
{
	nfp_net_reconfig_sync_enter(nn);

	spin_lock_bh(&nn->reconfig_lock);
	nn->reconfig_sync_present = false;
	spin_unlock_bh(&nn->reconfig_lock);
}
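/* Illustrative sketch (hypothetical caller): synchronous callers hold RTNL,
 * so a typical ethtool/ndo-style configuration change looks roughly like:
 *
 *	// under rtnl_lock()
 *	nn->dp.ctrl = new_ctrl;
 *	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
 *	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);	// see below
 */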
/**
 * nfp_net_reconfig() - Reconfigure the firmware
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue.  Then
 * poll until the firmware has acknowledged the update by zeroing the
 * update word.
 *
 * Return: Negative errno on error, 0 on success
 */
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
	int ret;

	nfp_net_reconfig_sync_enter(nn);

	nfp_net_reconfig_start(nn, update);
	ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);

	spin_lock_bh(&nn->reconfig_lock);

	if (nn->reconfig_posted)
		nfp_net_reconfig_start_async(nn, 0);

	nn->reconfig_sync_present = false;

	spin_unlock_bh(&nn->reconfig_lock);

	return ret;
}

/**
 * nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox
 * @nn:        NFP Net device to reconfigure
 * @mbox_cmd:  The value for the mailbox command
 *
 * Helper function for mailbox updates
 *
 * Return: Negative errno on error, 0 on success
 */
static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
{
	u32 mbox = nn->tlv_caps.mbox_off;
	int ret;

	if (!nfp_net_has_mbox(&nn->tlv_caps)) {
		nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd);
		return -EIO;
	}

	nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);

	ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
	if (ret) {
		nn_err(nn, "Mailbox update error\n");
		return ret;
	}

	return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}

/* Interrupt configuration and handling
 */

/**
 * nfp_net_irq_unmask() - Unmask automasked interrupt
 * @nn:       NFP Network structure
 * @entry_nr: MSI-X table entry
 *
 * Clear the ICR for the IRQ entry.
 */
static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
{
	nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
	nn_pci_flush(nn);
}

/**
 * nfp_net_irqs_alloc() - allocates MSI-X irqs
 * @pdev:        PCI device structure
 * @irq_entries: Array to be initialized and used to hold the irq entries
 * @min_irqs:    Minimal acceptable number of interrupts
 * @wanted_irqs: Target number of interrupts to allocate
 *
 * Return: Number of irqs obtained or 0 on error.
 */
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
		   unsigned int min_irqs, unsigned int wanted_irqs)
{
	unsigned int i;
	int got_irqs;

	for (i = 0; i < wanted_irqs; i++)
		irq_entries[i].entry = i;

	got_irqs = pci_enable_msix_range(pdev, irq_entries,
					 min_irqs, wanted_irqs);
	if (got_irqs < 0) {
		dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
			min_irqs, wanted_irqs, got_irqs);
		return 0;
	}

	if (got_irqs < wanted_irqs)
		dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
			 wanted_irqs, got_irqs);

	return got_irqs;
}
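/* Illustrative sketch (hypothetical probe-time caller, error handling
 * elided): the expected lifecycle of these helpers is alloc -> assign ->
 * run -> disable:
 *
 *	n = nfp_net_irqs_alloc(pdev, irq_entries, min_irqs, wanted_irqs);
 *	if (!n)
 *		return -ENODEV;
 *	nfp_net_irqs_assign(nn, irq_entries, n);	// see below
 *	...
 *	nfp_net_irqs_disable(pdev);			// undo on remove
 */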
/**
 * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
 * @nn:          NFP Network structure
 * @irq_entries: Table of allocated interrupts
 * @n:           Size of @irq_entries (number of entries to grab)
 *
 * After interrupts are allocated with nfp_net_irqs_alloc() this function
 * should be called to assign them to a specific netdev (port).
 */
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
		    unsigned int n)
{
	struct nfp_net_dp *dp = &nn->dp;

	nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
	dp->num_r_vecs = nn->max_r_vecs;

	memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);

	if (dp->num_rx_rings > dp->num_r_vecs ||
	    dp->num_tx_rings > dp->num_r_vecs)
		dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
			 dp->num_rx_rings, dp->num_tx_rings,
			 dp->num_r_vecs);

	dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
	dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
	dp->num_stack_tx_rings = dp->num_tx_rings;
}

/**
 * nfp_net_irqs_disable() - Disable interrupts
 * @pdev: PCI device structure
 *
 * Undoes what nfp_net_irqs_alloc() does.
 */
void nfp_net_irqs_disable(struct pci_dev *pdev)
{
	pci_disable_msix(pdev);
}

/**
 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
 * @irq:  Interrupt
 * @data: Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
{
	struct nfp_net_r_vector *r_vec = data;

	napi_schedule_irqoff(&r_vec->napi);

	/* The FW auto-masks any interrupt, either via the MASK bit in
	 * the MSI-X table or via the per entry ICR field.  So there
	 * is no need to disable interrupts here.
	 */
	return IRQ_HANDLED;
}

static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
{
	struct nfp_net_r_vector *r_vec = data;

	tasklet_schedule(&r_vec->tasklet);

	return IRQ_HANDLED;
}

/**
 * nfp_net_read_link_status() - Reread link status from control BAR
 * @nn: NFP Network structure
 */
static void nfp_net_read_link_status(struct nfp_net *nn)
{
	unsigned long flags;
	bool link_up;
	u32 sts;

	spin_lock_irqsave(&nn->link_status_lock, flags);

	sts = nn_readl(nn, NFP_NET_CFG_STS);
	link_up = !!(sts & NFP_NET_CFG_STS_LINK);

	if (nn->link_up == link_up)
		goto out;

	nn->link_up = link_up;
	if (nn->port)
		set_bit(NFP_PORT_CHANGED, &nn->port->flags);

	if (nn->link_up) {
		netif_carrier_on(nn->dp.netdev);
		netdev_info(nn->dp.netdev, "NIC Link is Up\n");
	} else {
		netif_carrier_off(nn->dp.netdev);
		netdev_info(nn->dp.netdev, "NIC Link is Down\n");
	}
out:
	spin_unlock_irqrestore(&nn->link_status_lock, flags);
}

/**
 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
 * @irq:  Interrupt
 * @data: Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
{
	struct nfp_net *nn = data;
	struct msix_entry *entry;

	entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];

	nfp_net_read_link_status(nn);

	nfp_net_irq_unmask(nn, entry->entry);

	return IRQ_HANDLED;
}
/**
 * nfp_net_irq_exn() - Interrupt service routine for exceptions
 * @irq:  Interrupt
 * @data: Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_exn(int irq, void *data)
{
	struct nfp_net *nn = data;

	nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
	/* XXX TO BE IMPLEMENTED */
	return IRQ_HANDLED;
}

/**
 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
 * @tx_ring: TX ring structure
 * @r_vec:   IRQ vector servicing this ring
 * @idx:     Ring index
 * @is_xdp:  Is this an XDP TX ring?
 */
static void
nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
		     struct nfp_net_r_vector *r_vec, unsigned int idx,
		     bool is_xdp)
{
	struct nfp_net *nn = r_vec->nfp_net;

	tx_ring->idx = idx;
	tx_ring->r_vec = r_vec;
	tx_ring->is_xdp = is_xdp;
	u64_stats_init(&tx_ring->r_vec->tx_sync);

	tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
	tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
}

/**
 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
 * @rx_ring: RX ring structure
 * @r_vec:   IRQ vector servicing this ring
 * @idx:     Ring index
 */
static void
nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
		     struct nfp_net_r_vector *r_vec, unsigned int idx)
{
	struct nfp_net *nn = r_vec->nfp_net;

	rx_ring->idx = idx;
	rx_ring->r_vec = r_vec;
	u64_stats_init(&rx_ring->r_vec->rx_sync);

	rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
	rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
}

/**
 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
 * @nn:          NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @format:      printf-style format to construct the interrupt name
 * @name:        Pointer to allocated space for interrupt name
 * @name_sz:     Size of space for interrupt name
 * @vector_idx:  Index of MSI-X vector used for this interrupt
 * @handler:     IRQ handler to register for this interrupt
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
			const char *format, char *name, size_t name_sz,
			unsigned int vector_idx, irq_handler_t handler)
{
	struct msix_entry *entry;
	int err;

	entry = &nn->irq_entries[vector_idx];

	snprintf(name, name_sz, format, nfp_net_name(nn));
	err = request_irq(entry->vector, handler, 0, name, nn);
	if (err) {
		nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
		       entry->vector, err);
		return err;
	}
	nn_writeb(nn, ctrl_offset, entry->entry);
	nfp_net_irq_unmask(nn, entry->entry);

	return 0;
}

/**
 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
 * @nn:          NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @vector_idx:  Index of MSI-X vector used for this interrupt
 */
static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
				 unsigned int vector_idx)
{
	nn_writeb(nn, ctrl_offset, 0xff);
	nn_pci_flush(nn);
	free_irq(nn->irq_entries[vector_idx].vector, nn);
}
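/* Illustrative sketch (the name buffer field is an assumption about struct
 * nfp_net, not quoted from it): the LSC and EXN vectors are wired up with
 * the two helpers above, roughly:
 *
 *	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
 *				      nn->lsc_name, sizeof(nn->lsc_name),
 *				      NFP_NET_IRQ_LSC_IDX, nfp_net_irq_lsc);
 *	...
 *	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
 */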
/* Transmit
 *
 * One queue controller peripheral queue is used for transmit.  The
 * driver enqueues packets for transmit by advancing the write
 * pointer.  The device indicates that packets have been transmitted by
 * advancing the read pointer.  The driver maintains a local copy of
 * the read and write pointer in @struct nfp_net_tx_ring.  The driver
 * keeps @wr_p in sync with the queue controller write pointer and can
 * determine how many packets have been transmitted by comparing its
 * copy of the read pointer @rd_p with the read pointer maintained by
 * the queue controller peripheral.
 */

/**
 * nfp_net_tx_full() - Check if the TX ring is full
 * @tx_ring: TX ring to check
 * @dcnt:    Number of descriptors that need to be enqueued (must be >= 1)
 *
 * This function checks, based on the *host copy* of the read/write
 * pointers, whether a given TX ring is full.  The real TX queue may have
 * some newly made available slots.
 *
 * Return: True if the ring is full.
 */
static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
{
	return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
}

/* Wrappers for deciding when to stop and restart TX queues */
static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
{
	return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
}

static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
{
	return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
}

/**
 * nfp_net_tx_ring_stop() - stop tx ring
 * @nd_q:    netdev queue
 * @tx_ring: driver tx queue structure
 *
 * Safely stop TX ring.  Remember that while we are running .start_xmit()
 * someone else may be cleaning the TX ring completions so we need to be
 * extra careful here.
 */
static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
				 struct nfp_net_tx_ring *tx_ring)
{
	netif_tx_stop_queue(nd_q);

	/* We can race with the TX completion out of NAPI so recheck */
	smp_mb();
	if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
		netif_tx_start_queue(nd_q);
}
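/* Worked example (illustrative): with cnt == 1024, wr_p == 1030 and
 * rd_p == 10 there are 1020 descriptors in flight, so
 * nfp_net_tx_full(tx_ring, 1) evaluates 1020 >= 1023 -> not full.  wr_p and
 * rd_p are free-running counters that are only masked by D_IDX() when used
 * as array indexes, so the subtraction stays correct across wrap-around.
 * Note the stop and wake thresholds above differ on purpose
 * (MAX_SKB_FRAGS + 1 vs MAX_SKB_FRAGS * 4) to add hysteresis.
 */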
/**
 * nfp_net_tx_tso() - Set up Tx descriptor for LSO
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd:   Pointer to HW TX descriptor
 * @skb:   Pointer to SKB
 *
 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
 */
static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
			   struct nfp_net_tx_buf *txbuf,
			   struct nfp_net_tx_desc *txd, struct sk_buff *skb)
{
	u32 hdrlen;
	u16 mss;

	if (!skb_is_gso(skb))
		return;

	if (!skb->encapsulation) {
		txd->l3_offset = skb_network_offset(skb);
		txd->l4_offset = skb_transport_offset(skb);
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
	} else {
		txd->l3_offset = skb_inner_network_offset(skb);
		txd->l4_offset = skb_inner_transport_offset(skb);
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			inner_tcp_hdrlen(skb);
	}

	txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
	txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);

	mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
	txd->lso_hdrlen = hdrlen;
	txd->mss = cpu_to_le16(mss);
	txd->flags |= PCIE_DESC_TX_LSO;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_lso++;
	u64_stats_update_end(&r_vec->tx_sync);
}

/**
 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
 * @dp:    NFP Net data path struct
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd:   Pointer to TX descriptor
 * @skb:   Pointer to SKB
 *
 * This function sets the TX checksum flags in the TX descriptor based
 * on the configuration and the protocol of the packet to be transmitted.
 */
static void nfp_net_tx_csum(struct nfp_net_dp *dp,
			    struct nfp_net_r_vector *r_vec,
			    struct nfp_net_tx_buf *txbuf,
			    struct nfp_net_tx_desc *txd, struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct iphdr *iph;
	u8 l4_hdr;

	if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
		return;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return;

	txd->flags |= PCIE_DESC_TX_CSUM;
	if (skb->encapsulation)
		txd->flags |= PCIE_DESC_TX_ENCAP;

	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ?
		inner_ipv6_hdr(skb) : ipv6_hdr(skb);

	if (iph->version == 4) {
		txd->flags |= PCIE_DESC_TX_IP4_CSUM;
		l4_hdr = iph->protocol;
	} else if (ipv6h->version == 6) {
		l4_hdr = ipv6h->nexthdr;
	} else {
		nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
		return;
	}

	switch (l4_hdr) {
	case IPPROTO_TCP:
		txd->flags |= PCIE_DESC_TX_TCP_CSUM;
		break;
	case IPPROTO_UDP:
		txd->flags |= PCIE_DESC_TX_UDP_CSUM;
		break;
	default:
		nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
		return;
	}

	u64_stats_update_begin(&r_vec->tx_sync);
	if (skb->encapsulation)
		r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
	else
		r_vec->hw_csum_tx += txbuf->pkt_cnt;
	u64_stats_update_end(&r_vec->tx_sync);
}

static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
	wmb();
	nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
	tx_ring->wr_ptr_add = 0;
}

static int nfp_net_prep_port_id(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	unsigned char *data;

	if (likely(!md_dst))
		return 0;
	if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
		return 0;

	if (unlikely(skb_cow_head(skb, 8)))
		return -ENOMEM;

	data = skb_push(skb, 8);
	put_unaligned_be32(NFP_NET_META_PORTID, data);
	put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);

	return 8;
}

/**
 * nfp_net_tx() - Main transmit entry point
 * @skb:    SKB to transmit
 * @netdev: netdev structure
 *
 * Return: NETDEV_TX_OK on success.
 */
static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	const struct skb_frag_struct *frag;
	struct nfp_net_tx_desc *txd, txdg;
	int f, nr_frags, wr_idx, md_bytes;
	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_r_vector *r_vec;
	struct nfp_net_tx_buf *txbuf;
	struct netdev_queue *nd_q;
	struct nfp_net_dp *dp;
	dma_addr_t dma_addr;
	unsigned int fsize;
	u16 qidx;

	dp = &nn->dp;
	qidx = skb_get_queue_mapping(skb);
	tx_ring = &dp->tx_rings[qidx];
	r_vec = tx_ring->r_vec;
	nd_q = netdev_get_tx_queue(dp->netdev, qidx);

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
		nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
			   qidx, tx_ring->wr_p, tx_ring->rd_p);
		netif_tx_stop_queue(nd_q);
		nfp_net_tx_xmit_more_flush(tx_ring);
		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_busy++;
		u64_stats_update_end(&r_vec->tx_sync);
		return NETDEV_TX_BUSY;
	}

	md_bytes = nfp_net_prep_port_id(skb);
	if (unlikely(md_bytes < 0)) {
		nfp_net_tx_xmit_more_flush(tx_ring);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Start with the head skbuf */
	dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dp->dev, dma_addr))
		goto err_free;

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

	/* Stash the soft descriptor of the head then initialize it */
	txbuf = &tx_ring->txbufs[wr_idx];
	txbuf->skb = skb;
	txbuf->dma_addr = dma_addr;
	txbuf->fidx = -1;
	txbuf->pkt_cnt = 1;
	txbuf->real_len = skb->len;

	/* Build TX descriptor */
	txd = &tx_ring->txds[wr_idx];
	txd->offset_eop = (nr_frags ?
			   0 : PCIE_DESC_TX_EOP) | md_bytes;
	txd->dma_len = cpu_to_le16(skb_headlen(skb));
	nfp_desc_set_dma_addr(txd, dma_addr);
	txd->data_len = cpu_to_le16(skb->len);

	txd->flags = 0;
	txd->mss = 0;
	txd->lso_hdrlen = 0;

	/* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
	nfp_net_tx_tso(r_vec, txbuf, txd, skb);
	nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
	if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
		txd->flags |= PCIE_DESC_TX_VLAN;
		txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	}

	/* Gather DMA */
	if (nr_frags > 0) {
		/* all descs must match except for in addr, length and eop */
		txdg = *txd;

		for (f = 0; f < nr_frags; f++) {
			frag = &skb_shinfo(skb)->frags[f];
			fsize = skb_frag_size(frag);

			dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
						    fsize, DMA_TO_DEVICE);
			if (dma_mapping_error(dp->dev, dma_addr))
				goto err_unmap;

			wr_idx = D_IDX(tx_ring, wr_idx + 1);
			tx_ring->txbufs[wr_idx].skb = skb;
			tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
			tx_ring->txbufs[wr_idx].fidx = f;

			txd = &tx_ring->txds[wr_idx];
			*txd = txdg;
			txd->dma_len = cpu_to_le16(fsize);
			nfp_desc_set_dma_addr(txd, dma_addr);
			txd->offset_eop |=
				(f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
		}

		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_gather++;
		u64_stats_update_end(&r_vec->tx_sync);
	}

	netdev_tx_sent_queue(nd_q, txbuf->real_len);

	skb_tx_timestamp(skb);

	tx_ring->wr_p += nr_frags + 1;
	if (nfp_net_tx_ring_should_stop(tx_ring))
		nfp_net_tx_ring_stop(nd_q, tx_ring);

	tx_ring->wr_ptr_add += nr_frags + 1;
	if (!skb->xmit_more || netif_xmit_stopped(nd_q))
		nfp_net_tx_xmit_more_flush(tx_ring);

	return NETDEV_TX_OK;

err_unmap:
	while (--f >= 0) {
		frag = &skb_shinfo(skb)->frags[f];
		dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
			       skb_frag_size(frag), DMA_TO_DEVICE);
		tx_ring->txbufs[wr_idx].skb = NULL;
		tx_ring->txbufs[wr_idx].dma_addr = 0;
		tx_ring->txbufs[wr_idx].fidx = -2;
		wr_idx = wr_idx - 1;
		if (wr_idx < 0)
			wr_idx += tx_ring->cnt;
	}
	dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);
	tx_ring->txbufs[wr_idx].skb = NULL;
	tx_ring->txbufs[wr_idx].dma_addr = 0;
	tx_ring->txbufs[wr_idx].fidx = -2;
err_free:
	nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
	nfp_net_tx_xmit_more_flush(tx_ring);
	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_errors++;
	u64_stats_update_end(&r_vec->tx_sync);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * nfp_net_tx_complete() - Handle completed TX packets
 * @tx_ring: TX ring structure
 * @budget:  NAPI budget (only used as bool to determine if in NAPI context)
 */
static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	const struct skb_frag_struct *frag;
	struct netdev_queue *nd_q;
	u32 done_pkts = 0, done_bytes = 0;
	struct sk_buff *skb;
	int todo, nr_frags;
	u32 qcp_rd_p;
	int fidx;
	int idx;

	if (tx_ring->wr_p == tx_ring->rd_p)
		return;

	/* Work out how many descriptors have been transmitted */
	qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return;
	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

	while (todo--) {
		idx = D_IDX(tx_ring, tx_ring->rd_p++);

		skb = tx_ring->txbufs[idx].skb;
		if (!skb)
			continue;

		nr_frags = skb_shinfo(skb)->nr_frags;
		fidx = tx_ring->txbufs[idx].fidx;

		if (fidx == -1) {
			/* unmap head */
			dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
					 skb_headlen(skb), DMA_TO_DEVICE);

			done_pkts += tx_ring->txbufs[idx].pkt_cnt;
			done_bytes += tx_ring->txbufs[idx].real_len;
		} else {
			/* unmap fragment */
			frag = &skb_shinfo(skb)->frags[fidx];
			dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}

		/* check for last gather fragment */
		if (fidx == nr_frags - 1)
			napi_consume_skb(skb, budget);

		tx_ring->txbufs[idx].dma_addr = 0;
		tx_ring->txbufs[idx].skb = NULL;
		tx_ring->txbufs[idx].fidx = -2;
	}

	tx_ring->qcp_rd_p = qcp_rd_p;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	if (!dp->netdev)
		return;

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
	netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
	if (nfp_net_tx_ring_should_wake(tx_ring)) {
		/* Make sure TX thread will see updated tx_ring->rd_p */
		smp_mb();

		if (unlikely(netif_tx_queue_stopped(nd_q)))
			netif_tx_wake_queue(nd_q);
	}

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}

static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	u32 done_pkts = 0, done_bytes = 0;
	bool done_all;
	int idx, todo;
	u32 qcp_rd_p;

	/* Work out how many descriptors have been transmitted */
	qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return true;

	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

	done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
	todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);

	tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);

	done_pkts = todo;
	while (todo--) {
		idx = D_IDX(tx_ring, tx_ring->rd_p);
		tx_ring->rd_p++;

		done_bytes += tx_ring->txbufs[idx].real_len;
	}

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);

	return done_all;
}
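/* Worked example (illustrative): the completion count is the delta between
 * the fresh and stale read-pointer copies, reduced modulo the ring size.
 * With cnt == 256, a stale tx_ring->qcp_rd_p of 250 and a fresh qcp_rd_p
 * of 4, D_IDX(tx_ring, 4 - 250) == ((u32)-246 & 255) == 10 descriptors to
 * complete on this pass.
 */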
/**
 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
 * @dp:      NFP Net data path struct
 * @tx_ring: TX ring structure
 *
 * Assumes that the device is stopped, must be idempotent.
 */
static void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	const struct skb_frag_struct *frag;
	struct netdev_queue *nd_q;

	while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
		struct nfp_net_tx_buf *tx_buf;
		struct sk_buff *skb;
		int idx, nr_frags;

		idx = D_IDX(tx_ring, tx_ring->rd_p);
		tx_buf = &tx_ring->txbufs[idx];

		skb = tx_ring->txbufs[idx].skb;
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (tx_buf->fidx == -1) {
			/* unmap head */
			dma_unmap_single(dp->dev, tx_buf->dma_addr,
					 skb_headlen(skb), DMA_TO_DEVICE);
		} else {
			/* unmap fragment */
			frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
			dma_unmap_page(dp->dev, tx_buf->dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}

		/* check for last gather fragment */
		if (tx_buf->fidx == nr_frags - 1)
			dev_kfree_skb_any(skb);

		tx_buf->dma_addr = 0;
		tx_buf->skb = NULL;
		tx_buf->fidx = -2;

		tx_ring->qcp_rd_p++;
		tx_ring->rd_p++;
	}

	memset(tx_ring->txds, 0, tx_ring->size);
	tx_ring->wr_p = 0;
	tx_ring->rd_p = 0;
	tx_ring->qcp_rd_p = 0;
	tx_ring->wr_ptr_add = 0;

	if (tx_ring->is_xdp || !dp->netdev)
		return;

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
	netdev_tx_reset_queue(nd_q);
}

static void nfp_net_tx_timeout(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
			continue;
		nn_warn(nn, "TX timeout on ring: %d\n", i);
	}
	nn_warn(nn, "TX watchdog timeout\n");
}

/* Receive processing
 */
static unsigned int
nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
{
	unsigned int fl_bufsz;

	fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
	fl_bufsz += dp->rx_dma_off;
	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		fl_bufsz += NFP_NET_MAX_PREPEND;
	else
		fl_bufsz += dp->rx_offset;
	fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;

	fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
	fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return fl_bufsz;
}
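/* Worked example (illustrative): for a 1500 byte MTU with
 * rx_offset == 0 (dynamic prepend) and rx_dma_off == 0, the buffer must
 * cover NFP_NET_RX_BUF_HEADROOM + NFP_NET_MAX_PREPEND + ETH_HLEN +
 * 2 * VLAN_HLEN + 1500 bytes; SKB_DATA_ALIGN() then rounds that up and the
 * aligned size of struct skb_shared_info is added on top, so the frag can
 * later be handed to build_skb() without copying.
 */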
static void
nfp_net_free_frag(void *frag, bool xdp)
{
	if (!xdp)
		skb_free_frag(frag);
	else
		__free_page(virt_to_page(frag));
}

/**
 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
 * @dp:       NFP Net data path struct
 * @dma_addr: Pointer to storage for DMA address (output param)
 *
 * This function will allocate a new page frag and map it for DMA.
 *
 * Return: allocated page frag or NULL on failure.
 */
static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

	if (!dp->xdp_prog) {
		frag = netdev_alloc_frag(dp->fl_bufsz);
	} else {
		struct page *page;

		page = alloc_page(GFP_KERNEL);
		frag = page ? page_address(page) : NULL;
	}
	if (!frag) {
		nn_dp_warn(dp, "Failed to alloc receive page frag\n");
		return NULL;
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}

static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

	if (!dp->xdp_prog) {
		frag = napi_alloc_frag(dp->fl_bufsz);
		if (unlikely(!frag))
			return NULL;
	} else {
		struct page *page;

		page = dev_alloc_page();
		if (unlikely(!page))
			return NULL;
		frag = page_address(page);
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}

/**
 * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
 * @dp:       NFP Net data path struct
 * @rx_ring:  RX ring structure
 * @frag:     page fragment buffer
 * @dma_addr: DMA address of skb mapping
 */
static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
				struct nfp_net_rx_ring *rx_ring,
				void *frag, dma_addr_t dma_addr)
{
	unsigned int wr_idx;

	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);

	nfp_net_dma_sync_dev_rx(dp, dma_addr);

	/* Stash SKB and DMA address away */
	rx_ring->rxbufs[wr_idx].frag = frag;
	rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;

	/* Fill freelist descriptor */
	rx_ring->rxds[wr_idx].fld.reserved = 0;
	rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
	nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
			      dma_addr + dp->rx_dma_off);

	rx_ring->wr_p++;
	if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
		/* Update write pointer of the freelist queue.  Make
		 * sure all writes are flushed before telling the hardware.
		 */
		wmb();
		nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
	}
}

/**
 * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
 * @rx_ring: RX ring structure
 *
 * Assumes that the device is stopped, must be idempotent.
 */
static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
	unsigned int wr_idx, last_idx;

	/* wr_p == rd_p means ring was never fed FL bufs.  RX rings are always
	 * kept at cnt - 1 FL bufs.
	 */
	if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
		return;

	/* Move the empty entry to the end of the list */
	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
	last_idx = rx_ring->cnt - 1;
	rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
	rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
	rx_ring->rxbufs[last_idx].dma_addr = 0;
	rx_ring->rxbufs[last_idx].frag = NULL;

	memset(rx_ring->rxds, 0, rx_ring->size);
	rx_ring->wr_p = 0;
	rx_ring->rd_p = 0;
}
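/* Note (illustrative reasoning, not from the original comments): keeping the
 * freelist at cnt - 1 buffers rather than cnt likely lets a completely full
 * ring (wr_p - rd_p == cnt - 1) be told apart from an empty one
 * (wr_p == rd_p) without a separate occupancy count;
 * nfp_net_rx_ring_reset() above preserves that invariant by moving the one
 * empty slot back to the end of the buffer array.
 */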
After device is disabled nfp_net_rx_ring_reset() must be called 1331 * to restore required ring geometry. 1332 */ 1333 static void 1334 nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp, 1335 struct nfp_net_rx_ring *rx_ring) 1336 { 1337 unsigned int i; 1338 1339 for (i = 0; i < rx_ring->cnt - 1; i++) { 1340 /* NULL skb can only happen when initial filling of the ring 1341 * fails to allocate enough buffers and calls here to free 1342 * already allocated ones. 1343 */ 1344 if (!rx_ring->rxbufs[i].frag) 1345 continue; 1346 1347 nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr); 1348 nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog); 1349 rx_ring->rxbufs[i].dma_addr = 0; 1350 rx_ring->rxbufs[i].frag = NULL; 1351 } 1352 } 1353 1354 /** 1355 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW) 1356 * @dp: NFP Net data path struct 1357 * @rx_ring: RX ring to remove buffers from 1358 */ 1359 static int 1360 nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp, 1361 struct nfp_net_rx_ring *rx_ring) 1362 { 1363 struct nfp_net_rx_buf *rxbufs; 1364 unsigned int i; 1365 1366 rxbufs = rx_ring->rxbufs; 1367 1368 for (i = 0; i < rx_ring->cnt - 1; i++) { 1369 rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr); 1370 if (!rxbufs[i].frag) { 1371 nfp_net_rx_ring_bufs_free(dp, rx_ring); 1372 return -ENOMEM; 1373 } 1374 } 1375 1376 return 0; 1377 } 1378 1379 /** 1380 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW 1381 * @dp: NFP Net data path struct 1382 * @rx_ring: RX ring to fill 1383 */ 1384 static void 1385 nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp, 1386 struct nfp_net_rx_ring *rx_ring) 1387 { 1388 unsigned int i; 1389 1390 for (i = 0; i < rx_ring->cnt - 1; i++) 1391 nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag, 1392 rx_ring->rxbufs[i].dma_addr); 1393 } 1394 1395 /** 1396 * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors 1397 * @flags: RX descriptor flags field in CPU byte order 1398 */ 1399 static int nfp_net_rx_csum_has_errors(u16 flags) 1400 { 1401 u16 csum_all_checked, csum_all_ok; 1402 1403 csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL; 1404 csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK; 1405 1406 return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT); 1407 } 1408 1409 /** 1410 * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags 1411 * @dp: NFP Net data path struct 1412 * @r_vec: per-ring structure 1413 * @rxd: Pointer to RX descriptor 1414 * @meta: Parsed metadata prepend 1415 * @skb: Pointer to SKB 1416 */ 1417 static void nfp_net_rx_csum(struct nfp_net_dp *dp, 1418 struct nfp_net_r_vector *r_vec, 1419 struct nfp_net_rx_desc *rxd, 1420 struct nfp_meta_parsed *meta, struct sk_buff *skb) 1421 { 1422 skb_checksum_none_assert(skb); 1423 1424 if (!(dp->netdev->features & NETIF_F_RXCSUM)) 1425 return; 1426 1427 if (meta->csum_type) { 1428 skb->ip_summed = meta->csum_type; 1429 skb->csum = meta->csum; 1430 u64_stats_update_begin(&r_vec->rx_sync); 1431 r_vec->hw_csum_rx_complete++; 1432 u64_stats_update_end(&r_vec->rx_sync); 1433 return; 1434 } 1435 1436 if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) { 1437 u64_stats_update_begin(&r_vec->rx_sync); 1438 r_vec->hw_csum_rx_error++; 1439 u64_stats_update_end(&r_vec->rx_sync); 1440 return; 1441 } 1442 1443 /* Assume that the firmware will never report inner CSUM_OK unless outer 1444 * L4 headers were successfully parsed. FW will always report zero UDP 1445 * checksum as CSUM_OK. 
	 */
	if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
	}

	if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_inner_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
	}
}

static void
nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
		 unsigned int type, __be32 *hash)
{
	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (type) {
	case NFP_NET_RSS_IPV4:
	case NFP_NET_RSS_IPV6:
	case NFP_NET_RSS_IPV6_EX:
		meta->hash_type = PKT_HASH_TYPE_L3;
		break;
	default:
		meta->hash_type = PKT_HASH_TYPE_L4;
		break;
	}

	meta->hash = get_unaligned_be32(hash);
}

static void
nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
		      void *data, struct nfp_net_rx_desc *rxd)
{
	struct nfp_net_rx_hash *rx_hash = data;

	if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
		return;

	nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
			 &rx_hash->hash);
}

static void *
nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
		   void *data, int meta_len)
{
	u32 meta_info;

	meta_info = get_unaligned_be32(data);
	data += 4;

	while (meta_info) {
		switch (meta_info & NFP_NET_META_FIELD_MASK) {
		case NFP_NET_META_HASH:
			meta_info >>= NFP_NET_META_FIELD_SIZE;
			nfp_net_set_hash(netdev, meta,
					 meta_info & NFP_NET_META_FIELD_MASK,
					 (__be32 *)data);
			data += 4;
			break;
		case NFP_NET_META_MARK:
			meta->mark = get_unaligned_be32(data);
			data += 4;
			break;
		case NFP_NET_META_PORTID:
			meta->portid = get_unaligned_be32(data);
			data += 4;
			break;
		case NFP_NET_META_CSUM:
			meta->csum_type = CHECKSUM_COMPLETE;
			meta->csum =
				(__force __wsum)__get_unaligned_cpu32(data);
			data += 4;
			break;
		default:
			return NULL;
		}

		meta_info >>= NFP_NET_META_FIELD_SIZE;
	}

	return data;
}
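/* Worked example (illustrative): the first 32-bit word of the chained
 * metadata is consumed four bits at a time, LSB first.  A meta_info of
 * 0x231 decodes as: tag 0x1 (NFP_NET_META_HASH), which additionally
 * consumes the next nibble (0x3) as the RSS hash type, then tag 0x2
 * (NFP_NET_META_MARK); each parsed field also advances data by 4 bytes.
 */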
static void
nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
		struct sk_buff *skb)
{
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_drops++;
	/* If we have both skb and rxbuf the replacement buffer allocation
	 * must have failed, count this as an alloc failure.
	 */
	if (skb && rxbuf)
		r_vec->rx_replace_buf_alloc_fail++;
	u64_stats_update_end(&r_vec->rx_sync);

	/* skb is built based on the frag, freeing the skb would free the frag
	 * so to be able to reuse it we need an extra ref.
	 */
	if (skb && rxbuf && skb->head == rxbuf->frag)
		page_ref_inc(virt_to_head_page(rxbuf->frag));
	if (rxbuf)
		nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
	if (skb)
		dev_kfree_skb_any(skb);
}

static bool
nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
		   struct nfp_net_tx_ring *tx_ring,
		   struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
		   unsigned int pkt_len, bool *completed)
{
	struct nfp_net_tx_buf *txbuf;
	struct nfp_net_tx_desc *txd;
	int wr_idx;

	if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
		if (!*completed) {
			nfp_net_xdp_complete(tx_ring);
			*completed = true;
		}

		if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
			nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
					NULL);
			return false;
		}
	}

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

	/* Stash the soft descriptor of the head then initialize it */
	txbuf = &tx_ring->txbufs[wr_idx];

	nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);

	txbuf->frag = rxbuf->frag;
	txbuf->dma_addr = rxbuf->dma_addr;
	txbuf->fidx = -1;
	txbuf->pkt_cnt = 1;
	txbuf->real_len = pkt_len;

	dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
				   pkt_len, DMA_BIDIRECTIONAL);

	/* Build TX descriptor */
	txd = &tx_ring->txds[wr_idx];
	txd->offset_eop = PCIE_DESC_TX_EOP;
	txd->dma_len = cpu_to_le16(pkt_len);
	nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
	txd->data_len = cpu_to_le16(pkt_len);

	txd->flags = 0;
	txd->mss = 0;
	txd->lso_hdrlen = 0;

	tx_ring->wr_p++;
	tx_ring->wr_ptr_add++;
	return true;
}

/**
 * nfp_net_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring: RX ring to receive from
 * @budget:  NAPI budget
 *
 * Note, this function is separated out from the napi poll function to
 * more cleanly separate packet receive code from other bookkeeping
 * functions performed in the napi poll function.
 *
 * Return: Number of packets received.
 */
static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	struct nfp_net_tx_ring *tx_ring;
	struct bpf_prog *xdp_prog;
	bool xdp_tx_cmpl = false;
	unsigned int true_bufsz;
	struct sk_buff *skb;
	int pkts_polled = 0;
	struct xdp_buff xdp;
	int idx;

	rcu_read_lock();
	xdp_prog = READ_ONCE(dp->xdp_prog);
	true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
	xdp.rxq = &rx_ring->xdp_rxq;
	tx_ring = r_vec->xdp_ring;

	while (pkts_polled < budget) {
		unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
		struct nfp_net_rx_buf *rxbuf;
		struct nfp_net_rx_desc *rxd;
		struct nfp_meta_parsed meta;
		struct net_device *netdev;
		dma_addr_t new_dma_addr;
		u32 meta_len_xdp = 0;
		void *new_frag;

		idx = D_IDX(rx_ring, rx_ring->rd_p);

		rxd = &rx_ring->rxds[idx];
		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
			break;

		/* Memory barrier to ensure that we won't do other reads
		 * before the DD bit.
		 */
		dma_rmb();

		memset(&meta, 0, sizeof(meta));

		rx_ring->rd_p++;
		pkts_polled++;

		rxbuf = &rx_ring->rxbufs[idx];
		/*         < meta_len >
		 *  <-- [rx_offset] -->
		 *  ---------------------------------------------------------
		 * | [XX] |  metadata  |             packet           | XXXX |
		 *  ---------------------------------------------------------
		 *         <---------------- data_len --------------->
		 *
		 * The rx_offset is fixed for all packets, the meta_len can vary
		 * on a packet by packet basis.  If rx_offset is set to zero
		 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
		 * buffer and is immediately followed by the packet (no [XX]).
		 */
		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
		data_len = le16_to_cpu(rxd->rxd.data_len);
		pkt_len = data_len - meta_len;

		pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
		if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
			pkt_off += meta_len;
		else
			pkt_off += dp->rx_offset;
		meta_off = pkt_off - meta_len;

		/* Stats update */
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->rx_pkts++;
		r_vec->rx_bytes += pkt_len;
		u64_stats_update_end(&r_vec->rx_sync);

		if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
			     (dp->rx_offset && meta_len > dp->rx_offset))) {
			nn_dp_warn(dp, "oversized RX packet metadata %u\n",
				   meta_len);
			nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
			continue;
		}

		nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
					data_len);

		if (!dp->chained_metadata_format) {
			nfp_net_set_hash_desc(dp->netdev, &meta,
					      rxbuf->frag + meta_off, rxd);
		} else if (meta_len) {
			void *end;

			end = nfp_net_parse_meta(dp->netdev, &meta,
						 rxbuf->frag + meta_off,
						 meta_len);
			if (unlikely(end != rxbuf->frag + pkt_off)) {
				nn_dp_warn(dp, "invalid RX packet metadata\n");
				nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
						NULL);
				continue;
			}
		}

		if (xdp_prog && !meta.portid) {
			void *orig_data = rxbuf->frag + pkt_off;
			unsigned int dma_off;
			int act;

			xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
			xdp.data = orig_data;
			xdp.data_meta = orig_data;
			xdp.data_end = orig_data + pkt_len;

			act = bpf_prog_run_xdp(xdp_prog, &xdp);

			pkt_len = xdp.data_end - xdp.data;
			pkt_off += xdp.data - orig_data;

			switch (act) {
			case XDP_PASS:
				meta_len_xdp = xdp.data - xdp.data_meta;
				break;
			case XDP_TX:
				dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
				if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
								 tx_ring, rxbuf,
								 dma_off,
								 pkt_len,
								 &xdp_tx_cmpl)))
					trace_xdp_exception(dp->netdev,
							    xdp_prog, act);
				continue;
			default:
				bpf_warn_invalid_xdp_action(act);
				/* fall through */
			case XDP_ABORTED:
				trace_xdp_exception(dp->netdev, xdp_prog, act);
				/* fall through */
			case XDP_DROP:
				nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
						    rxbuf->dma_addr);
				continue;
			}
		}

		if (likely(!meta.portid)) {
			netdev = dp->netdev;
		} else if (meta.portid == NFP_META_PORT_ID_CTRL) {
			struct nfp_net *nn = netdev_priv(dp->netdev);

			nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
					    pkt_len);
			nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
					    rxbuf->dma_addr);
			continue;
		} else {
			struct nfp_net *nn;

			nn = netdev_priv(dp->netdev);
			netdev = nfp_app_repr_get(nn->app, meta.portid);
			if (unlikely(!netdev)) {
				nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
						NULL);
				continue;
			}
			nfp_repr_inc_rx_stats(netdev, pkt_len);
		}

		skb = build_skb(rxbuf->frag, true_bufsz);
		if (unlikely(!skb)) {
			nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
			continue;
		}
		new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
		if (unlikely(!new_frag)) {
			nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
			continue;
		}

		nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);

		nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);

		skb_reserve(skb, pkt_off);
		skb_put(skb, pkt_len);

		skb->mark = meta.mark;
		skb_set_hash(skb, meta.hash, meta.hash_type);

		skb_record_rx_queue(skb, rx_ring->idx);
		skb->protocol = eth_type_trans(skb, netdev);

		nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);

		if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(rxd->rxd.vlan));
		if (meta_len_xdp)
			skb_metadata_set(skb, meta_len_xdp);

		napi_gro_receive(&rx_ring->r_vec->napi, skb);
	}

	if (xdp_prog) {
		if (tx_ring->wr_ptr_add)
			nfp_net_tx_xmit_more_flush(tx_ring);
		else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
			 !xdp_tx_cmpl)
			if (!nfp_net_xdp_complete(tx_ring))
				pkts_polled = budget;
	}
	rcu_read_unlock();

	return pkts_polled;
}

/**
 * nfp_net_poll() - napi poll function
 * @napi:   NAPI structure
 * @budget: NAPI budget
 *
 * Return: number of packets polled.
 */
static int nfp_net_poll(struct napi_struct *napi, int budget)
{
	struct nfp_net_r_vector *r_vec =
		container_of(napi, struct nfp_net_r_vector, napi);
	unsigned int pkts_polled = 0;

	if (r_vec->tx_ring)
		nfp_net_tx_complete(r_vec->tx_ring, budget);
	if (r_vec->rx_ring)
		pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);

	if (pkts_polled < budget)
		if (napi_complete_done(napi, pkts_polled))
			nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);

	return pkts_polled;
}

/* Control device data path
 */

static bool
nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
		struct sk_buff *skb, bool old)
{
	unsigned int real_len = skb->len, meta_len = 0;
	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_tx_buf *txbuf;
	struct nfp_net_tx_desc *txd;
	struct nfp_net_dp *dp;
	dma_addr_t dma_addr;
	int wr_idx;

	dp = &r_vec->nfp_net->dp;
	tx_ring = r_vec->tx_ring;

	if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
		nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
		goto err_free;
	}

	if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_busy++;
		u64_stats_update_end(&r_vec->tx_sync);
		if (!old)
			__skb_queue_tail(&r_vec->queue, skb);
		else
			__skb_queue_head(&r_vec->queue, skb);
		return true;
	}

	if (nfp_app_ctrl_has_meta(nn->app)) {
		if (unlikely(skb_headroom(skb) < 8)) {
			nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
			goto err_free;
		}
		meta_len = 8;
		put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
		put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
	}
1915 1916 /* Start with the head skbuf */ 1917 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb), 1918 DMA_TO_DEVICE); 1919 if (dma_mapping_error(dp->dev, dma_addr)) 1920 goto err_dma_warn; 1921 1922 wr_idx = D_IDX(tx_ring, tx_ring->wr_p); 1923 1924 /* Stash the soft descriptor of the head then initialize it */ 1925 txbuf = &tx_ring->txbufs[wr_idx]; 1926 txbuf->skb = skb; 1927 txbuf->dma_addr = dma_addr; 1928 txbuf->fidx = -1; 1929 txbuf->pkt_cnt = 1; 1930 txbuf->real_len = real_len; 1931 1932 /* Build TX descriptor */ 1933 txd = &tx_ring->txds[wr_idx]; 1934 txd->offset_eop = meta_len | PCIE_DESC_TX_EOP; 1935 txd->dma_len = cpu_to_le16(skb_headlen(skb)); 1936 nfp_desc_set_dma_addr(txd, dma_addr); 1937 txd->data_len = cpu_to_le16(skb->len); 1938 1939 txd->flags = 0; 1940 txd->mss = 0; 1941 txd->lso_hdrlen = 0; 1942 1943 tx_ring->wr_p++; 1944 tx_ring->wr_ptr_add++; 1945 nfp_net_tx_xmit_more_flush(tx_ring); 1946 1947 return false; 1948 1949 err_dma_warn: 1950 nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n"); 1951 err_free: 1952 u64_stats_update_begin(&r_vec->tx_sync); 1953 r_vec->tx_errors++; 1954 u64_stats_update_end(&r_vec->tx_sync); 1955 dev_kfree_skb_any(skb); 1956 return false; 1957 } 1958 1959 bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb) 1960 { 1961 struct nfp_net_r_vector *r_vec = &nn->r_vecs[0]; 1962 1963 return nfp_ctrl_tx_one(nn, r_vec, skb, false); 1964 } 1965 1966 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb) 1967 { 1968 struct nfp_net_r_vector *r_vec = &nn->r_vecs[0]; 1969 bool ret; 1970 1971 spin_lock_bh(&r_vec->lock); 1972 ret = nfp_ctrl_tx_one(nn, r_vec, skb, false); 1973 spin_unlock_bh(&r_vec->lock); 1974 1975 return ret; 1976 } 1977 1978 static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec) 1979 { 1980 struct sk_buff *skb; 1981 1982 while ((skb = __skb_dequeue(&r_vec->queue))) 1983 if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true)) 1984 return; 1985 } 1986 1987 static bool 1988 nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len) 1989 { 1990 u32 meta_type, meta_tag; 1991 1992 if (!nfp_app_ctrl_has_meta(nn->app)) 1993 return !meta_len; 1994 1995 if (meta_len != 8) 1996 return false; 1997 1998 meta_type = get_unaligned_be32(data); 1999 meta_tag = get_unaligned_be32(data + 4); 2000 2001 return (meta_type == NFP_NET_META_PORTID && 2002 meta_tag == NFP_META_PORT_ID_CTRL); 2003 } 2004 2005 static bool 2006 nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp, 2007 struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring) 2008 { 2009 unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off; 2010 struct nfp_net_rx_buf *rxbuf; 2011 struct nfp_net_rx_desc *rxd; 2012 dma_addr_t new_dma_addr; 2013 struct sk_buff *skb; 2014 void *new_frag; 2015 int idx; 2016 2017 idx = D_IDX(rx_ring, rx_ring->rd_p); 2018 2019 rxd = &rx_ring->rxds[idx]; 2020 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) 2021 return false; 2022 2023 /* Memory barrier to ensure that we won't do other reads 2024 * before the DD bit. 
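 * The descriptor is written to host memory by the device, so dma_rmb()
 * orders the DD bit load in the check above against the loads of the
 * remaining descriptor fields (meta_len_dd, data_len) that follow,
 * mirroring the barrier used in the data path handler nfp_net_rx().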
2025 */ 2026 dma_rmb(); 2027 2028 rx_ring->rd_p++; 2029 2030 rxbuf = &rx_ring->rxbufs[idx]; 2031 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK; 2032 data_len = le16_to_cpu(rxd->rxd.data_len); 2033 pkt_len = data_len - meta_len; 2034 2035 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off; 2036 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) 2037 pkt_off += meta_len; 2038 else 2039 pkt_off += dp->rx_offset; 2040 meta_off = pkt_off - meta_len; 2041 2042 /* Stats update */ 2043 u64_stats_update_begin(&r_vec->rx_sync); 2044 r_vec->rx_pkts++; 2045 r_vec->rx_bytes += pkt_len; 2046 u64_stats_update_end(&r_vec->rx_sync); 2047 2048 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len); 2049 2050 if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) { 2051 nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n", 2052 meta_len); 2053 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); 2054 return true; 2055 } 2056 2057 skb = build_skb(rxbuf->frag, dp->fl_bufsz); 2058 if (unlikely(!skb)) { 2059 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); 2060 return true; 2061 } 2062 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr); 2063 if (unlikely(!new_frag)) { 2064 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb); 2065 return true; 2066 } 2067 2068 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); 2069 2070 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); 2071 2072 skb_reserve(skb, pkt_off); 2073 skb_put(skb, pkt_len); 2074 2075 nfp_app_ctrl_rx(nn->app, skb); 2076 2077 return true; 2078 } 2079 2080 static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec) 2081 { 2082 struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring; 2083 struct nfp_net *nn = r_vec->nfp_net; 2084 struct nfp_net_dp *dp = &nn->dp; 2085 unsigned int budget = 512; 2086 2087 while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--) 2088 continue; 2089 2090 return budget; 2091 } 2092 2093 static void nfp_ctrl_poll(unsigned long arg) 2094 { 2095 struct nfp_net_r_vector *r_vec = (void *)arg; 2096 2097 spin_lock(&r_vec->lock); 2098 nfp_net_tx_complete(r_vec->tx_ring, 0); 2099 __nfp_ctrl_tx_queued(r_vec); 2100 spin_unlock(&r_vec->lock); 2101 2102 if (nfp_ctrl_rx(r_vec)) { 2103 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); 2104 } else { 2105 tasklet_schedule(&r_vec->tasklet); 2106 nn_dp_warn(&r_vec->nfp_net->dp, 2107 "control message budget exceeded!\n"); 2108 } 2109 } 2110 2111 /* Setup and Configuration 2112 */ 2113 2114 /** 2115 * nfp_net_vecs_init() - Assign IRQs and setup rvecs. 
2116 * @nn: NFP Network structure 2117 */ 2118 static void nfp_net_vecs_init(struct nfp_net *nn) 2119 { 2120 struct nfp_net_r_vector *r_vec; 2121 int r; 2122 2123 nn->lsc_handler = nfp_net_irq_lsc; 2124 nn->exn_handler = nfp_net_irq_exn; 2125 2126 for (r = 0; r < nn->max_r_vecs; r++) { 2127 struct msix_entry *entry; 2128 2129 entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r]; 2130 2131 r_vec = &nn->r_vecs[r]; 2132 r_vec->nfp_net = nn; 2133 r_vec->irq_entry = entry->entry; 2134 r_vec->irq_vector = entry->vector; 2135 2136 if (nn->dp.netdev) { 2137 r_vec->handler = nfp_net_irq_rxtx; 2138 } else { 2139 r_vec->handler = nfp_ctrl_irq_rxtx; 2140 2141 __skb_queue_head_init(&r_vec->queue); 2142 spin_lock_init(&r_vec->lock); 2143 tasklet_init(&r_vec->tasklet, nfp_ctrl_poll, 2144 (unsigned long)r_vec); 2145 tasklet_disable(&r_vec->tasklet); 2146 } 2147 2148 cpumask_set_cpu(r, &r_vec->affinity_mask); 2149 } 2150 } 2151 2152 /** 2153 * nfp_net_tx_ring_free() - Free resources allocated to a TX ring 2154 * @tx_ring: TX ring to free 2155 */ 2156 static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) 2157 { 2158 struct nfp_net_r_vector *r_vec = tx_ring->r_vec; 2159 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; 2160 2161 kvfree(tx_ring->txbufs); 2162 2163 if (tx_ring->txds) 2164 dma_free_coherent(dp->dev, tx_ring->size, 2165 tx_ring->txds, tx_ring->dma); 2166 2167 tx_ring->cnt = 0; 2168 tx_ring->txbufs = NULL; 2169 tx_ring->txds = NULL; 2170 tx_ring->dma = 0; 2171 tx_ring->size = 0; 2172 } 2173 2174 /** 2175 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring 2176 * @dp: NFP Net data path struct 2177 * @tx_ring: TX Ring structure to allocate 2178 * 2179 * Return: 0 on success, negative errno otherwise. 2180 */ 2181 static int 2182 nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) 2183 { 2184 struct nfp_net_r_vector *r_vec = tx_ring->r_vec; 2185 2186 tx_ring->cnt = dp->txd_cnt; 2187 2188 tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); 2189 tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, 2190 &tx_ring->dma, 2191 GFP_KERNEL | __GFP_NOWARN); 2192 if (!tx_ring->txds) { 2193 netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", 2194 tx_ring->cnt); 2195 goto err_alloc; 2196 } 2197 2198 tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs), 2199 GFP_KERNEL); 2200 if (!tx_ring->txbufs) 2201 goto err_alloc; 2202 2203 if (!tx_ring->is_xdp && dp->netdev) 2204 netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask, 2205 tx_ring->idx); 2206 2207 return 0; 2208 2209 err_alloc: 2210 nfp_net_tx_ring_free(tx_ring); 2211 return -ENOMEM; 2212 } 2213 2214 static void 2215 nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp, 2216 struct nfp_net_tx_ring *tx_ring) 2217 { 2218 unsigned int i; 2219 2220 if (!tx_ring->is_xdp) 2221 return; 2222 2223 for (i = 0; i < tx_ring->cnt; i++) { 2224 if (!tx_ring->txbufs[i].frag) 2225 return; 2226 2227 nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr); 2228 __free_page(virt_to_page(tx_ring->txbufs[i].frag)); 2229 } 2230 } 2231 2232 static int 2233 nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp, 2234 struct nfp_net_tx_ring *tx_ring) 2235 { 2236 struct nfp_net_tx_buf *txbufs = tx_ring->txbufs; 2237 unsigned int i; 2238 2239 if (!tx_ring->is_xdp) 2240 return 0; 2241 2242 for (i = 0; i < tx_ring->cnt; i++) { 2243 txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr); 2244 if (!txbufs[i].frag) { 2245 
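/* Unwinding here after a partial allocation is safe:
 * nfp_net_tx_ring_bufs_free() walks the ring from index 0 and stops at
 * the first slot whose frag pointer is still NULL.
 */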
nfp_net_tx_ring_bufs_free(dp, tx_ring); 2246 return -ENOMEM; 2247 } 2248 } 2249 2250 return 0; 2251 } 2252 2253 static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) 2254 { 2255 unsigned int r; 2256 2257 dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings), 2258 GFP_KERNEL); 2259 if (!dp->tx_rings) 2260 return -ENOMEM; 2261 2262 for (r = 0; r < dp->num_tx_rings; r++) { 2263 int bias = 0; 2264 2265 if (r >= dp->num_stack_tx_rings) 2266 bias = dp->num_stack_tx_rings; 2267 2268 nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias], 2269 r, bias); 2270 2271 if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r])) 2272 goto err_free_prev; 2273 2274 if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r])) 2275 goto err_free_ring; 2276 } 2277 2278 return 0; 2279 2280 err_free_prev: 2281 while (r--) { 2282 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]); 2283 err_free_ring: 2284 nfp_net_tx_ring_free(&dp->tx_rings[r]); 2285 } 2286 kfree(dp->tx_rings); 2287 return -ENOMEM; 2288 } 2289 2290 static void nfp_net_tx_rings_free(struct nfp_net_dp *dp) 2291 { 2292 unsigned int r; 2293 2294 for (r = 0; r < dp->num_tx_rings; r++) { 2295 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]); 2296 nfp_net_tx_ring_free(&dp->tx_rings[r]); 2297 } 2298 2299 kfree(dp->tx_rings); 2300 } 2301 2302 /** 2303 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring 2304 * @rx_ring: RX ring to free 2305 */ 2306 static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) 2307 { 2308 struct nfp_net_r_vector *r_vec = rx_ring->r_vec; 2309 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; 2310 2311 if (dp->netdev) 2312 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 2313 kvfree(rx_ring->rxbufs); 2314 2315 if (rx_ring->rxds) 2316 dma_free_coherent(dp->dev, rx_ring->size, 2317 rx_ring->rxds, rx_ring->dma); 2318 2319 rx_ring->cnt = 0; 2320 rx_ring->rxbufs = NULL; 2321 rx_ring->rxds = NULL; 2322 rx_ring->dma = 0; 2323 rx_ring->size = 0; 2324 } 2325 2326 /** 2327 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring 2328 * @dp: NFP Net data path struct 2329 * @rx_ring: RX ring to allocate 2330 * 2331 * Return: 0 on success, negative errno otherwise. 
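 *
 * Note: the descriptor array is a coherent DMA allocation sized via
 * array_size(), while rxbufs is ordinary kernel memory from kvcalloc();
 * both are released together by nfp_net_rx_ring_free().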
2332 */ 2333 static int 2334 nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) 2335 { 2336 int err; 2337 2338 if (dp->netdev) { 2339 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, 2340 rx_ring->idx); 2341 if (err < 0) 2342 return err; 2343 } 2344 2345 rx_ring->cnt = dp->rxd_cnt; 2346 rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); 2347 rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, 2348 &rx_ring->dma, 2349 GFP_KERNEL | __GFP_NOWARN); 2350 if (!rx_ring->rxds) { 2351 netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", 2352 rx_ring->cnt); 2353 goto err_alloc; 2354 } 2355 2356 rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs), 2357 GFP_KERNEL); 2358 if (!rx_ring->rxbufs) 2359 goto err_alloc; 2360 2361 return 0; 2362 2363 err_alloc: 2364 nfp_net_rx_ring_free(rx_ring); 2365 return -ENOMEM; 2366 } 2367 2368 static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) 2369 { 2370 unsigned int r; 2371 2372 dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings), 2373 GFP_KERNEL); 2374 if (!dp->rx_rings) 2375 return -ENOMEM; 2376 2377 for (r = 0; r < dp->num_rx_rings; r++) { 2378 nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r); 2379 2380 if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r])) 2381 goto err_free_prev; 2382 2383 if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r])) 2384 goto err_free_ring; 2385 } 2386 2387 return 0; 2388 2389 err_free_prev: 2390 while (r--) { 2391 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]); 2392 err_free_ring: 2393 nfp_net_rx_ring_free(&dp->rx_rings[r]); 2394 } 2395 kfree(dp->rx_rings); 2396 return -ENOMEM; 2397 } 2398 2399 static void nfp_net_rx_rings_free(struct nfp_net_dp *dp) 2400 { 2401 unsigned int r; 2402 2403 for (r = 0; r < dp->num_rx_rings; r++) { 2404 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]); 2405 nfp_net_rx_ring_free(&dp->rx_rings[r]); 2406 } 2407 2408 kfree(dp->rx_rings); 2409 } 2410 2411 static void 2412 nfp_net_vector_assign_rings(struct nfp_net_dp *dp, 2413 struct nfp_net_r_vector *r_vec, int idx) 2414 { 2415 r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL; 2416 r_vec->tx_ring = 2417 idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL; 2418 2419 r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ? 
2420 &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL; 2421 } 2422 2423 static int 2424 nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, 2425 int idx) 2426 { 2427 int err; 2428 2429 /* Setup NAPI */ 2430 if (nn->dp.netdev) 2431 netif_napi_add(nn->dp.netdev, &r_vec->napi, 2432 nfp_net_poll, NAPI_POLL_WEIGHT); 2433 else 2434 tasklet_enable(&r_vec->tasklet); 2435 2436 snprintf(r_vec->name, sizeof(r_vec->name), 2437 "%s-rxtx-%d", nfp_net_name(nn), idx); 2438 err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name, 2439 r_vec); 2440 if (err) { 2441 if (nn->dp.netdev) 2442 netif_napi_del(&r_vec->napi); 2443 else 2444 tasklet_disable(&r_vec->tasklet); 2445 2446 nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector); 2447 return err; 2448 } 2449 disable_irq(r_vec->irq_vector); 2450 2451 irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask); 2452 2453 nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector, 2454 r_vec->irq_entry); 2455 2456 return 0; 2457 } 2458 2459 static void 2460 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) 2461 { 2462 irq_set_affinity_hint(r_vec->irq_vector, NULL); 2463 if (nn->dp.netdev) 2464 netif_napi_del(&r_vec->napi); 2465 else 2466 tasklet_disable(&r_vec->tasklet); 2467 2468 free_irq(r_vec->irq_vector, r_vec); 2469 } 2470 2471 /** 2472 * nfp_net_rss_write_itbl() - Write RSS indirection table to device 2473 * @nn: NFP Net device to reconfigure 2474 */ 2475 void nfp_net_rss_write_itbl(struct nfp_net *nn) 2476 { 2477 int i; 2478 2479 for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4) 2480 nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i, 2481 get_unaligned_le32(nn->rss_itbl + i)); 2482 } 2483 2484 /** 2485 * nfp_net_rss_write_key() - Write RSS hash key to device 2486 * @nn: NFP Net device to reconfigure 2487 */ 2488 void nfp_net_rss_write_key(struct nfp_net *nn) 2489 { 2490 int i; 2491 2492 for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4) 2493 nn_writel(nn, NFP_NET_CFG_RSS_KEY + i, 2494 get_unaligned_le32(nn->rss_key + i)); 2495 } 2496 2497 /** 2498 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW 2499 * @nn: NFP Net device to reconfigure 2500 */ 2501 void nfp_net_coalesce_write_cfg(struct nfp_net *nn) 2502 { 2503 u8 i; 2504 u32 factor; 2505 u32 value; 2506 2507 /* Compute factor used to convert coalesce '_usecs' parameters to 2508 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp 2509 * count. 2510 */ 2511 factor = nn->tlv_caps.me_freq_mhz / 16; 2512 2513 /* copy RX interrupt coalesce parameters */ 2514 value = (nn->rx_coalesce_max_frames << 16) | 2515 (factor * nn->rx_coalesce_usecs); 2516 for (i = 0; i < nn->dp.num_rx_rings; i++) 2517 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value); 2518 2519 /* copy TX interrupt coalesce parameters */ 2520 value = (nn->tx_coalesce_max_frames << 16) | 2521 (factor * nn->tx_coalesce_usecs); 2522 for (i = 0; i < nn->dp.num_tx_rings; i++) 2523 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value); 2524 } 2525 2526 /** 2527 * nfp_net_write_mac_addr() - Write mac address to the device control BAR 2528 * @nn: NFP Net device to reconfigure 2529 * @addr: MAC address to write 2530 * 2531 * Writes the MAC address from the netdev to the device control BAR. Does not 2532 * perform the required reconfig. We do a bit of byte swapping dance because 2533 * firmware is LE. 
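 *
 * Example (illustrative): for MAC address 00:15:4d:ae:0f:01 the function
 * below issues writel(0x00154dae) at NFP_NET_CFG_MACADDR + 0 and
 * writew(0x0f01) at NFP_NET_CFG_MACADDR + 6, i.e. big-endian loads from
 * the address bytes stored into little-endian device registers.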
2534 */ 2535 static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr) 2536 { 2537 nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(addr)); 2538 nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4)); 2539 } 2540 2541 static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx) 2542 { 2543 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0); 2544 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0); 2545 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0); 2546 2547 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0); 2548 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0); 2549 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0); 2550 } 2551 2552 /** 2553 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP 2554 * @nn: NFP Net device to reconfigure 2555 * 2556 * Warning: must be fully idempotent. 2557 */ 2558 static void nfp_net_clear_config_and_disable(struct nfp_net *nn) 2559 { 2560 u32 new_ctrl, update; 2561 unsigned int r; 2562 int err; 2563 2564 new_ctrl = nn->dp.ctrl; 2565 new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE; 2566 update = NFP_NET_CFG_UPDATE_GEN; 2567 update |= NFP_NET_CFG_UPDATE_MSIX; 2568 update |= NFP_NET_CFG_UPDATE_RING; 2569 2570 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG) 2571 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG; 2572 2573 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0); 2574 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0); 2575 2576 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); 2577 err = nfp_net_reconfig(nn, update); 2578 if (err) 2579 nn_err(nn, "Could not disable device: %d\n", err); 2580 2581 for (r = 0; r < nn->dp.num_rx_rings; r++) 2582 nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]); 2583 for (r = 0; r < nn->dp.num_tx_rings; r++) 2584 nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]); 2585 for (r = 0; r < nn->dp.num_r_vecs; r++) 2586 nfp_net_vec_clear_ring_data(nn, r); 2587 2588 nn->dp.ctrl = new_ctrl; 2589 } 2590 2591 static void 2592 nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn, 2593 struct nfp_net_rx_ring *rx_ring, unsigned int idx) 2594 { 2595 /* Write the DMA address, size and MSI-X info to the device */ 2596 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma); 2597 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt)); 2598 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry); 2599 } 2600 2601 static void 2602 nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn, 2603 struct nfp_net_tx_ring *tx_ring, unsigned int idx) 2604 { 2605 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma); 2606 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt)); 2607 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry); 2608 } 2609 2610 /** 2611 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP 2612 * @nn: NFP Net device to reconfigure 2613 */ 2614 static int nfp_net_set_config_and_enable(struct nfp_net *nn) 2615 { 2616 u32 bufsz, new_ctrl, update = 0; 2617 unsigned int r; 2618 int err; 2619 2620 new_ctrl = nn->dp.ctrl; 2621 2622 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) { 2623 nfp_net_rss_write_key(nn); 2624 nfp_net_rss_write_itbl(nn); 2625 nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg); 2626 update |= NFP_NET_CFG_UPDATE_RSS; 2627 } 2628 2629 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) { 2630 nfp_net_coalesce_write_cfg(nn); 2631 update |= NFP_NET_CFG_UPDATE_IRQMOD; 2632 } 2633 2634 for (r = 0; r < nn->dp.num_tx_rings; r++) 2635 nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r); 2636 for (r = 0; r < nn->dp.num_rx_rings; r++) 2637 nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r); 2638 2639 nn_writeq(nn, 
NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ? 2640 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1); 2641 2642 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ? 2643 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1); 2644 2645 if (nn->dp.netdev) 2646 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr); 2647 2648 nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu); 2649 2650 bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA; 2651 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz); 2652 2653 /* Enable device */ 2654 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; 2655 update |= NFP_NET_CFG_UPDATE_GEN; 2656 update |= NFP_NET_CFG_UPDATE_MSIX; 2657 update |= NFP_NET_CFG_UPDATE_RING; 2658 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG) 2659 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; 2660 2661 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); 2662 err = nfp_net_reconfig(nn, update); 2663 if (err) { 2664 nfp_net_clear_config_and_disable(nn); 2665 return err; 2666 } 2667 2668 nn->dp.ctrl = new_ctrl; 2669 2670 for (r = 0; r < nn->dp.num_rx_rings; r++) 2671 nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]); 2672 2673 /* Since reconfiguration requests while NFP is down are ignored we 2674 * have to wipe the entire VXLAN configuration and reinitialize it. 2675 */ 2676 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) { 2677 memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); 2678 memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); 2679 udp_tunnel_get_rx_info(nn->dp.netdev); 2680 } 2681 2682 return 0; 2683 } 2684 2685 /** 2686 * nfp_net_close_stack() - Quiesce the stack (part of close) 2687 * @nn: NFP Net device to reconfigure 2688 */ 2689 static void nfp_net_close_stack(struct nfp_net *nn) 2690 { 2691 unsigned int r; 2692 2693 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); 2694 netif_carrier_off(nn->dp.netdev); 2695 nn->link_up = false; 2696 2697 for (r = 0; r < nn->dp.num_r_vecs; r++) { 2698 disable_irq(nn->r_vecs[r].irq_vector); 2699 napi_disable(&nn->r_vecs[r].napi); 2700 } 2701 2702 netif_tx_disable(nn->dp.netdev); 2703 } 2704 2705 /** 2706 * nfp_net_close_free_all() - Free all runtime resources 2707 * @nn: NFP Net device to reconfigure 2708 */ 2709 static void nfp_net_close_free_all(struct nfp_net *nn) 2710 { 2711 unsigned int r; 2712 2713 nfp_net_tx_rings_free(&nn->dp); 2714 nfp_net_rx_rings_free(&nn->dp); 2715 2716 for (r = 0; r < nn->dp.num_r_vecs; r++) 2717 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); 2718 2719 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); 2720 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); 2721 } 2722 2723 /** 2724 * nfp_net_netdev_close() - Called when the device is downed 2725 * @netdev: netdev structure 2726 */ 2727 static int nfp_net_netdev_close(struct net_device *netdev) 2728 { 2729 struct nfp_net *nn = netdev_priv(netdev); 2730 2731 /* Step 1: Disable RX and TX rings from the Linux kernel perspective 2732 */ 2733 nfp_net_close_stack(nn); 2734 2735 /* Step 2: Tell NFP 2736 */ 2737 nfp_net_clear_config_and_disable(nn); 2738 nfp_port_configure(netdev, false); 2739 2740 /* Step 3: Free resources 2741 */ 2742 nfp_net_close_free_all(nn); 2743 2744 nn_dbg(nn, "%s down", netdev->name); 2745 return 0; 2746 } 2747 2748 void nfp_ctrl_close(struct nfp_net *nn) 2749 { 2750 int r; 2751 2752 rtnl_lock(); 2753 2754 for (r = 0; r < nn->dp.num_r_vecs; r++) { 2755 disable_irq(nn->r_vecs[r].irq_vector); 2756 tasklet_disable(&nn->r_vecs[r].tasklet); 2757 } 2758 2759 nfp_net_clear_config_and_disable(nn); 2760 2761 
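	/* Same teardown order as nfp_net_netdev_close(): vector IRQs and
	 * tasklets were quiesced and the FW disabled above, so freeing the
	 * rings and vectors below cannot race with the ctrl datapath.
	 */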
nfp_net_close_free_all(nn); 2762 2763 rtnl_unlock(); 2764 } 2765 2766 /** 2767 * nfp_net_open_stack() - Start the device from stack's perspective 2768 * @nn: NFP Net device to reconfigure 2769 */ 2770 static void nfp_net_open_stack(struct nfp_net *nn) 2771 { 2772 unsigned int r; 2773 2774 for (r = 0; r < nn->dp.num_r_vecs; r++) { 2775 napi_enable(&nn->r_vecs[r].napi); 2776 enable_irq(nn->r_vecs[r].irq_vector); 2777 } 2778 2779 netif_tx_wake_all_queues(nn->dp.netdev); 2780 2781 enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); 2782 nfp_net_read_link_status(nn); 2783 } 2784 2785 static int nfp_net_open_alloc_all(struct nfp_net *nn) 2786 { 2787 int err, r; 2788 2789 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn", 2790 nn->exn_name, sizeof(nn->exn_name), 2791 NFP_NET_IRQ_EXN_IDX, nn->exn_handler); 2792 if (err) 2793 return err; 2794 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc", 2795 nn->lsc_name, sizeof(nn->lsc_name), 2796 NFP_NET_IRQ_LSC_IDX, nn->lsc_handler); 2797 if (err) 2798 goto err_free_exn; 2799 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); 2800 2801 for (r = 0; r < nn->dp.num_r_vecs; r++) { 2802 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); 2803 if (err) 2804 goto err_cleanup_vec_p; 2805 } 2806 2807 err = nfp_net_rx_rings_prepare(nn, &nn->dp); 2808 if (err) 2809 goto err_cleanup_vec; 2810 2811 err = nfp_net_tx_rings_prepare(nn, &nn->dp); 2812 if (err) 2813 goto err_free_rx_rings; 2814 2815 for (r = 0; r < nn->max_r_vecs; r++) 2816 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); 2817 2818 return 0; 2819 2820 err_free_rx_rings: 2821 nfp_net_rx_rings_free(&nn->dp); 2822 err_cleanup_vec: 2823 r = nn->dp.num_r_vecs; 2824 err_cleanup_vec_p: 2825 while (r--) 2826 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); 2827 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); 2828 err_free_exn: 2829 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); 2830 return err; 2831 } 2832 2833 static int nfp_net_netdev_open(struct net_device *netdev) 2834 { 2835 struct nfp_net *nn = netdev_priv(netdev); 2836 int err; 2837 2838 /* Step 1: Allocate resources for rings and the like 2839 * - Request interrupts 2840 * - Allocate RX and TX ring resources 2841 * - Setup initial RSS table 2842 */ 2843 err = nfp_net_open_alloc_all(nn); 2844 if (err) 2845 return err; 2846 2847 err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings); 2848 if (err) 2849 goto err_free_all; 2850 2851 err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings); 2852 if (err) 2853 goto err_free_all; 2854 2855 /* Step 2: Configure the NFP 2856 * - Ifup the physical interface if it exists 2857 * - Enable rings from 0 to tx_rings/rx_rings - 1. 
2858 * - Write MAC address (in case it changed) 2859 * - Set the MTU 2860 * - Set the Freelist buffer size 2861 * - Enable the FW 2862 */ 2863 err = nfp_port_configure(netdev, true); 2864 if (err) 2865 goto err_free_all; 2866 2867 err = nfp_net_set_config_and_enable(nn); 2868 if (err) 2869 goto err_port_disable; 2870 2871 /* Step 3: Enable for kernel 2872 * - put some freelist descriptors on each RX ring 2873 * - enable NAPI on each ring 2874 * - enable all TX queues 2875 * - set link state 2876 */ 2877 nfp_net_open_stack(nn); 2878 2879 return 0; 2880 2881 err_port_disable: 2882 nfp_port_configure(netdev, false); 2883 err_free_all: 2884 nfp_net_close_free_all(nn); 2885 return err; 2886 } 2887 2888 int nfp_ctrl_open(struct nfp_net *nn) 2889 { 2890 int err, r; 2891 2892 /* ring dumping depends on vNICs being opened/closed under rtnl */ 2893 rtnl_lock(); 2894 2895 err = nfp_net_open_alloc_all(nn); 2896 if (err) 2897 goto err_unlock; 2898 2899 err = nfp_net_set_config_and_enable(nn); 2900 if (err) 2901 goto err_free_all; 2902 2903 for (r = 0; r < nn->dp.num_r_vecs; r++) 2904 enable_irq(nn->r_vecs[r].irq_vector); 2905 2906 rtnl_unlock(); 2907 2908 return 0; 2909 2910 err_free_all: 2911 nfp_net_close_free_all(nn); 2912 err_unlock: 2913 rtnl_unlock(); 2914 return err; 2915 } 2916 2917 static void nfp_net_set_rx_mode(struct net_device *netdev) 2918 { 2919 struct nfp_net *nn = netdev_priv(netdev); 2920 u32 new_ctrl; 2921 2922 new_ctrl = nn->dp.ctrl; 2923 2924 if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI) 2925 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC; 2926 else 2927 new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC; 2928 2929 if (netdev->flags & IFF_PROMISC) { 2930 if (nn->cap & NFP_NET_CFG_CTRL_PROMISC) 2931 new_ctrl |= NFP_NET_CFG_CTRL_PROMISC; 2932 else 2933 nn_warn(nn, "FW does not support promiscuous mode\n"); 2934 } else { 2935 new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC; 2936 } 2937 2938 if (new_ctrl == nn->dp.ctrl) 2939 return; 2940 2941 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); 2942 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN); 2943 2944 nn->dp.ctrl = new_ctrl; 2945 } 2946 2947 static void nfp_net_rss_init_itbl(struct nfp_net *nn) 2948 { 2949 int i; 2950 2951 for (i = 0; i < sizeof(nn->rss_itbl); i++) 2952 nn->rss_itbl[i] = 2953 ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings); 2954 } 2955 2956 static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp) 2957 { 2958 struct nfp_net_dp new_dp = *dp; 2959 2960 *dp = nn->dp; 2961 nn->dp = new_dp; 2962 2963 nn->dp.netdev->mtu = new_dp.mtu; 2964 2965 if (!netif_is_rxfh_configured(nn->dp.netdev)) 2966 nfp_net_rss_init_itbl(nn); 2967 } 2968 2969 static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp) 2970 { 2971 unsigned int r; 2972 int err; 2973 2974 nfp_net_dp_swap(nn, dp); 2975 2976 for (r = 0; r < nn->max_r_vecs; r++) 2977 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); 2978 2979 err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings); 2980 if (err) 2981 return err; 2982 2983 if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) { 2984 err = netif_set_real_num_tx_queues(nn->dp.netdev, 2985 nn->dp.num_stack_tx_rings); 2986 if (err) 2987 return err; 2988 } 2989 2990 return nfp_net_set_config_and_enable(nn); 2991 } 2992 2993 struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn) 2994 { 2995 struct nfp_net_dp *new; 2996 2997 new = kmalloc(sizeof(*new), GFP_KERNEL); 2998 if (!new) 2999 return NULL; 3000 3001 *new = nn->dp; 3002 3003 /* Clear things which need to be 
recomputed */ 3004 new->fl_bufsz = 0; 3005 new->tx_rings = NULL; 3006 new->rx_rings = NULL; 3007 new->num_r_vecs = 0; 3008 new->num_stack_tx_rings = 0; 3009 3010 return new; 3011 } 3012 3013 static int 3014 nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp, 3015 struct netlink_ext_ack *extack) 3016 { 3017 /* XDP-enabled tests */ 3018 if (!dp->xdp_prog) 3019 return 0; 3020 if (dp->fl_bufsz > PAGE_SIZE) { 3021 NL_SET_ERR_MSG_MOD(extack, "MTU too large w/ XDP enabled"); 3022 return -EINVAL; 3023 } 3024 if (dp->num_tx_rings > nn->max_tx_rings) { 3025 NL_SET_ERR_MSG_MOD(extack, "Insufficient number of TX rings w/ XDP enabled"); 3026 return -EINVAL; 3027 } 3028 3029 return 0; 3030 } 3031 3032 int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp, 3033 struct netlink_ext_ack *extack) 3034 { 3035 int r, err; 3036 3037 dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp); 3038 3039 dp->num_stack_tx_rings = dp->num_tx_rings; 3040 if (dp->xdp_prog) 3041 dp->num_stack_tx_rings -= dp->num_rx_rings; 3042 3043 dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings); 3044 3045 err = nfp_net_check_config(nn, dp, extack); 3046 if (err) 3047 goto exit_free_dp; 3048 3049 if (!netif_running(dp->netdev)) { 3050 nfp_net_dp_swap(nn, dp); 3051 err = 0; 3052 goto exit_free_dp; 3053 } 3054 3055 /* Prepare new rings */ 3056 for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) { 3057 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); 3058 if (err) { 3059 dp->num_r_vecs = r; 3060 goto err_cleanup_vecs; 3061 } 3062 } 3063 3064 err = nfp_net_rx_rings_prepare(nn, dp); 3065 if (err) 3066 goto err_cleanup_vecs; 3067 3068 err = nfp_net_tx_rings_prepare(nn, dp); 3069 if (err) 3070 goto err_free_rx; 3071 3072 /* Stop device, swap in new rings, try to start the firmware */ 3073 nfp_net_close_stack(nn); 3074 nfp_net_clear_config_and_disable(nn); 3075 3076 err = nfp_net_dp_swap_enable(nn, dp); 3077 if (err) { 3078 int err2; 3079 3080 nfp_net_clear_config_and_disable(nn); 3081 3082 /* Try with old configuration and old rings */ 3083 err2 = nfp_net_dp_swap_enable(nn, dp); 3084 if (err2) 3085 nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n", 3086 err, err2); 3087 } 3088 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--) 3089 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); 3090 3091 nfp_net_rx_rings_free(dp); 3092 nfp_net_tx_rings_free(dp); 3093 3094 nfp_net_open_stack(nn); 3095 exit_free_dp: 3096 kfree(dp); 3097 3098 return err; 3099 3100 err_free_rx: 3101 nfp_net_rx_rings_free(dp); 3102 err_cleanup_vecs: 3103 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--) 3104 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); 3105 kfree(dp); 3106 return err; 3107 } 3108 3109 static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) 3110 { 3111 struct nfp_net *nn = netdev_priv(netdev); 3112 struct nfp_net_dp *dp; 3113 int err; 3114 3115 err = nfp_app_check_mtu(nn->app, netdev, new_mtu); 3116 if (err) 3117 return err; 3118 3119 dp = nfp_net_clone_dp(nn); 3120 if (!dp) 3121 return -ENOMEM; 3122 3123 dp->mtu = new_mtu; 3124 3125 return nfp_net_ring_reconfig(nn, dp, NULL); 3126 } 3127 3128 static int 3129 nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) 3130 { 3131 struct nfp_net *nn = netdev_priv(netdev); 3132 3133 /* Priority tagged packets with vlan id 0 are processed by the 3134 * NFP as untagged packets 3135 */ 3136 if (!vid) 3137 return 0; 3138 3139 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid); 3140 nn_writew(nn, 
nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO, 3141 ETH_P_8021Q); 3142 3143 return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD); 3144 } 3145 3146 static int 3147 nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) 3148 { 3149 struct nfp_net *nn = netdev_priv(netdev); 3150 3151 /* Priority tagged packets with vlan id 0 are processed by the 3152 * NFP as untagged packets 3153 */ 3154 if (!vid) 3155 return 0; 3156 3157 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid); 3158 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO, 3159 ETH_P_8021Q); 3160 3161 return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); 3162 } 3163 3164 static void nfp_net_stat64(struct net_device *netdev, 3165 struct rtnl_link_stats64 *stats) 3166 { 3167 struct nfp_net *nn = netdev_priv(netdev); 3168 int r; 3169 3170 /* Collect software stats */ 3171 for (r = 0; r < nn->max_r_vecs; r++) { 3172 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r]; 3173 u64 data[3]; 3174 unsigned int start; 3175 3176 do { 3177 start = u64_stats_fetch_begin(&r_vec->rx_sync); 3178 data[0] = r_vec->rx_pkts; 3179 data[1] = r_vec->rx_bytes; 3180 data[2] = r_vec->rx_drops; 3181 } while (u64_stats_fetch_retry(&r_vec->rx_sync, start)); 3182 stats->rx_packets += data[0]; 3183 stats->rx_bytes += data[1]; 3184 stats->rx_dropped += data[2]; 3185 3186 do { 3187 start = u64_stats_fetch_begin(&r_vec->tx_sync); 3188 data[0] = r_vec->tx_pkts; 3189 data[1] = r_vec->tx_bytes; 3190 data[2] = r_vec->tx_errors; 3191 } while (u64_stats_fetch_retry(&r_vec->tx_sync, start)); 3192 stats->tx_packets += data[0]; 3193 stats->tx_bytes += data[1]; 3194 stats->tx_errors += data[2]; 3195 } 3196 3197 /* Add in device stats */ 3198 stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES); 3199 stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS); 3200 stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS); 3201 3202 stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS); 3203 stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS); 3204 } 3205 3206 static int nfp_net_set_features(struct net_device *netdev, 3207 netdev_features_t features) 3208 { 3209 netdev_features_t changed = netdev->features ^ features; 3210 struct nfp_net *nn = netdev_priv(netdev); 3211 u32 new_ctrl; 3212 int err; 3213 3214 /* Assume this is not called with features we have not advertised */ 3215 3216 new_ctrl = nn->dp.ctrl; 3217 3218 if (changed & NETIF_F_RXCSUM) { 3219 if (features & NETIF_F_RXCSUM) 3220 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY; 3221 else 3222 new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM_ANY; 3223 } 3224 3225 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 3226 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) 3227 new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM; 3228 else 3229 new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM; 3230 } 3231 3232 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { 3233 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) 3234 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?: 3235 NFP_NET_CFG_CTRL_LSO; 3236 else 3237 new_ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY; 3238 } 3239 3240 if (changed & NETIF_F_HW_VLAN_CTAG_RX) { 3241 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3242 new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN; 3243 else 3244 new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN; 3245 } 3246 3247 if (changed & NETIF_F_HW_VLAN_CTAG_TX) { 3248 if (features & NETIF_F_HW_VLAN_CTAG_TX) 3249 new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN; 3250 else 3251 new_ctrl &= 
~NFP_NET_CFG_CTRL_TXVLAN; 3252 } 3253 3254 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { 3255 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) 3256 new_ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER; 3257 else 3258 new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER; 3259 } 3260 3261 if (changed & NETIF_F_SG) { 3262 if (features & NETIF_F_SG) 3263 new_ctrl |= NFP_NET_CFG_CTRL_GATHER; 3264 else 3265 new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER; 3266 } 3267 3268 err = nfp_port_set_features(netdev, features); 3269 if (err) 3270 return err; 3271 3272 nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n", 3273 netdev->features, features, changed); 3274 3275 if (new_ctrl == nn->dp.ctrl) 3276 return 0; 3277 3278 nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl); 3279 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); 3280 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); 3281 if (err) 3282 return err; 3283 3284 nn->dp.ctrl = new_ctrl; 3285 3286 return 0; 3287 } 3288 3289 static netdev_features_t 3290 nfp_net_features_check(struct sk_buff *skb, struct net_device *dev, 3291 netdev_features_t features) 3292 { 3293 u8 l4_hdr; 3294 3295 /* We can't do TSO over double tagged packets (802.1AD) */ 3296 features &= vlan_features_check(skb, features); 3297 3298 if (!skb->encapsulation) 3299 return features; 3300 3301 /* Ensure that inner L4 header offset fits into TX descriptor field */ 3302 if (skb_is_gso(skb)) { 3303 u32 hdrlen; 3304 3305 hdrlen = skb_inner_transport_header(skb) - skb->data + 3306 inner_tcp_hdrlen(skb); 3307 3308 if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ)) 3309 features &= ~NETIF_F_GSO_MASK; 3310 } 3311 3312 /* VXLAN/GRE check */ 3313 switch (vlan_get_protocol(skb)) { 3314 case htons(ETH_P_IP): 3315 l4_hdr = ip_hdr(skb)->protocol; 3316 break; 3317 case htons(ETH_P_IPV6): 3318 l4_hdr = ipv6_hdr(skb)->nexthdr; 3319 break; 3320 default: 3321 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3322 } 3323 3324 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || 3325 skb->inner_protocol != htons(ETH_P_TEB) || 3326 (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) || 3327 (l4_hdr == IPPROTO_UDP && 3328 (skb_inner_mac_header(skb) - skb_transport_header(skb) != 3329 sizeof(struct udphdr) + sizeof(struct vxlanhdr)))) 3330 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3331 3332 return features; 3333 } 3334 3335 static int 3336 nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len) 3337 { 3338 struct nfp_net *nn = netdev_priv(netdev); 3339 int n; 3340 3341 if (nn->port) 3342 return nfp_port_get_phys_port_name(netdev, name, len); 3343 3344 if (nn->dp.is_vf || nn->vnic_no_name) 3345 return -EOPNOTSUPP; 3346 3347 n = snprintf(name, len, "n%d", nn->id); 3348 if (n >= len) 3349 return -EINVAL; 3350 3351 return 0; 3352 } 3353 3354 /** 3355 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW 3356 * @nn: NFP Net device to reconfigure 3357 * @idx: Index into the port table where new port should be written 3358 * @port: UDP port to configure (pass zero to remove VXLAN port) 3359 */ 3360 static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port) 3361 { 3362 int i; 3363 3364 nn->vxlan_ports[idx] = port; 3365 3366 if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN)) 3367 return; 3368 3369 BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1); 3370 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) 3371 nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port), 3372 be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 | 3373 be16_to_cpu(nn->vxlan_ports[i])); 3374 3375 
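	/* Each 32-bit VXLAN port register holds a pair of ports: entry i in
	 * the low 16 bits and entry i + 1 in the high 16 bits, which is why
	 * the loop above steps by two and the BUILD_BUG_ON() insists the
	 * table size is even.
	 */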
nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN); 3376 } 3377 3378 /** 3379 * nfp_net_find_vxlan_idx() - find table entry of the port or a free one 3380 * @nn: NFP Network structure 3381 * @port: UDP port to look for 3382 * 3383 * Return: if the port is already in the table -- its position; 3384 * if the port is not in the table -- free position to use; 3385 * if the table is full -- -ENOSPC. 3386 */ 3387 static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port) 3388 { 3389 int i, free_idx = -ENOSPC; 3390 3391 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) { 3392 if (nn->vxlan_ports[i] == port) 3393 return i; 3394 if (!nn->vxlan_usecnt[i]) 3395 free_idx = i; 3396 } 3397 3398 return free_idx; 3399 } 3400 3401 static void nfp_net_add_vxlan_port(struct net_device *netdev, 3402 struct udp_tunnel_info *ti) 3403 { 3404 struct nfp_net *nn = netdev_priv(netdev); 3405 int idx; 3406 3407 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 3408 return; 3409 3410 idx = nfp_net_find_vxlan_idx(nn, ti->port); 3411 if (idx == -ENOSPC) 3412 return; 3413 3414 if (!nn->vxlan_usecnt[idx]++) 3415 nfp_net_set_vxlan_port(nn, idx, ti->port); 3416 } 3417 3418 static void nfp_net_del_vxlan_port(struct net_device *netdev, 3419 struct udp_tunnel_info *ti) 3420 { 3421 struct nfp_net *nn = netdev_priv(netdev); 3422 int idx; 3423 3424 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 3425 return; 3426 3427 idx = nfp_net_find_vxlan_idx(nn, ti->port); 3428 if (idx == -ENOSPC || !nn->vxlan_usecnt[idx]) 3429 return; 3430 3431 if (!--nn->vxlan_usecnt[idx]) 3432 nfp_net_set_vxlan_port(nn, idx, 0); 3433 } 3434 3435 static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf) 3436 { 3437 struct bpf_prog *prog = bpf->prog; 3438 struct nfp_net_dp *dp; 3439 int err; 3440 3441 if (!xdp_attachment_flags_ok(&nn->xdp, bpf)) 3442 return -EBUSY; 3443 3444 if (!prog == !nn->dp.xdp_prog) { 3445 WRITE_ONCE(nn->dp.xdp_prog, prog); 3446 xdp_attachment_setup(&nn->xdp, bpf); 3447 return 0; 3448 } 3449 3450 dp = nfp_net_clone_dp(nn); 3451 if (!dp) 3452 return -ENOMEM; 3453 3454 dp->xdp_prog = prog; 3455 dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings; 3456 dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; 3457 dp->rx_dma_off = prog ?
XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0; 3458 3459 /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */ 3460 err = nfp_net_ring_reconfig(nn, dp, bpf->extack); 3461 if (err) 3462 return err; 3463 3464 xdp_attachment_setup(&nn->xdp, bpf); 3465 return 0; 3466 } 3467 3468 static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf) 3469 { 3470 int err; 3471 3472 if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf)) 3473 return -EBUSY; 3474 3475 err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack); 3476 if (err) 3477 return err; 3478 3479 xdp_attachment_setup(&nn->xdp_hw, bpf); 3480 return 0; 3481 } 3482 3483 static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp) 3484 { 3485 struct nfp_net *nn = netdev_priv(netdev); 3486 3487 switch (xdp->command) { 3488 case XDP_SETUP_PROG: 3489 return nfp_net_xdp_setup_drv(nn, xdp); 3490 case XDP_SETUP_PROG_HW: 3491 return nfp_net_xdp_setup_hw(nn, xdp); 3492 case XDP_QUERY_PROG: 3493 return xdp_attachment_query(&nn->xdp, xdp); 3494 case XDP_QUERY_PROG_HW: 3495 return xdp_attachment_query(&nn->xdp_hw, xdp); 3496 default: 3497 return nfp_app_bpf(nn->app, nn, xdp); 3498 } 3499 } 3500 3501 static int nfp_net_set_mac_address(struct net_device *netdev, void *addr) 3502 { 3503 struct nfp_net *nn = netdev_priv(netdev); 3504 struct sockaddr *saddr = addr; 3505 int err; 3506 3507 err = eth_prepare_mac_addr_change(netdev, addr); 3508 if (err) 3509 return err; 3510 3511 nfp_net_write_mac_addr(nn, saddr->sa_data); 3512 3513 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MACADDR); 3514 if (err) 3515 return err; 3516 3517 eth_commit_mac_addr_change(netdev, addr); 3518 3519 return 0; 3520 } 3521 3522 const struct net_device_ops nfp_net_netdev_ops = { 3523 .ndo_init = nfp_app_ndo_init, 3524 .ndo_uninit = nfp_app_ndo_uninit, 3525 .ndo_open = nfp_net_netdev_open, 3526 .ndo_stop = nfp_net_netdev_close, 3527 .ndo_start_xmit = nfp_net_tx, 3528 .ndo_get_stats64 = nfp_net_stat64, 3529 .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, 3530 .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, 3531 .ndo_set_vf_mac = nfp_app_set_vf_mac, 3532 .ndo_set_vf_vlan = nfp_app_set_vf_vlan, 3533 .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, 3534 .ndo_get_vf_config = nfp_app_get_vf_config, 3535 .ndo_set_vf_link_state = nfp_app_set_vf_link_state, 3536 .ndo_setup_tc = nfp_port_setup_tc, 3537 .ndo_tx_timeout = nfp_net_tx_timeout, 3538 .ndo_set_rx_mode = nfp_net_set_rx_mode, 3539 .ndo_change_mtu = nfp_net_change_mtu, 3540 .ndo_set_mac_address = nfp_net_set_mac_address, 3541 .ndo_set_features = nfp_net_set_features, 3542 .ndo_features_check = nfp_net_features_check, 3543 .ndo_get_phys_port_name = nfp_net_get_phys_port_name, 3544 .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, 3545 .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, 3546 .ndo_bpf = nfp_net_xdp, 3547 }; 3548 3549 /** 3550 * nfp_net_info() - Print general info about the NIC 3551 * @nn: NFP Net device to print info about 3552 */ 3553 void nfp_net_info(struct nfp_net *nn) 3554 { 3555 nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n", 3556 nn->dp.is_vf ? "VF " : "", 3557 nn->dp.num_tx_rings, nn->max_tx_rings, 3558 nn->dp.num_rx_rings, nn->max_rx_rings); 3559 nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n", 3560 nn->fw_ver.resv, nn->fw_ver.class, 3561 nn->fw_ver.major, nn->fw_ver.minor, 3562 nn->max_mtu); 3563 nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", 3564 nn->cap, 3565 nn->cap & NFP_NET_CFG_CTRL_PROMISC ?
"PROMISC " : "", 3566 nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "", 3567 nn->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "", 3568 nn->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "", 3569 nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "", 3570 nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "", 3571 nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "", 3572 nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "", 3573 nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "", 3574 nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "", 3575 nn->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSO2 " : "", 3576 nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "", 3577 nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "", 3578 nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "", 3579 nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "", 3580 nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "", 3581 nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "", 3582 nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "", 3583 nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "", 3584 nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ? 3585 "RXCSUM_COMPLETE " : "", 3586 nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "", 3587 nfp_app_extra_cap(nn->app, nn)); 3588 } 3589 3590 /** 3591 * nfp_net_alloc() - Allocate netdev and related structure 3592 * @pdev: PCI device 3593 * @needs_netdev: Whether to allocate a netdev for this vNIC 3594 * @max_tx_rings: Maximum number of TX rings supported by device 3595 * @max_rx_rings: Maximum number of RX rings supported by device 3596 * 3597 * This function allocates a netdev device and fills in the initial 3598 * part of the @struct nfp_net structure. In the case of a control device 3599 * the nfp_net structure is allocated without a netdev. 3600 * 3601 * Return: NFP Net device structure, or ERR_PTR on error.
3602 */ 3603 struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, 3604 unsigned int max_tx_rings, 3605 unsigned int max_rx_rings) 3606 { 3607 struct nfp_net *nn; 3608 3609 if (needs_netdev) { 3610 struct net_device *netdev; 3611 3612 netdev = alloc_etherdev_mqs(sizeof(struct nfp_net), 3613 max_tx_rings, max_rx_rings); 3614 if (!netdev) 3615 return ERR_PTR(-ENOMEM); 3616 3617 SET_NETDEV_DEV(netdev, &pdev->dev); 3618 nn = netdev_priv(netdev); 3619 nn->dp.netdev = netdev; 3620 } else { 3621 nn = vzalloc(sizeof(*nn)); 3622 if (!nn) 3623 return ERR_PTR(-ENOMEM); 3624 } 3625 3626 nn->dp.dev = &pdev->dev; 3627 nn->pdev = pdev; 3628 3629 nn->max_tx_rings = max_tx_rings; 3630 nn->max_rx_rings = max_rx_rings; 3631 3632 nn->dp.num_tx_rings = min_t(unsigned int, 3633 max_tx_rings, num_online_cpus()); 3634 nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings, 3635 netif_get_num_default_rss_queues()); 3636 3637 nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings); 3638 nn->dp.num_r_vecs = min_t(unsigned int, 3639 nn->dp.num_r_vecs, num_online_cpus()); 3640 3641 nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT; 3642 nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; 3643 3644 spin_lock_init(&nn->reconfig_lock); 3645 spin_lock_init(&nn->link_status_lock); 3646 3647 timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0); 3648 3649 return nn; 3650 } 3651 3652 /** 3653 * nfp_net_free() - Undo what @nfp_net_alloc() did 3654 * @nn: NFP Net device to free 3655 */ 3656 void nfp_net_free(struct nfp_net *nn) 3657 { 3658 WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted); 3659 if (nn->dp.netdev) 3660 free_netdev(nn->dp.netdev); 3661 else 3662 vfree(nn); 3663 } 3664 3665 /** 3666 * nfp_net_rss_key_sz() - Get current size of the RSS key 3667 * @nn: NFP Net device instance 3668 * 3669 * Return: size of the RSS key for currently selected hash function.
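 *	 (Toeplitz uses the full NFP_NET_CFG_RSS_KEY_SZ bytes, CRC32 only a
 *	 4-byte seed, and XOR no key at all.)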
3670 */ 3671 unsigned int nfp_net_rss_key_sz(struct nfp_net *nn) 3672 { 3673 switch (nn->rss_hfunc) { 3674 case ETH_RSS_HASH_TOP: 3675 return NFP_NET_CFG_RSS_KEY_SZ; 3676 case ETH_RSS_HASH_XOR: 3677 return 0; 3678 case ETH_RSS_HASH_CRC32: 3679 return 4; 3680 } 3681 3682 nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc); 3683 return 0; 3684 } 3685 3686 /** 3687 * nfp_net_rss_init() - Set the initial RSS parameters 3688 * @nn: NFP Net device to reconfigure 3689 */ 3690 static void nfp_net_rss_init(struct nfp_net *nn) 3691 { 3692 unsigned long func_bit, rss_cap_hfunc; 3693 u32 reg; 3694 3695 /* Read the RSS function capability and select first supported func */ 3696 reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP); 3697 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg); 3698 if (!rss_cap_hfunc) 3699 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, 3700 NFP_NET_CFG_RSS_TOEPLITZ); 3701 3702 func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS); 3703 if (func_bit == NFP_NET_CFG_RSS_HFUNCS) { 3704 dev_warn(nn->dp.dev, 3705 "Bad RSS config, defaulting to Toeplitz hash\n"); 3706 func_bit = ETH_RSS_HASH_TOP_BIT; 3707 } 3708 nn->rss_hfunc = 1 << func_bit; 3709 3710 netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn)); 3711 3712 nfp_net_rss_init_itbl(nn); 3713 3714 /* Enable IPv4/IPv6 TCP by default */ 3715 nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP | 3716 NFP_NET_CFG_RSS_IPV6_TCP | 3717 FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) | 3718 NFP_NET_CFG_RSS_MASK; 3719 } 3720 3721 /** 3722 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters 3723 * @nn: NFP Net device to reconfigure 3724 */ 3725 static void nfp_net_irqmod_init(struct nfp_net *nn) 3726 { 3727 nn->rx_coalesce_usecs = 50; 3728 nn->rx_coalesce_max_frames = 64; 3729 nn->tx_coalesce_usecs = 50; 3730 nn->tx_coalesce_max_frames = 64; 3731 } 3732 3733 static void nfp_net_netdev_init(struct nfp_net *nn) 3734 { 3735 struct net_device *netdev = nn->dp.netdev; 3736 3737 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr); 3738 3739 netdev->mtu = nn->dp.mtu; 3740 3741 /* Advertise/enable offloads based on capabilities 3742 * 3743 * Note: netdev->features show the currently enabled features 3744 * and netdev->hw_features advertises which features are 3745 * supported. By default we enable most features. 
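 * TSO is the main exception: it is added to hw_features below when the
 * device supports it, but stripped from the enabled features again near
 * the end of this function.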
3746 */ 3747 if (nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) 3748 netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 3749 3750 netdev->hw_features = NETIF_F_HIGHDMA; 3751 if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) { 3752 netdev->hw_features |= NETIF_F_RXCSUM; 3753 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY; 3754 } 3755 if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) { 3756 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3757 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM; 3758 } 3759 if (nn->cap & NFP_NET_CFG_CTRL_GATHER) { 3760 netdev->hw_features |= NETIF_F_SG; 3761 nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER; 3762 } 3763 if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) || 3764 nn->cap & NFP_NET_CFG_CTRL_LSO2) { 3765 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 3766 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?: 3767 NFP_NET_CFG_CTRL_LSO; 3768 } 3769 if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) 3770 netdev->hw_features |= NETIF_F_RXHASH; 3771 if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) { 3772 if (nn->cap & NFP_NET_CFG_CTRL_LSO) 3773 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; 3774 nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN; 3775 } 3776 if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) { 3777 if (nn->cap & NFP_NET_CFG_CTRL_LSO) 3778 netdev->hw_features |= NETIF_F_GSO_GRE; 3779 nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE; 3780 } 3781 if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE)) 3782 netdev->hw_enc_features = netdev->hw_features; 3783 3784 netdev->vlan_features = netdev->hw_features; 3785 3786 if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) { 3787 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; 3788 nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN; 3789 } 3790 if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) { 3791 if (nn->cap & NFP_NET_CFG_CTRL_LSO2) { 3792 nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n"); 3793 } else { 3794 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; 3795 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN; 3796 } 3797 } 3798 if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) { 3799 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3800 nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER; 3801 } 3802 3803 netdev->features = netdev->hw_features; 3804 3805 if (nfp_app_has_tc(nn->app) && nn->port) 3806 netdev->hw_features |= NETIF_F_HW_TC; 3807 3808 /* Advertise but disable TSO by default. */ 3809 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); 3810 nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY; 3811 3812 /* Finalise the netdev setup */ 3813 netdev->netdev_ops = &nfp_net_netdev_ops; 3814 netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000); 3815 3816 SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops); 3817 3818 /* MTU range: 68 - hw-specific max */ 3819 netdev->min_mtu = ETH_MIN_MTU; 3820 netdev->max_mtu = nn->max_mtu; 3821 3822 netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; 3823 3824 netif_carrier_off(netdev); 3825 3826 nfp_net_set_ethtool_ops(netdev); 3827 } 3828 3829 static int nfp_net_read_caps(struct nfp_net *nn) 3830 { 3831 /* Get some of the read-only fields from the BAR */ 3832 nn->cap = nn_readl(nn, NFP_NET_CFG_CAP); 3833 nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU); 3834 3835 /* ABI 4.x and ctrl vNIC always use chained metadata, in other cases 3836 * we allow use of non-chained metadata if RSS(v1) is the only 3837 * advertised capability requiring metadata. 
3838 */ 3839 nn->dp.chained_metadata_format = nn->fw_ver.major == 4 || 3840 !nn->dp.netdev || 3841 !(nn->cap & NFP_NET_CFG_CTRL_RSS) || 3842 nn->cap & NFP_NET_CFG_CTRL_CHAIN_META; 3843 /* RSS(v1) uses non-chained metadata format, except in ABI 4.x where 3844 * it has the same meaning as RSSv2. 3845 */ 3846 if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4) 3847 nn->cap &= ~NFP_NET_CFG_CTRL_RSS; 3848 3849 /* Determine RX packet/metadata boundary offset */ 3850 if (nn->fw_ver.major >= 2) { 3851 u32 reg; 3852 3853 reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET); 3854 if (reg > NFP_NET_MAX_PREPEND) { 3855 nn_err(nn, "Invalid rx offset: %d\n", reg); 3856 return -EINVAL; 3857 } 3858 nn->dp.rx_offset = reg; 3859 } else { 3860 nn->dp.rx_offset = NFP_NET_RX_OFFSET; 3861 } 3862 3863 /* For control vNICs mask out the capabilities app doesn't want. */ 3864 if (!nn->dp.netdev) 3865 nn->cap &= nn->app->type->ctrl_cap_mask; 3866 3867 return 0; 3868 } 3869 3870 /** 3871 * nfp_net_init() - Initialise/finalise the nfp_net structure 3872 * @nn: NFP Net device structure 3873 * 3874 * Return: 0 on success or negative errno on error. 3875 */ 3876 int nfp_net_init(struct nfp_net *nn) 3877 { 3878 int err; 3879 3880 nn->dp.rx_dma_dir = DMA_FROM_DEVICE; 3881 3882 err = nfp_net_read_caps(nn); 3883 if (err) 3884 return err; 3885 3886 /* Set default MTU and Freelist buffer size */ 3887 if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) { 3888 if (nn->app->ctrl_mtu <= nn->max_mtu) { 3889 nn->dp.mtu = nn->app->ctrl_mtu; 3890 } else { 3891 if (nn->app->ctrl_mtu != NFP_APP_CTRL_MTU_MAX) 3892 nn_warn(nn, "app requested MTU above max supported %u > %u\n", 3893 nn->app->ctrl_mtu, nn->max_mtu); 3894 nn->dp.mtu = nn->max_mtu; 3895 } 3896 } else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) { 3897 nn->dp.mtu = nn->max_mtu; 3898 } else { 3899 nn->dp.mtu = NFP_NET_DEFAULT_MTU; 3900 } 3901 nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp); 3902 3903 if (nfp_app_ctrl_uses_data_vnics(nn->app)) 3904 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_CMSG_DATA; 3905 3906 if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) { 3907 nfp_net_rss_init(nn); 3908 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?: 3909 NFP_NET_CFG_CTRL_RSS; 3910 } 3911 3912 /* Allow L2 Broadcast and Multicast through by default, if supported */ 3913 if (nn->cap & NFP_NET_CFG_CTRL_L2BC) 3914 nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC; 3915 3916 /* Allow IRQ moderation, if supported */ 3917 if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) { 3918 nfp_net_irqmod_init(nn); 3919 nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD; 3920 } 3921 3922 err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar, 3923 &nn->tlv_caps); 3924 if (err) 3925 return err; 3926 3927 if (nn->dp.netdev) 3928 nfp_net_netdev_init(nn); 3929 3930 /* Stash the re-configuration queue away. First odd queue in TX Bar */ 3931 nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ; 3932 3933 /* Make sure the FW knows the netdev is supposed to be disabled here */ 3934 nn_writel(nn, NFP_NET_CFG_CTRL, 0); 3935 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0); 3936 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0); 3937 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING | 3938 NFP_NET_CFG_UPDATE_GEN); 3939 if (err) 3940 return err; 3941 3942 nfp_net_vecs_init(nn); 3943 3944 if (!nn->dp.netdev) 3945 return 0; 3946 return register_netdev(nn->dp.netdev); 3947 } 3948 3949 /** 3950 * nfp_net_clean() - Undo what nfp_net_init() did. 
3951 * @nn: NFP Net device structure 3952 */ 3953 void nfp_net_clean(struct nfp_net *nn) 3954 { 3955 if (!nn->dp.netdev) 3956 return; 3957 3958 unregister_netdev(nn->dp.netdev); 3959 nfp_net_reconfig_wait_posted(nn); 3960 } 3961
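#if 0	/* Illustrative lifecycle sketch, compiled out. It shows how the
	 * alloc/init/clean/free entry points above are meant to be combined;
	 * the helper name and the explicit BAR assignments are assumptions
	 * modelled on the PF/VF probe code, not something this file exports.
	 */
static struct nfp_net *
example_vnic_bringup(struct pci_dev *pdev, void __iomem *ctrl_bar,
		     u8 __iomem *tx_bar, u8 __iomem *rx_bar)
{
	struct nfp_net *nn;
	int err;

	/* Allocate the vNIC with a netdev and conservative ring counts */
	nn = nfp_net_alloc(pdev, true, 8, 8);
	if (IS_ERR(nn))
		return nn;

	/* Callers map the BARs before nfp_net_init() touches the device */
	nn->dp.ctrl_bar = ctrl_bar;
	nn->tx_bar = tx_bar;
	nn->rx_bar = rx_bar;

	/* Reads caps, writes the initial config, registers the netdev */
	err = nfp_net_init(nn);
	if (err) {
		nfp_net_free(nn);
		return ERR_PTR(err);
	}

	/* Teardown is the mirror image: nfp_net_clean(nn); nfp_net_free(nn); */
	return nn;
}
#endif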