// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/etherdevice.h>
#include <linux/ipv6.h>
#include <linux/types.h>
#include <net/netdev_queues.h>

#include "fbnic.h"
#include "fbnic_netdev.h"
#include "fbnic_txrx.h"

int __fbnic_open(struct fbnic_net *fbn)
{
        struct fbnic_dev *fbd = fbn->fbd;
        int err;

        err = fbnic_alloc_napi_vectors(fbn);
        if (err)
                return err;

        err = fbnic_alloc_resources(fbn);
        if (err)
                goto free_napi_vectors;

        err = fbnic_set_netif_queues(fbn);
        if (err)
                goto free_resources;

        /* Send ownership message and flush to verify FW has seen it */
        err = fbnic_fw_xmit_ownership_msg(fbd, true);
        if (err) {
                dev_warn(fbd->dev,
                         "Error %d sending host ownership message to the firmware\n",
                         err);
                goto err_reset_queues;
        }

        err = fbnic_time_start(fbn);
        if (err)
                goto release_ownership;

        err = fbnic_fw_init_heartbeat(fbd, false);
        if (err)
                goto time_stop;

        err = fbnic_pcs_request_irq(fbd);
        if (err)
                goto time_stop;

        /* Pull the BMC config and initialize the RPC */
        fbnic_bmc_rpc_init(fbd);
        fbnic_rss_reinit(fbd, fbn);

        phylink_resume(fbn->phylink);

        return 0;
time_stop:
        fbnic_time_stop(fbn);
release_ownership:
        fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
err_reset_queues:
        fbnic_reset_netif_queues(fbn);
free_resources:
        fbnic_free_resources(fbn);
free_napi_vectors:
        fbnic_free_napi_vectors(fbn);
        return err;
}

static int fbnic_open(struct net_device *netdev)
{
        struct fbnic_net *fbn = netdev_priv(netdev);
        int err;

        fbnic_napi_name_irqs(fbn->fbd);

        err = __fbnic_open(fbn);
        if (!err)
                fbnic_up(fbn);

        return err;
}

static int fbnic_stop(struct net_device *netdev)
{
        struct fbnic_net *fbn = netdev_priv(netdev);

        phylink_suspend(fbn->phylink, fbnic_bmc_present(fbn->fbd));

        fbnic_down(fbn);
        fbnic_pcs_free_irq(fbn->fbd);

        fbnic_time_stop(fbn);
        fbnic_fw_xmit_ownership_msg(fbn->fbd, false);

        fbnic_reset_netif_queues(fbn);
        fbnic_free_resources(fbn);
        fbnic_free_napi_vectors(fbn);

        return 0;
}

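/* Address list synchronization helpers. These are handed to
 * __dev_uc_sync()/__dev_mc_sync() from __fbnic_set_rx_mode(); a sync
 * callback claims a TCAM slot for a newly added address while an
 * unsync callback releases it. A -ENOSPC return from sync makes the
 * caller fall back to promiscuous/all-multicast filtering.
 */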
static int fbnic_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
        struct fbnic_net *fbn = netdev_priv(netdev);
        struct fbnic_mac_addr *avail_addr;

        if (WARN_ON(!is_valid_ether_addr(addr)))
                return -EADDRNOTAVAIL;

        avail_addr = __fbnic_uc_sync(fbn->fbd, addr);
        if (!avail_addr)
                return -ENOSPC;

        /* Add type flag indicating this address is in use by the host */
        set_bit(FBNIC_MAC_ADDR_T_UNICAST, avail_addr->act_tcam);

        return 0;
}

static int fbnic_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
        struct fbnic_net *fbn = netdev_priv(netdev);
        struct fbnic_dev *fbd = fbn->fbd;
        int i, ret;

        /* Scan from middle of list to bottom, filling bottom up.
         * Skip the first entry which is reserved for dev_addr and
         * leave the last entry to use for promiscuous filtering.
         */
        for (i = fbd->mac_addr_boundary, ret = -ENOENT;
             i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX && ret; i++) {
                struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];

                if (!ether_addr_equal(mac_addr->value.addr8, addr))
                        continue;

                ret = __fbnic_uc_unsync(mac_addr);
        }

        return ret;
}

static int fbnic_mc_sync(struct net_device *netdev, const unsigned char *addr)
{
        struct fbnic_net *fbn = netdev_priv(netdev);
        struct fbnic_mac_addr *avail_addr;

        if (WARN_ON(!is_multicast_ether_addr(addr)))
                return -EADDRNOTAVAIL;

        avail_addr = __fbnic_mc_sync(fbn->fbd, addr);
        if (!avail_addr)
                return -ENOSPC;

        /* Add type flag indicating this address is in use by the host */
        set_bit(FBNIC_MAC_ADDR_T_MULTICAST, avail_addr->act_tcam);

        return 0;
}

static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr)
{
        struct fbnic_net *fbn = netdev_priv(netdev);
        struct fbnic_dev *fbd = fbn->fbd;
        int i, ret;

        /* Scan from middle of list to top, filling top down.
         * Skip over the address reserved for the BMC MAC and
         * exclude index 0 as that belongs to the broadcast address.
         */
        for (i = fbd->mac_addr_boundary, ret = -ENOENT;
             --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX && ret;) {
                struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];

                if (!ether_addr_equal(mac_addr->value.addr8, addr))
                        continue;

                ret = __fbnic_mc_unsync(mac_addr);
        }

        return ret;
}

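/* Layout of the MACDA TCAM as managed below:
 * FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX (index 0) holds the broadcast
 * address, host multicast entries sit below fbd->mac_addr_boundary,
 * host unicast entries at or above it, and the high entries are
 * reserved for dev_addr (FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX) and the
 * promiscuous wildcard (FBNIC_RPC_TCAM_MACDA_PROMISC_IDX).
 */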
void __fbnic_set_rx_mode(struct net_device *netdev)
{
        struct fbnic_net *fbn = netdev_priv(netdev);
        bool uc_promisc = false, mc_promisc = false;
        struct fbnic_dev *fbd = fbn->fbd;
        struct fbnic_mac_addr *mac_addr;
        int err;

        /* Populate host address from dev_addr */
        mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX];
        if (!ether_addr_equal(mac_addr->value.addr8, netdev->dev_addr) ||
            mac_addr->state != FBNIC_TCAM_S_VALID) {
                ether_addr_copy(mac_addr->value.addr8, netdev->dev_addr);
                mac_addr->state = FBNIC_TCAM_S_UPDATE;
                set_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam);
        }

        /* Populate broadcast address if broadcast is enabled */
        mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
        if (netdev->flags & IFF_BROADCAST) {
                if (!is_broadcast_ether_addr(mac_addr->value.addr8) ||
                    mac_addr->state != FBNIC_TCAM_S_VALID) {
                        eth_broadcast_addr(mac_addr->value.addr8);
                        mac_addr->state = FBNIC_TCAM_S_ADD;
                }
                set_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam);
        } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
                __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BROADCAST);
        }

        /* Synchronize unicast and multicast address lists */
        err = __dev_uc_sync(netdev, fbnic_uc_sync, fbnic_uc_unsync);
        if (err == -ENOSPC)
                uc_promisc = true;
        err = __dev_mc_sync(netdev, fbnic_mc_sync, fbnic_mc_unsync);
        if (err == -ENOSPC)
                mc_promisc = true;

        uc_promisc |= !!(netdev->flags & IFF_PROMISC);
        mc_promisc |= !!(netdev->flags & IFF_ALLMULTI) || uc_promisc;

        /* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
        mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
        if (uc_promisc) {
                if (!is_zero_ether_addr(mac_addr->value.addr8) ||
                    mac_addr->state != FBNIC_TCAM_S_VALID) {
                        eth_zero_addr(mac_addr->value.addr8);
                        eth_broadcast_addr(mac_addr->mask.addr8);
                        clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
                                  mac_addr->act_tcam);
                        set_bit(FBNIC_MAC_ADDR_T_PROMISC,
                                mac_addr->act_tcam);
                        mac_addr->state = FBNIC_TCAM_S_ADD;
                }
        } else if (mc_promisc &&
                   (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
                /* Multicast needs special handling as the BMC may
                 * already have an all-multi rule in place. If it does,
                 * adding a rule of our own does no good, so the ALL
                 * MULTI rules are modified below instead.
                 */
                if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
                    mac_addr->state != FBNIC_TCAM_S_VALID) {
                        eth_zero_addr(mac_addr->value.addr8);
                        eth_broadcast_addr(mac_addr->mask.addr8);
                        mac_addr->value.addr8[0] ^= 1;
                        mac_addr->mask.addr8[0] ^= 1;
                        set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
                                mac_addr->act_tcam);
                        clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
                                  mac_addr->act_tcam);
                        mac_addr->state = FBNIC_TCAM_S_ADD;
                }
        } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
                if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
                        clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
                                  mac_addr->act_tcam);
                        clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
                                  mac_addr->act_tcam);
                } else {
                        mac_addr->state = FBNIC_TCAM_S_DELETE;
                }
        }

        /* Add rules for BMC all multicast if it is enabled */
        fbnic_bmc_rpc_all_multi_config(fbd, mc_promisc);

        /* Sift out any unshared BMC rules and place them in BMC only section */
        fbnic_sift_macda(fbd);

        /* Write updates to hardware */
        fbnic_write_rules(fbd);
        fbnic_write_macda(fbd);
        fbnic_write_tce_tcam(fbd);
}

static void fbnic_set_rx_mode(struct net_device *netdev)
{
        /* No need to update the hardware if we are not running */
        if (netif_running(netdev))
                __fbnic_set_rx_mode(netdev);
}

static int fbnic_set_mac(struct net_device *netdev, void *p)
{
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        eth_hw_addr_set(netdev, addr->sa_data);

        fbnic_set_rx_mode(netdev);

        return 0;
}

void fbnic_clear_rx_mode(struct net_device *netdev)
{
        struct fbnic_net *fbn = netdev_priv(netdev);
        struct fbnic_dev *fbd = fbn->fbd;
        int idx;

        for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
                struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];

                if (mac_addr->state != FBNIC_TCAM_S_VALID)
                        continue;

                bitmap_clear(mac_addr->act_tcam,
                             FBNIC_MAC_ADDR_T_HOST_START,
                             FBNIC_MAC_ADDR_T_HOST_LEN);

                if (bitmap_empty(mac_addr->act_tcam,
                                 FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
                        mac_addr->state = FBNIC_TCAM_S_DELETE;
        }

        /* Write updates to hardware */
        fbnic_write_macda(fbd);

        __dev_uc_unsync(netdev, NULL);
        __dev_mc_unsync(netdev, NULL);
}

static int fbnic_hwtstamp_get(struct net_device *netdev,
                              struct kernel_hwtstamp_config *config)
{
        struct fbnic_net *fbn = netdev_priv(netdev);

        *config = fbn->hwtstamp_config;

        return 0;
}

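/* Apply a new hardware timestamping configuration. Filters the
 * hardware cannot match exactly are widened to the nearest superset
 * (for example, PTP v2 L4 SYNC becomes all PTP v2 L4 events), and the
 * widened value is what gets reported back to user space.
 */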
static int fbnic_hwtstamp_set(struct net_device *netdev,
                              struct kernel_hwtstamp_config *config,
                              struct netlink_ext_ack *extack)
{
        struct fbnic_net *fbn = netdev_priv(netdev);
        int old_rx_filter;

        if (config->source != HWTSTAMP_SOURCE_NETDEV)
                return -EOPNOTSUPP;

        if (!kernel_hwtstamp_config_changed(config, &fbn->hwtstamp_config))
                return 0;

        /* Upscale the filters */
        switch (config->rx_filter) {
        case HWTSTAMP_FILTER_NONE:
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
                break;
        case HWTSTAMP_FILTER_NTP_ALL:
                config->rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
                config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                break;
        default:
                return -ERANGE;
        }

        /* Configure */
        old_rx_filter = fbn->hwtstamp_config.rx_filter;
        memcpy(&fbn->hwtstamp_config, config, sizeof(*config));

        if (old_rx_filter != config->rx_filter && netif_running(fbn->netdev)) {
                fbnic_rss_reinit(fbn->fbd, fbn);
                fbnic_write_rules(fbn->fbd);
        }

        /* Save / report back filter configuration.
         * Note that our filter configuration is inexact. Instead of
         * filtering for a specific UDP port or L2 Ethertype we are
         * filtering in all UDP or all non-IP packets for timestamping. So
         * if anything other than FILTER_ALL is requested we report
         * FILTER_SOME indicating that we will be timestamping a few
         * additional packets.
         */
        if (config->rx_filter > HWTSTAMP_FILTER_ALL)
                config->rx_filter = HWTSTAMP_FILTER_SOME;

        return 0;
}

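/* Fill rtnl_link_stats64 from three sources: the aggregate counters
 * in fbn->tx_stats/rx_stats (presumably carrying totals across ring
 * reconfigurations), per-ring counters sampled under their u64_stats
 * syncp, and hardware drop/error counters read under hw_stats_lock.
 */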
static void fbnic_get_stats64(struct net_device *dev,
                              struct rtnl_link_stats64 *stats64)
{
        u64 rx_bytes, rx_packets, rx_dropped = 0, rx_errors = 0;
        u64 tx_bytes, tx_packets, tx_dropped = 0;
        struct fbnic_net *fbn = netdev_priv(dev);
        struct fbnic_dev *fbd = fbn->fbd;
        struct fbnic_queue_stats *stats;
        u64 rx_over = 0, rx_missed = 0;
        unsigned int start, i;

        fbnic_get_hw_stats(fbd);

        stats = &fbn->tx_stats;

        tx_bytes = stats->bytes;
        tx_packets = stats->packets;
        tx_dropped = stats->dropped;

        /* Record drops from Tx HW Datapath */
        spin_lock(&fbd->hw_stats_lock);
        tx_dropped += fbd->hw_stats.tmi.drop.frames.value +
                      fbd->hw_stats.tti.cm_drop.frames.value +
                      fbd->hw_stats.tti.frame_drop.frames.value +
                      fbd->hw_stats.tti.tbi_drop.frames.value;
        spin_unlock(&fbd->hw_stats_lock);

        stats64->tx_bytes = tx_bytes;
        stats64->tx_packets = tx_packets;
        stats64->tx_dropped = tx_dropped;

        for (i = 0; i < fbn->num_tx_queues; i++) {
                struct fbnic_ring *txr = fbn->tx[i];

                if (!txr)
                        continue;

                stats = &txr->stats;
                do {
                        start = u64_stats_fetch_begin(&stats->syncp);
                        tx_bytes = stats->bytes;
                        tx_packets = stats->packets;
                        tx_dropped = stats->dropped;
                } while (u64_stats_fetch_retry(&stats->syncp, start));

                stats64->tx_bytes += tx_bytes;
                stats64->tx_packets += tx_packets;
                stats64->tx_dropped += tx_dropped;
        }

        stats = &fbn->rx_stats;

        rx_bytes = stats->bytes;
        rx_packets = stats->packets;
        rx_dropped = stats->dropped;

        spin_lock(&fbd->hw_stats_lock);
        /* Record drops for the host FIFOs.
         * 4: network to Host, 6: BMC to Host
         * Exclude the BMC and MC FIFOs as those stats may contain drops
         * due to unrelated items such as TCAM misses. They are still
         * accessible through the ethtool stats.
         */
        i = FBNIC_RXB_FIFO_HOST;
        rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
        i = FBNIC_RXB_FIFO_BMC_TO_HOST;
        rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;

        for (i = 0; i < fbd->max_num_queues; i++) {
                /* Report packets dropped due to CQ/BDQ being full/empty */
                rx_over += fbd->hw_stats.hw_q[i].rde_pkt_cq_drop.value;
                rx_over += fbd->hw_stats.hw_q[i].rde_pkt_bdq_drop.value;

                /* Report packets with errors */
                rx_errors += fbd->hw_stats.hw_q[i].rde_pkt_err.value;
        }
        spin_unlock(&fbd->hw_stats_lock);

        stats64->rx_bytes = rx_bytes;
        stats64->rx_packets = rx_packets;
        stats64->rx_dropped = rx_dropped;
        stats64->rx_over_errors = rx_over;
        stats64->rx_errors = rx_errors;
        stats64->rx_missed_errors = rx_missed;

        for (i = 0; i < fbn->num_rx_queues; i++) {
                struct fbnic_ring *rxr = fbn->rx[i];

                if (!rxr)
                        continue;

                stats = &rxr->stats;
                do {
                        start = u64_stats_fetch_begin(&stats->syncp);
                        rx_bytes = stats->bytes;
                        rx_packets = stats->packets;
                        rx_dropped = stats->dropped;
                } while (u64_stats_fetch_retry(&stats->syncp, start));

                stats64->rx_bytes += rx_bytes;
                stats64->rx_packets += rx_packets;
                stats64->rx_dropped += rx_dropped;
        }
}

static const struct net_device_ops fbnic_netdev_ops = {
        .ndo_open = fbnic_open,
        .ndo_stop = fbnic_stop,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_start_xmit = fbnic_xmit_frame,
        .ndo_features_check = fbnic_features_check,
        .ndo_set_mac_address = fbnic_set_mac,
        .ndo_set_rx_mode = fbnic_set_rx_mode,
        .ndo_get_stats64 = fbnic_get_stats64,
        .ndo_hwtstamp_get = fbnic_hwtstamp_get,
        .ndo_hwtstamp_set = fbnic_hwtstamp_set,
};

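/* Per-queue statistics callbacks for the netdev netlink qstats API.
 * The base stats callback reports the device-wide counters kept in
 * fbn->tx_stats/rx_stats, which are expected to cover whatever is not
 * attributable to a currently active ring.
 */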
static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
                                     struct netdev_queue_stats_rx *rx)
{
        struct fbnic_net *fbn = netdev_priv(dev);
        struct fbnic_ring *rxr = fbn->rx[idx];
        struct fbnic_dev *fbd = fbn->fbd;
        struct fbnic_queue_stats *stats;
        u64 bytes, packets, alloc_fail;
        u64 csum_complete, csum_none;
        unsigned int start;

        if (!rxr)
                return;

        stats = &rxr->stats;
        do {
                start = u64_stats_fetch_begin(&stats->syncp);
                bytes = stats->bytes;
                packets = stats->packets;
                alloc_fail = stats->rx.alloc_failed;
                csum_complete = stats->rx.csum_complete;
                csum_none = stats->rx.csum_none;
        } while (u64_stats_fetch_retry(&stats->syncp, start));

        rx->bytes = bytes;
        rx->packets = packets;
        rx->alloc_fail = alloc_fail;
        rx->csum_complete = csum_complete;
        rx->csum_none = csum_none;

        fbnic_get_hw_q_stats(fbd, fbd->hw_stats.hw_q);

        spin_lock(&fbd->hw_stats_lock);
        rx->hw_drop_overruns = fbd->hw_stats.hw_q[idx].rde_pkt_cq_drop.value +
                               fbd->hw_stats.hw_q[idx].rde_pkt_bdq_drop.value;
        rx->hw_drops = fbd->hw_stats.hw_q[idx].rde_pkt_err.value +
                       rx->hw_drop_overruns;
        spin_unlock(&fbd->hw_stats_lock);
}

static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
                                     struct netdev_queue_stats_tx *tx)
{
        struct fbnic_net *fbn = netdev_priv(dev);
        struct fbnic_ring *txr = fbn->tx[idx];
        struct fbnic_queue_stats *stats;
        u64 stop, wake, csum, lso;
        unsigned int start;
        u64 bytes, packets;

        if (!txr)
                return;

        stats = &txr->stats;
        do {
                start = u64_stats_fetch_begin(&stats->syncp);
                bytes = stats->bytes;
                packets = stats->packets;
                csum = stats->twq.csum_partial;
                lso = stats->twq.lso;
                stop = stats->twq.stop;
                wake = stats->twq.wake;
        } while (u64_stats_fetch_retry(&stats->syncp, start));

        tx->bytes = bytes;
        tx->packets = packets;
        tx->needs_csum = csum + lso;
        tx->hw_gso_wire_packets = lso;
        tx->stop = stop;
        tx->wake = wake;
}

static void fbnic_get_base_stats(struct net_device *dev,
                                 struct netdev_queue_stats_rx *rx,
                                 struct netdev_queue_stats_tx *tx)
{
        struct fbnic_net *fbn = netdev_priv(dev);

        tx->bytes = fbn->tx_stats.bytes;
        tx->packets = fbn->tx_stats.packets;
        tx->needs_csum = fbn->tx_stats.twq.csum_partial + fbn->tx_stats.twq.lso;
        tx->hw_gso_wire_packets = fbn->tx_stats.twq.lso;
        tx->stop = fbn->tx_stats.twq.stop;
        tx->wake = fbn->tx_stats.twq.wake;

        rx->bytes = fbn->rx_stats.bytes;
        rx->packets = fbn->rx_stats.packets;
        rx->alloc_fail = fbn->rx_stats.rx.alloc_failed;
        rx->csum_complete = fbn->rx_stats.rx.csum_complete;
        rx->csum_none = fbn->rx_stats.rx.csum_none;
}

static const struct netdev_stat_ops fbnic_stat_ops = {
        .get_queue_stats_rx = fbnic_get_queue_stats_rx,
        .get_queue_stats_tx = fbnic_get_queue_stats_tx,
        .get_base_stats = fbnic_get_base_stats,
};

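/* Clamp the requested ring counts against the number of available
 * NAPI vectors. num_napi is the larger of the two counts rather than
 * their sum, as a single NAPI vector appears to service both a Tx
 * and an Rx ring.
 */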
void fbnic_reset_queues(struct fbnic_net *fbn,
                        unsigned int tx, unsigned int rx)
{
        struct fbnic_dev *fbd = fbn->fbd;
        unsigned int max_napis;

        max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;

        tx = min(tx, max_napis);
        fbn->num_tx_queues = tx;

        rx = min(rx, max_napis);
        fbn->num_rx_queues = rx;

        fbn->num_napi = max(tx, rx);
}

/**
 * fbnic_netdev_free - Free the netdev associated with fbnic
 * @fbd: Driver specific structure to free netdev from
 *
 * Destroy the phylink instance if one exists and free the netdev,
 * clearing the reference held in the device structure.
 **/
void fbnic_netdev_free(struct fbnic_dev *fbd)
{
        struct fbnic_net *fbn = netdev_priv(fbd->netdev);

        if (fbn->phylink)
                phylink_destroy(fbn->phylink);

        free_netdev(fbd->netdev);
        fbd->netdev = NULL;
}

/**
 * fbnic_netdev_alloc - Allocate a netdev and associate with fbnic
 * @fbd: Driver specific structure to associate netdev with
 *
 * Allocate and initialize the netdev and netdev private structure. Bind
 * together the hardware, netdev, and pci data structures.
 *
 * Return: Pointer to net_device on success, NULL on failure
 **/
struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
{
        struct net_device *netdev;
        struct fbnic_net *fbn;
        int default_queues;

        netdev = alloc_etherdev_mq(sizeof(*fbn), FBNIC_MAX_RXQS);
        if (!netdev)
                return NULL;

        SET_NETDEV_DEV(netdev, fbd->dev);
        fbd->netdev = netdev;

        netdev->netdev_ops = &fbnic_netdev_ops;
        netdev->stat_ops = &fbnic_stat_ops;

        fbnic_set_ethtool_ops(netdev);

        fbn = netdev_priv(netdev);

        fbn->netdev = netdev;
        fbn->fbd = fbd;

        fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
        fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
        fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
        fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;

        fbn->tx_usecs = FBNIC_TX_USECS_DEFAULT;
        fbn->rx_usecs = FBNIC_RX_USECS_DEFAULT;
        fbn->rx_max_frames = FBNIC_RX_FRAMES_DEFAULT;

        default_queues = netif_get_num_default_rss_queues();
        if (default_queues > fbd->max_num_queues)
                default_queues = fbd->max_num_queues;

        fbnic_reset_queues(fbn, default_queues, default_queues);

        fbnic_reset_indir_tbl(fbn);
        fbnic_rss_key_fill(fbn->rss_key);
        fbnic_rss_init_en_mask(fbn);

        netdev->priv_flags |= IFF_UNICAST_FLT;

        netdev->gso_partial_features =
                NETIF_F_GSO_GRE |
                NETIF_F_GSO_GRE_CSUM |
                NETIF_F_GSO_IPXIP4 |
                NETIF_F_GSO_UDP_TUNNEL |
                NETIF_F_GSO_UDP_TUNNEL_CSUM;

        netdev->features |=
                netdev->gso_partial_features |
                FBNIC_TUN_GSO_FEATURES |
                NETIF_F_RXHASH |
                NETIF_F_SG |
                NETIF_F_HW_CSUM |
                NETIF_F_RXCSUM |
                NETIF_F_TSO |
                NETIF_F_TSO_ECN |
                NETIF_F_TSO6 |
                NETIF_F_GSO_PARTIAL |
                NETIF_F_GSO_UDP_L4;

        netdev->hw_features |= netdev->features;
        netdev->vlan_features |= netdev->features;
        netdev->hw_enc_features |= netdev->features;
        netdev->features |= NETIF_F_NTUPLE;

        netdev->min_mtu = IPV6_MIN_MTU;
        netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;

        /* TBD: This is a workaround for the BMC as phylink doesn't have
         * support for leaving the link enabled if a BMC is present.
         */
        netdev->ethtool->wol_enabled = true;

        netif_carrier_off(netdev);

        netif_tx_stop_all_queues(netdev);

        if (fbnic_phylink_init(netdev)) {
                fbnic_netdev_free(fbd);
                return NULL;
        }

        return netdev;
}

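/* Derive the MAC address from the PCIe Device Serial Number: the top
 * three and bottom three bytes of the 64-bit DSN form the six address
 * bytes, and the middle two bytes are discarded.
 */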
static int fbnic_dsn_to_mac_addr(u64 dsn, char *addr)
{
        addr[0] = (dsn >> 56) & 0xFF;
        addr[1] = (dsn >> 48) & 0xFF;
        addr[2] = (dsn >> 40) & 0xFF;
        addr[3] = (dsn >> 16) & 0xFF;
        addr[4] = (dsn >> 8) & 0xFF;
        addr[5] = dsn & 0xFF;

        return is_valid_ether_addr(addr) ? 0 : -EINVAL;
}

/**
 * fbnic_netdev_register - Initialize general software structures
 * @netdev: Netdev containing structure to initialize and register
 *
 * Initialize the MAC address for the netdev and register it.
 *
 * Return: 0 on success, negative on failure
 **/
int fbnic_netdev_register(struct net_device *netdev)
{
        struct fbnic_net *fbn = netdev_priv(netdev);
        struct fbnic_dev *fbd = fbn->fbd;
        u64 dsn = fbd->dsn;
        u8 addr[ETH_ALEN];
        int err;

        err = fbnic_dsn_to_mac_addr(dsn, addr);
        if (!err) {
                ether_addr_copy(netdev->perm_addr, addr);
                eth_hw_addr_set(netdev, addr);
        } else {
                /* A randomly assigned MAC address will cause
                 * provisioning issues, so instead just fail to spawn
                 * the netdev and avoid any confusion.
                 */
                dev_err(fbd->dev, "MAC addr %pM invalid\n", addr);
                return err;
        }

        return register_netdev(netdev);
}

void fbnic_netdev_unregister(struct net_device *netdev)
{
        unregister_netdev(netdev);
}