// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ice.h"
#include "ice_lib.h"

#define DRV_VERSION	"0.7.2-k"
#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_ops;

static void ice_pf_dis_all_vsi(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf);

static void ice_vsi_release_all(struct ice_pf *pf);
static void ice_update_vsi_stats(struct ice_vsi *vsi);
static void ice_update_pf_stats(struct ice_pf *pf);

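/* Example (hypothetical values): with a 256-descriptor ring where
 * next_to_clean == 250 and the tail register reads 10, the tail has wrapped
 * past the end of the ring, so ice_get_tx_pending() below returns
 * 10 + 256 - 250 = 16 descriptors that are still awaiting cleanup.
 */
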
/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u32 ice_get_tx_pending(struct ice_ring *ring)
{
	u32 head, tail;

	head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	unsigned int i;
	u32 v, v_idx;
	int packets;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	for (i = 0; i < vsi->num_txq; i++) {
		struct ice_ring *tx_ring = vsi->tx_rings[i];

		if (tx_ring && tx_ring->desc) {
			int itr = ICE_ITR_NONE;

			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				v_idx = tx_ring->q_vector->v_idx;
				wr32(&vsi->back->hw,
				     GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
				     (itr << GLINT_DYN_CTL_ITR_INDX_S) |
				     GLINT_DYN_CTL_SWINT_TRIG_M |
				     GLINT_DYN_CTL_INTENA_MSK_M);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_add_mac_to_sync_list - creates list of mac addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: mac address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of mac addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: mac address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status = 0;
	u32 changed_flags = 0;
	int err = 0;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove mac addresses in the unsync list */
	status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
	ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
	if (status) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (status == ICE_ERR_NO_MEMORY) {
			err = -ENOMEM;
			goto out;
		}
	}

	/* Add mac addresses in the sync list */
	status = ice_add_mac(hw, &vsi->tmp_sync_list);
	ice_free_fltr_list(dev, &vsi->tmp_sync_list);
	if (status) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, vsi
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev,
				    "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			err = -EIO;
			goto out;
		}
	}
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI)
		netdev_warn(netdev, "Unsupported configuration\n");

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
		clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply TX filter rule to get traffic from VMs */
			status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
						  ICE_FLTR_TX);
			if (status) {
				netdev_err(netdev, "Error setting default VSI %i tx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
			/* Apply RX filter rule to get traffic from wire */
			status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
						  ICE_FLTR_RX);
			if (status) {
				netdev_err(netdev, "Error setting default VSI %i rx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
		} else {
			/* Clear TX filter rule to stop traffic from VMs */
			status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
						  ICE_FLTR_TX);
			if (status) {
				netdev_err(netdev, "Error clearing default VSI %i tx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
			/* Clear RX filter to remove traffic from wire */
			status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
						  ICE_FLTR_RX);
			if (status) {
				netdev_err(netdev, "Error clearing default VSI %i rx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
	set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
exit:
	clear_bit(__ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	for (v = 0; v < pf->num_alloc_vsi; v++)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_prepare_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf);

	ice_shutdown_all_ctrlq(hw);

	set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = &pf->pdev->dev;
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
	WARN_ON(in_interrupt());

	ice_prepare_for_reset(pf);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(__ICE_RESET_FAILED, pf->state);
		clear_bit(__ICE_RESET_OICR_RECV, pf->state);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(__ICE_PFR_REQ, pf->state);
		clear_bit(__ICE_CORER_REQ, pf->state);
		clear_bit(__ICE_GLOBR_REQ, pf->state);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(__ICE_PFR_REQ, pf->state);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
		clear_bit(__ICE_GLOBR_RECV, pf->state);
		clear_bit(__ICE_CORER_RECV, pf->state);
		if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
			ice_prepare_for_reset(pf);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(__ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(__ICE_RESET_OICR_RECV, pf->state);
			clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(__ICE_PFR_REQ, pf->state);
			clear_bit(__ICE_CORER_REQ, pf->state);
			clear_bit(__ICE_GLOBR_REQ, pf->state);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(__ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(__ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(__ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !test_bit(__ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__ICE_DOWN, pf->state) ||
	    test_bit(__ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	const char *speed;
	const char *fc;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "RX/TX";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "TX";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "RX";
		break;
	default:
		fc = "Unknown";
		break;
	}

	netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n",
		    speed, fc);
}

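/* Note on ice_init_link_events() below: the mask it programs is the bitwise
 * inverse of the three event types of interest, so link up/down, media
 * not available and module qualification failure are the only causes left
 * unmasked (assuming set bits in the AQ link event mask suppress events).
 */
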
/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw),
			"Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw),
			"Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_vsi_link_event - update the vsi's netdev
 * @vsi: the vsi on which the link event occurred
 * @link_up: whether the VSI's netdev link should be marked up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (!vsi->netdev) {
			dev_dbg(&vsi->back->pdev->dev,
				"vsi->netdev is not initialized!\n");
			return;
		}
		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_link_event - process the link event
 * @pf: pf that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 *
 * Returns -EIO if ice_get_link_status() fails
 * Returns 0 on success
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
{
	u8 new_link_speed, old_link_speed;
	struct ice_phy_info *phy_info;
	bool new_link_same_as_old;
	bool new_link, old_link;
	u8 lport;
	u16 v;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;
	/* Force ice_get_link_status() to update link info */
	phy_info->get_link_info = true;

	old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	lport = pi->lport;
	if (ice_get_link_status(pi, &new_link)) {
		dev_dbg(&pf->pdev->dev,
			"Could not get link status for port %d\n", lport);
		return -EIO;
	}

	new_link_speed = phy_info->link_info.link_speed;

	new_link_same_as_old = (new_link == old_link &&
				new_link_speed == old_link_speed);

	ice_for_each_vsi(pf, v) {
		struct ice_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->port_info)
			continue;

		if (new_link_same_as_old &&
		    (test_bit(__ICE_DOWN, vsi->state) ||
		    new_link == netif_carrier_ok(vsi->netdev)))
			continue;

		if (vsi->port_info->lport == lport) {
			ice_print_link_msg(vsi, new_link);
			ice_vsi_link_event(vsi, new_link);
		}
	}

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: pf that the link event is associated with
 *
 * Return -EINVAL if port_info is null
 * Return status on success
 */
static int ice_handle_link_event(struct ice_pf *pf)
{
	struct ice_port_info *port_info;
	int status;

	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info);
	if (status)
		dev_dbg(&pf->pdev->dev,
			"Could not process link event, error %d\n", status);

	return status;
}

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(__ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
			 q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue VF Error detected\n", qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue VF Error detected\n", qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
				     GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		enum ice_status ret;
		u16 opcode;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == ICE_ERR_AQ_NO_WORK)
			break;
		if (ret) {
			dev_err(&pf->pdev->dev,
				"%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf))
				dev_err(&pf->pdev->dev,
					"Could not handle link event\n");
			break;
		case ice_mbx_opc_send_msg_to_pf:
			ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		default:
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	devm_kfree(&pf->pdev->dev, event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

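/* __ice_clean_ctrlq() above returns nonzero only when it stopped at the
 * ICE_DFLT_IRQ_WORK budget with messages still pending; the adminq and
 * mailboxq subtasks below treat that as a cue to leave their
 * __ICE_*Q_EVENT_PENDING bit set so the queue gets serviced again.
 */
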
/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
static void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(__ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 */
static void ice_service_task_stop(struct ice_pf *pf)
{
	set_bit(__ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}

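/* ice_handle_mdd_event() below decodes each GL_MDET_* register field by
 * field: when the VALID bit is set, the offending queue number, the
 * malicious event type and the owning PF/VF are extracted with the
 * corresponding _M mask and _S shift (illustrative description; the exact
 * field layout lives in the register definitions).
 */
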
/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool mdd_detected = false;
	u32 reg;
	int i;

	if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
		mdd_detected = true;
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
		mdd_detected = true;
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		bool pf_mdd_detected = false;

		reg = rd32(hw, PF_MDET_TX_PQM);
		if (reg & PF_MDET_TX_PQM_VALID_M) {
			wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		reg = rd32(hw, PF_MDET_TX_TCLAN);
		if (reg & PF_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		reg = rd32(hw, PF_MDET_RX);
		if (reg & PF_MDET_RX_VALID_M) {
			wr32(hw, PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__ICE_NEEDS_RESTART, pf->state);
			ice_service_task_schedule(pf);
		}
	}

	/* see if one of the VFs needs to be reset */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		struct ice_vf *vf = &pf->vf[i];

		reg = rd32(hw, VP_MDET_TX_PQM(i));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(i));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_RX(i));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
		}
	}

	/* re-enable MDD interrupt cause */
	clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, PFINT_OICR_ENA);
	reg |= PFINT_OICR_MAL_DETECT_M;
	wr32(hw, PFINT_OICR_ENA, reg);
	ice_flush(hw);
}

/**
 * ice_service_task - manage and run subtasks
 * @work: pointer to work_struct contained by the PF struct
 */
static void ice_service_task(struct work_struct *work)
{
	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
	unsigned long start_time = jiffies;

	/* subtasks */

	/* process reset requests first */
	ice_reset_subtask(pf);

	/* bail if a reset/recovery cycle is pending or rebuild failed */
	if (ice_is_reset_in_progress(pf->state) ||
	    test_bit(__ICE_SUSPENDED, pf->state) ||
	    test_bit(__ICE_NEEDS_RESTART, pf->state)) {
		ice_service_task_complete(pf);
		return;
	}

	ice_check_for_hang_subtask(pf);
	ice_sync_fltr_subtask(pf);
	ice_handle_mdd_event(pf);
	ice_process_vflr_event(pf);
	ice_watchdog_subtask(pf);
	ice_clean_adminq_subtask(pf);
	ice_clean_mailboxq_subtask(pf);

	/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
	ice_service_task_complete(pf);

	/* If the tasks have taken longer than one service timer period
	 * or there is more work to be done, reset the service timer to
	 * schedule the service task now.
	 */
	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
	    test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		mod_timer(&pf->serv_tmr, jiffies);
}

/**
 * ice_set_ctrlq_len - helper function to set controlq length
 * @hw: pointer to the hw instance
 */
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
	hw->adminq.num_rq_entries = ICE_AQ_LEN;
	hw->adminq.num_sq_entries = ICE_AQ_LEN;
	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}

/**
 * ice_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 */
static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
				    const cpumask_t *mask)
{
	struct ice_q_vector *q_vector =
		container_of(notify, struct ice_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * ice_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 */
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}

/**
 * ice_vsi_ena_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 */
static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
		int i;

		for (i = 0; i < vsi->num_q_vectors; i++)
			ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
	}

	ice_flush(hw);
	return 0;
}

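/* ice_vsi_req_irq_msix() below names each vector "<basename>-TxRx-<n>"
 * (or "-rx-"/"-tx-" for unpaired rings). With a hypothetical basename of
 * "ice-eth0", the vectors would show up in /proc/interrupts as
 * ice-eth0-TxRx-0, ice-eth0-TxRx-1, and so on; the basename itself is
 * supplied by the caller.
 */
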
/**
 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
 * @vsi: the VSI being configured
 * @basename: name for the vector
 */
static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct ice_pf *pf = vsi->back;
	int base = vsi->sw_base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = pf->msix_entries[base + vector].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = devm_request_irq(&pf->pdev->dev,
				       pf->msix_entries[base + vector].vector,
				       vsi->irq_handler, 0, q_vector->name,
				       q_vector);
		if (err) {
			netdev_err(vsi->netdev,
				   "MSIX request_irq failed, error: %d\n", err);
			goto free_q_irqs;
		}

		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = ice_irq_affinity_notify;
		q_vector->affinity_notify.release = ice_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);

		/* assign the mask for this irq */
		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
	}

	vsi->irqs_ready = true;
	return 0;

free_q_irqs:
	while (vector) {
		vector--;
		irq_num = pf->msix_entries[base + vector].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		/* free with the same dev_id that was used for request_irq */
		devm_free_irq(&pf->pdev->dev, irq_num, vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
 */
static void ice_ena_misc_vector(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
	rd32(hw, PFINT_OICR);		/* read to clear */

	val = (PFINT_OICR_ECC_ERR_M |
	       PFINT_OICR_MAL_DETECT_M |
	       PFINT_OICR_GRST_M |
	       PFINT_OICR_PCI_EXCEPTION_M |
	       PFINT_OICR_VFLR_M |
	       PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_CRITERR_M);

	wr32(hw, PFINT_OICR_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}

/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 */
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	struct ice_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 oicr, ena_mask;

	set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
	set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
	}
	if (oicr & PFINT_OICR_VFLR_M) {
		ena_mask &= ~PFINT_OICR_VFLR_M;
		set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
	}

	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		/* we have a reset warning */
		ena_mask &= ~PFINT_OICR_GRST_M;
		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
			GLGEN_RSTAT_RESET_TYPE_S;

		if (reset == ICE_RESET_CORER)
			pf->corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			pf->globr_count++;
		else if (reset == ICE_RESET_EMPR)
			pf->empr_count++;
		else
			dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n",
				reset);

		/* If a reset cycle isn't already in progress, we set a bit in
		 * pf->state so that the service task can start a reset/rebuild.
		 * We also make note of which reset happened so that peer
		 * devices/drivers can be informed.
		 */
		if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(__ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(__ICE_GLOBR_RECV, pf->state);
			else
				set_bit(__ICE_EMPR_RECV, pf->state);

			/* There are a couple of different bits at play here.
			 * hw->reset_ongoing indicates whether the hardware is
			 * in reset. This is set to true when a reset interrupt
			 * is received and set back to false after the driver
			 * has determined that the hardware is out of reset.
			 *
			 * __ICE_RESET_OICR_RECV in pf->state indicates
			 * that a post reset rebuild is required before the
			 * driver is operational again. This is set above.
			 *
			 * As this is the start of the reset/rebuild cycle, set
			 * both to indicate that.
			 */
			hw->reset_ongoing = true;
		}
	}

	if (oicr & PFINT_OICR_HMC_ERR_M) {
		ena_mask &= ~PFINT_OICR_HMC_ERR_M;
		dev_dbg(&pf->pdev->dev,
			"HMC Error interrupt - info 0x%x, data 0x%x\n",
			rd32(hw, PFHMC_ERRORINFO),
			rd32(hw, PFHMC_ERRORDATA));
	}

	/* Report and mask off any remaining unexpected interrupts */
	oicr &= ena_mask;
	if (oicr) {
		dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
			oicr);
		/* If a critical error is pending there is no choice but to
		 * reset the device.
		 */
		if (oicr & (PFINT_OICR_PE_CRITERR_M |
			    PFINT_OICR_PCI_EXCEPTION_M |
			    PFINT_OICR_ECC_ERR_M)) {
			set_bit(__ICE_PFR_REQ, pf->state);
			ice_service_task_schedule(pf);
		}
		ena_mask &= ~oicr;
	}
	ret = IRQ_HANDLED;

	/* re-enable interrupt causes that are not handled during this pass */
	wr32(hw, PFINT_OICR_ENA, ena_mask);
	if (!test_bit(__ICE_DOWN, pf->state)) {
		ice_service_task_schedule(pf);
		ice_irq_dynamic_ena(hw, NULL, NULL);
	}

	return ret;
}

/**
 * ice_free_irq_msix_misc - Unroll misc vector setup
 * @pf: board private structure
 */
static void ice_free_irq_msix_misc(struct ice_pf *pf)
{
	/* disable OICR interrupt */
	wr32(&pf->hw, PFINT_OICR_ENA, 0);
	ice_flush(&pf->hw);

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
		synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector);
		devm_free_irq(&pf->pdev->dev,
			      pf->msix_entries[pf->sw_oicr_idx].vector, pf);
	}

	pf->num_avail_sw_msix += 1;
	ice_free_res(pf->sw_irq_tracker, pf->sw_oicr_idx, ICE_RES_MISC_VEC_ID);
	pf->num_avail_hw_msix += 1;
	ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID);
}

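/* Two interrupt resource trackers are in play here: sw_irq_tracker covers
 * the MSI-X vectors the OS actually granted (indexes into pf->msix_entries),
 * while hw_irq_tracker covers the device's full vector space as reported in
 * func_caps (see ice_init_interrupt_scheme()). ice_req_irq_msix_misc()
 * below reserves one entry in each for the OICR (miscellaneous) vector.
 */
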
/**
 * ice_req_irq_msix_misc - Setup the misc vector to handle non-queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 */
static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int oicr_idx, err = 0;
	u8 itr_gran;
	u32 val;

	if (!pf->int_name[0])
		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));

	/* Do not request IRQ but do enable OICR interrupt since settings are
	 * lost during reset. Note that this function is called only during
	 * rebuild path and not while reset is in progress.
	 */
	if (ice_is_reset_in_progress(pf->state))
		goto skip_req_irq;

	/* reserve one vector in sw_irq_tracker for misc interrupts */
	oicr_idx = ice_get_res(pf, pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
	if (oicr_idx < 0)
		return oicr_idx;

	pf->num_avail_sw_msix -= 1;
	pf->sw_oicr_idx = oicr_idx;

	/* reserve one vector in hw_irq_tracker for misc interrupts */
	oicr_idx = ice_get_res(pf, pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
	if (oicr_idx < 0) {
		ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
		pf->num_avail_sw_msix += 1;
		return oicr_idx;
	}
	pf->num_avail_hw_msix -= 1;
	pf->hw_oicr_idx = oicr_idx;

	err = devm_request_irq(&pf->pdev->dev,
			       pf->msix_entries[pf->sw_oicr_idx].vector,
			       ice_misc_intr, 0, pf->int_name, pf);
	if (err) {
		dev_err(&pf->pdev->dev,
			"devm_request_irq for %s failed: %d\n",
			pf->int_name, err);
		ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
		pf->num_avail_sw_msix += 1;
		ice_free_res(pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
		pf->num_avail_hw_msix += 1;
		return err;
	}

skip_req_irq:
	ice_ena_misc_vector(pf);

	val = ((pf->hw_oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
	       PFINT_OICR_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_OICR_CTL, val);

	/* This enables Admin queue Interrupt causes */
	val = ((pf->hw_oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
	       PFINT_FW_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_FW_CTL, val);

	/* This enables Mailbox queue Interrupt causes */
	val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
	       PFINT_MBX_CTL_CAUSE_ENA_M);
	wr32(hw, PFINT_MBX_CTL, val);

	itr_gran = hw->itr_gran;

	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
	     ITR_TO_REG(ICE_ITR_8K, itr_gran));

	ice_flush(hw);
	ice_irq_dynamic_ena(hw, NULL, NULL);

	return 0;
}

/**
 * ice_napi_del - Remove NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be removed
 */
static void ice_napi_del(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
		netif_napi_del(&vsi->q_vectors[v_idx]->napi);
}

/**
 * ice_napi_add - register NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be registered
 *
 * This function is only called in the driver's load path. Registering the NAPI
 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
 * reset/rebuild, etc.)
 */
static void ice_napi_add(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
			       ice_napi_poll, NAPI_POLL_WEIGHT);
}

/**
 * ice_cfg_netdev - Allocate, configure and register a netdev
 * @vsi: the VSI associated with the new netdev
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_cfg_netdev(struct ice_vsi *vsi)
{
	netdev_features_t csumo_features;
	netdev_features_t vlano_features;
	netdev_features_t dflt_features;
	netdev_features_t tso_features;
	struct ice_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	int err;

	netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),
				    vsi->alloc_txq, vsi->alloc_rxq);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	dflt_features = NETIF_F_SG |
			NETIF_F_HIGHDMA |
			NETIF_F_RXHASH;

	csumo_features = NETIF_F_RXCSUM |
			 NETIF_F_IP_CSUM |
			 NETIF_F_IPV6_CSUM;

	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
			 NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;

	tso_features = NETIF_F_TSO;

	/* set features that user can change */
	netdev->hw_features = dflt_features | csumo_features |
			      vlano_features | tso_features;

	/* enable features */
	netdev->features |= netdev->hw_features;
	/* encap and VLAN devices inherit default, csumo and tso features */
	netdev->hw_enc_features |= dflt_features | csumo_features |
				   tso_features;
	netdev->vlan_features |= dflt_features | csumo_features |
				 tso_features;

	if (vsi->type == ICE_VSI_PF) {
		SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);

		ether_addr_copy(netdev->dev_addr, mac_addr);
		ether_addr_copy(netdev->perm_addr, mac_addr);
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* assign netdev_ops */
	netdev->netdev_ops = &ice_netdev_ops;

	/* setup watchdog timeout value to be 5 seconds */
	netdev->watchdog_timeo = 5 * HZ;

	ice_set_ethtool_ops(netdev);

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = ICE_MAX_MTU;

	err = register_netdev(vsi->netdev);
	if (err)
		return err;

	netif_carrier_off(vsi->netdev);

	/* make sure transmit queues start off as stopped */
	netif_tx_stop_all_queues(vsi->netdev);

	return 0;
}

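/* Example: ice_fill_rss_lut() below, called with rss_table_size = 8 and
 * rss_size = 3 (hypothetical values), fills the table with
 * 0 1 2 0 1 2 0 1, spreading hash buckets round-robin across three queues.
 */
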
/**
 * ice_fill_rss_lut - Fill the RSS lookup table with default values
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}

/**
 * ice_pf_vsi_setup - Set up a PF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 *
 * Returns pointer to the successfully allocated VSI sw struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
}

/**
 * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol
 * @vid: vlan id to be added
 *
 * net_device_ops implementation for adding vlan ids
 */
static int ice_vlan_rx_add_vid(struct net_device *netdev,
			       __always_unused __be16 proto, u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int ret;

	if (vid >= VLAN_N_VID) {
		netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
			   vid, VLAN_N_VID);
		return -EINVAL;
	}

	if (vsi->info.pvid)
		return -EINVAL;

	/* Enable VLAN pruning when VLAN 0 is added */
	if (unlikely(!vid)) {
		ret = ice_cfg_vlan_pruning(vsi, true);
		if (ret)
			return ret;
	}

	/* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
	 * needed to continue allowing all untagged packets since VLAN prune
	 * list is applied to all packets by the switch
	 */
	ret = ice_vsi_add_vlan(vsi, vid);

	if (!ret)
		set_bit(vid, vsi->active_vlans);

	return ret;
}

/**
 * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 */
static int ice_vlan_rx_kill_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int status;

	if (vsi->info.pvid)
		return -EINVAL;

	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
	 * information
	 */
	status = ice_vsi_kill_vlan(vsi, vid);
	if (status)
		return status;

	clear_bit(vid, vsi->active_vlans);

	/* Disable VLAN pruning when VLAN 0 is removed */
	if (unlikely(!vid))
		status = ice_cfg_vlan_pruning(vsi, false);

	return status;
}

/**
 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
 * @pf: board private structure
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_setup_pf_sw(struct ice_pf *pf)
{
	LIST_HEAD(tmp_add_list);
	u8 broadcast[ETH_ALEN];
	struct ice_vsi *vsi;
	int status = 0;

	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;

	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
	if (!vsi) {
		status = -ENOMEM;
		goto unroll_vsi_setup;
	}

	status = ice_cfg_netdev(vsi);
	if (status) {
		status = -ENODEV;
		goto unroll_vsi_setup;
	}

	/* registering the NAPI handler requires both the queues and
	 * netdev to be created, which are done in ice_pf_vsi_setup()
	 * and ice_cfg_netdev() respectively
	 */
	ice_napi_add(vsi);

	/* To add a MAC filter, first add the MAC to a list and then
	 * pass the list to ice_add_mac.
	 */

	/* Add a unicast MAC filter so the VSI can get its packets */
	status = ice_add_mac_to_list(vsi, &tmp_add_list,
				     vsi->port_info->mac.perm_addr);
	if (status)
		goto unroll_napi_add;

	/* VSI needs to receive broadcast traffic, so add the broadcast
	 * MAC address to the list as well.
	 */
	eth_broadcast_addr(broadcast);
	status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
	if (status)
		goto free_mac_list;

	/* program MAC filters for entries in tmp_add_list */
	status = ice_add_mac(&pf->hw, &tmp_add_list);
	if (status) {
		dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
		status = -ENOMEM;
		goto free_mac_list;
	}

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
	return status;

free_mac_list:
	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);

unroll_napi_add:
	if (vsi) {
		ice_napi_del(vsi);
		if (vsi->netdev) {
			if (vsi->netdev->reg_state == NETREG_REGISTERED)
				unregister_netdev(vsi->netdev);
			free_netdev(vsi->netdev);
			vsi->netdev = NULL;
		}
	}

unroll_vsi_setup:
	if (vsi) {
		ice_vsi_free_q_vectors(vsi);
		ice_vsi_delete(vsi);
		ice_vsi_put_qs(vsi);
		pf->q_left_tx += vsi->alloc_txq;
		pf->q_left_rx += vsi->alloc_rxq;
		ice_vsi_clear(vsi);
	}
	return status;
}

/**
 * ice_determine_q_usage - Calculate queue distribution
 * @pf: board private structure
 */
static void ice_determine_q_usage(struct ice_pf *pf)
{
	u16 q_left_tx, q_left_rx;

	q_left_tx = pf->hw.func_caps.common_cap.num_txq;
	q_left_rx = pf->hw.func_caps.common_cap.num_rxq;

	pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());

	/* only 1 rx queue unless RSS is enabled */
	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		pf->num_lan_rx = 1;
	else
		pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());

	pf->q_left_tx = q_left_tx - pf->num_lan_tx;
	pf->q_left_rx = q_left_rx - pf->num_lan_rx;
}

/**
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
 * @pf: board private structure to initialize
 */
static void ice_deinit_pf(struct ice_pf *pf)
{
	ice_service_task_stop(pf);
	mutex_destroy(&pf->sw_mutex);
	mutex_destroy(&pf->avail_q_mutex);
}

/**
 * ice_init_pf - Initialize general software structures (struct ice_pf)
 * @pf: board private structure to initialize
 */
static void ice_init_pf(struct ice_pf *pf)
{
	bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
	set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
		struct ice_hw *hw = &pf->hw;

		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
		pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
					      ICE_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */

	mutex_init(&pf->sw_mutex);
	mutex_init(&pf->avail_q_mutex);

	/* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
	mutex_lock(&pf->avail_q_mutex);
	bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
	bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
	mutex_unlock(&pf->avail_q_mutex);

	if (pf->hw.func_caps.common_cap.rss_table_size)
		set_bit(ICE_FLAG_RSS_ENA, pf->flags);

	/* setup service timer and periodic service task */
	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
	pf->serv_tmr_period = HZ;
	INIT_WORK(&pf->serv_task, ice_service_task);
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

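/* Example budget computed by ice_ena_msix_range() below: on a hypothetical
 * 8-CPU system with enough device vectors available, v_budget is
 * 1 (misc/OICR) + 8 (LAN traffic) = 9. If the OS grants fewer than
 * requested, the function first tries to keep all LAN vectors, then falls
 * back to a single LAN vector, and only then gives up.
 */
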
service task */ 1901 timer_setup(&pf->serv_tmr, ice_service_timer, 0); 1902 pf->serv_tmr_period = HZ; 1903 INIT_WORK(&pf->serv_task, ice_service_task); 1904 clear_bit(__ICE_SERVICE_SCHED, pf->state); 1905 } 1906 1907 /** 1908 * ice_ena_msix_range - Request a range of MSIX vectors from the OS 1909 * @pf: board private structure 1910 * 1911 * compute the number of MSIX vectors required (v_budget) and request from 1912 * the OS. Return the number of vectors reserved or negative on failure 1913 */ 1914 static int ice_ena_msix_range(struct ice_pf *pf) 1915 { 1916 int v_left, v_actual, v_budget = 0; 1917 int needed, err, i; 1918 1919 v_left = pf->hw.func_caps.common_cap.num_msix_vectors; 1920 1921 /* reserve one vector for miscellaneous handler */ 1922 needed = 1; 1923 v_budget += needed; 1924 v_left -= needed; 1925 1926 /* reserve vectors for LAN traffic */ 1927 pf->num_lan_msix = min_t(int, num_online_cpus(), v_left); 1928 v_budget += pf->num_lan_msix; 1929 v_left -= pf->num_lan_msix; 1930 1931 pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget, 1932 sizeof(struct msix_entry), GFP_KERNEL); 1933 1934 if (!pf->msix_entries) { 1935 err = -ENOMEM; 1936 goto exit_err; 1937 } 1938 1939 for (i = 0; i < v_budget; i++) 1940 pf->msix_entries[i].entry = i; 1941 1942 /* actually reserve the vectors */ 1943 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, 1944 ICE_MIN_MSIX, v_budget); 1945 1946 if (v_actual < 0) { 1947 dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n"); 1948 err = v_actual; 1949 goto msix_err; 1950 } 1951 1952 if (v_actual < v_budget) { 1953 dev_warn(&pf->pdev->dev, 1954 "not enough vectors. requested = %d, obtained = %d\n", 1955 v_budget, v_actual); 1956 if (v_actual >= (pf->num_lan_msix + 1)) { 1957 pf->num_avail_sw_msix = v_actual - 1958 (pf->num_lan_msix + 1); 1959 } else if (v_actual >= 2) { 1960 pf->num_lan_msix = 1; 1961 pf->num_avail_sw_msix = v_actual - 2; 1962 } else { 1963 pci_disable_msix(pf->pdev); 1964 err = -ERANGE; 1965 goto msix_err; 1966 } 1967 } 1968 1969 return v_actual; 1970 1971 msix_err: 1972 devm_kfree(&pf->pdev->dev, pf->msix_entries); 1973 goto exit_err; 1974 1975 exit_err: 1976 pf->num_lan_msix = 0; 1977 clear_bit(ICE_FLAG_MSIX_ENA, pf->flags); 1978 return err; 1979 } 1980 1981 /** 1982 * ice_dis_msix - Disable MSI-X interrupt setup in OS 1983 * @pf: board private structure 1984 */ 1985 static void ice_dis_msix(struct ice_pf *pf) 1986 { 1987 pci_disable_msix(pf->pdev); 1988 devm_kfree(&pf->pdev->dev, pf->msix_entries); 1989 pf->msix_entries = NULL; 1990 clear_bit(ICE_FLAG_MSIX_ENA, pf->flags); 1991 } 1992 1993 /** 1994 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme 1995 * @pf: board private structure 1996 */ 1997 static void ice_clear_interrupt_scheme(struct ice_pf *pf) 1998 { 1999 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 2000 ice_dis_msix(pf); 2001 2002 if (pf->sw_irq_tracker) { 2003 devm_kfree(&pf->pdev->dev, pf->sw_irq_tracker); 2004 pf->sw_irq_tracker = NULL; 2005 } 2006 2007 if (pf->hw_irq_tracker) { 2008 devm_kfree(&pf->pdev->dev, pf->hw_irq_tracker); 2009 pf->hw_irq_tracker = NULL; 2010 } 2011 } 2012 2013 /** 2014 * ice_init_interrupt_scheme - Determine proper interrupt scheme 2015 * @pf: board private structure to initialize 2016 */ 2017 static int ice_init_interrupt_scheme(struct ice_pf *pf) 2018 { 2019 int vectors = 0, hw_vectors = 0; 2020 ssize_t size; 2021 2022 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 2023 vectors = ice_ena_msix_range(pf); 2024 else 2025 return -ENODEV; 2026 2027 if 
(vectors < 0) 2028 return vectors; 2029 2030 /* set up vector assignment tracking */ 2031 size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors); 2032 2033 pf->sw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); 2034 if (!pf->sw_irq_tracker) { 2035 ice_dis_msix(pf); 2036 return -ENOMEM; 2037 } 2038 2039 /* populate SW interrupts pool with number of OS granted IRQs. */ 2040 pf->num_avail_sw_msix = vectors; 2041 pf->sw_irq_tracker->num_entries = vectors; 2042 2043 /* set up HW vector assignment tracking */ 2044 hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; 2045 size = sizeof(struct ice_res_tracker) + (sizeof(u16) * hw_vectors); 2046 2047 pf->hw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); 2048 if (!pf->hw_irq_tracker) { 2049 ice_clear_interrupt_scheme(pf); 2050 return -ENOMEM; 2051 } 2052 2053 /* populate HW interrupts pool with number of HW supported irqs. */ 2054 pf->num_avail_hw_msix = hw_vectors; 2055 pf->hw_irq_tracker->num_entries = hw_vectors; 2056 2057 return 0; 2058 } 2059 2060 /** 2061 * ice_probe - Device initialization routine 2062 * @pdev: PCI device information struct 2063 * @ent: entry in ice_pci_tbl 2064 * 2065 * Returns 0 on success, negative on failure 2066 */ 2067 static int ice_probe(struct pci_dev *pdev, 2068 const struct pci_device_id __always_unused *ent) 2069 { 2070 struct ice_pf *pf; 2071 struct ice_hw *hw; 2072 int err; 2073 2074 /* this driver uses devres, see Documentation/driver-model/devres.txt */ 2075 err = pcim_enable_device(pdev); 2076 if (err) 2077 return err; 2078 2079 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); 2080 if (err) { 2081 dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err); 2082 return err; 2083 } 2084 2085 pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL); 2086 if (!pf) 2087 return -ENOMEM; 2088 2089 /* set up for high or low dma */ 2090 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2091 if (err) 2092 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 2093 if (err) { 2094 dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); 2095 return err; 2096 } 2097 2098 pci_enable_pcie_error_reporting(pdev); 2099 pci_set_master(pdev); 2100 2101 pf->pdev = pdev; 2102 pci_set_drvdata(pdev, pf); 2103 set_bit(__ICE_DOWN, pf->state); 2104 /* Disable service task until DOWN bit is cleared */ 2105 set_bit(__ICE_SERVICE_DIS, pf->state); 2106 2107 hw = &pf->hw; 2108 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; 2109 hw->back = pf; 2110 hw->vendor_id = pdev->vendor; 2111 hw->device_id = pdev->device; 2112 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 2113 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2114 hw->subsystem_device_id = pdev->subsystem_device; 2115 hw->bus.device = PCI_SLOT(pdev->devfn); 2116 hw->bus.func = PCI_FUNC(pdev->devfn); 2117 ice_set_ctrlq_len(hw); 2118 2119 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); 2120 2121 #ifndef CONFIG_DYNAMIC_DEBUG 2122 if (debug < -1) 2123 hw->debug_mask = debug; 2124 #endif 2125 2126 err = ice_init_hw(hw); 2127 if (err) { 2128 dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err); 2129 err = -EIO; 2130 goto err_exit_unroll; 2131 } 2132 2133 dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n", 2134 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build, 2135 hw->api_maj_ver, hw->api_min_ver); 2136 2137 ice_init_pf(pf); 2138 2139 ice_determine_q_usage(pf); 2140 2141 pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC, 2142 hw->func_caps.guaranteed_num_vsi); 2143 if (!pf->num_alloc_vsi) 
{ 2144 err = -EIO; 2145 goto err_init_pf_unroll; 2146 } 2147 2148 pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi, 2149 sizeof(struct ice_vsi *), GFP_KERNEL); 2150 if (!pf->vsi) { 2151 err = -ENOMEM; 2152 goto err_init_pf_unroll; 2153 } 2154 2155 err = ice_init_interrupt_scheme(pf); 2156 if (err) { 2157 dev_err(&pdev->dev, 2158 "ice_init_interrupt_scheme failed: %d\n", err); 2159 err = -EIO; 2160 goto err_init_interrupt_unroll; 2161 } 2162 2163 /* Driver is mostly up */ 2164 clear_bit(__ICE_DOWN, pf->state); 2165 2166 /* In case of MSIX we are going to setup the misc vector right here 2167 * to handle admin queue events etc. In case of legacy and MSI 2168 * the misc functionality and queue processing is combined in 2169 * the same vector and that gets setup at open. 2170 */ 2171 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { 2172 err = ice_req_irq_msix_misc(pf); 2173 if (err) { 2174 dev_err(&pdev->dev, 2175 "setup of misc vector failed: %d\n", err); 2176 goto err_init_interrupt_unroll; 2177 } 2178 } 2179 2180 /* create switch struct for the switch element created by FW on boot */ 2181 pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw), 2182 GFP_KERNEL); 2183 if (!pf->first_sw) { 2184 err = -ENOMEM; 2185 goto err_msix_misc_unroll; 2186 } 2187 2188 if (hw->evb_veb) 2189 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; 2190 else 2191 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; 2192 2193 pf->first_sw->pf = pf; 2194 2195 /* record the sw_id available for later use */ 2196 pf->first_sw->sw_id = hw->port_info->sw_id; 2197 2198 err = ice_setup_pf_sw(pf); 2199 if (err) { 2200 dev_err(&pdev->dev, 2201 "probe failed due to setup pf switch:%d\n", err); 2202 goto err_alloc_sw_unroll; 2203 } 2204 2205 clear_bit(__ICE_SERVICE_DIS, pf->state); 2206 2207 /* since everything is good, start the service timer */ 2208 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 2209 2210 err = ice_init_link_events(pf->hw.port_info); 2211 if (err) { 2212 dev_err(&pdev->dev, "ice_init_link_events failed: %d\n", err); 2213 goto err_alloc_sw_unroll; 2214 } 2215 2216 return 0; 2217 2218 err_alloc_sw_unroll: 2219 set_bit(__ICE_SERVICE_DIS, pf->state); 2220 set_bit(__ICE_DOWN, pf->state); 2221 devm_kfree(&pf->pdev->dev, pf->first_sw); 2222 err_msix_misc_unroll: 2223 ice_free_irq_msix_misc(pf); 2224 err_init_interrupt_unroll: 2225 ice_clear_interrupt_scheme(pf); 2226 devm_kfree(&pdev->dev, pf->vsi); 2227 err_init_pf_unroll: 2228 ice_deinit_pf(pf); 2229 ice_deinit_hw(hw); 2230 err_exit_unroll: 2231 pci_disable_pcie_error_reporting(pdev); 2232 return err; 2233 } 2234 2235 /** 2236 * ice_remove - Device removal routine 2237 * @pdev: PCI device information struct 2238 */ 2239 static void ice_remove(struct pci_dev *pdev) 2240 { 2241 struct ice_pf *pf = pci_get_drvdata(pdev); 2242 int i; 2243 2244 if (!pf) 2245 return; 2246 2247 set_bit(__ICE_DOWN, pf->state); 2248 ice_service_task_stop(pf); 2249 2250 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) 2251 ice_free_vfs(pf); 2252 ice_vsi_release_all(pf); 2253 ice_free_irq_msix_misc(pf); 2254 ice_for_each_vsi(pf, i) { 2255 if (!pf->vsi[i]) 2256 continue; 2257 ice_vsi_free_q_vectors(pf->vsi[i]); 2258 } 2259 ice_clear_interrupt_scheme(pf); 2260 ice_deinit_pf(pf); 2261 ice_deinit_hw(&pf->hw); 2262 pci_disable_pcie_error_reporting(pdev); 2263 } 2264 2265 /* ice_pci_tbl - PCI Device ID Table 2266 * 2267 * Wildcard entries (PCI_ANY_ID) should come last 2268 * Last entry must be all 0s 2269 * 2270 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 2271 * Class, 
Class Mask, private data (not used) } 2272 */ 2273 static const struct pci_device_id ice_pci_tbl[] = { 2274 { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 }, 2275 { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 }, 2276 { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 }, 2277 /* required last entry */ 2278 { 0, } 2279 }; 2280 MODULE_DEVICE_TABLE(pci, ice_pci_tbl); 2281 2282 static struct pci_driver ice_driver = { 2283 .name = KBUILD_MODNAME, 2284 .id_table = ice_pci_tbl, 2285 .probe = ice_probe, 2286 .remove = ice_remove, 2287 .sriov_configure = ice_sriov_configure, 2288 }; 2289 2290 /** 2291 * ice_module_init - Driver registration routine 2292 * 2293 * ice_module_init is the first routine called when the driver is 2294 * loaded. All it does is register with the PCI subsystem. 2295 */ 2296 static int __init ice_module_init(void) 2297 { 2298 int status; 2299 2300 pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver); 2301 pr_info("%s\n", ice_copyright); 2302 2303 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); 2304 if (!ice_wq) { 2305 pr_err("Failed to create workqueue\n"); 2306 return -ENOMEM; 2307 } 2308 2309 status = pci_register_driver(&ice_driver); 2310 if (status) { 2311 pr_err("failed to register pci driver, err %d\n", status); 2312 destroy_workqueue(ice_wq); 2313 } 2314 2315 return status; 2316 } 2317 module_init(ice_module_init); 2318 2319 /** 2320 * ice_module_exit - Driver exit cleanup routine 2321 * 2322 * ice_module_exit is called just before the driver is removed 2323 * from memory. 2324 */ 2325 static void __exit ice_module_exit(void) 2326 { 2327 pci_unregister_driver(&ice_driver); 2328 destroy_workqueue(ice_wq); 2329 pr_info("module unloaded\n"); 2330 } 2331 module_exit(ice_module_exit); 2332 2333 /** 2334 * ice_set_mac_address - NDO callback to set mac address 2335 * @netdev: network interface device structure 2336 * @pi: pointer to an address structure 2337 * 2338 * Returns 0 on success, negative on failure 2339 */ 2340 static int ice_set_mac_address(struct net_device *netdev, void *pi) 2341 { 2342 struct ice_netdev_priv *np = netdev_priv(netdev); 2343 struct ice_vsi *vsi = np->vsi; 2344 struct ice_pf *pf = vsi->back; 2345 struct ice_hw *hw = &pf->hw; 2346 struct sockaddr *addr = pi; 2347 enum ice_status status; 2348 LIST_HEAD(a_mac_list); 2349 LIST_HEAD(r_mac_list); 2350 u8 flags = 0; 2351 int err; 2352 u8 *mac; 2353 2354 mac = (u8 *)addr->sa_data; 2355 2356 if (!is_valid_ether_addr(mac)) 2357 return -EADDRNOTAVAIL; 2358 2359 if (ether_addr_equal(netdev->dev_addr, mac)) { 2360 netdev_warn(netdev, "already using mac %pM\n", mac); 2361 return 0; 2362 } 2363 2364 if (test_bit(__ICE_DOWN, pf->state) || 2365 ice_is_reset_in_progress(pf->state)) { 2366 netdev_err(netdev, "can't set mac %pM. device not ready\n", 2367 mac); 2368 return -EBUSY; 2369 } 2370 2371 /* When we change the mac address we also have to change the mac address 2372 * based filter rules that were created previously for the old mac 2373 * address. So first, we remove the old filter rule using ice_remove_mac 2374 * and then create a new filter rule using ice_add_mac. Note that for 2375 * both these operations, we first need to form a "list" of mac 2376 * addresses (even though in this case, we have only 1 mac address to be 2377 * added/removed) and this is done using ice_add_mac_to_list. Depending on 2378 * the ensuing operation, this "list" of mac addresses is either 2379 * added to or removed from the filter. 
2380 */ 2381 err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr); 2382 if (err) { 2383 err = -EADDRNOTAVAIL; 2384 goto free_lists; 2385 } 2386 2387 status = ice_remove_mac(hw, &r_mac_list); 2388 if (status) { 2389 err = -EADDRNOTAVAIL; 2390 goto free_lists; 2391 } 2392 2393 err = ice_add_mac_to_list(vsi, &a_mac_list, mac); 2394 if (err) { 2395 err = -EADDRNOTAVAIL; 2396 goto free_lists; 2397 } 2398 2399 status = ice_add_mac(hw, &a_mac_list); 2400 if (status) { 2401 err = -EADDRNOTAVAIL; 2402 goto free_lists; 2403 } 2404 2405 free_lists: 2406 /* free list entries */ 2407 ice_free_fltr_list(&pf->pdev->dev, &r_mac_list); 2408 ice_free_fltr_list(&pf->pdev->dev, &a_mac_list); 2409 2410 if (err) { 2411 netdev_err(netdev, "can't set mac %pM. filter update failed\n", 2412 mac); 2413 return err; 2414 } 2415 2416 /* change the netdev's mac address */ 2417 memcpy(netdev->dev_addr, mac, netdev->addr_len); 2418 netdev_dbg(vsi->netdev, "updated mac address to %pM\n", 2419 netdev->dev_addr); 2420 2421 /* write new mac address to the firmware */ 2422 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 2423 status = ice_aq_manage_mac_write(hw, mac, flags, NULL); 2424 if (status) { 2425 netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n", 2426 mac); 2427 } 2428 return 0; 2429 } 2430 2431 /** 2432 * ice_set_rx_mode - NDO callback to set the netdev filters 2433 * @netdev: network interface device structure 2434 */ 2435 static void ice_set_rx_mode(struct net_device *netdev) 2436 { 2437 struct ice_netdev_priv *np = netdev_priv(netdev); 2438 struct ice_vsi *vsi = np->vsi; 2439 2440 if (!vsi) 2441 return; 2442 2443 /* Set the flags to synchronize filters; 2444 * ndo_set_rx_mode may be triggered even without a change in netdev 2445 * flags 2446 */ 2447 set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags); 2448 set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags); 2449 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); 2450 2451 /* schedule our worker thread which will take care of 2452 * applying the new filter changes 2453 */ 2454 ice_service_task_schedule(vsi->back); 2455 } 2456 2457 /** 2458 * ice_fdb_add - add an entry to the hardware database 2459 * @ndm: the input from the stack 2460 * @tb: pointer to array of nladdr (unused) 2461 * @dev: the net device pointer 2462 * @addr: the MAC address entry being added 2463 * @vid: VLAN id 2464 * @flags: instructions from stack about fdb operation 2465 */ 2466 static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], 2467 struct net_device *dev, const unsigned char *addr, 2468 u16 vid, u16 flags) 2469 { 2470 int err; 2471 2472 if (vid) { 2473 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); 2474 return -EINVAL; 2475 } 2476 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 2477 netdev_err(dev, "FDB only supports static addresses\n"); 2478 return -EINVAL; 2479 } 2480 2481 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 2482 err = dev_uc_add_excl(dev, addr); 2483 else if (is_multicast_ether_addr(addr)) 2484 err = dev_mc_add_excl(dev, addr); 2485 else 2486 err = -EINVAL; 2487 2488 /* Only return duplicate errors if NLM_F_EXCL is set */ 2489 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 2490 err = 0; 2491 2492 return err; 2493 } 2494 2495 /** 2496 * ice_fdb_del - delete an entry from the hardware database 2497 * @ndm: the input from the stack 2498 * @tb: pointer to array of nladdr (unused) 2499 * @dev: the net device pointer 2500 * @addr: the MAC address entry being deleted 2501 * @vid: VLAN id 2502 */ 
2503 static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], 2504 struct net_device *dev, const unsigned char *addr, 2505 __always_unused u16 vid) 2506 { 2507 int err; 2508 2509 if (ndm->ndm_state & NUD_PERMANENT) { 2510 netdev_err(dev, "FDB only supports static addresses\n"); 2511 return -EINVAL; 2512 } 2513 2514 if (is_unicast_ether_addr(addr)) 2515 err = dev_uc_del(dev, addr); 2516 else if (is_multicast_ether_addr(addr)) 2517 err = dev_mc_del(dev, addr); 2518 else 2519 err = -EINVAL; 2520 2521 return err; 2522 } 2523 2524 /** 2525 * ice_set_features - set the netdev feature flags 2526 * @netdev: ptr to the netdev being adjusted 2527 * @features: the feature set that the stack is suggesting 2528 */ 2529 static int ice_set_features(struct net_device *netdev, 2530 netdev_features_t features) 2531 { 2532 struct ice_netdev_priv *np = netdev_priv(netdev); 2533 struct ice_vsi *vsi = np->vsi; 2534 int ret = 0; 2535 2536 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) 2537 ret = ice_vsi_manage_rss_lut(vsi, true); 2538 else if (!(features & NETIF_F_RXHASH) && 2539 netdev->features & NETIF_F_RXHASH) 2540 ret = ice_vsi_manage_rss_lut(vsi, false); 2541 2542 if ((features & NETIF_F_HW_VLAN_CTAG_RX) && 2543 !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 2544 ret = ice_vsi_manage_vlan_stripping(vsi, true); 2545 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && 2546 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 2547 ret = ice_vsi_manage_vlan_stripping(vsi, false); 2548 else if ((features & NETIF_F_HW_VLAN_CTAG_TX) && 2549 !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) 2550 ret = ice_vsi_manage_vlan_insertion(vsi); 2551 else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) && 2552 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) 2553 ret = ice_vsi_manage_vlan_insertion(vsi); 2554 2555 return ret; 2556 } 2557 2558 /** 2559 * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI 2560 * @vsi: VSI to setup vlan properties for 2561 */ 2562 static int ice_vsi_vlan_setup(struct ice_vsi *vsi) 2563 { 2564 int ret = 0; 2565 2566 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 2567 ret = ice_vsi_manage_vlan_stripping(vsi, true); 2568 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) 2569 ret = ice_vsi_manage_vlan_insertion(vsi); 2570 2571 return ret; 2572 } 2573 2574 /** 2575 * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up 2576 * @vsi: the VSI being brought back up 2577 */ 2578 static int ice_restore_vlan(struct ice_vsi *vsi) 2579 { 2580 int err; 2581 u16 vid; 2582 2583 if (!vsi->netdev) 2584 return -EINVAL; 2585 2586 err = ice_vsi_vlan_setup(vsi); 2587 if (err) 2588 return err; 2589 2590 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) { 2591 err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid); 2592 if (err) 2593 break; 2594 } 2595 2596 return err; 2597 } 2598 2599 /** 2600 * ice_vsi_cfg - Setup the VSI 2601 * @vsi: the VSI being configured 2602 * 2603 * Return 0 on success and negative value on error 2604 */ 2605 static int ice_vsi_cfg(struct ice_vsi *vsi) 2606 { 2607 int err; 2608 2609 if (vsi->netdev) { 2610 ice_set_rx_mode(vsi->netdev); 2611 err = ice_restore_vlan(vsi); 2612 if (err) 2613 return err; 2614 } 2615 2616 err = ice_vsi_cfg_txqs(vsi); 2617 if (!err) 2618 err = ice_vsi_cfg_rxqs(vsi); 2619 2620 return err; 2621 } 2622 2623 /** 2624 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI 2625 * @vsi: the VSI being configured 2626 */ 2627 static void ice_napi_enable_all(struct ice_vsi *vsi) 2628 { 2629 int 
q_idx; 2630 2631 if (!vsi->netdev) 2632 return; 2633 2634 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 2635 napi_enable(&vsi->q_vectors[q_idx]->napi); 2636 } 2637 2638 /** 2639 * ice_up_complete - Finish the last steps of bringing up a connection 2640 * @vsi: The VSI being configured 2641 * 2642 * Return 0 on success and negative value on error 2643 */ 2644 static int ice_up_complete(struct ice_vsi *vsi) 2645 { 2646 struct ice_pf *pf = vsi->back; 2647 int err; 2648 2649 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 2650 ice_vsi_cfg_msix(vsi); 2651 else 2652 return -ENOTSUPP; 2653 2654 /* Enable only Rx rings, Tx rings were enabled by the FW when the 2655 * Tx queue group list was configured and the context bits were 2656 * programmed using ice_vsi_cfg_txqs 2657 */ 2658 err = ice_vsi_start_rx_rings(vsi); 2659 if (err) 2660 return err; 2661 2662 clear_bit(__ICE_DOWN, vsi->state); 2663 ice_napi_enable_all(vsi); 2664 ice_vsi_ena_irq(vsi); 2665 2666 if (vsi->port_info && 2667 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && 2668 vsi->netdev) { 2669 ice_print_link_msg(vsi, true); 2670 netif_tx_start_all_queues(vsi->netdev); 2671 netif_carrier_on(vsi->netdev); 2672 } 2673 2674 ice_service_task_schedule(pf); 2675 2676 return err; 2677 } 2678 2679 /** 2680 * ice_up - Bring the connection back up after being down 2681 * @vsi: VSI being configured 2682 */ 2683 int ice_up(struct ice_vsi *vsi) 2684 { 2685 int err; 2686 2687 err = ice_vsi_cfg(vsi); 2688 if (!err) 2689 err = ice_up_complete(vsi); 2690 2691 return err; 2692 } 2693 2694 /** 2695 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring 2696 * @ring: Tx or Rx ring to read stats from 2697 * @pkts: packets stats counter 2698 * @bytes: bytes stats counter 2699 * 2700 * This function fetches stats from the ring considering the atomic operations 2701 * that need to be performed to read u64 values on a 32 bit machine. 
2702 */ 2703 static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, 2704 u64 *bytes) 2705 { 2706 unsigned int start; 2707 *pkts = 0; 2708 *bytes = 0; 2709 2710 if (!ring) 2711 return; 2712 do { 2713 start = u64_stats_fetch_begin_irq(&ring->syncp); 2714 *pkts = ring->stats.pkts; 2715 *bytes = ring->stats.bytes; 2716 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 2717 } 2718 2719 /** 2720 * ice_update_vsi_ring_stats - Update VSI stats counters 2721 * @vsi: the VSI to be updated 2722 */ 2723 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) 2724 { 2725 struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; 2726 struct ice_ring *ring; 2727 u64 pkts, bytes; 2728 int i; 2729 2730 /* reset netdev stats */ 2731 vsi_stats->tx_packets = 0; 2732 vsi_stats->tx_bytes = 0; 2733 vsi_stats->rx_packets = 0; 2734 vsi_stats->rx_bytes = 0; 2735 2736 /* reset non-netdev (extended) stats */ 2737 vsi->tx_restart = 0; 2738 vsi->tx_busy = 0; 2739 vsi->tx_linearize = 0; 2740 vsi->rx_buf_failed = 0; 2741 vsi->rx_page_failed = 0; 2742 2743 rcu_read_lock(); 2744 2745 /* update Tx rings counters */ 2746 ice_for_each_txq(vsi, i) { 2747 ring = READ_ONCE(vsi->tx_rings[i]); 2748 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); 2749 vsi_stats->tx_packets += pkts; 2750 vsi_stats->tx_bytes += bytes; 2751 vsi->tx_restart += ring->tx_stats.restart_q; 2752 vsi->tx_busy += ring->tx_stats.tx_busy; 2753 vsi->tx_linearize += ring->tx_stats.tx_linearize; 2754 } 2755 2756 /* update Rx rings counters */ 2757 ice_for_each_rxq(vsi, i) { 2758 ring = READ_ONCE(vsi->rx_rings[i]); 2759 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); 2760 vsi_stats->rx_packets += pkts; 2761 vsi_stats->rx_bytes += bytes; 2762 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; 2763 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; 2764 } 2765 2766 rcu_read_unlock(); 2767 } 2768 2769 /** 2770 * ice_update_vsi_stats - Update VSI stats counters 2771 * @vsi: the VSI to be updated 2772 */ 2773 static void ice_update_vsi_stats(struct ice_vsi *vsi) 2774 { 2775 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; 2776 struct ice_eth_stats *cur_es = &vsi->eth_stats; 2777 struct ice_pf *pf = vsi->back; 2778 2779 if (test_bit(__ICE_DOWN, vsi->state) || 2780 test_bit(__ICE_CFG_BUSY, pf->state)) 2781 return; 2782 2783 /* get stats as recorded by Tx/Rx rings */ 2784 ice_update_vsi_ring_stats(vsi); 2785 2786 /* get VSI stats as recorded by the hardware */ 2787 ice_update_eth_stats(vsi); 2788 2789 cur_ns->tx_errors = cur_es->tx_errors; 2790 cur_ns->rx_dropped = cur_es->rx_discards; 2791 cur_ns->tx_dropped = cur_es->tx_discards; 2792 cur_ns->multicast = cur_es->rx_multicast; 2793 2794 /* update some more netdev stats if this is main VSI */ 2795 if (vsi->type == ICE_VSI_PF) { 2796 cur_ns->rx_crc_errors = pf->stats.crc_errors; 2797 cur_ns->rx_errors = pf->stats.crc_errors + 2798 pf->stats.illegal_bytes; 2799 cur_ns->rx_length_errors = pf->stats.rx_len_errors; 2800 } 2801 } 2802 2803 /** 2804 * ice_update_pf_stats - Update PF port stats counters 2805 * @pf: PF whose stats needs to be updated 2806 */ 2807 static void ice_update_pf_stats(struct ice_pf *pf) 2808 { 2809 struct ice_hw_port_stats *prev_ps, *cur_ps; 2810 struct ice_hw *hw = &pf->hw; 2811 u8 pf_id; 2812 2813 prev_ps = &pf->stats_prev; 2814 cur_ps = &pf->stats; 2815 pf_id = hw->pf_id; 2816 2817 ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id), 2818 pf->stat_prev_loaded, &prev_ps->eth.rx_bytes, 2819 &cur_ps->eth.rx_bytes); 2820 2821 ice_stat_update40(hw, 
GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id), 2822 pf->stat_prev_loaded, &prev_ps->eth.rx_unicast, 2823 &cur_ps->eth.rx_unicast); 2824 2825 ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id), 2826 pf->stat_prev_loaded, &prev_ps->eth.rx_multicast, 2827 &cur_ps->eth.rx_multicast); 2828 2829 ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id), 2830 pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast, 2831 &cur_ps->eth.rx_broadcast); 2832 2833 ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id), 2834 pf->stat_prev_loaded, &prev_ps->eth.tx_bytes, 2835 &cur_ps->eth.tx_bytes); 2836 2837 ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id), 2838 pf->stat_prev_loaded, &prev_ps->eth.tx_unicast, 2839 &cur_ps->eth.tx_unicast); 2840 2841 ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id), 2842 pf->stat_prev_loaded, &prev_ps->eth.tx_multicast, 2843 &cur_ps->eth.tx_multicast); 2844 2845 ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id), 2846 pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast, 2847 &cur_ps->eth.tx_broadcast); 2848 2849 ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded, 2850 &prev_ps->tx_dropped_link_down, 2851 &cur_ps->tx_dropped_link_down); 2852 2853 ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id), 2854 pf->stat_prev_loaded, &prev_ps->rx_size_64, 2855 &cur_ps->rx_size_64); 2856 2857 ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id), 2858 pf->stat_prev_loaded, &prev_ps->rx_size_127, 2859 &cur_ps->rx_size_127); 2860 2861 ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id), 2862 pf->stat_prev_loaded, &prev_ps->rx_size_255, 2863 &cur_ps->rx_size_255); 2864 2865 ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id), 2866 pf->stat_prev_loaded, &prev_ps->rx_size_511, 2867 &cur_ps->rx_size_511); 2868 2869 ice_stat_update40(hw, GLPRT_PRC1023H(pf_id), 2870 GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded, 2871 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); 2872 2873 ice_stat_update40(hw, GLPRT_PRC1522H(pf_id), 2874 GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded, 2875 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); 2876 2877 ice_stat_update40(hw, GLPRT_PRC9522H(pf_id), 2878 GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded, 2879 &prev_ps->rx_size_big, &cur_ps->rx_size_big); 2880 2881 ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id), 2882 pf->stat_prev_loaded, &prev_ps->tx_size_64, 2883 &cur_ps->tx_size_64); 2884 2885 ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id), 2886 pf->stat_prev_loaded, &prev_ps->tx_size_127, 2887 &cur_ps->tx_size_127); 2888 2889 ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id), 2890 pf->stat_prev_loaded, &prev_ps->tx_size_255, 2891 &cur_ps->tx_size_255); 2892 2893 ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id), 2894 pf->stat_prev_loaded, &prev_ps->tx_size_511, 2895 &cur_ps->tx_size_511); 2896 2897 ice_stat_update40(hw, GLPRT_PTC1023H(pf_id), 2898 GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded, 2899 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); 2900 2901 ice_stat_update40(hw, GLPRT_PTC1522H(pf_id), 2902 GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded, 2903 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); 2904 2905 ice_stat_update40(hw, GLPRT_PTC9522H(pf_id), 2906 GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded, 2907 &prev_ps->tx_size_big, &cur_ps->tx_size_big); 2908 2909 ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded, 2910 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); 2911 2912 ice_stat_update32(hw, 
GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded, 2913 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); 2914 2915 ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded, 2916 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); 2917 2918 ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded, 2919 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); 2920 2921 ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded, 2922 &prev_ps->crc_errors, &cur_ps->crc_errors); 2923 2924 ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded, 2925 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); 2926 2927 ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded, 2928 &prev_ps->mac_local_faults, 2929 &cur_ps->mac_local_faults); 2930 2931 ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded, 2932 &prev_ps->mac_remote_faults, 2933 &cur_ps->mac_remote_faults); 2934 2935 ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded, 2936 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); 2937 2938 ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded, 2939 &prev_ps->rx_undersize, &cur_ps->rx_undersize); 2940 2941 ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded, 2942 &prev_ps->rx_fragments, &cur_ps->rx_fragments); 2943 2944 ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded, 2945 &prev_ps->rx_oversize, &cur_ps->rx_oversize); 2946 2947 ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded, 2948 &prev_ps->rx_jabber, &cur_ps->rx_jabber); 2949 2950 pf->stat_prev_loaded = true; 2951 } 2952 2953 /** 2954 * ice_get_stats64 - get statistics for network device structure 2955 * @netdev: network interface device structure 2956 * @stats: main device statistics structure 2957 */ 2958 static 2959 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) 2960 { 2961 struct ice_netdev_priv *np = netdev_priv(netdev); 2962 struct rtnl_link_stats64 *vsi_stats; 2963 struct ice_vsi *vsi = np->vsi; 2964 2965 vsi_stats = &vsi->net_stats; 2966 2967 if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq) 2968 return; 2969 /* netdev packet/byte stats come from ring counter. These are obtained 2970 * by summing up ring counters (done by ice_update_vsi_ring_stats). 2971 */ 2972 ice_update_vsi_ring_stats(vsi); 2973 stats->tx_packets = vsi_stats->tx_packets; 2974 stats->tx_bytes = vsi_stats->tx_bytes; 2975 stats->rx_packets = vsi_stats->rx_packets; 2976 stats->rx_bytes = vsi_stats->rx_bytes; 2977 2978 /* The rest of the stats can be read from the hardware but instead we 2979 * just return values that the watchdog task has already obtained from 2980 * the hardware. 
2981 */ 2982 stats->multicast = vsi_stats->multicast; 2983 stats->tx_errors = vsi_stats->tx_errors; 2984 stats->tx_dropped = vsi_stats->tx_dropped; 2985 stats->rx_errors = vsi_stats->rx_errors; 2986 stats->rx_dropped = vsi_stats->rx_dropped; 2987 stats->rx_crc_errors = vsi_stats->rx_crc_errors; 2988 stats->rx_length_errors = vsi_stats->rx_length_errors; 2989 } 2990 2991 /** 2992 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI 2993 * @vsi: VSI having NAPI disabled 2994 */ 2995 static void ice_napi_disable_all(struct ice_vsi *vsi) 2996 { 2997 int q_idx; 2998 2999 if (!vsi->netdev) 3000 return; 3001 3002 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 3003 napi_disable(&vsi->q_vectors[q_idx]->napi); 3004 } 3005 3006 /** 3007 * ice_down - Shutdown the connection 3008 * @vsi: The VSI being stopped 3009 */ 3010 int ice_down(struct ice_vsi *vsi) 3011 { 3012 int i, tx_err, rx_err; 3013 3014 /* Caller of this function is expected to set the 3015 * vsi->state __ICE_DOWN bit 3016 */ 3017 if (vsi->netdev) { 3018 netif_carrier_off(vsi->netdev); 3019 netif_tx_disable(vsi->netdev); 3020 } 3021 3022 ice_vsi_dis_irq(vsi); 3023 tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0); 3024 if (tx_err) 3025 netdev_err(vsi->netdev, 3026 "Failed stop Tx rings, VSI %d error %d\n", 3027 vsi->vsi_num, tx_err); 3028 3029 rx_err = ice_vsi_stop_rx_rings(vsi); 3030 if (rx_err) 3031 netdev_err(vsi->netdev, 3032 "Failed stop Rx rings, VSI %d error %d\n", 3033 vsi->vsi_num, rx_err); 3034 3035 ice_napi_disable_all(vsi); 3036 3037 ice_for_each_txq(vsi, i) 3038 ice_clean_tx_ring(vsi->tx_rings[i]); 3039 3040 ice_for_each_rxq(vsi, i) 3041 ice_clean_rx_ring(vsi->rx_rings[i]); 3042 3043 if (tx_err || rx_err) { 3044 netdev_err(vsi->netdev, 3045 "Failed to close VSI 0x%04X on switch 0x%04X\n", 3046 vsi->vsi_num, vsi->vsw->sw_id); 3047 return -EIO; 3048 } 3049 3050 return 0; 3051 } 3052 3053 /** 3054 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources 3055 * @vsi: VSI having resources allocated 3056 * 3057 * Return 0 on success, negative on failure 3058 */ 3059 static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) 3060 { 3061 int i, err = 0; 3062 3063 if (!vsi->num_txq) { 3064 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", 3065 vsi->vsi_num); 3066 return -EINVAL; 3067 } 3068 3069 ice_for_each_txq(vsi, i) { 3070 vsi->tx_rings[i]->netdev = vsi->netdev; 3071 err = ice_setup_tx_ring(vsi->tx_rings[i]); 3072 if (err) 3073 break; 3074 } 3075 3076 return err; 3077 } 3078 3079 /** 3080 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources 3081 * @vsi: VSI having resources allocated 3082 * 3083 * Return 0 on success, negative on failure 3084 */ 3085 static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) 3086 { 3087 int i, err = 0; 3088 3089 if (!vsi->num_rxq) { 3090 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", 3091 vsi->vsi_num); 3092 return -EINVAL; 3093 } 3094 3095 ice_for_each_rxq(vsi, i) { 3096 vsi->rx_rings[i]->netdev = vsi->netdev; 3097 err = ice_setup_rx_ring(vsi->rx_rings[i]); 3098 if (err) 3099 break; 3100 } 3101 3102 return err; 3103 } 3104 3105 /** 3106 * ice_vsi_req_irq - Request IRQ from the OS 3107 * @vsi: The VSI IRQ is being requested for 3108 * @basename: name for the vector 3109 * 3110 * Return 0 on success and a negative value on error 3111 */ 3112 static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename) 3113 { 3114 struct ice_pf *pf = vsi->back; 3115 int err = -EINVAL; 3116 3117 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 3118 err = 
ice_vsi_req_irq_msix(vsi, basename); 3119 3120 return err; 3121 } 3122 3123 /** 3124 * ice_vsi_open - Called when a network interface is made active 3125 * @vsi: the VSI to open 3126 * 3127 * Initialization of the VSI 3128 * 3129 * Returns 0 on success, negative value on error 3130 */ 3131 static int ice_vsi_open(struct ice_vsi *vsi) 3132 { 3133 char int_name[ICE_INT_NAME_STR_LEN]; 3134 struct ice_pf *pf = vsi->back; 3135 int err; 3136 3137 /* allocate descriptors */ 3138 err = ice_vsi_setup_tx_rings(vsi); 3139 if (err) 3140 goto err_setup_tx; 3141 3142 err = ice_vsi_setup_rx_rings(vsi); 3143 if (err) 3144 goto err_setup_rx; 3145 3146 err = ice_vsi_cfg(vsi); 3147 if (err) 3148 goto err_setup_rx; 3149 3150 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 3151 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); 3152 err = ice_vsi_req_irq(vsi, int_name); 3153 if (err) 3154 goto err_setup_rx; 3155 3156 /* Notify the stack of the actual queue counts. */ 3157 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); 3158 if (err) 3159 goto err_set_qs; 3160 3161 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); 3162 if (err) 3163 goto err_set_qs; 3164 3165 err = ice_up_complete(vsi); 3166 if (err) 3167 goto err_up_complete; 3168 3169 return 0; 3170 3171 err_up_complete: 3172 ice_down(vsi); 3173 err_set_qs: 3174 ice_vsi_free_irq(vsi); 3175 err_setup_rx: 3176 ice_vsi_free_rx_rings(vsi); 3177 err_setup_tx: 3178 ice_vsi_free_tx_rings(vsi); 3179 3180 return err; 3181 } 3182 3183 /** 3184 * ice_vsi_release_all - Delete all VSIs 3185 * @pf: PF from which all VSIs are being removed 3186 */ 3187 static void ice_vsi_release_all(struct ice_pf *pf) 3188 { 3189 int err, i; 3190 3191 if (!pf->vsi) 3192 return; 3193 3194 for (i = 0; i < pf->num_alloc_vsi; i++) { 3195 if (!pf->vsi[i]) 3196 continue; 3197 3198 err = ice_vsi_release(pf->vsi[i]); 3199 if (err) 3200 dev_dbg(&pf->pdev->dev, 3201 "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", 3202 i, err, pf->vsi[i]->vsi_num); 3203 } 3204 } 3205 3206 /** 3207 * ice_dis_vsi - pause a VSI 3208 * @vsi: the VSI being paused 3209 */ 3210 static void ice_dis_vsi(struct ice_vsi *vsi) 3211 { 3212 if (test_bit(__ICE_DOWN, vsi->state)) 3213 return; 3214 3215 set_bit(__ICE_NEEDS_RESTART, vsi->state); 3216 3217 if (vsi->type == ICE_VSI_PF && vsi->netdev) { 3218 if (netif_running(vsi->netdev)) { 3219 rtnl_lock(); 3220 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3221 rtnl_unlock(); 3222 } else { 3223 ice_vsi_close(vsi); 3224 } 3225 } 3226 } 3227 3228 /** 3229 * ice_ena_vsi - resume a VSI 3230 * @vsi: the VSI being resumed 3231 */ 3232 static int ice_ena_vsi(struct ice_vsi *vsi) 3233 { 3234 int err = 0; 3235 3236 if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) && 3237 vsi->netdev) { 3238 if (netif_running(vsi->netdev)) { 3239 rtnl_lock(); 3240 err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev); 3241 rtnl_unlock(); 3242 } else { 3243 err = ice_vsi_open(vsi); 3244 } 3245 } 3246 3247 return err; 3248 } 3249 3250 /** 3251 * ice_pf_dis_all_vsi - Pause all VSIs on a PF 3252 * @pf: the PF 3253 */ 3254 static void ice_pf_dis_all_vsi(struct ice_pf *pf) 3255 { 3256 int v; 3257 3258 ice_for_each_vsi(pf, v) 3259 if (pf->vsi[v]) 3260 ice_dis_vsi(pf->vsi[v]); 3261 } 3262 3263 /** 3264 * ice_pf_ena_all_vsi - Resume all VSIs on a PF 3265 * @pf: the PF 3266 */ 3267 static int ice_pf_ena_all_vsi(struct ice_pf *pf) 3268 { 3269 int v; 3270 3271 ice_for_each_vsi(pf, v) 3272 if (pf->vsi[v]) 3273 if (ice_ena_vsi(pf->vsi[v])) 3274 return -EIO; 3275 3276 return 
0; 3277 } 3278 3279 /** 3280 * ice_vsi_rebuild_all - rebuild all VSIs in pf 3281 * @pf: the PF 3282 */ 3283 static int ice_vsi_rebuild_all(struct ice_pf *pf) 3284 { 3285 int i; 3286 3287 /* loop through pf->vsi array and reinit the VSI if found */ 3288 for (i = 0; i < pf->num_alloc_vsi; i++) { 3289 int err; 3290 3291 if (!pf->vsi[i]) 3292 continue; 3293 3294 /* VF VSI rebuild isn't supported yet */ 3295 if (pf->vsi[i]->type == ICE_VSI_VF) 3296 continue; 3297 3298 err = ice_vsi_rebuild(pf->vsi[i]); 3299 if (err) { 3300 dev_err(&pf->pdev->dev, 3301 "VSI at index %d rebuild failed\n", 3302 pf->vsi[i]->idx); 3303 return err; 3304 } 3305 3306 dev_info(&pf->pdev->dev, 3307 "VSI at index %d rebuilt. vsi_num = 0x%x\n", 3308 pf->vsi[i]->idx, pf->vsi[i]->vsi_num); 3309 } 3310 3311 return 0; 3312 } 3313 3314 /** 3315 * ice_vsi_replay_all - replay all VSIs configuration in the PF 3316 * @pf: the PF 3317 */ 3318 static int ice_vsi_replay_all(struct ice_pf *pf) 3319 { 3320 struct ice_hw *hw = &pf->hw; 3321 enum ice_status ret; 3322 int i; 3323 3324 /* loop through pf->vsi array and replay the VSI if found */ 3325 for (i = 0; i < pf->num_alloc_vsi; i++) { 3326 if (!pf->vsi[i]) 3327 continue; 3328 3329 ret = ice_replay_vsi(hw, pf->vsi[i]->idx); 3330 if (ret) { 3331 dev_err(&pf->pdev->dev, 3332 "VSI at index %d replay failed %d\n", 3333 pf->vsi[i]->idx, ret); 3334 return -EIO; 3335 } 3336 3337 /* Re-map HW VSI number, using VSI handle that has been 3338 * previously validated in ice_replay_vsi() call above 3339 */ 3340 pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx); 3341 3342 dev_info(&pf->pdev->dev, 3343 "VSI at index %d filter replayed successfully - vsi_num %i\n", 3344 pf->vsi[i]->idx, pf->vsi[i]->vsi_num); 3345 } 3346 3347 /* Clean up replay filter after successful re-configuration */ 3348 ice_replay_post(hw); 3349 return 0; 3350 } 3351 3352 /** 3353 * ice_rebuild - rebuild after reset 3354 * @pf: pf to rebuild 3355 */ 3356 static void ice_rebuild(struct ice_pf *pf) 3357 { 3358 struct device *dev = &pf->pdev->dev; 3359 struct ice_hw *hw = &pf->hw; 3360 enum ice_status ret; 3361 int err; 3362 3363 if (test_bit(__ICE_DOWN, pf->state)) 3364 goto clear_recovery; 3365 3366 dev_dbg(dev, "rebuilding pf\n"); 3367 3368 ret = ice_init_all_ctrlq(hw); 3369 if (ret) { 3370 dev_err(dev, "control queues init failed %d\n", ret); 3371 goto err_init_ctrlq; 3372 } 3373 3374 ret = ice_clear_pf_cfg(hw); 3375 if (ret) { 3376 dev_err(dev, "clear PF configuration failed %d\n", ret); 3377 goto err_init_ctrlq; 3378 } 3379 3380 ice_clear_pxe_mode(hw); 3381 3382 ret = ice_get_caps(hw); 3383 if (ret) { 3384 dev_err(dev, "ice_get_caps failed %d\n", ret); 3385 goto err_init_ctrlq; 3386 } 3387 3388 err = ice_sched_init_port(hw->port_info); 3389 if (err) 3390 goto err_sched_init_port; 3391 3392 /* reset search_hint of irq_trackers to 0 since interrupts are 3393 * reclaimed and could be allocated from beginning during VSI rebuild 3394 */ 3395 pf->sw_irq_tracker->search_hint = 0; 3396 pf->hw_irq_tracker->search_hint = 0; 3397 3398 err = ice_vsi_rebuild_all(pf); 3399 if (err) { 3400 dev_err(dev, "ice_vsi_rebuild_all failed\n"); 3401 goto err_vsi_rebuild; 3402 } 3403 3404 err = ice_update_link_info(hw->port_info); 3405 if (err) 3406 dev_err(&pf->pdev->dev, "Get link status error %d\n", err); 3407 3408 /* Replay all VSIs Configuration, including filters after reset */ 3409 if (ice_vsi_replay_all(pf)) { 3410 dev_err(&pf->pdev->dev, 3411 "error replaying VSI configurations with switch filter rules\n"); 3412 goto 
err_vsi_rebuild; 3413 } 3414 3415 /* start misc vector */ 3416 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { 3417 err = ice_req_irq_msix_misc(pf); 3418 if (err) { 3419 dev_err(dev, "misc vector setup failed: %d\n", err); 3420 goto err_vsi_rebuild; 3421 } 3422 } 3423 3424 /* restart the VSIs that were rebuilt and running before the reset */ 3425 err = ice_pf_ena_all_vsi(pf); 3426 if (err) { 3427 dev_err(&pf->pdev->dev, "error enabling VSIs\n"); 3428 /* no need to disable VSIs in tear down path in ice_rebuild() 3429 * since it's already taken care of in ice_vsi_open() 3430 */ 3431 goto err_vsi_rebuild; 3432 } 3433 3434 ice_reset_all_vfs(pf, true); 3435 /* if we get here, reset flow is successful */ 3436 clear_bit(__ICE_RESET_FAILED, pf->state); 3437 return; 3438 3439 err_vsi_rebuild: 3440 ice_vsi_release_all(pf); 3441 err_sched_init_port: 3442 ice_sched_cleanup_all(hw); 3443 err_init_ctrlq: 3444 ice_shutdown_all_ctrlq(hw); 3445 set_bit(__ICE_RESET_FAILED, pf->state); 3446 clear_recovery: 3447 /* set this bit in PF state to control service task scheduling */ 3448 set_bit(__ICE_NEEDS_RESTART, pf->state); 3449 dev_err(dev, "Rebuild failed, unload and reload driver\n"); 3450 } 3451 3452 /** 3453 * ice_change_mtu - NDO callback to change the MTU 3454 * @netdev: network interface device structure 3455 * @new_mtu: new value for maximum frame size 3456 * 3457 * Returns 0 on success, negative on failure 3458 */ 3459 static int ice_change_mtu(struct net_device *netdev, int new_mtu) 3460 { 3461 struct ice_netdev_priv *np = netdev_priv(netdev); 3462 struct ice_vsi *vsi = np->vsi; 3463 struct ice_pf *pf = vsi->back; 3464 u8 count = 0; 3465 3466 if (new_mtu == netdev->mtu) { 3467 netdev_warn(netdev, "mtu is already %u\n", netdev->mtu); 3468 return 0; 3469 } 3470 3471 if (new_mtu < netdev->min_mtu) { 3472 netdev_err(netdev, "new mtu invalid. min_mtu is %d\n", 3473 netdev->min_mtu); 3474 return -EINVAL; 3475 } else if (new_mtu > netdev->max_mtu) { 3476 netdev_err(netdev, "new mtu invalid. max_mtu is %d\n", 3477 netdev->max_mtu); 3478 return -EINVAL; 3479 } 3480 /* if a reset is in progress, wait for some time for it to complete */ 3481 do { 3482 if (ice_is_reset_in_progress(pf->state)) { 3483 count++; 3484 usleep_range(1000, 2000); 3485 } else { 3486 break; 3487 } 3488 3489 } while (count < 100); 3490 3491 if (count == 100) { 3492 netdev_err(netdev, "can't change mtu. 
Device is busy\n"); 3493 return -EBUSY; 3494 } 3495 3496 netdev->mtu = new_mtu; 3497 3498 /* if VSI is up, bring it down and then back up */ 3499 if (!test_and_set_bit(__ICE_DOWN, vsi->state)) { 3500 int err; 3501 3502 err = ice_down(vsi); 3503 if (err) { 3504 netdev_err(netdev, "change mtu if_up err %d\n", err); 3505 return err; 3506 } 3507 3508 err = ice_up(vsi); 3509 if (err) { 3510 netdev_err(netdev, "change mtu if_up err %d\n", err); 3511 return err; 3512 } 3513 } 3514 3515 netdev_dbg(netdev, "changed mtu to %d\n", new_mtu); 3516 return 0; 3517 } 3518 3519 /** 3520 * ice_set_rss - Set RSS keys and lut 3521 * @vsi: Pointer to VSI structure 3522 * @seed: RSS hash seed 3523 * @lut: Lookup table 3524 * @lut_size: Lookup table size 3525 * 3526 * Returns 0 on success, negative on failure 3527 */ 3528 int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) 3529 { 3530 struct ice_pf *pf = vsi->back; 3531 struct ice_hw *hw = &pf->hw; 3532 enum ice_status status; 3533 3534 if (seed) { 3535 struct ice_aqc_get_set_rss_keys *buf = 3536 (struct ice_aqc_get_set_rss_keys *)seed; 3537 3538 status = ice_aq_set_rss_key(hw, vsi->idx, buf); 3539 3540 if (status) { 3541 dev_err(&pf->pdev->dev, 3542 "Cannot set RSS key, err %d aq_err %d\n", 3543 status, hw->adminq.rq_last_status); 3544 return -EIO; 3545 } 3546 } 3547 3548 if (lut) { 3549 status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, 3550 lut, lut_size); 3551 if (status) { 3552 dev_err(&pf->pdev->dev, 3553 "Cannot set RSS lut, err %d aq_err %d\n", 3554 status, hw->adminq.rq_last_status); 3555 return -EIO; 3556 } 3557 } 3558 3559 return 0; 3560 } 3561 3562 /** 3563 * ice_get_rss - Get RSS keys and lut 3564 * @vsi: Pointer to VSI structure 3565 * @seed: Buffer to store the keys 3566 * @lut: Buffer to store the lookup table entries 3567 * @lut_size: Size of buffer to store the lookup table entries 3568 * 3569 * Returns 0 on success, negative on failure 3570 */ 3571 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) 3572 { 3573 struct ice_pf *pf = vsi->back; 3574 struct ice_hw *hw = &pf->hw; 3575 enum ice_status status; 3576 3577 if (seed) { 3578 struct ice_aqc_get_set_rss_keys *buf = 3579 (struct ice_aqc_get_set_rss_keys *)seed; 3580 3581 status = ice_aq_get_rss_key(hw, vsi->idx, buf); 3582 if (status) { 3583 dev_err(&pf->pdev->dev, 3584 "Cannot get RSS key, err %d aq_err %d\n", 3585 status, hw->adminq.rq_last_status); 3586 return -EIO; 3587 } 3588 } 3589 3590 if (lut) { 3591 status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type, 3592 lut, lut_size); 3593 if (status) { 3594 dev_err(&pf->pdev->dev, 3595 "Cannot get RSS lut, err %d aq_err %d\n", 3596 status, hw->adminq.rq_last_status); 3597 return -EIO; 3598 } 3599 } 3600 3601 return 0; 3602 } 3603 3604 /** 3605 * ice_bridge_getlink - Get the hardware bridge mode 3606 * @skb: skb buff 3607 * @pid: process id 3608 * @seq: RTNL message seq 3609 * @dev: the netdev being configured 3610 * @filter_mask: filter mask passed in 3611 * @nlflags: netlink flags passed in 3612 * 3613 * Return the bridge mode (VEB/VEPA) 3614 */ 3615 static int 3616 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 3617 struct net_device *dev, u32 filter_mask, int nlflags) 3618 { 3619 struct ice_netdev_priv *np = netdev_priv(dev); 3620 struct ice_vsi *vsi = np->vsi; 3621 struct ice_pf *pf = vsi->back; 3622 u16 bmode; 3623 3624 bmode = pf->first_sw->bridge_mode; 3625 3626 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, 3627 filter_mask, NULL); 3628 } 3629 
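/* Background note on the two bridge modes handled below (a summary; the code that follows is authoritative): in VEB (Virtual Ethernet Bridge) mode the device's internal switch is allowed to loop traffic back between VSIs that share an uplink, which is why ICE_AQ_VSI_SW_FLAG_ALLOW_LB gets set; in VEPA (Virtual Ethernet Port Aggregator) mode all traffic is sent to the adjacent switch and reflected back from there, so the loopback flag is cleared. */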
3630 /** 3631 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) 3632 * @vsi: Pointer to VSI structure 3633 * @bmode: Hardware bridge mode (VEB/VEPA) 3634 * 3635 * Returns 0 on success, negative on failure 3636 */ 3637 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) 3638 { 3639 struct device *dev = &vsi->back->pdev->dev; 3640 struct ice_aqc_vsi_props *vsi_props; 3641 struct ice_hw *hw = &vsi->back->hw; 3642 struct ice_vsi_ctx ctxt = { 0 }; 3643 enum ice_status status; 3644 3645 vsi_props = &vsi->info; 3646 ctxt.info = vsi->info; 3647 3648 if (bmode == BRIDGE_MODE_VEB) 3649 /* change from VEPA to VEB mode */ 3650 ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 3651 else 3652 /* change from VEB to VEPA mode */ 3653 ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 3654 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); 3655 3656 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 3657 if (status) { 3658 dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n", 3659 bmode, status, hw->adminq.sq_last_status); 3660 return -EIO; 3661 } 3662 /* Update sw flags for bookkeeping */ 3663 vsi_props->sw_flags = ctxt.info.sw_flags; 3664 3665 return 0; 3666 } 3667 3668 /** 3669 * ice_bridge_setlink - Set the hardware bridge mode 3670 * @dev: the netdev being configured 3671 * @nlh: RTNL message 3672 * @flags: bridge setlink flags 3673 * 3674 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is 3675 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if 3676 * not already set) for all VSIs connected to this switch, and also updates the 3677 * unicast switch filter rules for the corresponding switch of the netdev. 3678 */ 3679 static int 3680 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 3681 u16 __always_unused flags) 3682 { 3683 struct ice_netdev_priv *np = netdev_priv(dev); 3684 struct ice_pf *pf = np->vsi->back; 3685 struct nlattr *attr, *br_spec; 3686 struct ice_hw *hw = &pf->hw; 3687 enum ice_status status; 3688 struct ice_sw *pf_sw; 3689 int rem, v, err = 0; 3690 3691 pf_sw = pf->first_sw; 3692 /* find the attribute in the netlink message */ 3693 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 3694 3695 nla_for_each_nested(attr, br_spec, rem) { 3696 __u16 mode; 3697 3698 if (nla_type(attr) != IFLA_BRIDGE_MODE) 3699 continue; 3700 mode = nla_get_u16(attr); 3701 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) 3702 return -EINVAL; 3703 /* Continue if bridge mode is not being flipped */ 3704 if (mode == pf_sw->bridge_mode) 3705 continue; 3706 /* Iterate through the PF VSI list and update the loopback 3707 * mode of the VSI 3708 */ 3709 ice_for_each_vsi(pf, v) { 3710 if (!pf->vsi[v]) 3711 continue; 3712 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); 3713 if (err) 3714 return err; 3715 } 3716 3717 hw->evb_veb = (mode == BRIDGE_MODE_VEB); 3718 /* Update the unicast switch filter rules for the corresponding 3719 * switch of the netdev 3720 */ 3721 status = ice_update_sw_rule_bridge_mode(hw); 3722 if (status) { 3723 netdev_err(dev, "update SW_RULE for bridge mode failed, mode = %d err %d aq_err %d\n", 3724 mode, status, hw->adminq.sq_last_status); 3725 /* revert hw->evb_veb */ 3726 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); 3727 return -EIO; 3728 } 3729 3730 pf_sw->bridge_mode = mode; 3731 } 3732 3733 return 0; 3734 } 3735 3736 /** 3737 * ice_tx_timeout - Respond to a Tx Hang 3738 * @netdev: network 
interface device structure 3739 */ 3740 static void ice_tx_timeout(struct net_device *netdev) 3741 { 3742 struct ice_netdev_priv *np = netdev_priv(netdev); 3743 struct ice_ring *tx_ring = NULL; 3744 struct ice_vsi *vsi = np->vsi; 3745 struct ice_pf *pf = vsi->back; 3746 u32 head, val = 0, i; 3747 int hung_queue = -1; 3748 3749 pf->tx_timeout_count++; 3750 3751 /* find the stopped queue the same way the stack does */ 3752 for (i = 0; i < netdev->num_tx_queues; i++) { 3753 struct netdev_queue *q; 3754 unsigned long trans_start; 3755 3756 q = netdev_get_tx_queue(netdev, i); 3757 trans_start = q->trans_start; 3758 if (netif_xmit_stopped(q) && 3759 time_after(jiffies, 3760 (trans_start + netdev->watchdog_timeo))) { 3761 hung_queue = i; 3762 break; 3763 } 3764 } 3765 3766 if (i == netdev->num_tx_queues) { 3767 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); 3768 } else { 3769 /* now that we have an index, find the tx_ring struct */ 3770 for (i = 0; i < vsi->num_txq; i++) { 3771 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { 3772 if (hung_queue == 3773 vsi->tx_rings[i]->q_index) { 3774 tx_ring = vsi->tx_rings[i]; 3775 break; 3776 } 3777 } 3778 } 3779 } 3780 3781 /* Reset recovery level if enough time has elapsed after last timeout. 3782 * Also ensure no new reset action happens before next timeout period. 3783 */ 3784 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) 3785 pf->tx_timeout_recovery_level = 1; 3786 else if (time_before(jiffies, (pf->tx_timeout_last_recovery + 3787 netdev->watchdog_timeo))) 3788 return; 3789 3790 if (tx_ring) { 3791 head = tx_ring->next_to_clean; 3792 /* Read interrupt register */ 3793 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 3794 val = rd32(&pf->hw, 3795 GLINT_DYN_CTL(tx_ring->q_vector->v_idx + 3796 tx_ring->vsi->hw_base_vector)); 3797 3798 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n", 3799 vsi->vsi_num, hung_queue, tx_ring->next_to_clean, 3800 head, tx_ring->next_to_use, 3801 readl(tx_ring->tail), val); 3802 } 3803 3804 pf->tx_timeout_last_recovery = jiffies; 3805 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", 3806 pf->tx_timeout_recovery_level, hung_queue); 3807 3808 switch (pf->tx_timeout_recovery_level) { 3809 case 1: 3810 set_bit(__ICE_PFR_REQ, pf->state); 3811 break; 3812 case 2: 3813 set_bit(__ICE_CORER_REQ, pf->state); 3814 break; 3815 case 3: 3816 set_bit(__ICE_GLOBR_REQ, pf->state); 3817 break; 3818 default: 3819 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); 3820 set_bit(__ICE_DOWN, pf->state); 3821 set_bit(__ICE_NEEDS_RESTART, vsi->state); 3822 set_bit(__ICE_SERVICE_DIS, pf->state); 3823 break; 3824 } 3825 3826 ice_service_task_schedule(pf); 3827 pf->tx_timeout_recovery_level++; 3828 } 3829 3830 /** 3831 * ice_open - Called when a network interface becomes active 3832 * @netdev: network interface device structure 3833 * 3834 * The open entry point is called when a network interface is made 3835 * active by the system (IFF_UP). At this point all resources needed 3836 * for transmit and receive operations are allocated, the interrupt 3837 * handler is registered with the OS, the netdev watchdog is enabled, 3838 * and the stack is notified that the interface is ready. 
3839 * 3840 * Returns 0 on success, negative value on failure 3841 */ 3842 static int ice_open(struct net_device *netdev) 3843 { 3844 struct ice_netdev_priv *np = netdev_priv(netdev); 3845 struct ice_vsi *vsi = np->vsi; 3846 int err; 3847 3848 if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) { 3849 netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); 3850 return -EIO; 3851 } 3852 3853 netif_carrier_off(netdev); 3854 3855 err = ice_vsi_open(vsi); 3856 3857 if (err) 3858 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", 3859 vsi->vsi_num, vsi->vsw->sw_id); 3860 return err; 3861 } 3862 3863 /** 3864 * ice_stop - Disables a network interface 3865 * @netdev: network interface device structure 3866 * 3867 * The stop entry point is called when an interface is de-activated by the OS, 3868 * and the netdevice enters the DOWN state. The hardware is still under the 3869 * driver's control, but the netdev interface is disabled. 3870 * 3871 * Returns success only - not allowed to fail 3872 */ 3873 static int ice_stop(struct net_device *netdev) 3874 { 3875 struct ice_netdev_priv *np = netdev_priv(netdev); 3876 struct ice_vsi *vsi = np->vsi; 3877 3878 ice_vsi_close(vsi); 3879 3880 return 0; 3881 } 3882 3883 /** 3884 * ice_features_check - Validate encapsulated packet conforms to limits 3885 * @skb: skb buffer 3886 * @netdev: This port's netdev 3887 * @features: Offload features that the stack believes apply 3888 */ 3889 static netdev_features_t 3890 ice_features_check(struct sk_buff *skb, 3891 struct net_device __always_unused *netdev, 3892 netdev_features_t features) 3893 { 3894 size_t len; 3895 3896 /* No point in doing any of this if neither checksum nor GSO are 3897 * being requested for this frame. We can rule out both by just 3898 * checking for CHECKSUM_PARTIAL 3899 */ 3900 if (skb->ip_summed != CHECKSUM_PARTIAL) 3901 return features; 3902 3903 /* We cannot support GSO if the MSS is going to be less than 3904 * 64 bytes. If it is then we need to drop support for GSO. 
3905 */ 3906 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 3907 features &= ~NETIF_F_GSO_MASK; 3908 3909 len = skb_network_header(skb) - skb->data; 3910 if (len & ~(ICE_TXD_MACLEN_MAX)) 3911 goto out_rm_features; 3912 3913 len = skb_transport_header(skb) - skb_network_header(skb); 3914 if (len & ~(ICE_TXD_IPLEN_MAX)) 3915 goto out_rm_features; 3916 3917 if (skb->encapsulation) { 3918 len = skb_inner_network_header(skb) - skb_transport_header(skb); 3919 if (len & ~(ICE_TXD_L4LEN_MAX)) 3920 goto out_rm_features; 3921 3922 len = skb_inner_transport_header(skb) - 3923 skb_inner_network_header(skb); 3924 if (len & ~(ICE_TXD_IPLEN_MAX)) 3925 goto out_rm_features; 3926 } 3927 3928 return features; 3929 out_rm_features: 3930 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3931 } 3932 3933 static const struct net_device_ops ice_netdev_ops = { 3934 .ndo_open = ice_open, 3935 .ndo_stop = ice_stop, 3936 .ndo_start_xmit = ice_start_xmit, 3937 .ndo_features_check = ice_features_check, 3938 .ndo_set_rx_mode = ice_set_rx_mode, 3939 .ndo_set_mac_address = ice_set_mac_address, 3940 .ndo_validate_addr = eth_validate_addr, 3941 .ndo_change_mtu = ice_change_mtu, 3942 .ndo_get_stats64 = ice_get_stats64, 3943 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, 3944 .ndo_set_vf_mac = ice_set_vf_mac, 3945 .ndo_get_vf_config = ice_get_vf_cfg, 3946 .ndo_set_vf_trust = ice_set_vf_trust, 3947 .ndo_set_vf_vlan = ice_set_vf_port_vlan, 3948 .ndo_set_vf_link_state = ice_set_vf_link_state, 3949 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, 3950 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, 3951 .ndo_set_features = ice_set_features, 3952 .ndo_bridge_getlink = ice_bridge_getlink, 3953 .ndo_bridge_setlink = ice_bridge_setlink, 3954 .ndo_fdb_add = ice_fdb_add, 3955 .ndo_fdb_del = ice_fdb_del, 3956 .ndo_tx_timeout = ice_tx_timeout, 3957 }; 3958