1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018, Intel Corporation. */ 3 4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */ 5 6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 8 #include <generated/utsrelease.h> 9 #include "ice.h" 10 #include "ice_base.h" 11 #include "ice_lib.h" 12 #include "ice_fltr.h" 13 #include "ice_dcb_lib.h" 14 #include "ice_dcb_nl.h" 15 #include "ice_devlink.h" 16 /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the 17 * ice tracepoint functions. This must be done exactly once across the 18 * ice driver. 19 */ 20 #define CREATE_TRACE_POINTS 21 #include "ice_trace.h" 22 23 #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" 24 static const char ice_driver_string[] = DRV_SUMMARY; 25 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; 26 27 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */ 28 #define ICE_DDP_PKG_PATH "intel/ice/ddp/" 29 #define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg" 30 31 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 32 MODULE_DESCRIPTION(DRV_SUMMARY); 33 MODULE_LICENSE("GPL v2"); 34 MODULE_FIRMWARE(ICE_DDP_PKG_FILE); 35 36 static int debug = -1; 37 module_param(debug, int, 0644); 38 #ifndef CONFIG_DYNAMIC_DEBUG 39 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)"); 40 #else 41 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)"); 42 #endif /* !CONFIG_DYNAMIC_DEBUG */ 43 44 static DEFINE_IDA(ice_aux_ida); 45 46 static struct workqueue_struct *ice_wq; 47 static const struct net_device_ops ice_netdev_safe_mode_ops; 48 static const struct net_device_ops ice_netdev_ops; 49 static int ice_vsi_open(struct ice_vsi *vsi); 50 51 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); 52 53 static void ice_vsi_release_all(struct ice_pf *pf); 54 55 bool netif_is_ice(struct net_device *dev) 56 { 57 return dev && (dev->netdev_ops == &ice_netdev_ops); 58 } 59 60 /** 61 * ice_get_tx_pending - returns number of Tx descriptors not processed 62 * @ring: the ring of descriptors 63 */ 64 static u16 ice_get_tx_pending(struct ice_ring *ring) 65 { 66 u16 head, tail; 67 68 head = ring->next_to_clean; 69 tail = ring->next_to_use; 70 71 if (head != tail) 72 return (head < tail) ? 73 tail - head : (tail + ring->count - head); 74 return 0; 75 } 76 77 /** 78 * ice_check_for_hang_subtask - check for and recover hung queues 79 * @pf: pointer to PF struct 80 */ 81 static void ice_check_for_hang_subtask(struct ice_pf *pf) 82 { 83 struct ice_vsi *vsi = NULL; 84 struct ice_hw *hw; 85 unsigned int i; 86 int packets; 87 u32 v; 88 89 ice_for_each_vsi(pf, v) 90 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { 91 vsi = pf->vsi[v]; 92 break; 93 } 94 95 if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state)) 96 return; 97 98 if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) 99 return; 100 101 hw = &vsi->back->hw; 102 103 for (i = 0; i < vsi->num_txq; i++) { 104 struct ice_ring *tx_ring = vsi->tx_rings[i]; 105 106 if (tx_ring && tx_ring->desc) { 107 /* If packet counter has not changed the queue is 108 * likely stalled, so force an interrupt for this 109 * queue. 110 * 111 * prev_pkt would be negative if there was no 112 * pending work. 113 */ 114 packets = tx_ring->stats.pkts & INT_MAX; 115 if (tx_ring->tx_stats.prev_pkt == packets) { 116 /* Trigger sw interrupt to revive the queue */ 117 ice_trigger_sw_intr(hw, tx_ring->q_vector); 118 continue; 119 } 120 121 /* Memory barrier between read of packet count and call 122 * to ice_get_tx_pending() 123 */ 124 smp_rmb(); 125 tx_ring->tx_stats.prev_pkt = 126 ice_get_tx_pending(tx_ring) ? packets : -1; 127 } 128 } 129 } 130 131 /** 132 * ice_init_mac_fltr - Set initial MAC filters 133 * @pf: board private structure 134 * 135 * Set initial set of MAC filters for PF VSI; configure filters for permanent 136 * address and broadcast address. If an error is encountered, netdevice will be 137 * unregistered. 138 */ 139 static int ice_init_mac_fltr(struct ice_pf *pf) 140 { 141 enum ice_status status; 142 struct ice_vsi *vsi; 143 u8 *perm_addr; 144 145 vsi = ice_get_main_vsi(pf); 146 if (!vsi) 147 return -EINVAL; 148 149 perm_addr = vsi->port_info->mac.perm_addr; 150 status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); 151 if (status) 152 return -EIO; 153 154 return 0; 155 } 156 157 /** 158 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced 159 * @netdev: the net device on which the sync is happening 160 * @addr: MAC address to sync 161 * 162 * This is a callback function which is called by the in kernel device sync 163 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only 164 * populates the tmp_sync_list, which is later used by ice_add_mac to add the 165 * MAC filters from the hardware. 166 */ 167 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr) 168 { 169 struct ice_netdev_priv *np = netdev_priv(netdev); 170 struct ice_vsi *vsi = np->vsi; 171 172 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr, 173 ICE_FWD_TO_VSI)) 174 return -EINVAL; 175 176 return 0; 177 } 178 179 /** 180 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced 181 * @netdev: the net device on which the unsync is happening 182 * @addr: MAC address to unsync 183 * 184 * This is a callback function which is called by the in kernel device unsync 185 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only 186 * populates the tmp_unsync_list, which is later used by ice_remove_mac to 187 * delete the MAC filters from the hardware. 188 */ 189 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) 190 { 191 struct ice_netdev_priv *np = netdev_priv(netdev); 192 struct ice_vsi *vsi = np->vsi; 193 194 /* Under some circumstances, we might receive a request to delete our 195 * own device address from our uc list. Because we store the device 196 * address in the VSI's MAC filter list, we need to ignore such 197 * requests and not delete our device address from this list. 198 */ 199 if (ether_addr_equal(addr, netdev->dev_addr)) 200 return 0; 201 202 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr, 203 ICE_FWD_TO_VSI)) 204 return -EINVAL; 205 206 return 0; 207 } 208 209 /** 210 * ice_vsi_fltr_changed - check if filter state changed 211 * @vsi: VSI to be checked 212 * 213 * returns true if filter state has changed, false otherwise. 214 */ 215 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) 216 { 217 return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) || 218 test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) || 219 test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); 220 } 221 222 /** 223 * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF 224 * @vsi: the VSI being configured 225 * @promisc_m: mask of promiscuous config bits 226 * @set_promisc: enable or disable promisc flag request 227 * 228 */ 229 static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc) 230 { 231 struct ice_hw *hw = &vsi->back->hw; 232 enum ice_status status = 0; 233 234 if (vsi->type != ICE_VSI_PF) 235 return 0; 236 237 if (vsi->num_vlan > 1) { 238 status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, 239 set_promisc); 240 } else { 241 if (set_promisc) 242 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, 243 0); 244 else 245 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, 246 0); 247 } 248 249 if (status) 250 return -EIO; 251 252 return 0; 253 } 254 255 /** 256 * ice_vsi_sync_fltr - Update the VSI filter list to the HW 257 * @vsi: ptr to the VSI 258 * 259 * Push any outstanding VSI filter changes through the AdminQ. 260 */ 261 static int ice_vsi_sync_fltr(struct ice_vsi *vsi) 262 { 263 struct device *dev = ice_pf_to_dev(vsi->back); 264 struct net_device *netdev = vsi->netdev; 265 bool promisc_forced_on = false; 266 struct ice_pf *pf = vsi->back; 267 struct ice_hw *hw = &pf->hw; 268 enum ice_status status = 0; 269 u32 changed_flags = 0; 270 u8 promisc_m; 271 int err = 0; 272 273 if (!vsi->netdev) 274 return -EINVAL; 275 276 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) 277 usleep_range(1000, 2000); 278 279 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; 280 vsi->current_netdev_flags = vsi->netdev->flags; 281 282 INIT_LIST_HEAD(&vsi->tmp_sync_list); 283 INIT_LIST_HEAD(&vsi->tmp_unsync_list); 284 285 if (ice_vsi_fltr_changed(vsi)) { 286 clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); 287 clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); 288 clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); 289 290 /* grab the netdev's addr_list_lock */ 291 netif_addr_lock_bh(netdev); 292 __dev_uc_sync(netdev, ice_add_mac_to_sync_list, 293 ice_add_mac_to_unsync_list); 294 __dev_mc_sync(netdev, ice_add_mac_to_sync_list, 295 ice_add_mac_to_unsync_list); 296 /* our temp lists are populated. release lock */ 297 netif_addr_unlock_bh(netdev); 298 } 299 300 /* Remove MAC addresses in the unsync list */ 301 status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); 302 ice_fltr_free_list(dev, &vsi->tmp_unsync_list); 303 if (status) { 304 netdev_err(netdev, "Failed to delete MAC filters\n"); 305 /* if we failed because of alloc failures, just bail */ 306 if (status == ICE_ERR_NO_MEMORY) { 307 err = -ENOMEM; 308 goto out; 309 } 310 } 311 312 /* Add MAC addresses in the sync list */ 313 status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); 314 ice_fltr_free_list(dev, &vsi->tmp_sync_list); 315 /* If filter is added successfully or already exists, do not go into 316 * 'if' condition and report it as error. Instead continue processing 317 * rest of the function. 318 */ 319 if (status && status != ICE_ERR_ALREADY_EXISTS) { 320 netdev_err(netdev, "Failed to add MAC filters\n"); 321 /* If there is no more space for new umac filters, VSI 322 * should go into promiscuous mode. There should be some 323 * space reserved for promiscuous filters. 324 */ 325 if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC && 326 !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC, 327 vsi->state)) { 328 promisc_forced_on = true; 329 netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n", 330 vsi->vsi_num); 331 } else { 332 err = -EIO; 333 goto out; 334 } 335 } 336 /* check for changes in promiscuous modes */ 337 if (changed_flags & IFF_ALLMULTI) { 338 if (vsi->current_netdev_flags & IFF_ALLMULTI) { 339 if (vsi->num_vlan > 1) 340 promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; 341 else 342 promisc_m = ICE_MCAST_PROMISC_BITS; 343 344 err = ice_cfg_promisc(vsi, promisc_m, true); 345 if (err) { 346 netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n", 347 vsi->vsi_num); 348 vsi->current_netdev_flags &= ~IFF_ALLMULTI; 349 goto out_promisc; 350 } 351 } else { 352 /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */ 353 if (vsi->num_vlan > 1) 354 promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; 355 else 356 promisc_m = ICE_MCAST_PROMISC_BITS; 357 358 err = ice_cfg_promisc(vsi, promisc_m, false); 359 if (err) { 360 netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n", 361 vsi->vsi_num); 362 vsi->current_netdev_flags |= IFF_ALLMULTI; 363 goto out_promisc; 364 } 365 } 366 } 367 368 if (((changed_flags & IFF_PROMISC) || promisc_forced_on) || 369 test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) { 370 clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); 371 if (vsi->current_netdev_flags & IFF_PROMISC) { 372 /* Apply Rx filter rule to get traffic from wire */ 373 if (!ice_is_dflt_vsi_in_use(pf->first_sw)) { 374 err = ice_set_dflt_vsi(pf->first_sw, vsi); 375 if (err && err != -EEXIST) { 376 netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n", 377 err, vsi->vsi_num); 378 vsi->current_netdev_flags &= 379 ~IFF_PROMISC; 380 goto out_promisc; 381 } 382 ice_cfg_vlan_pruning(vsi, false, false); 383 } 384 } else { 385 /* Clear Rx filter to remove traffic from wire */ 386 if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) { 387 err = ice_clear_dflt_vsi(pf->first_sw); 388 if (err) { 389 netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n", 390 err, vsi->vsi_num); 391 vsi->current_netdev_flags |= 392 IFF_PROMISC; 393 goto out_promisc; 394 } 395 if (vsi->num_vlan > 1) 396 ice_cfg_vlan_pruning(vsi, true, false); 397 } 398 } 399 } 400 goto exit; 401 402 out_promisc: 403 set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); 404 goto exit; 405 out: 406 /* if something went wrong then set the changed flag so we try again */ 407 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); 408 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); 409 exit: 410 clear_bit(ICE_CFG_BUSY, vsi->state); 411 return err; 412 } 413 414 /** 415 * ice_sync_fltr_subtask - Sync the VSI filter list with HW 416 * @pf: board private structure 417 */ 418 static void ice_sync_fltr_subtask(struct ice_pf *pf) 419 { 420 int v; 421 422 if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags))) 423 return; 424 425 clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags); 426 427 ice_for_each_vsi(pf, v) 428 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && 429 ice_vsi_sync_fltr(pf->vsi[v])) { 430 /* come back and try again later */ 431 set_bit(ICE_FLAG_FLTR_SYNC, pf->flags); 432 break; 433 } 434 } 435 436 /** 437 * ice_pf_dis_all_vsi - Pause all VSIs on a PF 438 * @pf: the PF 439 * @locked: is the rtnl_lock already held 440 */ 441 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) 442 { 443 int node; 444 int v; 445 446 ice_for_each_vsi(pf, v) 447 if (pf->vsi[v]) 448 ice_dis_vsi(pf->vsi[v], locked); 449 450 for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++) 451 pf->pf_agg_node[node].num_vsis = 0; 452 453 for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++) 454 pf->vf_agg_node[node].num_vsis = 0; 455 } 456 457 /** 458 * ice_prepare_for_reset - prep for the core to reset 459 * @pf: board private structure 460 * 461 * Inform or close all dependent features in prep for reset. 462 */ 463 static void 464 ice_prepare_for_reset(struct ice_pf *pf) 465 { 466 struct ice_hw *hw = &pf->hw; 467 unsigned int i; 468 469 /* already prepared for reset */ 470 if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) 471 return; 472 473 ice_unplug_aux_dev(pf); 474 475 /* Notify VFs of impending reset */ 476 if (ice_check_sq_alive(hw, &hw->mailboxq)) 477 ice_vc_notify_reset(pf); 478 479 /* Disable VFs until reset is completed */ 480 ice_for_each_vf(pf, i) 481 ice_set_vf_state_qs_dis(&pf->vf[i]); 482 483 /* clear SW filtering DB */ 484 ice_clear_hw_tbls(hw); 485 /* disable the VSIs and their queues that are not already DOWN */ 486 ice_pf_dis_all_vsi(pf, false); 487 488 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 489 ice_ptp_release(pf); 490 491 if (hw->port_info) 492 ice_sched_clear_port(hw->port_info); 493 494 ice_shutdown_all_ctrlq(hw); 495 496 set_bit(ICE_PREPARED_FOR_RESET, pf->state); 497 } 498 499 /** 500 * ice_do_reset - Initiate one of many types of resets 501 * @pf: board private structure 502 * @reset_type: reset type requested 503 * before this function was called. 504 */ 505 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) 506 { 507 struct device *dev = ice_pf_to_dev(pf); 508 struct ice_hw *hw = &pf->hw; 509 510 dev_dbg(dev, "reset_type 0x%x requested\n", reset_type); 511 512 ice_prepare_for_reset(pf); 513 514 /* trigger the reset */ 515 if (ice_reset(hw, reset_type)) { 516 dev_err(dev, "reset %d failed\n", reset_type); 517 set_bit(ICE_RESET_FAILED, pf->state); 518 clear_bit(ICE_RESET_OICR_RECV, pf->state); 519 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); 520 clear_bit(ICE_PFR_REQ, pf->state); 521 clear_bit(ICE_CORER_REQ, pf->state); 522 clear_bit(ICE_GLOBR_REQ, pf->state); 523 wake_up(&pf->reset_wait_queue); 524 return; 525 } 526 527 /* PFR is a bit of a special case because it doesn't result in an OICR 528 * interrupt. So for PFR, rebuild after the reset and clear the reset- 529 * associated state bits. 530 */ 531 if (reset_type == ICE_RESET_PFR) { 532 pf->pfr_count++; 533 ice_rebuild(pf, reset_type); 534 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); 535 clear_bit(ICE_PFR_REQ, pf->state); 536 wake_up(&pf->reset_wait_queue); 537 ice_reset_all_vfs(pf, true); 538 } 539 } 540 541 /** 542 * ice_reset_subtask - Set up for resetting the device and driver 543 * @pf: board private structure 544 */ 545 static void ice_reset_subtask(struct ice_pf *pf) 546 { 547 enum ice_reset_req reset_type = ICE_RESET_INVAL; 548 549 /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an 550 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type 551 * of reset is pending and sets bits in pf->state indicating the reset 552 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set 553 * prepare for pending reset if not already (for PF software-initiated 554 * global resets the software should already be prepared for it as 555 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated 556 * by firmware or software on other PFs, that bit is not set so prepare 557 * for the reset now), poll for reset done, rebuild and return. 558 */ 559 if (test_bit(ICE_RESET_OICR_RECV, pf->state)) { 560 /* Perform the largest reset requested */ 561 if (test_and_clear_bit(ICE_CORER_RECV, pf->state)) 562 reset_type = ICE_RESET_CORER; 563 if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state)) 564 reset_type = ICE_RESET_GLOBR; 565 if (test_and_clear_bit(ICE_EMPR_RECV, pf->state)) 566 reset_type = ICE_RESET_EMPR; 567 /* return if no valid reset type requested */ 568 if (reset_type == ICE_RESET_INVAL) 569 return; 570 ice_prepare_for_reset(pf); 571 572 /* make sure we are ready to rebuild */ 573 if (ice_check_reset(&pf->hw)) { 574 set_bit(ICE_RESET_FAILED, pf->state); 575 } else { 576 /* done with reset. start rebuild */ 577 pf->hw.reset_ongoing = false; 578 ice_rebuild(pf, reset_type); 579 /* clear bit to resume normal operations, but 580 * ICE_NEEDS_RESTART bit is set in case rebuild failed 581 */ 582 clear_bit(ICE_RESET_OICR_RECV, pf->state); 583 clear_bit(ICE_PREPARED_FOR_RESET, pf->state); 584 clear_bit(ICE_PFR_REQ, pf->state); 585 clear_bit(ICE_CORER_REQ, pf->state); 586 clear_bit(ICE_GLOBR_REQ, pf->state); 587 wake_up(&pf->reset_wait_queue); 588 ice_reset_all_vfs(pf, true); 589 } 590 591 return; 592 } 593 594 /* No pending resets to finish processing. Check for new resets */ 595 if (test_bit(ICE_PFR_REQ, pf->state)) 596 reset_type = ICE_RESET_PFR; 597 if (test_bit(ICE_CORER_REQ, pf->state)) 598 reset_type = ICE_RESET_CORER; 599 if (test_bit(ICE_GLOBR_REQ, pf->state)) 600 reset_type = ICE_RESET_GLOBR; 601 /* If no valid reset type requested just return */ 602 if (reset_type == ICE_RESET_INVAL) 603 return; 604 605 /* reset if not already down or busy */ 606 if (!test_bit(ICE_DOWN, pf->state) && 607 !test_bit(ICE_CFG_BUSY, pf->state)) { 608 ice_do_reset(pf, reset_type); 609 } 610 } 611 612 /** 613 * ice_print_topo_conflict - print topology conflict message 614 * @vsi: the VSI whose topology status is being checked 615 */ 616 static void ice_print_topo_conflict(struct ice_vsi *vsi) 617 { 618 switch (vsi->port_info->phy.link_info.topo_media_conflict) { 619 case ICE_AQ_LINK_TOPO_CONFLICT: 620 case ICE_AQ_LINK_MEDIA_CONFLICT: 621 case ICE_AQ_LINK_TOPO_UNREACH_PRT: 622 case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT: 623 case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA: 624 netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n"); 625 break; 626 case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA: 627 netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); 628 break; 629 default: 630 break; 631 } 632 } 633 634 /** 635 * ice_print_link_msg - print link up or down message 636 * @vsi: the VSI whose link status is being queried 637 * @isup: boolean for if the link is now up or down 638 */ 639 void ice_print_link_msg(struct ice_vsi *vsi, bool isup) 640 { 641 struct ice_aqc_get_phy_caps_data *caps; 642 const char *an_advertised; 643 enum ice_status status; 644 const char *fec_req; 645 const char *speed; 646 const char *fec; 647 const char *fc; 648 const char *an; 649 650 if (!vsi) 651 return; 652 653 if (vsi->current_isup == isup) 654 return; 655 656 vsi->current_isup = isup; 657 658 if (!isup) { 659 netdev_info(vsi->netdev, "NIC Link is Down\n"); 660 return; 661 } 662 663 switch (vsi->port_info->phy.link_info.link_speed) { 664 case ICE_AQ_LINK_SPEED_100GB: 665 speed = "100 G"; 666 break; 667 case ICE_AQ_LINK_SPEED_50GB: 668 speed = "50 G"; 669 break; 670 case ICE_AQ_LINK_SPEED_40GB: 671 speed = "40 G"; 672 break; 673 case ICE_AQ_LINK_SPEED_25GB: 674 speed = "25 G"; 675 break; 676 case ICE_AQ_LINK_SPEED_20GB: 677 speed = "20 G"; 678 break; 679 case ICE_AQ_LINK_SPEED_10GB: 680 speed = "10 G"; 681 break; 682 case ICE_AQ_LINK_SPEED_5GB: 683 speed = "5 G"; 684 break; 685 case ICE_AQ_LINK_SPEED_2500MB: 686 speed = "2.5 G"; 687 break; 688 case ICE_AQ_LINK_SPEED_1000MB: 689 speed = "1 G"; 690 break; 691 case ICE_AQ_LINK_SPEED_100MB: 692 speed = "100 M"; 693 break; 694 default: 695 speed = "Unknown "; 696 break; 697 } 698 699 switch (vsi->port_info->fc.current_mode) { 700 case ICE_FC_FULL: 701 fc = "Rx/Tx"; 702 break; 703 case ICE_FC_TX_PAUSE: 704 fc = "Tx"; 705 break; 706 case ICE_FC_RX_PAUSE: 707 fc = "Rx"; 708 break; 709 case ICE_FC_NONE: 710 fc = "None"; 711 break; 712 default: 713 fc = "Unknown"; 714 break; 715 } 716 717 /* Get FEC mode based on negotiated link info */ 718 switch (vsi->port_info->phy.link_info.fec_info) { 719 case ICE_AQ_LINK_25G_RS_528_FEC_EN: 720 case ICE_AQ_LINK_25G_RS_544_FEC_EN: 721 fec = "RS-FEC"; 722 break; 723 case ICE_AQ_LINK_25G_KR_FEC_EN: 724 fec = "FC-FEC/BASE-R"; 725 break; 726 default: 727 fec = "NONE"; 728 break; 729 } 730 731 /* check if autoneg completed, might be false due to not supported */ 732 if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) 733 an = "True"; 734 else 735 an = "False"; 736 737 /* Get FEC mode requested based on PHY caps last SW configuration */ 738 caps = kzalloc(sizeof(*caps), GFP_KERNEL); 739 if (!caps) { 740 fec_req = "Unknown"; 741 an_advertised = "Unknown"; 742 goto done; 743 } 744 745 status = ice_aq_get_phy_caps(vsi->port_info, false, 746 ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); 747 if (status) 748 netdev_info(vsi->netdev, "Get phy capability failed.\n"); 749 750 an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off"; 751 752 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ || 753 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) 754 fec_req = "RS-FEC"; 755 else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ || 756 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ) 757 fec_req = "FC-FEC/BASE-R"; 758 else 759 fec_req = "NONE"; 760 761 kfree(caps); 762 763 done: 764 netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n", 765 speed, fec_req, fec, an_advertised, an, fc); 766 ice_print_topo_conflict(vsi); 767 } 768 769 /** 770 * ice_vsi_link_event - update the VSI's netdev 771 * @vsi: the VSI on which the link event occurred 772 * @link_up: whether or not the VSI needs to be set up or down 773 */ 774 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) 775 { 776 if (!vsi) 777 return; 778 779 if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev) 780 return; 781 782 if (vsi->type == ICE_VSI_PF) { 783 if (link_up == netif_carrier_ok(vsi->netdev)) 784 return; 785 786 if (link_up) { 787 netif_carrier_on(vsi->netdev); 788 netif_tx_wake_all_queues(vsi->netdev); 789 } else { 790 netif_carrier_off(vsi->netdev); 791 netif_tx_stop_all_queues(vsi->netdev); 792 } 793 } 794 } 795 796 /** 797 * ice_set_dflt_mib - send a default config MIB to the FW 798 * @pf: private PF struct 799 * 800 * This function sends a default configuration MIB to the FW. 801 * 802 * If this function errors out at any point, the driver is still able to 803 * function. The main impact is that LFC may not operate as expected. 804 * Therefore an error state in this function should be treated with a DBG 805 * message and continue on with driver rebuild/reenable. 806 */ 807 static void ice_set_dflt_mib(struct ice_pf *pf) 808 { 809 struct device *dev = ice_pf_to_dev(pf); 810 u8 mib_type, *buf, *lldpmib = NULL; 811 u16 len, typelen, offset = 0; 812 struct ice_lldp_org_tlv *tlv; 813 struct ice_hw *hw = &pf->hw; 814 u32 ouisubtype; 815 816 mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; 817 lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL); 818 if (!lldpmib) { 819 dev_dbg(dev, "%s Failed to allocate MIB memory\n", 820 __func__); 821 return; 822 } 823 824 /* Add ETS CFG TLV */ 825 tlv = (struct ice_lldp_org_tlv *)lldpmib; 826 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | 827 ICE_IEEE_ETS_TLV_LEN); 828 tlv->typelen = htons(typelen); 829 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | 830 ICE_IEEE_SUBTYPE_ETS_CFG); 831 tlv->ouisubtype = htonl(ouisubtype); 832 833 buf = tlv->tlvinfo; 834 buf[0] = 0; 835 836 /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0. 837 * Octets 5 - 12 are BW values, set octet 5 to 100% BW. 838 * Octets 13 - 20 are TSA values - leave as zeros 839 */ 840 buf[5] = 0x64; 841 len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; 842 offset += len + 2; 843 tlv = (struct ice_lldp_org_tlv *) 844 ((char *)tlv + sizeof(tlv->typelen) + len); 845 846 /* Add ETS REC TLV */ 847 buf = tlv->tlvinfo; 848 tlv->typelen = htons(typelen); 849 850 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | 851 ICE_IEEE_SUBTYPE_ETS_REC); 852 tlv->ouisubtype = htonl(ouisubtype); 853 854 /* First octet of buf is reserved 855 * Octets 1 - 4 map UP to TC - all UPs map to zero 856 * Octets 5 - 12 are BW values - set TC 0 to 100%. 857 * Octets 13 - 20 are TSA value - leave as zeros 858 */ 859 buf[5] = 0x64; 860 offset += len + 2; 861 tlv = (struct ice_lldp_org_tlv *) 862 ((char *)tlv + sizeof(tlv->typelen) + len); 863 864 /* Add PFC CFG TLV */ 865 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | 866 ICE_IEEE_PFC_TLV_LEN); 867 tlv->typelen = htons(typelen); 868 869 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | 870 ICE_IEEE_SUBTYPE_PFC_CFG); 871 tlv->ouisubtype = htonl(ouisubtype); 872 873 /* Octet 1 left as all zeros - PFC disabled */ 874 buf[0] = 0x08; 875 len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; 876 offset += len + 2; 877 878 if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL)) 879 dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__); 880 881 kfree(lldpmib); 882 } 883 884 /** 885 * ice_check_module_power 886 * @pf: pointer to PF struct 887 * @link_cfg_err: bitmap from the link info structure 888 * 889 * check module power level returned by a previous call to aq_get_link_info 890 * and print error messages if module power level is not supported 891 */ 892 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err) 893 { 894 /* if module power level is supported, clear the flag */ 895 if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT | 896 ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) { 897 clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); 898 return; 899 } 900 901 /* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the 902 * above block didn't clear this bit, there's nothing to do 903 */ 904 if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags)) 905 return; 906 907 if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) { 908 dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n"); 909 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); 910 } else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) { 911 dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n"); 912 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); 913 } 914 } 915 916 /** 917 * ice_link_event - process the link event 918 * @pf: PF that the link event is associated with 919 * @pi: port_info for the port that the link event is associated with 920 * @link_up: true if the physical link is up and false if it is down 921 * @link_speed: current link speed received from the link event 922 * 923 * Returns 0 on success and negative on failure 924 */ 925 static int 926 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, 927 u16 link_speed) 928 { 929 struct device *dev = ice_pf_to_dev(pf); 930 struct ice_phy_info *phy_info; 931 enum ice_status status; 932 struct ice_vsi *vsi; 933 u16 old_link_speed; 934 bool old_link; 935 936 phy_info = &pi->phy; 937 phy_info->link_info_old = phy_info->link_info; 938 939 old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP); 940 old_link_speed = phy_info->link_info_old.link_speed; 941 942 /* update the link info structures and re-enable link events, 943 * don't bail on failure due to other book keeping needed 944 */ 945 status = ice_update_link_info(pi); 946 if (status) 947 dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n", 948 pi->lport, ice_stat_str(status), 949 ice_aq_str(pi->hw->adminq.sq_last_status)); 950 951 ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); 952 953 /* Check if the link state is up after updating link info, and treat 954 * this event as an UP event since the link is actually UP now. 955 */ 956 if (phy_info->link_info.link_info & ICE_AQ_LINK_UP) 957 link_up = true; 958 959 vsi = ice_get_main_vsi(pf); 960 if (!vsi || !vsi->port_info) 961 return -EINVAL; 962 963 /* turn off PHY if media was removed */ 964 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) && 965 !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { 966 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); 967 ice_set_link(vsi, false); 968 } 969 970 /* if the old link up/down and speed is the same as the new */ 971 if (link_up == old_link && link_speed == old_link_speed) 972 return 0; 973 974 if (ice_is_dcb_active(pf)) { 975 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) 976 ice_dcb_rebuild(pf); 977 } else { 978 if (link_up) 979 ice_set_dflt_mib(pf); 980 } 981 ice_vsi_link_event(vsi, link_up); 982 ice_print_link_msg(vsi, link_up); 983 984 ice_vc_notify_link_state(pf); 985 986 return 0; 987 } 988 989 /** 990 * ice_watchdog_subtask - periodic tasks not using event driven scheduling 991 * @pf: board private structure 992 */ 993 static void ice_watchdog_subtask(struct ice_pf *pf) 994 { 995 int i; 996 997 /* if interface is down do nothing */ 998 if (test_bit(ICE_DOWN, pf->state) || 999 test_bit(ICE_CFG_BUSY, pf->state)) 1000 return; 1001 1002 /* make sure we don't do these things too often */ 1003 if (time_before(jiffies, 1004 pf->serv_tmr_prev + pf->serv_tmr_period)) 1005 return; 1006 1007 pf->serv_tmr_prev = jiffies; 1008 1009 /* Update the stats for active netdevs so the network stack 1010 * can look at updated numbers whenever it cares to 1011 */ 1012 ice_update_pf_stats(pf); 1013 ice_for_each_vsi(pf, i) 1014 if (pf->vsi[i] && pf->vsi[i]->netdev) 1015 ice_update_vsi_stats(pf->vsi[i]); 1016 } 1017 1018 /** 1019 * ice_init_link_events - enable/initialize link events 1020 * @pi: pointer to the port_info instance 1021 * 1022 * Returns -EIO on failure, 0 on success 1023 */ 1024 static int ice_init_link_events(struct ice_port_info *pi) 1025 { 1026 u16 mask; 1027 1028 mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA | 1029 ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL)); 1030 1031 if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) { 1032 dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n", 1033 pi->lport); 1034 return -EIO; 1035 } 1036 1037 if (ice_aq_get_link_info(pi, true, NULL, NULL)) { 1038 dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n", 1039 pi->lport); 1040 return -EIO; 1041 } 1042 1043 return 0; 1044 } 1045 1046 /** 1047 * ice_handle_link_event - handle link event via ARQ 1048 * @pf: PF that the link event is associated with 1049 * @event: event structure containing link status info 1050 */ 1051 static int 1052 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) 1053 { 1054 struct ice_aqc_get_link_status_data *link_data; 1055 struct ice_port_info *port_info; 1056 int status; 1057 1058 link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf; 1059 port_info = pf->hw.port_info; 1060 if (!port_info) 1061 return -EINVAL; 1062 1063 status = ice_link_event(pf, port_info, 1064 !!(link_data->link_info & ICE_AQ_LINK_UP), 1065 le16_to_cpu(link_data->link_speed)); 1066 if (status) 1067 dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n", 1068 status); 1069 1070 return status; 1071 } 1072 1073 enum ice_aq_task_state { 1074 ICE_AQ_TASK_WAITING = 0, 1075 ICE_AQ_TASK_COMPLETE, 1076 ICE_AQ_TASK_CANCELED, 1077 }; 1078 1079 struct ice_aq_task { 1080 struct hlist_node entry; 1081 1082 u16 opcode; 1083 struct ice_rq_event_info *event; 1084 enum ice_aq_task_state state; 1085 }; 1086 1087 /** 1088 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware 1089 * @pf: pointer to the PF private structure 1090 * @opcode: the opcode to wait for 1091 * @timeout: how long to wait, in jiffies 1092 * @event: storage for the event info 1093 * 1094 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The 1095 * current thread will be put to sleep until the specified event occurs or 1096 * until the given timeout is reached. 1097 * 1098 * To obtain only the descriptor contents, pass an event without an allocated 1099 * msg_buf. If the complete data buffer is desired, allocate the 1100 * event->msg_buf with enough space ahead of time. 1101 * 1102 * Returns: zero on success, or a negative error code on failure. 1103 */ 1104 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, 1105 struct ice_rq_event_info *event) 1106 { 1107 struct device *dev = ice_pf_to_dev(pf); 1108 struct ice_aq_task *task; 1109 unsigned long start; 1110 long ret; 1111 int err; 1112 1113 task = kzalloc(sizeof(*task), GFP_KERNEL); 1114 if (!task) 1115 return -ENOMEM; 1116 1117 INIT_HLIST_NODE(&task->entry); 1118 task->opcode = opcode; 1119 task->event = event; 1120 task->state = ICE_AQ_TASK_WAITING; 1121 1122 spin_lock_bh(&pf->aq_wait_lock); 1123 hlist_add_head(&task->entry, &pf->aq_wait_list); 1124 spin_unlock_bh(&pf->aq_wait_lock); 1125 1126 start = jiffies; 1127 1128 ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state, 1129 timeout); 1130 switch (task->state) { 1131 case ICE_AQ_TASK_WAITING: 1132 err = ret < 0 ? ret : -ETIMEDOUT; 1133 break; 1134 case ICE_AQ_TASK_CANCELED: 1135 err = ret < 0 ? ret : -ECANCELED; 1136 break; 1137 case ICE_AQ_TASK_COMPLETE: 1138 err = ret < 0 ? ret : 0; 1139 break; 1140 default: 1141 WARN(1, "Unexpected AdminQ wait task state %u", task->state); 1142 err = -EINVAL; 1143 break; 1144 } 1145 1146 dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n", 1147 jiffies_to_msecs(jiffies - start), 1148 jiffies_to_msecs(timeout), 1149 opcode); 1150 1151 spin_lock_bh(&pf->aq_wait_lock); 1152 hlist_del(&task->entry); 1153 spin_unlock_bh(&pf->aq_wait_lock); 1154 kfree(task); 1155 1156 return err; 1157 } 1158 1159 /** 1160 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event 1161 * @pf: pointer to the PF private structure 1162 * @opcode: the opcode of the event 1163 * @event: the event to check 1164 * 1165 * Loops over the current list of pending threads waiting for an AdminQ event. 1166 * For each matching task, copy the contents of the event into the task 1167 * structure and wake up the thread. 1168 * 1169 * If multiple threads wait for the same opcode, they will all be woken up. 1170 * 1171 * Note that event->msg_buf will only be duplicated if the event has a buffer 1172 * with enough space already allocated. Otherwise, only the descriptor and 1173 * message length will be copied. 1174 * 1175 * Returns: true if an event was found, false otherwise 1176 */ 1177 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode, 1178 struct ice_rq_event_info *event) 1179 { 1180 struct ice_aq_task *task; 1181 bool found = false; 1182 1183 spin_lock_bh(&pf->aq_wait_lock); 1184 hlist_for_each_entry(task, &pf->aq_wait_list, entry) { 1185 if (task->state || task->opcode != opcode) 1186 continue; 1187 1188 memcpy(&task->event->desc, &event->desc, sizeof(event->desc)); 1189 task->event->msg_len = event->msg_len; 1190 1191 /* Only copy the data buffer if a destination was set */ 1192 if (task->event->msg_buf && 1193 task->event->buf_len > event->buf_len) { 1194 memcpy(task->event->msg_buf, event->msg_buf, 1195 event->buf_len); 1196 task->event->buf_len = event->buf_len; 1197 } 1198 1199 task->state = ICE_AQ_TASK_COMPLETE; 1200 found = true; 1201 } 1202 spin_unlock_bh(&pf->aq_wait_lock); 1203 1204 if (found) 1205 wake_up(&pf->aq_wait_queue); 1206 } 1207 1208 /** 1209 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks 1210 * @pf: the PF private structure 1211 * 1212 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads. 1213 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED. 1214 */ 1215 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf) 1216 { 1217 struct ice_aq_task *task; 1218 1219 spin_lock_bh(&pf->aq_wait_lock); 1220 hlist_for_each_entry(task, &pf->aq_wait_list, entry) 1221 task->state = ICE_AQ_TASK_CANCELED; 1222 spin_unlock_bh(&pf->aq_wait_lock); 1223 1224 wake_up(&pf->aq_wait_queue); 1225 } 1226 1227 /** 1228 * __ice_clean_ctrlq - helper function to clean controlq rings 1229 * @pf: ptr to struct ice_pf 1230 * @q_type: specific Control queue type 1231 */ 1232 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) 1233 { 1234 struct device *dev = ice_pf_to_dev(pf); 1235 struct ice_rq_event_info event; 1236 struct ice_hw *hw = &pf->hw; 1237 struct ice_ctl_q_info *cq; 1238 u16 pending, i = 0; 1239 const char *qtype; 1240 u32 oldval, val; 1241 1242 /* Do not clean control queue if/when PF reset fails */ 1243 if (test_bit(ICE_RESET_FAILED, pf->state)) 1244 return 0; 1245 1246 switch (q_type) { 1247 case ICE_CTL_Q_ADMIN: 1248 cq = &hw->adminq; 1249 qtype = "Admin"; 1250 break; 1251 case ICE_CTL_Q_SB: 1252 cq = &hw->sbq; 1253 qtype = "Sideband"; 1254 break; 1255 case ICE_CTL_Q_MAILBOX: 1256 cq = &hw->mailboxq; 1257 qtype = "Mailbox"; 1258 /* we are going to try to detect a malicious VF, so set the 1259 * state to begin detection 1260 */ 1261 hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; 1262 break; 1263 default: 1264 dev_warn(dev, "Unknown control queue type 0x%x\n", q_type); 1265 return 0; 1266 } 1267 1268 /* check for error indications - PF_xx_AxQLEN register layout for 1269 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN. 1270 */ 1271 val = rd32(hw, cq->rq.len); 1272 if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | 1273 PF_FW_ARQLEN_ARQCRIT_M)) { 1274 oldval = val; 1275 if (val & PF_FW_ARQLEN_ARQVFE_M) 1276 dev_dbg(dev, "%s Receive Queue VF Error detected\n", 1277 qtype); 1278 if (val & PF_FW_ARQLEN_ARQOVFL_M) { 1279 dev_dbg(dev, "%s Receive Queue Overflow Error detected\n", 1280 qtype); 1281 } 1282 if (val & PF_FW_ARQLEN_ARQCRIT_M) 1283 dev_dbg(dev, "%s Receive Queue Critical Error detected\n", 1284 qtype); 1285 val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | 1286 PF_FW_ARQLEN_ARQCRIT_M); 1287 if (oldval != val) 1288 wr32(hw, cq->rq.len, val); 1289 } 1290 1291 val = rd32(hw, cq->sq.len); 1292 if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | 1293 PF_FW_ATQLEN_ATQCRIT_M)) { 1294 oldval = val; 1295 if (val & PF_FW_ATQLEN_ATQVFE_M) 1296 dev_dbg(dev, "%s Send Queue VF Error detected\n", 1297 qtype); 1298 if (val & PF_FW_ATQLEN_ATQOVFL_M) { 1299 dev_dbg(dev, "%s Send Queue Overflow Error detected\n", 1300 qtype); 1301 } 1302 if (val & PF_FW_ATQLEN_ATQCRIT_M) 1303 dev_dbg(dev, "%s Send Queue Critical Error detected\n", 1304 qtype); 1305 val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | 1306 PF_FW_ATQLEN_ATQCRIT_M); 1307 if (oldval != val) 1308 wr32(hw, cq->sq.len, val); 1309 } 1310 1311 event.buf_len = cq->rq_buf_size; 1312 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 1313 if (!event.msg_buf) 1314 return 0; 1315 1316 do { 1317 enum ice_status ret; 1318 u16 opcode; 1319 1320 ret = ice_clean_rq_elem(hw, cq, &event, &pending); 1321 if (ret == ICE_ERR_AQ_NO_WORK) 1322 break; 1323 if (ret) { 1324 dev_err(dev, "%s Receive Queue event error %s\n", qtype, 1325 ice_stat_str(ret)); 1326 break; 1327 } 1328 1329 opcode = le16_to_cpu(event.desc.opcode); 1330 1331 /* Notify any thread that might be waiting for this event */ 1332 ice_aq_check_events(pf, opcode, &event); 1333 1334 switch (opcode) { 1335 case ice_aqc_opc_get_link_status: 1336 if (ice_handle_link_event(pf, &event)) 1337 dev_err(dev, "Could not handle link event\n"); 1338 break; 1339 case ice_aqc_opc_event_lan_overflow: 1340 ice_vf_lan_overflow_event(pf, &event); 1341 break; 1342 case ice_mbx_opc_send_msg_to_pf: 1343 if (!ice_is_malicious_vf(pf, &event, i, pending)) 1344 ice_vc_process_vf_msg(pf, &event); 1345 break; 1346 case ice_aqc_opc_fw_logging: 1347 ice_output_fw_log(hw, &event.desc, event.msg_buf); 1348 break; 1349 case ice_aqc_opc_lldp_set_mib_change: 1350 ice_dcb_process_lldp_set_mib_change(pf, &event); 1351 break; 1352 default: 1353 dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n", 1354 qtype, opcode); 1355 break; 1356 } 1357 } while (pending && (i++ < ICE_DFLT_IRQ_WORK)); 1358 1359 kfree(event.msg_buf); 1360 1361 return pending && (i == ICE_DFLT_IRQ_WORK); 1362 } 1363 1364 /** 1365 * ice_ctrlq_pending - check if there is a difference between ntc and ntu 1366 * @hw: pointer to hardware info 1367 * @cq: control queue information 1368 * 1369 * returns true if there are pending messages in a queue, false if there aren't 1370 */ 1371 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq) 1372 { 1373 u16 ntu; 1374 1375 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); 1376 return cq->rq.next_to_clean != ntu; 1377 } 1378 1379 /** 1380 * ice_clean_adminq_subtask - clean the AdminQ rings 1381 * @pf: board private structure 1382 */ 1383 static void ice_clean_adminq_subtask(struct ice_pf *pf) 1384 { 1385 struct ice_hw *hw = &pf->hw; 1386 1387 if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) 1388 return; 1389 1390 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN)) 1391 return; 1392 1393 clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); 1394 1395 /* There might be a situation where new messages arrive to a control 1396 * queue between processing the last message and clearing the 1397 * EVENT_PENDING bit. So before exiting, check queue head again (using 1398 * ice_ctrlq_pending) and process new messages if any. 1399 */ 1400 if (ice_ctrlq_pending(hw, &hw->adminq)) 1401 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN); 1402 1403 ice_flush(hw); 1404 } 1405 1406 /** 1407 * ice_clean_mailboxq_subtask - clean the MailboxQ rings 1408 * @pf: board private structure 1409 */ 1410 static void ice_clean_mailboxq_subtask(struct ice_pf *pf) 1411 { 1412 struct ice_hw *hw = &pf->hw; 1413 1414 if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state)) 1415 return; 1416 1417 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) 1418 return; 1419 1420 clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); 1421 1422 if (ice_ctrlq_pending(hw, &hw->mailboxq)) 1423 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); 1424 1425 ice_flush(hw); 1426 } 1427 1428 /** 1429 * ice_clean_sbq_subtask - clean the Sideband Queue rings 1430 * @pf: board private structure 1431 */ 1432 static void ice_clean_sbq_subtask(struct ice_pf *pf) 1433 { 1434 struct ice_hw *hw = &pf->hw; 1435 1436 /* Nothing to do here if sideband queue is not supported */ 1437 if (!ice_is_sbq_supported(hw)) { 1438 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); 1439 return; 1440 } 1441 1442 if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state)) 1443 return; 1444 1445 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB)) 1446 return; 1447 1448 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); 1449 1450 if (ice_ctrlq_pending(hw, &hw->sbq)) 1451 __ice_clean_ctrlq(pf, ICE_CTL_Q_SB); 1452 1453 ice_flush(hw); 1454 } 1455 1456 /** 1457 * ice_service_task_schedule - schedule the service task to wake up 1458 * @pf: board private structure 1459 * 1460 * If not already scheduled, this puts the task into the work queue. 1461 */ 1462 void ice_service_task_schedule(struct ice_pf *pf) 1463 { 1464 if (!test_bit(ICE_SERVICE_DIS, pf->state) && 1465 !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) && 1466 !test_bit(ICE_NEEDS_RESTART, pf->state)) 1467 queue_work(ice_wq, &pf->serv_task); 1468 } 1469 1470 /** 1471 * ice_service_task_complete - finish up the service task 1472 * @pf: board private structure 1473 */ 1474 static void ice_service_task_complete(struct ice_pf *pf) 1475 { 1476 WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state)); 1477 1478 /* force memory (pf->state) to sync before next service task */ 1479 smp_mb__before_atomic(); 1480 clear_bit(ICE_SERVICE_SCHED, pf->state); 1481 } 1482 1483 /** 1484 * ice_service_task_stop - stop service task and cancel works 1485 * @pf: board private structure 1486 * 1487 * Return 0 if the ICE_SERVICE_DIS bit was not already set, 1488 * 1 otherwise. 1489 */ 1490 static int ice_service_task_stop(struct ice_pf *pf) 1491 { 1492 int ret; 1493 1494 ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state); 1495 1496 if (pf->serv_tmr.function) 1497 del_timer_sync(&pf->serv_tmr); 1498 if (pf->serv_task.func) 1499 cancel_work_sync(&pf->serv_task); 1500 1501 clear_bit(ICE_SERVICE_SCHED, pf->state); 1502 return ret; 1503 } 1504 1505 /** 1506 * ice_service_task_restart - restart service task and schedule works 1507 * @pf: board private structure 1508 * 1509 * This function is needed for suspend and resume works (e.g WoL scenario) 1510 */ 1511 static void ice_service_task_restart(struct ice_pf *pf) 1512 { 1513 clear_bit(ICE_SERVICE_DIS, pf->state); 1514 ice_service_task_schedule(pf); 1515 } 1516 1517 /** 1518 * ice_service_timer - timer callback to schedule service task 1519 * @t: pointer to timer_list 1520 */ 1521 static void ice_service_timer(struct timer_list *t) 1522 { 1523 struct ice_pf *pf = from_timer(pf, t, serv_tmr); 1524 1525 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies)); 1526 ice_service_task_schedule(pf); 1527 } 1528 1529 /** 1530 * ice_handle_mdd_event - handle malicious driver detect event 1531 * @pf: pointer to the PF structure 1532 * 1533 * Called from service task. OICR interrupt handler indicates MDD event. 1534 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log 1535 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events 1536 * disable the queue, the PF can be configured to reset the VF using ethtool 1537 * private flag mdd-auto-reset-vf. 1538 */ 1539 static void ice_handle_mdd_event(struct ice_pf *pf) 1540 { 1541 struct device *dev = ice_pf_to_dev(pf); 1542 struct ice_hw *hw = &pf->hw; 1543 unsigned int i; 1544 u32 reg; 1545 1546 if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) { 1547 /* Since the VF MDD event logging is rate limited, check if 1548 * there are pending MDD events. 1549 */ 1550 ice_print_vfs_mdd_events(pf); 1551 return; 1552 } 1553 1554 /* find what triggered an MDD event */ 1555 reg = rd32(hw, GL_MDET_TX_PQM); 1556 if (reg & GL_MDET_TX_PQM_VALID_M) { 1557 u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> 1558 GL_MDET_TX_PQM_PF_NUM_S; 1559 u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> 1560 GL_MDET_TX_PQM_VF_NUM_S; 1561 u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> 1562 GL_MDET_TX_PQM_MAL_TYPE_S; 1563 u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >> 1564 GL_MDET_TX_PQM_QNUM_S); 1565 1566 if (netif_msg_tx_err(pf)) 1567 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", 1568 event, queue, pf_num, vf_num); 1569 wr32(hw, GL_MDET_TX_PQM, 0xffffffff); 1570 } 1571 1572 reg = rd32(hw, GL_MDET_TX_TCLAN); 1573 if (reg & GL_MDET_TX_TCLAN_VALID_M) { 1574 u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> 1575 GL_MDET_TX_TCLAN_PF_NUM_S; 1576 u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> 1577 GL_MDET_TX_TCLAN_VF_NUM_S; 1578 u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> 1579 GL_MDET_TX_TCLAN_MAL_TYPE_S; 1580 u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >> 1581 GL_MDET_TX_TCLAN_QNUM_S); 1582 1583 if (netif_msg_tx_err(pf)) 1584 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", 1585 event, queue, pf_num, vf_num); 1586 wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); 1587 } 1588 1589 reg = rd32(hw, GL_MDET_RX); 1590 if (reg & GL_MDET_RX_VALID_M) { 1591 u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> 1592 GL_MDET_RX_PF_NUM_S; 1593 u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> 1594 GL_MDET_RX_VF_NUM_S; 1595 u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> 1596 GL_MDET_RX_MAL_TYPE_S; 1597 u16 queue = ((reg & GL_MDET_RX_QNUM_M) >> 1598 GL_MDET_RX_QNUM_S); 1599 1600 if (netif_msg_rx_err(pf)) 1601 dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", 1602 event, queue, pf_num, vf_num); 1603 wr32(hw, GL_MDET_RX, 0xffffffff); 1604 } 1605 1606 /* check to see if this PF caused an MDD event */ 1607 reg = rd32(hw, PF_MDET_TX_PQM); 1608 if (reg & PF_MDET_TX_PQM_VALID_M) { 1609 wr32(hw, PF_MDET_TX_PQM, 0xFFFF); 1610 if (netif_msg_tx_err(pf)) 1611 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n"); 1612 } 1613 1614 reg = rd32(hw, PF_MDET_TX_TCLAN); 1615 if (reg & PF_MDET_TX_TCLAN_VALID_M) { 1616 wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); 1617 if (netif_msg_tx_err(pf)) 1618 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n"); 1619 } 1620 1621 reg = rd32(hw, PF_MDET_RX); 1622 if (reg & PF_MDET_RX_VALID_M) { 1623 wr32(hw, PF_MDET_RX, 0xFFFF); 1624 if (netif_msg_rx_err(pf)) 1625 dev_info(dev, "Malicious Driver Detection event RX detected on PF\n"); 1626 } 1627 1628 /* Check to see if one of the VFs caused an MDD event, and then 1629 * increment counters and set print pending 1630 */ 1631 ice_for_each_vf(pf, i) { 1632 struct ice_vf *vf = &pf->vf[i]; 1633 1634 reg = rd32(hw, VP_MDET_TX_PQM(i)); 1635 if (reg & VP_MDET_TX_PQM_VALID_M) { 1636 wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); 1637 vf->mdd_tx_events.count++; 1638 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); 1639 if (netif_msg_tx_err(pf)) 1640 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n", 1641 i); 1642 } 1643 1644 reg = rd32(hw, VP_MDET_TX_TCLAN(i)); 1645 if (reg & VP_MDET_TX_TCLAN_VALID_M) { 1646 wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); 1647 vf->mdd_tx_events.count++; 1648 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); 1649 if (netif_msg_tx_err(pf)) 1650 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", 1651 i); 1652 } 1653 1654 reg = rd32(hw, VP_MDET_TX_TDPU(i)); 1655 if (reg & VP_MDET_TX_TDPU_VALID_M) { 1656 wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); 1657 vf->mdd_tx_events.count++; 1658 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); 1659 if (netif_msg_tx_err(pf)) 1660 dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", 1661 i); 1662 } 1663 1664 reg = rd32(hw, VP_MDET_RX(i)); 1665 if (reg & VP_MDET_RX_VALID_M) { 1666 wr32(hw, VP_MDET_RX(i), 0xFFFF); 1667 vf->mdd_rx_events.count++; 1668 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); 1669 if (netif_msg_rx_err(pf)) 1670 dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", 1671 i); 1672 1673 /* Since the queue is disabled on VF Rx MDD events, the 1674 * PF can be configured to reset the VF through ethtool 1675 * private flag mdd-auto-reset-vf. 1676 */ 1677 if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { 1678 /* VF MDD event counters will be cleared by 1679 * reset, so print the event prior to reset. 1680 */ 1681 ice_print_vf_rx_mdd_event(vf); 1682 ice_reset_vf(&pf->vf[i], false); 1683 } 1684 } 1685 } 1686 1687 ice_print_vfs_mdd_events(pf); 1688 } 1689 1690 /** 1691 * ice_force_phys_link_state - Force the physical link state 1692 * @vsi: VSI to force the physical link state to up/down 1693 * @link_up: true/false indicates to set the physical link to up/down 1694 * 1695 * Force the physical link state by getting the current PHY capabilities from 1696 * hardware and setting the PHY config based on the determined capabilities. If 1697 * link changes a link event will be triggered because both the Enable Automatic 1698 * Link Update and LESM Enable bits are set when setting the PHY capabilities. 1699 * 1700 * Returns 0 on success, negative on failure 1701 */ 1702 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) 1703 { 1704 struct ice_aqc_get_phy_caps_data *pcaps; 1705 struct ice_aqc_set_phy_cfg_data *cfg; 1706 struct ice_port_info *pi; 1707 struct device *dev; 1708 int retcode; 1709 1710 if (!vsi || !vsi->port_info || !vsi->back) 1711 return -EINVAL; 1712 if (vsi->type != ICE_VSI_PF) 1713 return 0; 1714 1715 dev = ice_pf_to_dev(vsi->back); 1716 1717 pi = vsi->port_info; 1718 1719 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 1720 if (!pcaps) 1721 return -ENOMEM; 1722 1723 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, 1724 NULL); 1725 if (retcode) { 1726 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n", 1727 vsi->vsi_num, retcode); 1728 retcode = -EIO; 1729 goto out; 1730 } 1731 1732 /* No change in link */ 1733 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && 1734 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) 1735 goto out; 1736 1737 /* Use the current user PHY configuration. The current user PHY 1738 * configuration is initialized during probe from PHY capabilities 1739 * software mode, and updated on set PHY configuration. 1740 */ 1741 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL); 1742 if (!cfg) { 1743 retcode = -ENOMEM; 1744 goto out; 1745 } 1746 1747 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 1748 if (link_up) 1749 cfg->caps |= ICE_AQ_PHY_ENA_LINK; 1750 else 1751 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; 1752 1753 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); 1754 if (retcode) { 1755 dev_err(dev, "Failed to set phy config, VSI %d error %d\n", 1756 vsi->vsi_num, retcode); 1757 retcode = -EIO; 1758 } 1759 1760 kfree(cfg); 1761 out: 1762 kfree(pcaps); 1763 return retcode; 1764 } 1765 1766 /** 1767 * ice_init_nvm_phy_type - Initialize the NVM PHY type 1768 * @pi: port info structure 1769 * 1770 * Initialize nvm_phy_type_[low|high] for link lenient mode support 1771 */ 1772 static int ice_init_nvm_phy_type(struct ice_port_info *pi) 1773 { 1774 struct ice_aqc_get_phy_caps_data *pcaps; 1775 struct ice_pf *pf = pi->hw->back; 1776 enum ice_status status; 1777 int err = 0; 1778 1779 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 1780 if (!pcaps) 1781 return -ENOMEM; 1782 1783 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps, 1784 NULL); 1785 1786 if (status) { 1787 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); 1788 err = -EIO; 1789 goto out; 1790 } 1791 1792 pf->nvm_phy_type_hi = pcaps->phy_type_high; 1793 pf->nvm_phy_type_lo = pcaps->phy_type_low; 1794 1795 out: 1796 kfree(pcaps); 1797 return err; 1798 } 1799 1800 /** 1801 * ice_init_link_dflt_override - Initialize link default override 1802 * @pi: port info structure 1803 * 1804 * Initialize link default override and PHY total port shutdown during probe 1805 */ 1806 static void ice_init_link_dflt_override(struct ice_port_info *pi) 1807 { 1808 struct ice_link_default_override_tlv *ldo; 1809 struct ice_pf *pf = pi->hw->back; 1810 1811 ldo = &pf->link_dflt_override; 1812 if (ice_get_link_default_override(ldo, pi)) 1813 return; 1814 1815 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) 1816 return; 1817 1818 /* Enable Total Port Shutdown (override/replace link-down-on-close 1819 * ethtool private flag) for ports with Port Disable bit set. 1820 */ 1821 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); 1822 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); 1823 } 1824 1825 /** 1826 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings 1827 * @pi: port info structure 1828 * 1829 * If default override is enabled, initialize the user PHY cfg speed and FEC 1830 * settings using the default override mask from the NVM. 1831 * 1832 * The PHY should only be configured with the default override settings the 1833 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state 1834 * is used to indicate that the user PHY cfg default override is initialized 1835 * and the PHY has not been configured with the default override settings. The 1836 * state is set here, and cleared in ice_configure_phy the first time the PHY is 1837 * configured. 1838 * 1839 * This function should be called only if the FW doesn't support default 1840 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg. 1841 */ 1842 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi) 1843 { 1844 struct ice_link_default_override_tlv *ldo; 1845 struct ice_aqc_set_phy_cfg_data *cfg; 1846 struct ice_phy_info *phy = &pi->phy; 1847 struct ice_pf *pf = pi->hw->back; 1848 1849 ldo = &pf->link_dflt_override; 1850 1851 /* If link default override is enabled, use to mask NVM PHY capabilities 1852 * for speed and FEC default configuration. 1853 */ 1854 cfg = &phy->curr_user_phy_cfg; 1855 1856 if (ldo->phy_type_low || ldo->phy_type_high) { 1857 cfg->phy_type_low = pf->nvm_phy_type_lo & 1858 cpu_to_le64(ldo->phy_type_low); 1859 cfg->phy_type_high = pf->nvm_phy_type_hi & 1860 cpu_to_le64(ldo->phy_type_high); 1861 } 1862 cfg->link_fec_opt = ldo->fec_options; 1863 phy->curr_user_fec_req = ICE_FEC_AUTO; 1864 1865 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); 1866 } 1867 1868 /** 1869 * ice_init_phy_user_cfg - Initialize the PHY user configuration 1870 * @pi: port info structure 1871 * 1872 * Initialize the current user PHY configuration, speed, FEC, and FC requested 1873 * mode to default. The PHY defaults are from get PHY capabilities topology 1874 * with media so call when media is first available. An error is returned if 1875 * called when media is not available. The PHY initialization completed state is 1876 * set here. 1877 * 1878 * These configurations are used when setting PHY 1879 * configuration. The user PHY configuration is updated on set PHY 1880 * configuration. Returns 0 on success, negative on failure 1881 */ 1882 static int ice_init_phy_user_cfg(struct ice_port_info *pi) 1883 { 1884 struct ice_aqc_get_phy_caps_data *pcaps; 1885 struct ice_phy_info *phy = &pi->phy; 1886 struct ice_pf *pf = pi->hw->back; 1887 enum ice_status status; 1888 int err = 0; 1889 1890 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) 1891 return -EIO; 1892 1893 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 1894 if (!pcaps) 1895 return -ENOMEM; 1896 1897 if (ice_fw_supports_report_dflt_cfg(pi->hw)) 1898 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, 1899 pcaps, NULL); 1900 else 1901 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 1902 pcaps, NULL); 1903 if (status) { 1904 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); 1905 err = -EIO; 1906 goto err_out; 1907 } 1908 1909 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg); 1910 1911 /* check if lenient mode is supported and enabled */ 1912 if (ice_fw_supports_link_override(pi->hw) && 1913 !(pcaps->module_compliance_enforcement & 1914 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) { 1915 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); 1916 1917 /* if the FW supports default PHY configuration mode, then the driver 1918 * does not have to apply link override settings. If not, 1919 * initialize user PHY configuration with link override values 1920 */ 1921 if (!ice_fw_supports_report_dflt_cfg(pi->hw) && 1922 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) { 1923 ice_init_phy_cfg_dflt_override(pi); 1924 goto out; 1925 } 1926 } 1927 1928 /* if link default override is not enabled, set user flow control and 1929 * FEC settings based on what get_phy_caps returned 1930 */ 1931 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps, 1932 pcaps->link_fec_options); 1933 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps); 1934 1935 out: 1936 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; 1937 set_bit(ICE_PHY_INIT_COMPLETE, pf->state); 1938 err_out: 1939 kfree(pcaps); 1940 return err; 1941 } 1942 1943 /** 1944 * ice_configure_phy - configure PHY 1945 * @vsi: VSI of PHY 1946 * 1947 * Set the PHY configuration. If the current PHY configuration is the same as 1948 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise 1949 * configure the based get PHY capabilities for topology with media. 1950 */ 1951 static int ice_configure_phy(struct ice_vsi *vsi) 1952 { 1953 struct device *dev = ice_pf_to_dev(vsi->back); 1954 struct ice_port_info *pi = vsi->port_info; 1955 struct ice_aqc_get_phy_caps_data *pcaps; 1956 struct ice_aqc_set_phy_cfg_data *cfg; 1957 struct ice_phy_info *phy = &pi->phy; 1958 struct ice_pf *pf = vsi->back; 1959 enum ice_status status; 1960 int err = 0; 1961 1962 /* Ensure we have media as we cannot configure a medialess port */ 1963 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) 1964 return -EPERM; 1965 1966 ice_print_topo_conflict(vsi); 1967 1968 if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) 1969 return -EPERM; 1970 1971 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) 1972 return ice_force_phys_link_state(vsi, true); 1973 1974 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 1975 if (!pcaps) 1976 return -ENOMEM; 1977 1978 /* Get current PHY config */ 1979 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, 1980 NULL); 1981 if (status) { 1982 dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n", 1983 vsi->vsi_num, ice_stat_str(status)); 1984 err = -EIO; 1985 goto done; 1986 } 1987 1988 /* If PHY enable link is configured and configuration has not changed, 1989 * there's nothing to do 1990 */ 1991 if (pcaps->caps & ICE_AQC_PHY_EN_LINK && 1992 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg)) 1993 goto done; 1994 1995 /* Use PHY topology as baseline for configuration */ 1996 memset(pcaps, 0, sizeof(*pcaps)); 1997 if (ice_fw_supports_report_dflt_cfg(pi->hw)) 1998 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, 1999 pcaps, NULL); 2000 else 2001 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 2002 pcaps, NULL); 2003 if (status) { 2004 dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n", 2005 vsi->vsi_num, ice_stat_str(status)); 2006 err = -EIO; 2007 goto done; 2008 } 2009 2010 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); 2011 if (!cfg) { 2012 err = -ENOMEM; 2013 goto done; 2014 } 2015 2016 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg); 2017 2018 /* Speed - If default override pending, use curr_user_phy_cfg set in 2019 * ice_init_phy_user_cfg_ldo. 2020 */ 2021 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, 2022 vsi->back->state)) { 2023 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low; 2024 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high; 2025 } else { 2026 u64 phy_low = 0, phy_high = 0; 2027 2028 ice_update_phy_type(&phy_low, &phy_high, 2029 pi->phy.curr_user_speed_req); 2030 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); 2031 cfg->phy_type_high = pcaps->phy_type_high & 2032 cpu_to_le64(phy_high); 2033 } 2034 2035 /* Can't provide what was requested; use PHY capabilities */ 2036 if (!cfg->phy_type_low && !cfg->phy_type_high) { 2037 cfg->phy_type_low = pcaps->phy_type_low; 2038 cfg->phy_type_high = pcaps->phy_type_high; 2039 } 2040 2041 /* FEC */ 2042 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req); 2043 2044 /* Can't provide what was requested; use PHY capabilities */ 2045 if (cfg->link_fec_opt != 2046 (cfg->link_fec_opt & pcaps->link_fec_options)) { 2047 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 2048 cfg->link_fec_opt = pcaps->link_fec_options; 2049 } 2050 2051 /* Flow Control - always supported; no need to check against 2052 * capabilities 2053 */ 2054 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req); 2055 2056 /* Enable link and link update */ 2057 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; 2058 2059 status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); 2060 if (status) { 2061 dev_err(dev, "Failed to set phy config, VSI %d error %s\n", 2062 vsi->vsi_num, ice_stat_str(status)); 2063 err = -EIO; 2064 } 2065 2066 kfree(cfg); 2067 done: 2068 kfree(pcaps); 2069 return err; 2070 } 2071 2072 /** 2073 * ice_check_media_subtask - Check for media 2074 * @pf: pointer to PF struct 2075 * 2076 * If media is available, then initialize PHY user configuration if it is not 2077 * been, and configure the PHY if the interface is up. 2078 */ 2079 static void ice_check_media_subtask(struct ice_pf *pf) 2080 { 2081 struct ice_port_info *pi; 2082 struct ice_vsi *vsi; 2083 int err; 2084 2085 /* No need to check for media if it's already present */ 2086 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) 2087 return; 2088 2089 vsi = ice_get_main_vsi(pf); 2090 if (!vsi) 2091 return; 2092 2093 /* Refresh link info and check if media is present */ 2094 pi = vsi->port_info; 2095 err = ice_update_link_info(pi); 2096 if (err) 2097 return; 2098 2099 ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); 2100 2101 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 2102 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) 2103 ice_init_phy_user_cfg(pi); 2104 2105 /* PHY settings are reset on media insertion, reconfigure 2106 * PHY to preserve settings. 2107 */ 2108 if (test_bit(ICE_VSI_DOWN, vsi->state) && 2109 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) 2110 return; 2111 2112 err = ice_configure_phy(vsi); 2113 if (!err) 2114 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); 2115 2116 /* A Link Status Event will be generated; the event handler 2117 * will complete bringing the interface up 2118 */ 2119 } 2120 } 2121 2122 /** 2123 * ice_service_task - manage and run subtasks 2124 * @work: pointer to work_struct contained by the PF struct 2125 */ 2126 static void ice_service_task(struct work_struct *work) 2127 { 2128 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); 2129 unsigned long start_time = jiffies; 2130 2131 /* subtasks */ 2132 2133 /* process reset requests first */ 2134 ice_reset_subtask(pf); 2135 2136 /* bail if a reset/recovery cycle is pending or rebuild failed */ 2137 if (ice_is_reset_in_progress(pf->state) || 2138 test_bit(ICE_SUSPENDED, pf->state) || 2139 test_bit(ICE_NEEDS_RESTART, pf->state)) { 2140 ice_service_task_complete(pf); 2141 return; 2142 } 2143 2144 ice_clean_adminq_subtask(pf); 2145 ice_check_media_subtask(pf); 2146 ice_check_for_hang_subtask(pf); 2147 ice_sync_fltr_subtask(pf); 2148 ice_handle_mdd_event(pf); 2149 ice_watchdog_subtask(pf); 2150 2151 if (ice_is_safe_mode(pf)) { 2152 ice_service_task_complete(pf); 2153 return; 2154 } 2155 2156 ice_process_vflr_event(pf); 2157 ice_clean_mailboxq_subtask(pf); 2158 ice_clean_sbq_subtask(pf); 2159 ice_sync_arfs_fltrs(pf); 2160 ice_flush_fdir_ctx(pf); 2161 2162 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */ 2163 ice_service_task_complete(pf); 2164 2165 /* If the tasks have taken longer than one service timer period 2166 * or there is more work to be done, reset the service timer to 2167 * schedule the service task now. 2168 */ 2169 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || 2170 test_bit(ICE_MDD_EVENT_PENDING, pf->state) || 2171 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || 2172 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || 2173 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || 2174 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || 2175 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) 2176 mod_timer(&pf->serv_tmr, jiffies); 2177 } 2178 2179 /** 2180 * ice_set_ctrlq_len - helper function to set controlq length 2181 * @hw: pointer to the HW instance 2182 */ 2183 static void ice_set_ctrlq_len(struct ice_hw *hw) 2184 { 2185 hw->adminq.num_rq_entries = ICE_AQ_LEN; 2186 hw->adminq.num_sq_entries = ICE_AQ_LEN; 2187 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; 2188 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; 2189 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; 2190 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; 2191 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2192 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2193 hw->sbq.num_rq_entries = ICE_SBQ_LEN; 2194 hw->sbq.num_sq_entries = ICE_SBQ_LEN; 2195 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2196 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2197 } 2198 2199 /** 2200 * ice_schedule_reset - schedule a reset 2201 * @pf: board private structure 2202 * @reset: reset being requested 2203 */ 2204 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) 2205 { 2206 struct device *dev = ice_pf_to_dev(pf); 2207 2208 /* bail out if earlier reset has failed */ 2209 if (test_bit(ICE_RESET_FAILED, pf->state)) { 2210 dev_dbg(dev, "earlier reset has failed\n"); 2211 return -EIO; 2212 } 2213 /* bail if reset/recovery already in progress */ 2214 if (ice_is_reset_in_progress(pf->state)) { 2215 dev_dbg(dev, "Reset already in progress\n"); 2216 return -EBUSY; 2217 } 2218 2219 ice_unplug_aux_dev(pf); 2220 2221 switch (reset) { 2222 case ICE_RESET_PFR: 2223 set_bit(ICE_PFR_REQ, pf->state); 2224 break; 2225 case ICE_RESET_CORER: 2226 set_bit(ICE_CORER_REQ, pf->state); 2227 break; 2228 case ICE_RESET_GLOBR: 2229 set_bit(ICE_GLOBR_REQ, pf->state); 2230 break; 2231 default: 2232 return -EINVAL; 2233 } 2234 2235 ice_service_task_schedule(pf); 2236 return 0; 2237 } 2238 2239 /** 2240 * ice_irq_affinity_notify - Callback for affinity changes 2241 * @notify: context as to what irq was changed 2242 * @mask: the new affinity mask 2243 * 2244 * This is a callback function used by the irq_set_affinity_notifier function 2245 * so that we may register to receive changes to the irq affinity masks. 2246 */ 2247 static void 2248 ice_irq_affinity_notify(struct irq_affinity_notify *notify, 2249 const cpumask_t *mask) 2250 { 2251 struct ice_q_vector *q_vector = 2252 container_of(notify, struct ice_q_vector, affinity_notify); 2253 2254 cpumask_copy(&q_vector->affinity_mask, mask); 2255 } 2256 2257 /** 2258 * ice_irq_affinity_release - Callback for affinity notifier release 2259 * @ref: internal core kernel usage 2260 * 2261 * This is a callback function used by the irq_set_affinity_notifier function 2262 * to inform the current notification subscriber that they will no longer 2263 * receive notifications. 2264 */ 2265 static void ice_irq_affinity_release(struct kref __always_unused *ref) {} 2266 2267 /** 2268 * ice_vsi_ena_irq - Enable IRQ for the given VSI 2269 * @vsi: the VSI being configured 2270 */ 2271 static int ice_vsi_ena_irq(struct ice_vsi *vsi) 2272 { 2273 struct ice_hw *hw = &vsi->back->hw; 2274 int i; 2275 2276 ice_for_each_q_vector(vsi, i) 2277 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); 2278 2279 ice_flush(hw); 2280 return 0; 2281 } 2282 2283 /** 2284 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI 2285 * @vsi: the VSI being configured 2286 * @basename: name for the vector 2287 */ 2288 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) 2289 { 2290 int q_vectors = vsi->num_q_vectors; 2291 struct ice_pf *pf = vsi->back; 2292 int base = vsi->base_vector; 2293 struct device *dev; 2294 int rx_int_idx = 0; 2295 int tx_int_idx = 0; 2296 int vector, err; 2297 int irq_num; 2298 2299 dev = ice_pf_to_dev(pf); 2300 for (vector = 0; vector < q_vectors; vector++) { 2301 struct ice_q_vector *q_vector = vsi->q_vectors[vector]; 2302 2303 irq_num = pf->msix_entries[base + vector].vector; 2304 2305 if (q_vector->tx.ring && q_vector->rx.ring) { 2306 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2307 "%s-%s-%d", basename, "TxRx", rx_int_idx++); 2308 tx_int_idx++; 2309 } else if (q_vector->rx.ring) { 2310 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2311 "%s-%s-%d", basename, "rx", rx_int_idx++); 2312 } else if (q_vector->tx.ring) { 2313 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2314 "%s-%s-%d", basename, "tx", tx_int_idx++); 2315 } else { 2316 /* skip this unused q_vector */ 2317 continue; 2318 } 2319 if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) 2320 err = devm_request_irq(dev, irq_num, vsi->irq_handler, 2321 IRQF_SHARED, q_vector->name, 2322 q_vector); 2323 else 2324 err = devm_request_irq(dev, irq_num, vsi->irq_handler, 2325 0, q_vector->name, q_vector); 2326 if (err) { 2327 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", 2328 err); 2329 goto free_q_irqs; 2330 } 2331 2332 /* register for affinity change notifications */ 2333 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { 2334 struct irq_affinity_notify *affinity_notify; 2335 2336 affinity_notify = &q_vector->affinity_notify; 2337 affinity_notify->notify = ice_irq_affinity_notify; 2338 affinity_notify->release = ice_irq_affinity_release; 2339 irq_set_affinity_notifier(irq_num, affinity_notify); 2340 } 2341 2342 /* assign the mask for this irq */ 2343 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); 2344 } 2345 2346 vsi->irqs_ready = true; 2347 return 0; 2348 2349 free_q_irqs: 2350 while (vector) { 2351 vector--; 2352 irq_num = pf->msix_entries[base + vector].vector; 2353 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) 2354 irq_set_affinity_notifier(irq_num, NULL); 2355 irq_set_affinity_hint(irq_num, NULL); 2356 devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); 2357 } 2358 return err; 2359 } 2360 2361 /** 2362 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP 2363 * @vsi: VSI to setup Tx rings used by XDP 2364 * 2365 * Return 0 on success and negative value on error 2366 */ 2367 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) 2368 { 2369 struct device *dev = ice_pf_to_dev(vsi->back); 2370 int i; 2371 2372 for (i = 0; i < vsi->num_xdp_txq; i++) { 2373 u16 xdp_q_idx = vsi->alloc_txq + i; 2374 struct ice_ring *xdp_ring; 2375 2376 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); 2377 2378 if (!xdp_ring) 2379 goto free_xdp_rings; 2380 2381 xdp_ring->q_index = xdp_q_idx; 2382 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; 2383 xdp_ring->ring_active = false; 2384 xdp_ring->vsi = vsi; 2385 xdp_ring->netdev = NULL; 2386 xdp_ring->dev = dev; 2387 xdp_ring->count = vsi->num_tx_desc; 2388 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); 2389 if (ice_setup_tx_ring(xdp_ring)) 2390 goto free_xdp_rings; 2391 ice_set_ring_xdp(xdp_ring); 2392 xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring); 2393 } 2394 2395 return 0; 2396 2397 free_xdp_rings: 2398 for (; i >= 0; i--) 2399 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) 2400 ice_free_tx_ring(vsi->xdp_rings[i]); 2401 return -ENOMEM; 2402 } 2403 2404 /** 2405 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI 2406 * @vsi: VSI to set the bpf prog on 2407 * @prog: the bpf prog pointer 2408 */ 2409 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) 2410 { 2411 struct bpf_prog *old_prog; 2412 int i; 2413 2414 old_prog = xchg(&vsi->xdp_prog, prog); 2415 if (old_prog) 2416 bpf_prog_put(old_prog); 2417 2418 ice_for_each_rxq(vsi, i) 2419 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); 2420 } 2421 2422 /** 2423 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP 2424 * @vsi: VSI to bring up Tx rings used by XDP 2425 * @prog: bpf program that will be assigned to VSI 2426 * 2427 * Return 0 on success and negative value on error 2428 */ 2429 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) 2430 { 2431 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2432 int xdp_rings_rem = vsi->num_xdp_txq; 2433 struct ice_pf *pf = vsi->back; 2434 struct ice_qs_cfg xdp_qs_cfg = { 2435 .qs_mutex = &pf->avail_q_mutex, 2436 .pf_map = pf->avail_txqs, 2437 .pf_map_size = pf->max_pf_txqs, 2438 .q_count = vsi->num_xdp_txq, 2439 .scatter_count = ICE_MAX_SCATTER_TXQS, 2440 .vsi_map = vsi->txq_map, 2441 .vsi_map_offset = vsi->alloc_txq, 2442 .mapping_mode = ICE_VSI_MAP_CONTIG 2443 }; 2444 enum ice_status status; 2445 struct device *dev; 2446 int i, v_idx; 2447 2448 dev = ice_pf_to_dev(pf); 2449 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, 2450 sizeof(*vsi->xdp_rings), GFP_KERNEL); 2451 if (!vsi->xdp_rings) 2452 return -ENOMEM; 2453 2454 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; 2455 if (__ice_vsi_get_qs(&xdp_qs_cfg)) 2456 goto err_map_xdp; 2457 2458 if (ice_xdp_alloc_setup_rings(vsi)) 2459 goto clear_xdp_rings; 2460 2461 /* follow the logic from ice_vsi_map_rings_to_vectors */ 2462 ice_for_each_q_vector(vsi, v_idx) { 2463 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2464 int xdp_rings_per_v, q_id, q_base; 2465 2466 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, 2467 vsi->num_q_vectors - v_idx); 2468 q_base = vsi->num_xdp_txq - xdp_rings_rem; 2469 2470 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { 2471 struct ice_ring *xdp_ring = vsi->xdp_rings[q_id]; 2472 2473 xdp_ring->q_vector = q_vector; 2474 xdp_ring->next = q_vector->tx.ring; 2475 q_vector->tx.ring = xdp_ring; 2476 } 2477 xdp_rings_rem -= xdp_rings_per_v; 2478 } 2479 2480 /* omit the scheduler update if in reset path; XDP queues will be 2481 * taken into account at the end of ice_vsi_rebuild, where 2482 * ice_cfg_vsi_lan is being called 2483 */ 2484 if (ice_is_reset_in_progress(pf->state)) 2485 return 0; 2486 2487 /* tell the Tx scheduler that right now we have 2488 * additional queues 2489 */ 2490 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2491 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; 2492 2493 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2494 max_txqs); 2495 if (status) { 2496 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n", 2497 ice_stat_str(status)); 2498 goto clear_xdp_rings; 2499 } 2500 ice_vsi_assign_bpf_prog(vsi, prog); 2501 2502 return 0; 2503 clear_xdp_rings: 2504 for (i = 0; i < vsi->num_xdp_txq; i++) 2505 if (vsi->xdp_rings[i]) { 2506 kfree_rcu(vsi->xdp_rings[i], rcu); 2507 vsi->xdp_rings[i] = NULL; 2508 } 2509 2510 err_map_xdp: 2511 mutex_lock(&pf->avail_q_mutex); 2512 for (i = 0; i < vsi->num_xdp_txq; i++) { 2513 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); 2514 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; 2515 } 2516 mutex_unlock(&pf->avail_q_mutex); 2517 2518 devm_kfree(dev, vsi->xdp_rings); 2519 return -ENOMEM; 2520 } 2521 2522 /** 2523 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings 2524 * @vsi: VSI to remove XDP rings 2525 * 2526 * Detach XDP rings from irq vectors, clean up the PF bitmap and free 2527 * resources 2528 */ 2529 int ice_destroy_xdp_rings(struct ice_vsi *vsi) 2530 { 2531 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2532 struct ice_pf *pf = vsi->back; 2533 int i, v_idx; 2534 2535 /* q_vectors are freed in reset path so there's no point in detaching 2536 * rings; in case of rebuild being triggered not from reset bits 2537 * in pf->state won't be set, so additionally check first q_vector 2538 * against NULL 2539 */ 2540 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) 2541 goto free_qmap; 2542 2543 ice_for_each_q_vector(vsi, v_idx) { 2544 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2545 struct ice_ring *ring; 2546 2547 ice_for_each_ring(ring, q_vector->tx) 2548 if (!ring->tx_buf || !ice_ring_is_xdp(ring)) 2549 break; 2550 2551 /* restore the value of last node prior to XDP setup */ 2552 q_vector->tx.ring = ring; 2553 } 2554 2555 free_qmap: 2556 mutex_lock(&pf->avail_q_mutex); 2557 for (i = 0; i < vsi->num_xdp_txq; i++) { 2558 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); 2559 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; 2560 } 2561 mutex_unlock(&pf->avail_q_mutex); 2562 2563 for (i = 0; i < vsi->num_xdp_txq; i++) 2564 if (vsi->xdp_rings[i]) { 2565 if (vsi->xdp_rings[i]->desc) 2566 ice_free_tx_ring(vsi->xdp_rings[i]); 2567 kfree_rcu(vsi->xdp_rings[i], rcu); 2568 vsi->xdp_rings[i] = NULL; 2569 } 2570 2571 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); 2572 vsi->xdp_rings = NULL; 2573 2574 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) 2575 return 0; 2576 2577 ice_vsi_assign_bpf_prog(vsi, NULL); 2578 2579 /* notify Tx scheduler that we destroyed XDP queues and bring 2580 * back the old number of child nodes 2581 */ 2582 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2583 max_txqs[i] = vsi->num_txq; 2584 2585 /* change number of XDP Tx queues to 0 */ 2586 vsi->num_xdp_txq = 0; 2587 2588 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2589 max_txqs); 2590 } 2591 2592 /** 2593 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI 2594 * @vsi: VSI to schedule napi on 2595 */ 2596 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi) 2597 { 2598 int i; 2599 2600 ice_for_each_rxq(vsi, i) { 2601 struct ice_ring *rx_ring = vsi->rx_rings[i]; 2602 2603 if (rx_ring->xsk_pool) 2604 napi_schedule(&rx_ring->q_vector->napi); 2605 } 2606 } 2607 2608 /** 2609 * ice_xdp_setup_prog - Add or remove XDP eBPF program 2610 * @vsi: VSI to setup XDP for 2611 * @prog: XDP program 2612 * @extack: netlink extended ack 2613 */ 2614 static int 2615 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, 2616 struct netlink_ext_ack *extack) 2617 { 2618 int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; 2619 bool if_running = netif_running(vsi->netdev); 2620 int ret = 0, xdp_ring_err = 0; 2621 2622 if (frame_size > vsi->rx_buf_len) { 2623 NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP"); 2624 return -EOPNOTSUPP; 2625 } 2626 2627 /* need to stop netdev while setting up the program for Rx rings */ 2628 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { 2629 ret = ice_down(vsi); 2630 if (ret) { 2631 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); 2632 return ret; 2633 } 2634 } 2635 2636 if (!ice_is_xdp_ena_vsi(vsi) && prog) { 2637 vsi->num_xdp_txq = vsi->alloc_rxq; 2638 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); 2639 if (xdp_ring_err) 2640 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); 2641 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { 2642 xdp_ring_err = ice_destroy_xdp_rings(vsi); 2643 if (xdp_ring_err) 2644 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); 2645 } else { 2646 ice_vsi_assign_bpf_prog(vsi, prog); 2647 } 2648 2649 if (if_running) 2650 ret = ice_up(vsi); 2651 2652 if (!ret && prog) 2653 ice_vsi_rx_napi_schedule(vsi); 2654 2655 return (ret || xdp_ring_err) ? -ENOMEM : 0; 2656 } 2657 2658 /** 2659 * ice_xdp_safe_mode - XDP handler for safe mode 2660 * @dev: netdevice 2661 * @xdp: XDP command 2662 */ 2663 static int ice_xdp_safe_mode(struct net_device __always_unused *dev, 2664 struct netdev_bpf *xdp) 2665 { 2666 NL_SET_ERR_MSG_MOD(xdp->extack, 2667 "Please provide working DDP firmware package in order to use XDP\n" 2668 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst"); 2669 return -EOPNOTSUPP; 2670 } 2671 2672 /** 2673 * ice_xdp - implements XDP handler 2674 * @dev: netdevice 2675 * @xdp: XDP command 2676 */ 2677 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) 2678 { 2679 struct ice_netdev_priv *np = netdev_priv(dev); 2680 struct ice_vsi *vsi = np->vsi; 2681 2682 if (vsi->type != ICE_VSI_PF) { 2683 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI"); 2684 return -EINVAL; 2685 } 2686 2687 switch (xdp->command) { 2688 case XDP_SETUP_PROG: 2689 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); 2690 case XDP_SETUP_XSK_POOL: 2691 return ice_xsk_pool_setup(vsi, xdp->xsk.pool, 2692 xdp->xsk.queue_id); 2693 default: 2694 return -EINVAL; 2695 } 2696 } 2697 2698 /** 2699 * ice_ena_misc_vector - enable the non-queue interrupts 2700 * @pf: board private structure 2701 */ 2702 static void ice_ena_misc_vector(struct ice_pf *pf) 2703 { 2704 struct ice_hw *hw = &pf->hw; 2705 u32 val; 2706 2707 /* Disable anti-spoof detection interrupt to prevent spurious event 2708 * interrupts during a function reset. Anti-spoof functionally is 2709 * still supported. 2710 */ 2711 val = rd32(hw, GL_MDCK_TX_TDPU); 2712 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M; 2713 wr32(hw, GL_MDCK_TX_TDPU, val); 2714 2715 /* clear things first */ 2716 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ 2717 rd32(hw, PFINT_OICR); /* read to clear */ 2718 2719 val = (PFINT_OICR_ECC_ERR_M | 2720 PFINT_OICR_MAL_DETECT_M | 2721 PFINT_OICR_GRST_M | 2722 PFINT_OICR_PCI_EXCEPTION_M | 2723 PFINT_OICR_VFLR_M | 2724 PFINT_OICR_HMC_ERR_M | 2725 PFINT_OICR_PE_PUSH_M | 2726 PFINT_OICR_PE_CRITERR_M); 2727 2728 wr32(hw, PFINT_OICR_ENA, val); 2729 2730 /* SW_ITR_IDX = 0, but don't change INTENA */ 2731 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), 2732 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); 2733 } 2734 2735 /** 2736 * ice_misc_intr - misc interrupt handler 2737 * @irq: interrupt number 2738 * @data: pointer to a q_vector 2739 */ 2740 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) 2741 { 2742 struct ice_pf *pf = (struct ice_pf *)data; 2743 struct ice_hw *hw = &pf->hw; 2744 irqreturn_t ret = IRQ_NONE; 2745 struct device *dev; 2746 u32 oicr, ena_mask; 2747 2748 dev = ice_pf_to_dev(pf); 2749 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); 2750 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); 2751 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); 2752 2753 oicr = rd32(hw, PFINT_OICR); 2754 ena_mask = rd32(hw, PFINT_OICR_ENA); 2755 2756 if (oicr & PFINT_OICR_SWINT_M) { 2757 ena_mask &= ~PFINT_OICR_SWINT_M; 2758 pf->sw_int_count++; 2759 } 2760 2761 if (oicr & PFINT_OICR_MAL_DETECT_M) { 2762 ena_mask &= ~PFINT_OICR_MAL_DETECT_M; 2763 set_bit(ICE_MDD_EVENT_PENDING, pf->state); 2764 } 2765 if (oicr & PFINT_OICR_VFLR_M) { 2766 /* disable any further VFLR event notifications */ 2767 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { 2768 u32 reg = rd32(hw, PFINT_OICR_ENA); 2769 2770 reg &= ~PFINT_OICR_VFLR_M; 2771 wr32(hw, PFINT_OICR_ENA, reg); 2772 } else { 2773 ena_mask &= ~PFINT_OICR_VFLR_M; 2774 set_bit(ICE_VFLR_EVENT_PENDING, pf->state); 2775 } 2776 } 2777 2778 if (oicr & PFINT_OICR_GRST_M) { 2779 u32 reset; 2780 2781 /* we have a reset warning */ 2782 ena_mask &= ~PFINT_OICR_GRST_M; 2783 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> 2784 GLGEN_RSTAT_RESET_TYPE_S; 2785 2786 if (reset == ICE_RESET_CORER) 2787 pf->corer_count++; 2788 else if (reset == ICE_RESET_GLOBR) 2789 pf->globr_count++; 2790 else if (reset == ICE_RESET_EMPR) 2791 pf->empr_count++; 2792 else 2793 dev_dbg(dev, "Invalid reset type %d\n", reset); 2794 2795 /* If a reset cycle isn't already in progress, we set a bit in 2796 * pf->state so that the service task can start a reset/rebuild. 2797 */ 2798 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { 2799 if (reset == ICE_RESET_CORER) 2800 set_bit(ICE_CORER_RECV, pf->state); 2801 else if (reset == ICE_RESET_GLOBR) 2802 set_bit(ICE_GLOBR_RECV, pf->state); 2803 else 2804 set_bit(ICE_EMPR_RECV, pf->state); 2805 2806 /* There are couple of different bits at play here. 2807 * hw->reset_ongoing indicates whether the hardware is 2808 * in reset. This is set to true when a reset interrupt 2809 * is received and set back to false after the driver 2810 * has determined that the hardware is out of reset. 2811 * 2812 * ICE_RESET_OICR_RECV in pf->state indicates 2813 * that a post reset rebuild is required before the 2814 * driver is operational again. This is set above. 2815 * 2816 * As this is the start of the reset/rebuild cycle, set 2817 * both to indicate that. 2818 */ 2819 hw->reset_ongoing = true; 2820 } 2821 } 2822 2823 if (oicr & PFINT_OICR_TSYN_TX_M) { 2824 ena_mask &= ~PFINT_OICR_TSYN_TX_M; 2825 ice_ptp_process_ts(pf); 2826 } 2827 2828 if (oicr & PFINT_OICR_TSYN_EVNT_M) { 2829 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 2830 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx)); 2831 2832 /* Save EVENTs from GTSYN register */ 2833 pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M | 2834 GLTSYN_STAT_EVENT1_M | 2835 GLTSYN_STAT_EVENT2_M); 2836 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M; 2837 kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work); 2838 } 2839 2840 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M) 2841 if (oicr & ICE_AUX_CRIT_ERR) { 2842 struct iidc_event *event; 2843 2844 ena_mask &= ~ICE_AUX_CRIT_ERR; 2845 event = kzalloc(sizeof(*event), GFP_KERNEL); 2846 if (event) { 2847 set_bit(IIDC_EVENT_CRIT_ERR, event->type); 2848 /* report the entire OICR value to AUX driver */ 2849 event->reg = oicr; 2850 ice_send_event_to_aux(pf, event); 2851 kfree(event); 2852 } 2853 } 2854 2855 /* Report any remaining unexpected interrupts */ 2856 oicr &= ena_mask; 2857 if (oicr) { 2858 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr); 2859 /* If a critical error is pending there is no choice but to 2860 * reset the device. 2861 */ 2862 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M | 2863 PFINT_OICR_ECC_ERR_M)) { 2864 set_bit(ICE_PFR_REQ, pf->state); 2865 ice_service_task_schedule(pf); 2866 } 2867 } 2868 ret = IRQ_HANDLED; 2869 2870 ice_service_task_schedule(pf); 2871 ice_irq_dynamic_ena(hw, NULL, NULL); 2872 2873 return ret; 2874 } 2875 2876 /** 2877 * ice_dis_ctrlq_interrupts - disable control queue interrupts 2878 * @hw: pointer to HW structure 2879 */ 2880 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) 2881 { 2882 /* disable Admin queue Interrupt causes */ 2883 wr32(hw, PFINT_FW_CTL, 2884 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M); 2885 2886 /* disable Mailbox queue Interrupt causes */ 2887 wr32(hw, PFINT_MBX_CTL, 2888 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M); 2889 2890 wr32(hw, PFINT_SB_CTL, 2891 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M); 2892 2893 /* disable Control queue Interrupt causes */ 2894 wr32(hw, PFINT_OICR_CTL, 2895 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M); 2896 2897 ice_flush(hw); 2898 } 2899 2900 /** 2901 * ice_free_irq_msix_misc - Unroll misc vector setup 2902 * @pf: board private structure 2903 */ 2904 static void ice_free_irq_msix_misc(struct ice_pf *pf) 2905 { 2906 struct ice_hw *hw = &pf->hw; 2907 2908 ice_dis_ctrlq_interrupts(hw); 2909 2910 /* disable OICR interrupt */ 2911 wr32(hw, PFINT_OICR_ENA, 0); 2912 ice_flush(hw); 2913 2914 if (pf->msix_entries) { 2915 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); 2916 devm_free_irq(ice_pf_to_dev(pf), 2917 pf->msix_entries[pf->oicr_idx].vector, pf); 2918 } 2919 2920 pf->num_avail_sw_msix += 1; 2921 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); 2922 } 2923 2924 /** 2925 * ice_ena_ctrlq_interrupts - enable control queue interrupts 2926 * @hw: pointer to HW structure 2927 * @reg_idx: HW vector index to associate the control queue interrupts with 2928 */ 2929 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) 2930 { 2931 u32 val; 2932 2933 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) | 2934 PFINT_OICR_CTL_CAUSE_ENA_M); 2935 wr32(hw, PFINT_OICR_CTL, val); 2936 2937 /* enable Admin queue Interrupt causes */ 2938 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) | 2939 PFINT_FW_CTL_CAUSE_ENA_M); 2940 wr32(hw, PFINT_FW_CTL, val); 2941 2942 /* enable Mailbox queue Interrupt causes */ 2943 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) | 2944 PFINT_MBX_CTL_CAUSE_ENA_M); 2945 wr32(hw, PFINT_MBX_CTL, val); 2946 2947 /* This enables Sideband queue Interrupt causes */ 2948 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | 2949 PFINT_SB_CTL_CAUSE_ENA_M); 2950 wr32(hw, PFINT_SB_CTL, val); 2951 2952 ice_flush(hw); 2953 } 2954 2955 /** 2956 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events 2957 * @pf: board private structure 2958 * 2959 * This sets up the handler for MSIX 0, which is used to manage the 2960 * non-queue interrupts, e.g. AdminQ and errors. This is not used 2961 * when in MSI or Legacy interrupt mode. 2962 */ 2963 static int ice_req_irq_msix_misc(struct ice_pf *pf) 2964 { 2965 struct device *dev = ice_pf_to_dev(pf); 2966 struct ice_hw *hw = &pf->hw; 2967 int oicr_idx, err = 0; 2968 2969 if (!pf->int_name[0]) 2970 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", 2971 dev_driver_string(dev), dev_name(dev)); 2972 2973 /* Do not request IRQ but do enable OICR interrupt since settings are 2974 * lost during reset. Note that this function is called only during 2975 * rebuild path and not while reset is in progress. 2976 */ 2977 if (ice_is_reset_in_progress(pf->state)) 2978 goto skip_req_irq; 2979 2980 /* reserve one vector in irq_tracker for misc interrupts */ 2981 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); 2982 if (oicr_idx < 0) 2983 return oicr_idx; 2984 2985 pf->num_avail_sw_msix -= 1; 2986 pf->oicr_idx = (u16)oicr_idx; 2987 2988 err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector, 2989 ice_misc_intr, 0, pf->int_name, pf); 2990 if (err) { 2991 dev_err(dev, "devm_request_irq for %s failed: %d\n", 2992 pf->int_name, err); 2993 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); 2994 pf->num_avail_sw_msix += 1; 2995 return err; 2996 } 2997 2998 skip_req_irq: 2999 ice_ena_misc_vector(pf); 3000 3001 ice_ena_ctrlq_interrupts(hw, pf->oicr_idx); 3002 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), 3003 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); 3004 3005 ice_flush(hw); 3006 ice_irq_dynamic_ena(hw, NULL, NULL); 3007 3008 return 0; 3009 } 3010 3011 /** 3012 * ice_napi_add - register NAPI handler for the VSI 3013 * @vsi: VSI for which NAPI handler is to be registered 3014 * 3015 * This function is only called in the driver's load path. Registering the NAPI 3016 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, 3017 * reset/rebuild, etc.) 3018 */ 3019 static void ice_napi_add(struct ice_vsi *vsi) 3020 { 3021 int v_idx; 3022 3023 if (!vsi->netdev) 3024 return; 3025 3026 ice_for_each_q_vector(vsi, v_idx) 3027 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, 3028 ice_napi_poll, NAPI_POLL_WEIGHT); 3029 } 3030 3031 /** 3032 * ice_set_ops - set netdev and ethtools ops for the given netdev 3033 * @netdev: netdev instance 3034 */ 3035 static void ice_set_ops(struct net_device *netdev) 3036 { 3037 struct ice_pf *pf = ice_netdev_to_pf(netdev); 3038 3039 if (ice_is_safe_mode(pf)) { 3040 netdev->netdev_ops = &ice_netdev_safe_mode_ops; 3041 ice_set_ethtool_safe_mode_ops(netdev); 3042 return; 3043 } 3044 3045 netdev->netdev_ops = &ice_netdev_ops; 3046 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; 3047 ice_set_ethtool_ops(netdev); 3048 } 3049 3050 /** 3051 * ice_set_netdev_features - set features for the given netdev 3052 * @netdev: netdev instance 3053 */ 3054 static void ice_set_netdev_features(struct net_device *netdev) 3055 { 3056 struct ice_pf *pf = ice_netdev_to_pf(netdev); 3057 netdev_features_t csumo_features; 3058 netdev_features_t vlano_features; 3059 netdev_features_t dflt_features; 3060 netdev_features_t tso_features; 3061 3062 if (ice_is_safe_mode(pf)) { 3063 /* safe mode */ 3064 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; 3065 netdev->hw_features = netdev->features; 3066 return; 3067 } 3068 3069 dflt_features = NETIF_F_SG | 3070 NETIF_F_HIGHDMA | 3071 NETIF_F_NTUPLE | 3072 NETIF_F_RXHASH; 3073 3074 csumo_features = NETIF_F_RXCSUM | 3075 NETIF_F_IP_CSUM | 3076 NETIF_F_SCTP_CRC | 3077 NETIF_F_IPV6_CSUM; 3078 3079 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | 3080 NETIF_F_HW_VLAN_CTAG_TX | 3081 NETIF_F_HW_VLAN_CTAG_RX; 3082 3083 tso_features = NETIF_F_TSO | 3084 NETIF_F_TSO_ECN | 3085 NETIF_F_TSO6 | 3086 NETIF_F_GSO_GRE | 3087 NETIF_F_GSO_UDP_TUNNEL | 3088 NETIF_F_GSO_GRE_CSUM | 3089 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3090 NETIF_F_GSO_PARTIAL | 3091 NETIF_F_GSO_IPXIP4 | 3092 NETIF_F_GSO_IPXIP6 | 3093 NETIF_F_GSO_UDP_L4; 3094 3095 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | 3096 NETIF_F_GSO_GRE_CSUM; 3097 /* set features that user can change */ 3098 netdev->hw_features = dflt_features | csumo_features | 3099 vlano_features | tso_features; 3100 3101 /* add support for HW_CSUM on packets with MPLS header */ 3102 netdev->mpls_features = NETIF_F_HW_CSUM; 3103 3104 /* enable features */ 3105 netdev->features |= netdev->hw_features; 3106 /* encap and VLAN devices inherit default, csumo and tso features */ 3107 netdev->hw_enc_features |= dflt_features | csumo_features | 3108 tso_features; 3109 netdev->vlan_features |= dflt_features | csumo_features | 3110 tso_features; 3111 } 3112 3113 /** 3114 * ice_cfg_netdev - Allocate, configure and register a netdev 3115 * @vsi: the VSI associated with the new netdev 3116 * 3117 * Returns 0 on success, negative value on failure 3118 */ 3119 static int ice_cfg_netdev(struct ice_vsi *vsi) 3120 { 3121 struct ice_netdev_priv *np; 3122 struct net_device *netdev; 3123 u8 mac_addr[ETH_ALEN]; 3124 3125 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, 3126 vsi->alloc_rxq); 3127 if (!netdev) 3128 return -ENOMEM; 3129 3130 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 3131 vsi->netdev = netdev; 3132 np = netdev_priv(netdev); 3133 np->vsi = vsi; 3134 3135 ice_set_netdev_features(netdev); 3136 3137 ice_set_ops(netdev); 3138 3139 if (vsi->type == ICE_VSI_PF) { 3140 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); 3141 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 3142 ether_addr_copy(netdev->dev_addr, mac_addr); 3143 ether_addr_copy(netdev->perm_addr, mac_addr); 3144 } 3145 3146 netdev->priv_flags |= IFF_UNICAST_FLT; 3147 3148 /* Setup netdev TC information */ 3149 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); 3150 3151 /* setup watchdog timeout value to be 5 second */ 3152 netdev->watchdog_timeo = 5 * HZ; 3153 3154 netdev->min_mtu = ETH_MIN_MTU; 3155 netdev->max_mtu = ICE_MAX_MTU; 3156 3157 return 0; 3158 } 3159 3160 /** 3161 * ice_fill_rss_lut - Fill the RSS lookup table with default values 3162 * @lut: Lookup table 3163 * @rss_table_size: Lookup table size 3164 * @rss_size: Range of queue number for hashing 3165 */ 3166 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) 3167 { 3168 u16 i; 3169 3170 for (i = 0; i < rss_table_size; i++) 3171 lut[i] = i % rss_size; 3172 } 3173 3174 /** 3175 * ice_pf_vsi_setup - Set up a PF VSI 3176 * @pf: board private structure 3177 * @pi: pointer to the port_info instance 3178 * 3179 * Returns pointer to the successfully allocated VSI software struct 3180 * on success, otherwise returns NULL on failure. 3181 */ 3182 static struct ice_vsi * 3183 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3184 { 3185 return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID); 3186 } 3187 3188 /** 3189 * ice_ctrl_vsi_setup - Set up a control VSI 3190 * @pf: board private structure 3191 * @pi: pointer to the port_info instance 3192 * 3193 * Returns pointer to the successfully allocated VSI software struct 3194 * on success, otherwise returns NULL on failure. 3195 */ 3196 static struct ice_vsi * 3197 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3198 { 3199 return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID); 3200 } 3201 3202 /** 3203 * ice_lb_vsi_setup - Set up a loopback VSI 3204 * @pf: board private structure 3205 * @pi: pointer to the port_info instance 3206 * 3207 * Returns pointer to the successfully allocated VSI software struct 3208 * on success, otherwise returns NULL on failure. 3209 */ 3210 struct ice_vsi * 3211 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3212 { 3213 return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID); 3214 } 3215 3216 /** 3217 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload 3218 * @netdev: network interface to be adjusted 3219 * @proto: unused protocol 3220 * @vid: VLAN ID to be added 3221 * 3222 * net_device_ops implementation for adding VLAN IDs 3223 */ 3224 static int 3225 ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, 3226 u16 vid) 3227 { 3228 struct ice_netdev_priv *np = netdev_priv(netdev); 3229 struct ice_vsi *vsi = np->vsi; 3230 int ret; 3231 3232 /* VLAN 0 is added by default during load/reset */ 3233 if (!vid) 3234 return 0; 3235 3236 /* Enable VLAN pruning when a VLAN other than 0 is added */ 3237 if (!ice_vsi_is_vlan_pruning_ena(vsi)) { 3238 ret = ice_cfg_vlan_pruning(vsi, true, false); 3239 if (ret) 3240 return ret; 3241 } 3242 3243 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged 3244 * packets aren't pruned by the device's internal switch on Rx 3245 */ 3246 ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI); 3247 if (!ret) 3248 set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); 3249 3250 return ret; 3251 } 3252 3253 /** 3254 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload 3255 * @netdev: network interface to be adjusted 3256 * @proto: unused protocol 3257 * @vid: VLAN ID to be removed 3258 * 3259 * net_device_ops implementation for removing VLAN IDs 3260 */ 3261 static int 3262 ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, 3263 u16 vid) 3264 { 3265 struct ice_netdev_priv *np = netdev_priv(netdev); 3266 struct ice_vsi *vsi = np->vsi; 3267 int ret; 3268 3269 /* don't allow removal of VLAN 0 */ 3270 if (!vid) 3271 return 0; 3272 3273 /* Make sure ice_vsi_kill_vlan is successful before updating VLAN 3274 * information 3275 */ 3276 ret = ice_vsi_kill_vlan(vsi, vid); 3277 if (ret) 3278 return ret; 3279 3280 /* Disable pruning when VLAN 0 is the only VLAN rule */ 3281 if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi)) 3282 ret = ice_cfg_vlan_pruning(vsi, false, false); 3283 3284 set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); 3285 return ret; 3286 } 3287 3288 /** 3289 * ice_setup_pf_sw - Setup the HW switch on startup or after reset 3290 * @pf: board private structure 3291 * 3292 * Returns 0 on success, negative value on failure 3293 */ 3294 static int ice_setup_pf_sw(struct ice_pf *pf) 3295 { 3296 struct ice_vsi *vsi; 3297 int status = 0; 3298 3299 if (ice_is_reset_in_progress(pf->state)) 3300 return -EBUSY; 3301 3302 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); 3303 if (!vsi) 3304 return -ENOMEM; 3305 3306 status = ice_cfg_netdev(vsi); 3307 if (status) { 3308 status = -ENODEV; 3309 goto unroll_vsi_setup; 3310 } 3311 /* netdev has to be configured before setting frame size */ 3312 ice_vsi_cfg_frame_size(vsi); 3313 3314 /* Setup DCB netlink interface */ 3315 ice_dcbnl_setup(vsi); 3316 3317 /* registering the NAPI handler requires both the queues and 3318 * netdev to be created, which are done in ice_pf_vsi_setup() 3319 * and ice_cfg_netdev() respectively 3320 */ 3321 ice_napi_add(vsi); 3322 3323 status = ice_set_cpu_rx_rmap(vsi); 3324 if (status) { 3325 dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n", 3326 vsi->vsi_num, status); 3327 status = -EINVAL; 3328 goto unroll_napi_add; 3329 } 3330 status = ice_init_mac_fltr(pf); 3331 if (status) 3332 goto free_cpu_rx_map; 3333 3334 return status; 3335 3336 free_cpu_rx_map: 3337 ice_free_cpu_rx_rmap(vsi); 3338 3339 unroll_napi_add: 3340 if (vsi) { 3341 ice_napi_del(vsi); 3342 if (vsi->netdev) { 3343 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 3344 free_netdev(vsi->netdev); 3345 vsi->netdev = NULL; 3346 } 3347 } 3348 3349 unroll_vsi_setup: 3350 ice_vsi_release(vsi); 3351 return status; 3352 } 3353 3354 /** 3355 * ice_get_avail_q_count - Get count of queues in use 3356 * @pf_qmap: bitmap to get queue use count from 3357 * @lock: pointer to a mutex that protects access to pf_qmap 3358 * @size: size of the bitmap 3359 */ 3360 static u16 3361 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) 3362 { 3363 unsigned long bit; 3364 u16 count = 0; 3365 3366 mutex_lock(lock); 3367 for_each_clear_bit(bit, pf_qmap, size) 3368 count++; 3369 mutex_unlock(lock); 3370 3371 return count; 3372 } 3373 3374 /** 3375 * ice_get_avail_txq_count - Get count of Tx queues in use 3376 * @pf: pointer to an ice_pf instance 3377 */ 3378 u16 ice_get_avail_txq_count(struct ice_pf *pf) 3379 { 3380 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, 3381 pf->max_pf_txqs); 3382 } 3383 3384 /** 3385 * ice_get_avail_rxq_count - Get count of Rx queues in use 3386 * @pf: pointer to an ice_pf instance 3387 */ 3388 u16 ice_get_avail_rxq_count(struct ice_pf *pf) 3389 { 3390 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, 3391 pf->max_pf_rxqs); 3392 } 3393 3394 /** 3395 * ice_deinit_pf - Unrolls initialziations done by ice_init_pf 3396 * @pf: board private structure to initialize 3397 */ 3398 static void ice_deinit_pf(struct ice_pf *pf) 3399 { 3400 ice_service_task_stop(pf); 3401 mutex_destroy(&pf->sw_mutex); 3402 mutex_destroy(&pf->tc_mutex); 3403 mutex_destroy(&pf->avail_q_mutex); 3404 3405 if (pf->avail_txqs) { 3406 bitmap_free(pf->avail_txqs); 3407 pf->avail_txqs = NULL; 3408 } 3409 3410 if (pf->avail_rxqs) { 3411 bitmap_free(pf->avail_rxqs); 3412 pf->avail_rxqs = NULL; 3413 } 3414 3415 if (pf->ptp.clock) 3416 ptp_clock_unregister(pf->ptp.clock); 3417 } 3418 3419 /** 3420 * ice_set_pf_caps - set PFs capability flags 3421 * @pf: pointer to the PF instance 3422 */ 3423 static void ice_set_pf_caps(struct ice_pf *pf) 3424 { 3425 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; 3426 3427 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3428 clear_bit(ICE_FLAG_AUX_ENA, pf->flags); 3429 if (func_caps->common_cap.rdma) { 3430 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3431 set_bit(ICE_FLAG_AUX_ENA, pf->flags); 3432 } 3433 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 3434 if (func_caps->common_cap.dcb) 3435 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 3436 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 3437 if (func_caps->common_cap.sr_iov_1_1) { 3438 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 3439 pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs, 3440 ICE_MAX_VF_COUNT); 3441 } 3442 clear_bit(ICE_FLAG_RSS_ENA, pf->flags); 3443 if (func_caps->common_cap.rss_table_size) 3444 set_bit(ICE_FLAG_RSS_ENA, pf->flags); 3445 3446 clear_bit(ICE_FLAG_FD_ENA, pf->flags); 3447 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { 3448 u16 unused; 3449 3450 /* ctrl_vsi_idx will be set to a valid value when flow director 3451 * is setup by ice_init_fdir 3452 */ 3453 pf->ctrl_vsi_idx = ICE_NO_VSI; 3454 set_bit(ICE_FLAG_FD_ENA, pf->flags); 3455 /* force guaranteed filter pool for PF */ 3456 ice_alloc_fd_guar_item(&pf->hw, &unused, 3457 func_caps->fd_fltr_guar); 3458 /* force shared filter pool for PF */ 3459 ice_alloc_fd_shrd_item(&pf->hw, &unused, 3460 func_caps->fd_fltr_best_effort); 3461 } 3462 3463 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 3464 if (func_caps->common_cap.ieee_1588) 3465 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 3466 3467 pf->max_pf_txqs = func_caps->common_cap.num_txq; 3468 pf->max_pf_rxqs = func_caps->common_cap.num_rxq; 3469 } 3470 3471 /** 3472 * ice_init_pf - Initialize general software structures (struct ice_pf) 3473 * @pf: board private structure to initialize 3474 */ 3475 static int ice_init_pf(struct ice_pf *pf) 3476 { 3477 ice_set_pf_caps(pf); 3478 3479 mutex_init(&pf->sw_mutex); 3480 mutex_init(&pf->tc_mutex); 3481 3482 INIT_HLIST_HEAD(&pf->aq_wait_list); 3483 spin_lock_init(&pf->aq_wait_lock); 3484 init_waitqueue_head(&pf->aq_wait_queue); 3485 3486 init_waitqueue_head(&pf->reset_wait_queue); 3487 3488 /* setup service timer and periodic service task */ 3489 timer_setup(&pf->serv_tmr, ice_service_timer, 0); 3490 pf->serv_tmr_period = HZ; 3491 INIT_WORK(&pf->serv_task, ice_service_task); 3492 clear_bit(ICE_SERVICE_SCHED, pf->state); 3493 3494 mutex_init(&pf->avail_q_mutex); 3495 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); 3496 if (!pf->avail_txqs) 3497 return -ENOMEM; 3498 3499 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); 3500 if (!pf->avail_rxqs) { 3501 devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs); 3502 pf->avail_txqs = NULL; 3503 return -ENOMEM; 3504 } 3505 3506 return 0; 3507 } 3508 3509 /** 3510 * ice_ena_msix_range - Request a range of MSIX vectors from the OS 3511 * @pf: board private structure 3512 * 3513 * compute the number of MSIX vectors required (v_budget) and request from 3514 * the OS. Return the number of vectors reserved or negative on failure 3515 */ 3516 static int ice_ena_msix_range(struct ice_pf *pf) 3517 { 3518 int num_cpus, v_left, v_actual, v_other, v_budget = 0; 3519 struct device *dev = ice_pf_to_dev(pf); 3520 int needed, err, i; 3521 3522 v_left = pf->hw.func_caps.common_cap.num_msix_vectors; 3523 num_cpus = num_online_cpus(); 3524 3525 /* reserve for LAN miscellaneous handler */ 3526 needed = ICE_MIN_LAN_OICR_MSIX; 3527 if (v_left < needed) 3528 goto no_hw_vecs_left_err; 3529 v_budget += needed; 3530 v_left -= needed; 3531 3532 /* reserve for flow director */ 3533 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 3534 needed = ICE_FDIR_MSIX; 3535 if (v_left < needed) 3536 goto no_hw_vecs_left_err; 3537 v_budget += needed; 3538 v_left -= needed; 3539 } 3540 3541 /* total used for non-traffic vectors */ 3542 v_other = v_budget; 3543 3544 /* reserve vectors for LAN traffic */ 3545 needed = num_cpus; 3546 if (v_left < needed) 3547 goto no_hw_vecs_left_err; 3548 pf->num_lan_msix = needed; 3549 v_budget += needed; 3550 v_left -= needed; 3551 3552 /* reserve vectors for RDMA auxiliary driver */ 3553 if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { 3554 needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; 3555 if (v_left < needed) 3556 goto no_hw_vecs_left_err; 3557 pf->num_rdma_msix = needed; 3558 v_budget += needed; 3559 v_left -= needed; 3560 } 3561 3562 pf->msix_entries = devm_kcalloc(dev, v_budget, 3563 sizeof(*pf->msix_entries), GFP_KERNEL); 3564 if (!pf->msix_entries) { 3565 err = -ENOMEM; 3566 goto exit_err; 3567 } 3568 3569 for (i = 0; i < v_budget; i++) 3570 pf->msix_entries[i].entry = i; 3571 3572 /* actually reserve the vectors */ 3573 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, 3574 ICE_MIN_MSIX, v_budget); 3575 if (v_actual < 0) { 3576 dev_err(dev, "unable to reserve MSI-X vectors\n"); 3577 err = v_actual; 3578 goto msix_err; 3579 } 3580 3581 if (v_actual < v_budget) { 3582 dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", 3583 v_budget, v_actual); 3584 3585 if (v_actual < ICE_MIN_MSIX) { 3586 /* error if we can't get minimum vectors */ 3587 pci_disable_msix(pf->pdev); 3588 err = -ERANGE; 3589 goto msix_err; 3590 } else { 3591 int v_remain = v_actual - v_other; 3592 int v_rdma = 0, v_min_rdma = 0; 3593 3594 if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { 3595 /* Need at least 1 interrupt in addition to 3596 * AEQ MSIX 3597 */ 3598 v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; 3599 v_min_rdma = ICE_MIN_RDMA_MSIX; 3600 } 3601 3602 if (v_actual == ICE_MIN_MSIX || 3603 v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) { 3604 dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n"); 3605 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3606 3607 pf->num_rdma_msix = 0; 3608 pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; 3609 } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || 3610 (v_remain - v_rdma < v_rdma)) { 3611 /* Support minimum RDMA and give remaining 3612 * vectors to LAN MSIX 3613 */ 3614 pf->num_rdma_msix = v_min_rdma; 3615 pf->num_lan_msix = v_remain - v_min_rdma; 3616 } else { 3617 /* Split remaining MSIX with RDMA after 3618 * accounting for AEQ MSIX 3619 */ 3620 pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + 3621 ICE_RDMA_NUM_AEQ_MSIX; 3622 pf->num_lan_msix = v_remain - pf->num_rdma_msix; 3623 } 3624 3625 dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", 3626 pf->num_lan_msix); 3627 3628 if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) 3629 dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n", 3630 pf->num_rdma_msix); 3631 } 3632 } 3633 3634 return v_actual; 3635 3636 msix_err: 3637 devm_kfree(dev, pf->msix_entries); 3638 goto exit_err; 3639 3640 no_hw_vecs_left_err: 3641 dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n", 3642 needed, v_left); 3643 err = -ERANGE; 3644 exit_err: 3645 pf->num_rdma_msix = 0; 3646 pf->num_lan_msix = 0; 3647 return err; 3648 } 3649 3650 /** 3651 * ice_dis_msix - Disable MSI-X interrupt setup in OS 3652 * @pf: board private structure 3653 */ 3654 static void ice_dis_msix(struct ice_pf *pf) 3655 { 3656 pci_disable_msix(pf->pdev); 3657 devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); 3658 pf->msix_entries = NULL; 3659 } 3660 3661 /** 3662 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme 3663 * @pf: board private structure 3664 */ 3665 static void ice_clear_interrupt_scheme(struct ice_pf *pf) 3666 { 3667 ice_dis_msix(pf); 3668 3669 if (pf->irq_tracker) { 3670 devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); 3671 pf->irq_tracker = NULL; 3672 } 3673 } 3674 3675 /** 3676 * ice_init_interrupt_scheme - Determine proper interrupt scheme 3677 * @pf: board private structure to initialize 3678 */ 3679 static int ice_init_interrupt_scheme(struct ice_pf *pf) 3680 { 3681 int vectors; 3682 3683 vectors = ice_ena_msix_range(pf); 3684 3685 if (vectors < 0) 3686 return vectors; 3687 3688 /* set up vector assignment tracking */ 3689 pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf), 3690 struct_size(pf->irq_tracker, list, vectors), 3691 GFP_KERNEL); 3692 if (!pf->irq_tracker) { 3693 ice_dis_msix(pf); 3694 return -ENOMEM; 3695 } 3696 3697 /* populate SW interrupts pool with number of OS granted IRQs. */ 3698 pf->num_avail_sw_msix = (u16)vectors; 3699 pf->irq_tracker->num_entries = (u16)vectors; 3700 pf->irq_tracker->end = pf->irq_tracker->num_entries; 3701 3702 return 0; 3703 } 3704 3705 /** 3706 * ice_is_wol_supported - check if WoL is supported 3707 * @hw: pointer to hardware info 3708 * 3709 * Check if WoL is supported based on the HW configuration. 3710 * Returns true if NVM supports and enables WoL for this port, false otherwise 3711 */ 3712 bool ice_is_wol_supported(struct ice_hw *hw) 3713 { 3714 u16 wol_ctrl; 3715 3716 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control 3717 * word) indicates WoL is not supported on the corresponding PF ID. 3718 */ 3719 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) 3720 return false; 3721 3722 return !(BIT(hw->port_info->lport) & wol_ctrl); 3723 } 3724 3725 /** 3726 * ice_vsi_recfg_qs - Change the number of queues on a VSI 3727 * @vsi: VSI being changed 3728 * @new_rx: new number of Rx queues 3729 * @new_tx: new number of Tx queues 3730 * 3731 * Only change the number of queues if new_tx, or new_rx is non-0. 3732 * 3733 * Returns 0 on success. 3734 */ 3735 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) 3736 { 3737 struct ice_pf *pf = vsi->back; 3738 int err = 0, timeout = 50; 3739 3740 if (!new_rx && !new_tx) 3741 return -EINVAL; 3742 3743 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { 3744 timeout--; 3745 if (!timeout) 3746 return -EBUSY; 3747 usleep_range(1000, 2000); 3748 } 3749 3750 if (new_tx) 3751 vsi->req_txq = (u16)new_tx; 3752 if (new_rx) 3753 vsi->req_rxq = (u16)new_rx; 3754 3755 /* set for the next time the netdev is started */ 3756 if (!netif_running(vsi->netdev)) { 3757 ice_vsi_rebuild(vsi, false); 3758 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); 3759 goto done; 3760 } 3761 3762 ice_vsi_close(vsi); 3763 ice_vsi_rebuild(vsi, false); 3764 ice_pf_dcb_recfg(pf); 3765 ice_vsi_open(vsi); 3766 done: 3767 clear_bit(ICE_CFG_BUSY, pf->state); 3768 return err; 3769 } 3770 3771 /** 3772 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode 3773 * @pf: PF to configure 3774 * 3775 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF 3776 * VSI can still Tx/Rx VLAN tagged packets. 3777 */ 3778 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) 3779 { 3780 struct ice_vsi *vsi = ice_get_main_vsi(pf); 3781 struct ice_vsi_ctx *ctxt; 3782 enum ice_status status; 3783 struct ice_hw *hw; 3784 3785 if (!vsi) 3786 return; 3787 3788 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 3789 if (!ctxt) 3790 return; 3791 3792 hw = &pf->hw; 3793 ctxt->info = vsi->info; 3794 3795 ctxt->info.valid_sections = 3796 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | 3797 ICE_AQ_VSI_PROP_SECURITY_VALID | 3798 ICE_AQ_VSI_PROP_SW_VALID); 3799 3800 /* disable VLAN anti-spoof */ 3801 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 3802 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 3803 3804 /* disable VLAN pruning and keep all other settings */ 3805 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 3806 3807 /* allow all VLANs on Tx and don't strip on Rx */ 3808 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL | 3809 ICE_AQ_VSI_VLAN_EMOD_NOTHING; 3810 3811 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 3812 if (status) { 3813 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n", 3814 ice_stat_str(status), 3815 ice_aq_str(hw->adminq.sq_last_status)); 3816 } else { 3817 vsi->info.sec_flags = ctxt->info.sec_flags; 3818 vsi->info.sw_flags2 = ctxt->info.sw_flags2; 3819 vsi->info.vlan_flags = ctxt->info.vlan_flags; 3820 } 3821 3822 kfree(ctxt); 3823 } 3824 3825 /** 3826 * ice_log_pkg_init - log result of DDP package load 3827 * @hw: pointer to hardware info 3828 * @status: status of package load 3829 */ 3830 static void 3831 ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) 3832 { 3833 struct ice_pf *pf = (struct ice_pf *)hw->back; 3834 struct device *dev = ice_pf_to_dev(pf); 3835 3836 switch (*status) { 3837 case ICE_SUCCESS: 3838 /* The package download AdminQ command returned success because 3839 * this download succeeded or ICE_ERR_AQ_NO_WORK since there is 3840 * already a package loaded on the device. 3841 */ 3842 if (hw->pkg_ver.major == hw->active_pkg_ver.major && 3843 hw->pkg_ver.minor == hw->active_pkg_ver.minor && 3844 hw->pkg_ver.update == hw->active_pkg_ver.update && 3845 hw->pkg_ver.draft == hw->active_pkg_ver.draft && 3846 !memcmp(hw->pkg_name, hw->active_pkg_name, 3847 sizeof(hw->pkg_name))) { 3848 if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST) 3849 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", 3850 hw->active_pkg_name, 3851 hw->active_pkg_ver.major, 3852 hw->active_pkg_ver.minor, 3853 hw->active_pkg_ver.update, 3854 hw->active_pkg_ver.draft); 3855 else 3856 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", 3857 hw->active_pkg_name, 3858 hw->active_pkg_ver.major, 3859 hw->active_pkg_ver.minor, 3860 hw->active_pkg_ver.update, 3861 hw->active_pkg_ver.draft); 3862 } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || 3863 hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { 3864 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 3865 hw->active_pkg_name, 3866 hw->active_pkg_ver.major, 3867 hw->active_pkg_ver.minor, 3868 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 3869 *status = ICE_ERR_NOT_SUPPORTED; 3870 } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && 3871 hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { 3872 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", 3873 hw->active_pkg_name, 3874 hw->active_pkg_ver.major, 3875 hw->active_pkg_ver.minor, 3876 hw->active_pkg_ver.update, 3877 hw->active_pkg_ver.draft, 3878 hw->pkg_name, 3879 hw->pkg_ver.major, 3880 hw->pkg_ver.minor, 3881 hw->pkg_ver.update, 3882 hw->pkg_ver.draft); 3883 } else { 3884 dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n"); 3885 *status = ICE_ERR_NOT_SUPPORTED; 3886 } 3887 break; 3888 case ICE_ERR_FW_DDP_MISMATCH: 3889 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); 3890 break; 3891 case ICE_ERR_BUF_TOO_SHORT: 3892 case ICE_ERR_CFG: 3893 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); 3894 break; 3895 case ICE_ERR_NOT_SUPPORTED: 3896 /* Package File version not supported */ 3897 if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ || 3898 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && 3899 hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR)) 3900 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); 3901 else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ || 3902 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && 3903 hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR)) 3904 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", 3905 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 3906 break; 3907 case ICE_ERR_AQ_ERROR: 3908 switch (hw->pkg_dwnld_status) { 3909 case ICE_AQ_RC_ENOSEC: 3910 case ICE_AQ_RC_EBADSIG: 3911 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); 3912 return; 3913 case ICE_AQ_RC_ESVN: 3914 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); 3915 return; 3916 case ICE_AQ_RC_EBADMAN: 3917 case ICE_AQ_RC_EBADBUF: 3918 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); 3919 /* poll for reset to complete */ 3920 if (ice_check_reset(hw)) 3921 dev_err(dev, "Error resetting device. Please reload the driver\n"); 3922 return; 3923 default: 3924 break; 3925 } 3926 fallthrough; 3927 default: 3928 dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n", 3929 *status); 3930 break; 3931 } 3932 } 3933 3934 /** 3935 * ice_load_pkg - load/reload the DDP Package file 3936 * @firmware: firmware structure when firmware requested or NULL for reload 3937 * @pf: pointer to the PF instance 3938 * 3939 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and 3940 * initialize HW tables. 3941 */ 3942 static void 3943 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) 3944 { 3945 enum ice_status status = ICE_ERR_PARAM; 3946 struct device *dev = ice_pf_to_dev(pf); 3947 struct ice_hw *hw = &pf->hw; 3948 3949 /* Load DDP Package */ 3950 if (firmware && !hw->pkg_copy) { 3951 status = ice_copy_and_init_pkg(hw, firmware->data, 3952 firmware->size); 3953 ice_log_pkg_init(hw, &status); 3954 } else if (!firmware && hw->pkg_copy) { 3955 /* Reload package during rebuild after CORER/GLOBR reset */ 3956 status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); 3957 ice_log_pkg_init(hw, &status); 3958 } else { 3959 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); 3960 } 3961 3962 if (status) { 3963 /* Safe Mode */ 3964 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 3965 return; 3966 } 3967 3968 /* Successful download package is the precondition for advanced 3969 * features, hence setting the ICE_FLAG_ADV_FEATURES flag 3970 */ 3971 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 3972 } 3973 3974 /** 3975 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines 3976 * @pf: pointer to the PF structure 3977 * 3978 * There is no error returned here because the driver should be able to handle 3979 * 128 Byte cache lines, so we only print a warning in case issues are seen, 3980 * specifically with Tx. 3981 */ 3982 static void ice_verify_cacheline_size(struct ice_pf *pf) 3983 { 3984 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) 3985 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", 3986 ICE_CACHE_LINE_BYTES); 3987 } 3988 3989 /** 3990 * ice_send_version - update firmware with driver version 3991 * @pf: PF struct 3992 * 3993 * Returns ICE_SUCCESS on success, else error code 3994 */ 3995 static enum ice_status ice_send_version(struct ice_pf *pf) 3996 { 3997 struct ice_driver_ver dv; 3998 3999 dv.major_ver = 0xff; 4000 dv.minor_ver = 0xff; 4001 dv.build_ver = 0xff; 4002 dv.subbuild_ver = 0; 4003 strscpy((char *)dv.driver_string, UTS_RELEASE, 4004 sizeof(dv.driver_string)); 4005 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); 4006 } 4007 4008 /** 4009 * ice_init_fdir - Initialize flow director VSI and configuration 4010 * @pf: pointer to the PF instance 4011 * 4012 * returns 0 on success, negative on error 4013 */ 4014 static int ice_init_fdir(struct ice_pf *pf) 4015 { 4016 struct device *dev = ice_pf_to_dev(pf); 4017 struct ice_vsi *ctrl_vsi; 4018 int err; 4019 4020 /* Side Band Flow Director needs to have a control VSI. 4021 * Allocate it and store it in the PF. 4022 */ 4023 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); 4024 if (!ctrl_vsi) { 4025 dev_dbg(dev, "could not create control VSI\n"); 4026 return -ENOMEM; 4027 } 4028 4029 err = ice_vsi_open_ctrl(ctrl_vsi); 4030 if (err) { 4031 dev_dbg(dev, "could not open control VSI\n"); 4032 goto err_vsi_open; 4033 } 4034 4035 mutex_init(&pf->hw.fdir_fltr_lock); 4036 4037 err = ice_fdir_create_dflt_rules(pf); 4038 if (err) 4039 goto err_fdir_rule; 4040 4041 return 0; 4042 4043 err_fdir_rule: 4044 ice_fdir_release_flows(&pf->hw); 4045 ice_vsi_close(ctrl_vsi); 4046 err_vsi_open: 4047 ice_vsi_release(ctrl_vsi); 4048 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { 4049 pf->vsi[pf->ctrl_vsi_idx] = NULL; 4050 pf->ctrl_vsi_idx = ICE_NO_VSI; 4051 } 4052 return err; 4053 } 4054 4055 /** 4056 * ice_get_opt_fw_name - return optional firmware file name or NULL 4057 * @pf: pointer to the PF instance 4058 */ 4059 static char *ice_get_opt_fw_name(struct ice_pf *pf) 4060 { 4061 /* Optional firmware name same as default with additional dash 4062 * followed by a EUI-64 identifier (PCIe Device Serial Number) 4063 */ 4064 struct pci_dev *pdev = pf->pdev; 4065 char *opt_fw_filename; 4066 u64 dsn; 4067 4068 /* Determine the name of the optional file using the DSN (two 4069 * dwords following the start of the DSN Capability). 4070 */ 4071 dsn = pci_get_dsn(pdev); 4072 if (!dsn) 4073 return NULL; 4074 4075 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); 4076 if (!opt_fw_filename) 4077 return NULL; 4078 4079 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg", 4080 ICE_DDP_PKG_PATH, dsn); 4081 4082 return opt_fw_filename; 4083 } 4084 4085 /** 4086 * ice_request_fw - Device initialization routine 4087 * @pf: pointer to the PF instance 4088 */ 4089 static void ice_request_fw(struct ice_pf *pf) 4090 { 4091 char *opt_fw_filename = ice_get_opt_fw_name(pf); 4092 const struct firmware *firmware = NULL; 4093 struct device *dev = ice_pf_to_dev(pf); 4094 int err = 0; 4095 4096 /* optional device-specific DDP (if present) overrides the default DDP 4097 * package file. kernel logs a debug message if the file doesn't exist, 4098 * and warning messages for other errors. 4099 */ 4100 if (opt_fw_filename) { 4101 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); 4102 if (err) { 4103 kfree(opt_fw_filename); 4104 goto dflt_pkg_load; 4105 } 4106 4107 /* request for firmware was successful. Download to device */ 4108 ice_load_pkg(firmware, pf); 4109 kfree(opt_fw_filename); 4110 release_firmware(firmware); 4111 return; 4112 } 4113 4114 dflt_pkg_load: 4115 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); 4116 if (err) { 4117 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); 4118 return; 4119 } 4120 4121 /* request for firmware was successful. Download to device */ 4122 ice_load_pkg(firmware, pf); 4123 release_firmware(firmware); 4124 } 4125 4126 /** 4127 * ice_print_wake_reason - show the wake up cause in the log 4128 * @pf: pointer to the PF struct 4129 */ 4130 static void ice_print_wake_reason(struct ice_pf *pf) 4131 { 4132 u32 wus = pf->wakeup_reason; 4133 const char *wake_str; 4134 4135 /* if no wake event, nothing to print */ 4136 if (!wus) 4137 return; 4138 4139 if (wus & PFPM_WUS_LNKC_M) 4140 wake_str = "Link\n"; 4141 else if (wus & PFPM_WUS_MAG_M) 4142 wake_str = "Magic Packet\n"; 4143 else if (wus & PFPM_WUS_MNG_M) 4144 wake_str = "Management\n"; 4145 else if (wus & PFPM_WUS_FW_RST_WK_M) 4146 wake_str = "Firmware Reset\n"; 4147 else 4148 wake_str = "Unknown\n"; 4149 4150 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); 4151 } 4152 4153 /** 4154 * ice_register_netdev - register netdev and devlink port 4155 * @pf: pointer to the PF struct 4156 */ 4157 static int ice_register_netdev(struct ice_pf *pf) 4158 { 4159 struct ice_vsi *vsi; 4160 int err = 0; 4161 4162 vsi = ice_get_main_vsi(pf); 4163 if (!vsi || !vsi->netdev) 4164 return -EIO; 4165 4166 err = register_netdev(vsi->netdev); 4167 if (err) 4168 goto err_register_netdev; 4169 4170 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 4171 netif_carrier_off(vsi->netdev); 4172 netif_tx_stop_all_queues(vsi->netdev); 4173 err = ice_devlink_create_port(vsi); 4174 if (err) 4175 goto err_devlink_create; 4176 4177 devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev); 4178 4179 return 0; 4180 err_devlink_create: 4181 unregister_netdev(vsi->netdev); 4182 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 4183 err_register_netdev: 4184 free_netdev(vsi->netdev); 4185 vsi->netdev = NULL; 4186 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 4187 return err; 4188 } 4189 4190 /** 4191 * ice_probe - Device initialization routine 4192 * @pdev: PCI device information struct 4193 * @ent: entry in ice_pci_tbl 4194 * 4195 * Returns 0 on success, negative on failure 4196 */ 4197 static int 4198 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) 4199 { 4200 struct device *dev = &pdev->dev; 4201 struct ice_pf *pf; 4202 struct ice_hw *hw; 4203 int i, err; 4204 4205 if (pdev->is_virtfn) { 4206 dev_err(dev, "can't probe a virtual function\n"); 4207 return -EINVAL; 4208 } 4209 4210 /* this driver uses devres, see 4211 * Documentation/driver-api/driver-model/devres.rst 4212 */ 4213 err = pcim_enable_device(pdev); 4214 if (err) 4215 return err; 4216 4217 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); 4218 if (err) { 4219 dev_err(dev, "BAR0 I/O map error %d\n", err); 4220 return err; 4221 } 4222 4223 pf = ice_allocate_pf(dev); 4224 if (!pf) 4225 return -ENOMEM; 4226 4227 /* initialize Auxiliary index to invalid value */ 4228 pf->aux_idx = -1; 4229 4230 /* set up for high or low DMA */ 4231 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 4232 if (err) 4233 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 4234 if (err) { 4235 dev_err(dev, "DMA configuration failed: 0x%x\n", err); 4236 return err; 4237 } 4238 4239 pci_enable_pcie_error_reporting(pdev); 4240 pci_set_master(pdev); 4241 4242 pf->pdev = pdev; 4243 pci_set_drvdata(pdev, pf); 4244 set_bit(ICE_DOWN, pf->state); 4245 /* Disable service task until DOWN bit is cleared */ 4246 set_bit(ICE_SERVICE_DIS, pf->state); 4247 4248 hw = &pf->hw; 4249 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; 4250 pci_save_state(pdev); 4251 4252 hw->back = pf; 4253 hw->vendor_id = pdev->vendor; 4254 hw->device_id = pdev->device; 4255 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 4256 hw->subsystem_vendor_id = pdev->subsystem_vendor; 4257 hw->subsystem_device_id = pdev->subsystem_device; 4258 hw->bus.device = PCI_SLOT(pdev->devfn); 4259 hw->bus.func = PCI_FUNC(pdev->devfn); 4260 ice_set_ctrlq_len(hw); 4261 4262 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); 4263 4264 err = ice_devlink_register(pf); 4265 if (err) { 4266 dev_err(dev, "ice_devlink_register failed: %d\n", err); 4267 goto err_exit_unroll; 4268 } 4269 4270 #ifndef CONFIG_DYNAMIC_DEBUG 4271 if (debug < -1) 4272 hw->debug_mask = debug; 4273 #endif 4274 4275 err = ice_init_hw(hw); 4276 if (err) { 4277 dev_err(dev, "ice_init_hw failed: %d\n", err); 4278 err = -EIO; 4279 goto err_exit_unroll; 4280 } 4281 4282 ice_request_fw(pf); 4283 4284 /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be 4285 * set in pf->state, which will cause ice_is_safe_mode to return 4286 * true 4287 */ 4288 if (ice_is_safe_mode(pf)) { 4289 dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n"); 4290 /* we already got function/device capabilities but these don't 4291 * reflect what the driver needs to do in safe mode. Instead of 4292 * adding conditional logic everywhere to ignore these 4293 * device/function capabilities, override them. 4294 */ 4295 ice_set_safe_mode_caps(hw); 4296 } 4297 4298 err = ice_init_pf(pf); 4299 if (err) { 4300 dev_err(dev, "ice_init_pf failed: %d\n", err); 4301 goto err_init_pf_unroll; 4302 } 4303 4304 ice_devlink_init_regions(pf); 4305 4306 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; 4307 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; 4308 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; 4309 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; 4310 i = 0; 4311 if (pf->hw.tnl.valid_count[TNL_VXLAN]) { 4312 pf->hw.udp_tunnel_nic.tables[i].n_entries = 4313 pf->hw.tnl.valid_count[TNL_VXLAN]; 4314 pf->hw.udp_tunnel_nic.tables[i].tunnel_types = 4315 UDP_TUNNEL_TYPE_VXLAN; 4316 i++; 4317 } 4318 if (pf->hw.tnl.valid_count[TNL_GENEVE]) { 4319 pf->hw.udp_tunnel_nic.tables[i].n_entries = 4320 pf->hw.tnl.valid_count[TNL_GENEVE]; 4321 pf->hw.udp_tunnel_nic.tables[i].tunnel_types = 4322 UDP_TUNNEL_TYPE_GENEVE; 4323 i++; 4324 } 4325 4326 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; 4327 if (!pf->num_alloc_vsi) { 4328 err = -EIO; 4329 goto err_init_pf_unroll; 4330 } 4331 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { 4332 dev_warn(&pf->pdev->dev, 4333 "limiting the VSI count due to UDP tunnel limitation %d > %d\n", 4334 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); 4335 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; 4336 } 4337 4338 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), 4339 GFP_KERNEL); 4340 if (!pf->vsi) { 4341 err = -ENOMEM; 4342 goto err_init_pf_unroll; 4343 } 4344 4345 err = ice_init_interrupt_scheme(pf); 4346 if (err) { 4347 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); 4348 err = -EIO; 4349 goto err_init_vsi_unroll; 4350 } 4351 4352 /* In case of MSIX we are going to setup the misc vector right here 4353 * to handle admin queue events etc. In case of legacy and MSI 4354 * the misc functionality and queue processing is combined in 4355 * the same vector and that gets setup at open. 4356 */ 4357 err = ice_req_irq_msix_misc(pf); 4358 if (err) { 4359 dev_err(dev, "setup of misc vector failed: %d\n", err); 4360 goto err_init_interrupt_unroll; 4361 } 4362 4363 /* create switch struct for the switch element created by FW on boot */ 4364 pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); 4365 if (!pf->first_sw) { 4366 err = -ENOMEM; 4367 goto err_msix_misc_unroll; 4368 } 4369 4370 if (hw->evb_veb) 4371 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; 4372 else 4373 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; 4374 4375 pf->first_sw->pf = pf; 4376 4377 /* record the sw_id available for later use */ 4378 pf->first_sw->sw_id = hw->port_info->sw_id; 4379 4380 err = ice_setup_pf_sw(pf); 4381 if (err) { 4382 dev_err(dev, "probe failed due to setup PF switch: %d\n", err); 4383 goto err_alloc_sw_unroll; 4384 } 4385 4386 clear_bit(ICE_SERVICE_DIS, pf->state); 4387 4388 /* tell the firmware we are up */ 4389 err = ice_send_version(pf); 4390 if (err) { 4391 dev_err(dev, "probe failed sending driver version %s. error: %d\n", 4392 UTS_RELEASE, err); 4393 goto err_send_version_unroll; 4394 } 4395 4396 /* since everything is good, start the service timer */ 4397 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 4398 4399 err = ice_init_link_events(pf->hw.port_info); 4400 if (err) { 4401 dev_err(dev, "ice_init_link_events failed: %d\n", err); 4402 goto err_send_version_unroll; 4403 } 4404 4405 /* not a fatal error if this fails */ 4406 err = ice_init_nvm_phy_type(pf->hw.port_info); 4407 if (err) 4408 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); 4409 4410 /* not a fatal error if this fails */ 4411 err = ice_update_link_info(pf->hw.port_info); 4412 if (err) 4413 dev_err(dev, "ice_update_link_info failed: %d\n", err); 4414 4415 ice_init_link_dflt_override(pf->hw.port_info); 4416 4417 ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err); 4418 4419 /* if media available, initialize PHY settings */ 4420 if (pf->hw.port_info->phy.link_info.link_info & 4421 ICE_AQ_MEDIA_AVAILABLE) { 4422 /* not a fatal error if this fails */ 4423 err = ice_init_phy_user_cfg(pf->hw.port_info); 4424 if (err) 4425 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); 4426 4427 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { 4428 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4429 4430 if (vsi) 4431 ice_configure_phy(vsi); 4432 } 4433 } else { 4434 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); 4435 } 4436 4437 ice_verify_cacheline_size(pf); 4438 4439 /* Save wakeup reason register for later use */ 4440 pf->wakeup_reason = rd32(hw, PFPM_WUS); 4441 4442 /* check for a power management event */ 4443 ice_print_wake_reason(pf); 4444 4445 /* clear wake status, all bits */ 4446 wr32(hw, PFPM_WUS, U32_MAX); 4447 4448 /* Disable WoL at init, wait for user to enable */ 4449 device_set_wakeup_enable(dev, false); 4450 4451 if (ice_is_safe_mode(pf)) { 4452 ice_set_safe_mode_vlan_cfg(pf); 4453 goto probe_done; 4454 } 4455 4456 /* initialize DDP driven features */ 4457 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 4458 ice_ptp_init(pf); 4459 4460 /* Note: Flow director init failure is non-fatal to load */ 4461 if (ice_init_fdir(pf)) 4462 dev_err(dev, "could not initialize flow director\n"); 4463 4464 /* Note: DCB init failure is non-fatal to load */ 4465 if (ice_init_pf_dcb(pf, false)) { 4466 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 4467 clear_bit(ICE_FLAG_DCB_ENA, pf->flags); 4468 } else { 4469 ice_cfg_lldp_mib_change(&pf->hw, true); 4470 } 4471 4472 if (ice_init_lag(pf)) 4473 dev_warn(dev, "Failed to init link aggregation support\n"); 4474 4475 /* print PCI link speed and width */ 4476 pcie_print_link_status(pf->pdev); 4477 4478 probe_done: 4479 err = ice_register_netdev(pf); 4480 if (err) 4481 goto err_netdev_reg; 4482 4483 /* ready to go, so clear down state bit */ 4484 clear_bit(ICE_DOWN, pf->state); 4485 if (ice_is_aux_ena(pf)) { 4486 pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL); 4487 if (pf->aux_idx < 0) { 4488 dev_err(dev, "Failed to allocate device ID for AUX driver\n"); 4489 err = -ENOMEM; 4490 goto err_netdev_reg; 4491 } 4492 4493 err = ice_init_rdma(pf); 4494 if (err) { 4495 dev_err(dev, "Failed to initialize RDMA: %d\n", err); 4496 err = -EIO; 4497 goto err_init_aux_unroll; 4498 } 4499 } else { 4500 dev_warn(dev, "RDMA is not supported on this device\n"); 4501 } 4502 4503 return 0; 4504 4505 err_init_aux_unroll: 4506 pf->adev = NULL; 4507 ida_free(&ice_aux_ida, pf->aux_idx); 4508 err_netdev_reg: 4509 err_send_version_unroll: 4510 ice_vsi_release_all(pf); 4511 err_alloc_sw_unroll: 4512 set_bit(ICE_SERVICE_DIS, pf->state); 4513 set_bit(ICE_DOWN, pf->state); 4514 devm_kfree(dev, pf->first_sw); 4515 err_msix_misc_unroll: 4516 ice_free_irq_msix_misc(pf); 4517 err_init_interrupt_unroll: 4518 ice_clear_interrupt_scheme(pf); 4519 err_init_vsi_unroll: 4520 devm_kfree(dev, pf->vsi); 4521 err_init_pf_unroll: 4522 ice_deinit_pf(pf); 4523 ice_devlink_destroy_regions(pf); 4524 ice_deinit_hw(hw); 4525 err_exit_unroll: 4526 ice_devlink_unregister(pf); 4527 pci_disable_pcie_error_reporting(pdev); 4528 pci_disable_device(pdev); 4529 return err; 4530 } 4531 4532 /** 4533 * ice_set_wake - enable or disable Wake on LAN 4534 * @pf: pointer to the PF struct 4535 * 4536 * Simple helper for WoL control 4537 */ 4538 static void ice_set_wake(struct ice_pf *pf) 4539 { 4540 struct ice_hw *hw = &pf->hw; 4541 bool wol = pf->wol_ena; 4542 4543 /* clear wake state, otherwise new wake events won't fire */ 4544 wr32(hw, PFPM_WUS, U32_MAX); 4545 4546 /* enable / disable APM wake up, no RMW needed */ 4547 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); 4548 4549 /* set magic packet filter enabled */ 4550 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0); 4551 } 4552 4553 /** 4554 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet 4555 * @pf: pointer to the PF struct 4556 * 4557 * Issue firmware command to enable multicast magic wake, making 4558 * sure that any locally administered address (LAA) is used for 4559 * wake, and that PF reset doesn't undo the LAA. 4560 */ 4561 static void ice_setup_mc_magic_wake(struct ice_pf *pf) 4562 { 4563 struct device *dev = ice_pf_to_dev(pf); 4564 struct ice_hw *hw = &pf->hw; 4565 enum ice_status status; 4566 u8 mac_addr[ETH_ALEN]; 4567 struct ice_vsi *vsi; 4568 u8 flags; 4569 4570 if (!pf->wol_ena) 4571 return; 4572 4573 vsi = ice_get_main_vsi(pf); 4574 if (!vsi) 4575 return; 4576 4577 /* Get current MAC address in case it's an LAA */ 4578 if (vsi->netdev) 4579 ether_addr_copy(mac_addr, vsi->netdev->dev_addr); 4580 else 4581 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 4582 4583 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN | 4584 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL | 4585 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP; 4586 4587 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); 4588 if (status) 4589 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n", 4590 ice_stat_str(status), 4591 ice_aq_str(hw->adminq.sq_last_status)); 4592 } 4593 4594 /** 4595 * ice_remove - Device removal routine 4596 * @pdev: PCI device information struct 4597 */ 4598 static void ice_remove(struct pci_dev *pdev) 4599 { 4600 struct ice_pf *pf = pci_get_drvdata(pdev); 4601 int i; 4602 4603 if (!pf) 4604 return; 4605 4606 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { 4607 if (!ice_is_reset_in_progress(pf->state)) 4608 break; 4609 msleep(100); 4610 } 4611 4612 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { 4613 set_bit(ICE_VF_RESETS_DISABLED, pf->state); 4614 ice_free_vfs(pf); 4615 } 4616 4617 ice_service_task_stop(pf); 4618 4619 ice_aq_cancel_waiting_tasks(pf); 4620 ice_unplug_aux_dev(pf); 4621 if (pf->aux_idx >= 0) 4622 ida_free(&ice_aux_ida, pf->aux_idx); 4623 set_bit(ICE_DOWN, pf->state); 4624 4625 mutex_destroy(&(&pf->hw)->fdir_fltr_lock); 4626 ice_deinit_lag(pf); 4627 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 4628 ice_ptp_release(pf); 4629 if (!ice_is_safe_mode(pf)) 4630 ice_remove_arfs(pf); 4631 ice_setup_mc_magic_wake(pf); 4632 ice_vsi_release_all(pf); 4633 ice_set_wake(pf); 4634 ice_free_irq_msix_misc(pf); 4635 ice_for_each_vsi(pf, i) { 4636 if (!pf->vsi[i]) 4637 continue; 4638 ice_vsi_free_q_vectors(pf->vsi[i]); 4639 } 4640 ice_deinit_pf(pf); 4641 ice_devlink_destroy_regions(pf); 4642 ice_deinit_hw(&pf->hw); 4643 ice_devlink_unregister(pf); 4644 4645 /* Issue a PFR as part of the prescribed driver unload flow. Do not 4646 * do it via ice_schedule_reset() since there is no need to rebuild 4647 * and the service task is already stopped. 4648 */ 4649 ice_reset(&pf->hw, ICE_RESET_PFR); 4650 pci_wait_for_pending_transaction(pdev); 4651 ice_clear_interrupt_scheme(pf); 4652 pci_disable_pcie_error_reporting(pdev); 4653 pci_disable_device(pdev); 4654 } 4655 4656 /** 4657 * ice_shutdown - PCI callback for shutting down device 4658 * @pdev: PCI device information struct 4659 */ 4660 static void ice_shutdown(struct pci_dev *pdev) 4661 { 4662 struct ice_pf *pf = pci_get_drvdata(pdev); 4663 4664 ice_remove(pdev); 4665 4666 if (system_state == SYSTEM_POWER_OFF) { 4667 pci_wake_from_d3(pdev, pf->wol_ena); 4668 pci_set_power_state(pdev, PCI_D3hot); 4669 } 4670 } 4671 4672 #ifdef CONFIG_PM 4673 /** 4674 * ice_prepare_for_shutdown - prep for PCI shutdown 4675 * @pf: board private structure 4676 * 4677 * Inform or close all dependent features in prep for PCI device shutdown 4678 */ 4679 static void ice_prepare_for_shutdown(struct ice_pf *pf) 4680 { 4681 struct ice_hw *hw = &pf->hw; 4682 u32 v; 4683 4684 /* Notify VFs of impending reset */ 4685 if (ice_check_sq_alive(hw, &hw->mailboxq)) 4686 ice_vc_notify_reset(pf); 4687 4688 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); 4689 4690 /* disable the VSIs and their queues that are not already DOWN */ 4691 ice_pf_dis_all_vsi(pf, false); 4692 4693 ice_for_each_vsi(pf, v) 4694 if (pf->vsi[v]) 4695 pf->vsi[v]->vsi_num = 0; 4696 4697 ice_shutdown_all_ctrlq(hw); 4698 } 4699 4700 /** 4701 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme 4702 * @pf: board private structure to reinitialize 4703 * 4704 * This routine reinitialize interrupt scheme that was cleared during 4705 * power management suspend callback. 4706 * 4707 * This should be called during resume routine to re-allocate the q_vectors 4708 * and reacquire interrupts. 4709 */ 4710 static int ice_reinit_interrupt_scheme(struct ice_pf *pf) 4711 { 4712 struct device *dev = ice_pf_to_dev(pf); 4713 int ret, v; 4714 4715 /* Since we clear MSIX flag during suspend, we need to 4716 * set it back during resume... 4717 */ 4718 4719 ret = ice_init_interrupt_scheme(pf); 4720 if (ret) { 4721 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret); 4722 return ret; 4723 } 4724 4725 /* Remap vectors and rings, after successful re-init interrupts */ 4726 ice_for_each_vsi(pf, v) { 4727 if (!pf->vsi[v]) 4728 continue; 4729 4730 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); 4731 if (ret) 4732 goto err_reinit; 4733 ice_vsi_map_rings_to_vectors(pf->vsi[v]); 4734 } 4735 4736 ret = ice_req_irq_msix_misc(pf); 4737 if (ret) { 4738 dev_err(dev, "Setting up misc vector failed after device suspend %d\n", 4739 ret); 4740 goto err_reinit; 4741 } 4742 4743 return 0; 4744 4745 err_reinit: 4746 while (v--) 4747 if (pf->vsi[v]) 4748 ice_vsi_free_q_vectors(pf->vsi[v]); 4749 4750 return ret; 4751 } 4752 4753 /** 4754 * ice_suspend 4755 * @dev: generic device information structure 4756 * 4757 * Power Management callback to quiesce the device and prepare 4758 * for D3 transition. 4759 */ 4760 static int __maybe_unused ice_suspend(struct device *dev) 4761 { 4762 struct pci_dev *pdev = to_pci_dev(dev); 4763 struct ice_pf *pf; 4764 int disabled, v; 4765 4766 pf = pci_get_drvdata(pdev); 4767 4768 if (!ice_pf_state_is_nominal(pf)) { 4769 dev_err(dev, "Device is not ready, no need to suspend it\n"); 4770 return -EBUSY; 4771 } 4772 4773 /* Stop watchdog tasks until resume completion. 4774 * Even though it is most likely that the service task is 4775 * disabled if the device is suspended or down, the service task's 4776 * state is controlled by a different state bit, and we should 4777 * store and honor whatever state that bit is in at this point. 4778 */ 4779 disabled = ice_service_task_stop(pf); 4780 4781 ice_unplug_aux_dev(pf); 4782 4783 /* Already suspended?, then there is nothing to do */ 4784 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { 4785 if (!disabled) 4786 ice_service_task_restart(pf); 4787 return 0; 4788 } 4789 4790 if (test_bit(ICE_DOWN, pf->state) || 4791 ice_is_reset_in_progress(pf->state)) { 4792 dev_err(dev, "can't suspend device in reset or already down\n"); 4793 if (!disabled) 4794 ice_service_task_restart(pf); 4795 return 0; 4796 } 4797 4798 ice_setup_mc_magic_wake(pf); 4799 4800 ice_prepare_for_shutdown(pf); 4801 4802 ice_set_wake(pf); 4803 4804 /* Free vectors, clear the interrupt scheme and release IRQs 4805 * for proper hibernation, especially with large number of CPUs. 4806 * Otherwise hibernation might fail when mapping all the vectors back 4807 * to CPU0. 4808 */ 4809 ice_free_irq_msix_misc(pf); 4810 ice_for_each_vsi(pf, v) { 4811 if (!pf->vsi[v]) 4812 continue; 4813 ice_vsi_free_q_vectors(pf->vsi[v]); 4814 } 4815 ice_free_cpu_rx_rmap(ice_get_main_vsi(pf)); 4816 ice_clear_interrupt_scheme(pf); 4817 4818 pci_save_state(pdev); 4819 pci_wake_from_d3(pdev, pf->wol_ena); 4820 pci_set_power_state(pdev, PCI_D3hot); 4821 return 0; 4822 } 4823 4824 /** 4825 * ice_resume - PM callback for waking up from D3 4826 * @dev: generic device information structure 4827 */ 4828 static int __maybe_unused ice_resume(struct device *dev) 4829 { 4830 struct pci_dev *pdev = to_pci_dev(dev); 4831 enum ice_reset_req reset_type; 4832 struct ice_pf *pf; 4833 struct ice_hw *hw; 4834 int ret; 4835 4836 pci_set_power_state(pdev, PCI_D0); 4837 pci_restore_state(pdev); 4838 pci_save_state(pdev); 4839 4840 if (!pci_device_is_present(pdev)) 4841 return -ENODEV; 4842 4843 ret = pci_enable_device_mem(pdev); 4844 if (ret) { 4845 dev_err(dev, "Cannot enable device after suspend\n"); 4846 return ret; 4847 } 4848 4849 pf = pci_get_drvdata(pdev); 4850 hw = &pf->hw; 4851 4852 pf->wakeup_reason = rd32(hw, PFPM_WUS); 4853 ice_print_wake_reason(pf); 4854 4855 /* We cleared the interrupt scheme when we suspended, so we need to 4856 * restore it now to resume device functionality. 4857 */ 4858 ret = ice_reinit_interrupt_scheme(pf); 4859 if (ret) 4860 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); 4861 4862 clear_bit(ICE_DOWN, pf->state); 4863 /* Now perform PF reset and rebuild */ 4864 reset_type = ICE_RESET_PFR; 4865 /* re-enable service task for reset, but allow reset to schedule it */ 4866 clear_bit(ICE_SERVICE_DIS, pf->state); 4867 4868 if (ice_schedule_reset(pf, reset_type)) 4869 dev_err(dev, "Reset during resume failed.\n"); 4870 4871 clear_bit(ICE_SUSPENDED, pf->state); 4872 ice_service_task_restart(pf); 4873 4874 /* Restart the service task */ 4875 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 4876 4877 return 0; 4878 } 4879 #endif /* CONFIG_PM */ 4880 4881 /** 4882 * ice_pci_err_detected - warning that PCI error has been detected 4883 * @pdev: PCI device information struct 4884 * @err: the type of PCI error 4885 * 4886 * Called to warn that something happened on the PCI bus and the error handling 4887 * is in progress. Allows the driver to gracefully prepare/handle PCI errors. 4888 */ 4889 static pci_ers_result_t 4890 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) 4891 { 4892 struct ice_pf *pf = pci_get_drvdata(pdev); 4893 4894 if (!pf) { 4895 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", 4896 __func__, err); 4897 return PCI_ERS_RESULT_DISCONNECT; 4898 } 4899 4900 if (!test_bit(ICE_SUSPENDED, pf->state)) { 4901 ice_service_task_stop(pf); 4902 4903 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { 4904 set_bit(ICE_PFR_REQ, pf->state); 4905 ice_prepare_for_reset(pf); 4906 } 4907 } 4908 4909 return PCI_ERS_RESULT_NEED_RESET; 4910 } 4911 4912 /** 4913 * ice_pci_err_slot_reset - a PCI slot reset has just happened 4914 * @pdev: PCI device information struct 4915 * 4916 * Called to determine if the driver can recover from the PCI slot reset by 4917 * using a register read to determine if the device is recoverable. 4918 */ 4919 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) 4920 { 4921 struct ice_pf *pf = pci_get_drvdata(pdev); 4922 pci_ers_result_t result; 4923 int err; 4924 u32 reg; 4925 4926 err = pci_enable_device_mem(pdev); 4927 if (err) { 4928 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", 4929 err); 4930 result = PCI_ERS_RESULT_DISCONNECT; 4931 } else { 4932 pci_set_master(pdev); 4933 pci_restore_state(pdev); 4934 pci_save_state(pdev); 4935 pci_wake_from_d3(pdev, false); 4936 4937 /* Check for life */ 4938 reg = rd32(&pf->hw, GLGEN_RTRIG); 4939 if (!reg) 4940 result = PCI_ERS_RESULT_RECOVERED; 4941 else 4942 result = PCI_ERS_RESULT_DISCONNECT; 4943 } 4944 4945 err = pci_aer_clear_nonfatal_status(pdev); 4946 if (err) 4947 dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n", 4948 err); 4949 /* non-fatal, continue */ 4950 4951 return result; 4952 } 4953 4954 /** 4955 * ice_pci_err_resume - restart operations after PCI error recovery 4956 * @pdev: PCI device information struct 4957 * 4958 * Called to allow the driver to bring things back up after PCI error and/or 4959 * reset recovery have finished 4960 */ 4961 static void ice_pci_err_resume(struct pci_dev *pdev) 4962 { 4963 struct ice_pf *pf = pci_get_drvdata(pdev); 4964 4965 if (!pf) { 4966 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", 4967 __func__); 4968 return; 4969 } 4970 4971 if (test_bit(ICE_SUSPENDED, pf->state)) { 4972 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", 4973 __func__); 4974 return; 4975 } 4976 4977 ice_restore_all_vfs_msi_state(pdev); 4978 4979 ice_do_reset(pf, ICE_RESET_PFR); 4980 ice_service_task_restart(pf); 4981 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 4982 } 4983 4984 /** 4985 * ice_pci_err_reset_prepare - prepare device driver for PCI reset 4986 * @pdev: PCI device information struct 4987 */ 4988 static void ice_pci_err_reset_prepare(struct pci_dev *pdev) 4989 { 4990 struct ice_pf *pf = pci_get_drvdata(pdev); 4991 4992 if (!test_bit(ICE_SUSPENDED, pf->state)) { 4993 ice_service_task_stop(pf); 4994 4995 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { 4996 set_bit(ICE_PFR_REQ, pf->state); 4997 ice_prepare_for_reset(pf); 4998 } 4999 } 5000 } 5001 5002 /** 5003 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin 5004 * @pdev: PCI device information struct 5005 */ 5006 static void ice_pci_err_reset_done(struct pci_dev *pdev) 5007 { 5008 ice_pci_err_resume(pdev); 5009 } 5010 5011 /* ice_pci_tbl - PCI Device ID Table 5012 * 5013 * Wildcard entries (PCI_ANY_ID) should come last 5014 * Last entry must be all 0s 5015 * 5016 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 5017 * Class, Class Mask, private data (not used) } 5018 */ 5019 static const struct pci_device_id ice_pci_tbl[] = { 5020 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, 5021 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, 5022 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, 5023 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 }, 5024 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 }, 5025 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 }, 5026 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 }, 5027 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 }, 5028 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 }, 5029 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 }, 5030 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 }, 5031 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 }, 5032 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 }, 5033 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 }, 5034 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 }, 5035 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 }, 5036 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 }, 5037 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 }, 5038 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 }, 5039 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 }, 5040 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 }, 5041 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 }, 5042 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 }, 5043 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 }, 5044 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 }, 5045 /* required last entry */ 5046 { 0, } 5047 }; 5048 MODULE_DEVICE_TABLE(pci, ice_pci_tbl); 5049 5050 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); 5051 5052 static const struct pci_error_handlers ice_pci_err_handler = { 5053 .error_detected = ice_pci_err_detected, 5054 .slot_reset = ice_pci_err_slot_reset, 5055 .reset_prepare = ice_pci_err_reset_prepare, 5056 .reset_done = ice_pci_err_reset_done, 5057 .resume = ice_pci_err_resume 5058 }; 5059 5060 static struct pci_driver ice_driver = { 5061 .name = KBUILD_MODNAME, 5062 .id_table = ice_pci_tbl, 5063 .probe = ice_probe, 5064 .remove = ice_remove, 5065 #ifdef CONFIG_PM 5066 .driver.pm = &ice_pm_ops, 5067 #endif /* CONFIG_PM */ 5068 .shutdown = ice_shutdown, 5069 .sriov_configure = ice_sriov_configure, 5070 .err_handler = &ice_pci_err_handler 5071 }; 5072 5073 /** 5074 * ice_module_init - Driver registration routine 5075 * 5076 * ice_module_init is the first routine called when the driver is 5077 * loaded. All it does is register with the PCI subsystem. 5078 */ 5079 static int __init ice_module_init(void) 5080 { 5081 int status; 5082 5083 pr_info("%s\n", ice_driver_string); 5084 pr_info("%s\n", ice_copyright); 5085 5086 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); 5087 if (!ice_wq) { 5088 pr_err("Failed to create workqueue\n"); 5089 return -ENOMEM; 5090 } 5091 5092 status = pci_register_driver(&ice_driver); 5093 if (status) { 5094 pr_err("failed to register PCI driver, err %d\n", status); 5095 destroy_workqueue(ice_wq); 5096 } 5097 5098 return status; 5099 } 5100 module_init(ice_module_init); 5101 5102 /** 5103 * ice_module_exit - Driver exit cleanup routine 5104 * 5105 * ice_module_exit is called just before the driver is removed 5106 * from memory. 5107 */ 5108 static void __exit ice_module_exit(void) 5109 { 5110 pci_unregister_driver(&ice_driver); 5111 destroy_workqueue(ice_wq); 5112 pr_info("module unloaded\n"); 5113 } 5114 module_exit(ice_module_exit); 5115 5116 /** 5117 * ice_set_mac_address - NDO callback to set MAC address 5118 * @netdev: network interface device structure 5119 * @pi: pointer to an address structure 5120 * 5121 * Returns 0 on success, negative on failure 5122 */ 5123 static int ice_set_mac_address(struct net_device *netdev, void *pi) 5124 { 5125 struct ice_netdev_priv *np = netdev_priv(netdev); 5126 struct ice_vsi *vsi = np->vsi; 5127 struct ice_pf *pf = vsi->back; 5128 struct ice_hw *hw = &pf->hw; 5129 struct sockaddr *addr = pi; 5130 enum ice_status status; 5131 u8 old_mac[ETH_ALEN]; 5132 u8 flags = 0; 5133 int err = 0; 5134 u8 *mac; 5135 5136 mac = (u8 *)addr->sa_data; 5137 5138 if (!is_valid_ether_addr(mac)) 5139 return -EADDRNOTAVAIL; 5140 5141 if (ether_addr_equal(netdev->dev_addr, mac)) { 5142 netdev_dbg(netdev, "already using mac %pM\n", mac); 5143 return 0; 5144 } 5145 5146 if (test_bit(ICE_DOWN, pf->state) || 5147 ice_is_reset_in_progress(pf->state)) { 5148 netdev_err(netdev, "can't set mac %pM. device not ready\n", 5149 mac); 5150 return -EBUSY; 5151 } 5152 5153 netif_addr_lock_bh(netdev); 5154 ether_addr_copy(old_mac, netdev->dev_addr); 5155 /* change the netdev's MAC address */ 5156 memcpy(netdev->dev_addr, mac, netdev->addr_len); 5157 netif_addr_unlock_bh(netdev); 5158 5159 /* Clean up old MAC filter. Not an error if old filter doesn't exist */ 5160 status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); 5161 if (status && status != ICE_ERR_DOES_NOT_EXIST) { 5162 err = -EADDRNOTAVAIL; 5163 goto err_update_filters; 5164 } 5165 5166 /* Add filter for new MAC. If filter exists, return success */ 5167 status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); 5168 if (status == ICE_ERR_ALREADY_EXISTS) 5169 /* Although this MAC filter is already present in hardware it's 5170 * possible in some cases (e.g. bonding) that dev_addr was 5171 * modified outside of the driver and needs to be restored back 5172 * to this value. 5173 */ 5174 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); 5175 else if (status) 5176 /* error if the new filter addition failed */ 5177 err = -EADDRNOTAVAIL; 5178 5179 err_update_filters: 5180 if (err) { 5181 netdev_err(netdev, "can't set MAC %pM. filter update failed\n", 5182 mac); 5183 netif_addr_lock_bh(netdev); 5184 ether_addr_copy(netdev->dev_addr, old_mac); 5185 netif_addr_unlock_bh(netdev); 5186 return err; 5187 } 5188 5189 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", 5190 netdev->dev_addr); 5191 5192 /* write new MAC address to the firmware */ 5193 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 5194 status = ice_aq_manage_mac_write(hw, mac, flags, NULL); 5195 if (status) { 5196 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n", 5197 mac, ice_stat_str(status)); 5198 } 5199 return 0; 5200 } 5201 5202 /** 5203 * ice_set_rx_mode - NDO callback to set the netdev filters 5204 * @netdev: network interface device structure 5205 */ 5206 static void ice_set_rx_mode(struct net_device *netdev) 5207 { 5208 struct ice_netdev_priv *np = netdev_priv(netdev); 5209 struct ice_vsi *vsi = np->vsi; 5210 5211 if (!vsi) 5212 return; 5213 5214 /* Set the flags to synchronize filters 5215 * ndo_set_rx_mode may be triggered even without a change in netdev 5216 * flags 5217 */ 5218 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); 5219 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); 5220 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); 5221 5222 /* schedule our worker thread which will take care of 5223 * applying the new filter changes 5224 */ 5225 ice_service_task_schedule(vsi->back); 5226 } 5227 5228 /** 5229 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate 5230 * @netdev: network interface device structure 5231 * @queue_index: Queue ID 5232 * @maxrate: maximum bandwidth in Mbps 5233 */ 5234 static int 5235 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) 5236 { 5237 struct ice_netdev_priv *np = netdev_priv(netdev); 5238 struct ice_vsi *vsi = np->vsi; 5239 enum ice_status status; 5240 u16 q_handle; 5241 u8 tc; 5242 5243 /* Validate maxrate requested is within permitted range */ 5244 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) { 5245 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n", 5246 maxrate, queue_index); 5247 return -EINVAL; 5248 } 5249 5250 q_handle = vsi->tx_rings[queue_index]->q_handle; 5251 tc = ice_dcb_get_tc(vsi, queue_index); 5252 5253 /* Set BW back to default, when user set maxrate to 0 */ 5254 if (!maxrate) 5255 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, 5256 q_handle, ICE_MAX_BW); 5257 else 5258 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, 5259 q_handle, ICE_MAX_BW, maxrate * 1000); 5260 if (status) { 5261 netdev_err(netdev, "Unable to set Tx max rate, error %s\n", 5262 ice_stat_str(status)); 5263 return -EIO; 5264 } 5265 5266 return 0; 5267 } 5268 5269 /** 5270 * ice_fdb_add - add an entry to the hardware database 5271 * @ndm: the input from the stack 5272 * @tb: pointer to array of nladdr (unused) 5273 * @dev: the net device pointer 5274 * @addr: the MAC address entry being added 5275 * @vid: VLAN ID 5276 * @flags: instructions from stack about fdb operation 5277 * @extack: netlink extended ack 5278 */ 5279 static int 5280 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], 5281 struct net_device *dev, const unsigned char *addr, u16 vid, 5282 u16 flags, struct netlink_ext_ack __always_unused *extack) 5283 { 5284 int err; 5285 5286 if (vid) { 5287 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); 5288 return -EINVAL; 5289 } 5290 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 5291 netdev_err(dev, "FDB only supports static addresses\n"); 5292 return -EINVAL; 5293 } 5294 5295 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 5296 err = dev_uc_add_excl(dev, addr); 5297 else if (is_multicast_ether_addr(addr)) 5298 err = dev_mc_add_excl(dev, addr); 5299 else 5300 err = -EINVAL; 5301 5302 /* Only return duplicate errors if NLM_F_EXCL is set */ 5303 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 5304 err = 0; 5305 5306 return err; 5307 } 5308 5309 /** 5310 * ice_fdb_del - delete an entry from the hardware database 5311 * @ndm: the input from the stack 5312 * @tb: pointer to array of nladdr (unused) 5313 * @dev: the net device pointer 5314 * @addr: the MAC address entry being added 5315 * @vid: VLAN ID 5316 */ 5317 static int 5318 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], 5319 struct net_device *dev, const unsigned char *addr, 5320 __always_unused u16 vid) 5321 { 5322 int err; 5323 5324 if (ndm->ndm_state & NUD_PERMANENT) { 5325 netdev_err(dev, "FDB only supports static addresses\n"); 5326 return -EINVAL; 5327 } 5328 5329 if (is_unicast_ether_addr(addr)) 5330 err = dev_uc_del(dev, addr); 5331 else if (is_multicast_ether_addr(addr)) 5332 err = dev_mc_del(dev, addr); 5333 else 5334 err = -EINVAL; 5335 5336 return err; 5337 } 5338 5339 /** 5340 * ice_set_features - set the netdev feature flags 5341 * @netdev: ptr to the netdev being adjusted 5342 * @features: the feature set that the stack is suggesting 5343 */ 5344 static int 5345 ice_set_features(struct net_device *netdev, netdev_features_t features) 5346 { 5347 struct ice_netdev_priv *np = netdev_priv(netdev); 5348 struct ice_vsi *vsi = np->vsi; 5349 struct ice_pf *pf = vsi->back; 5350 int ret = 0; 5351 5352 /* Don't set any netdev advanced features with device in Safe Mode */ 5353 if (ice_is_safe_mode(vsi->back)) { 5354 dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n"); 5355 return ret; 5356 } 5357 5358 /* Do not change setting during reset */ 5359 if (ice_is_reset_in_progress(pf->state)) { 5360 dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); 5361 return -EBUSY; 5362 } 5363 5364 /* Multiple features can be changed in one call so keep features in 5365 * separate if/else statements to guarantee each feature is checked 5366 */ 5367 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) 5368 ice_vsi_manage_rss_lut(vsi, true); 5369 else if (!(features & NETIF_F_RXHASH) && 5370 netdev->features & NETIF_F_RXHASH) 5371 ice_vsi_manage_rss_lut(vsi, false); 5372 5373 if ((features & NETIF_F_HW_VLAN_CTAG_RX) && 5374 !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 5375 ret = ice_vsi_manage_vlan_stripping(vsi, true); 5376 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && 5377 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 5378 ret = ice_vsi_manage_vlan_stripping(vsi, false); 5379 5380 if ((features & NETIF_F_HW_VLAN_CTAG_TX) && 5381 !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) 5382 ret = ice_vsi_manage_vlan_insertion(vsi); 5383 else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) && 5384 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) 5385 ret = ice_vsi_manage_vlan_insertion(vsi); 5386 5387 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && 5388 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 5389 ret = ice_cfg_vlan_pruning(vsi, true, false); 5390 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && 5391 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 5392 ret = ice_cfg_vlan_pruning(vsi, false, false); 5393 5394 if ((features & NETIF_F_NTUPLE) && 5395 !(netdev->features & NETIF_F_NTUPLE)) { 5396 ice_vsi_manage_fdir(vsi, true); 5397 ice_init_arfs(vsi); 5398 } else if (!(features & NETIF_F_NTUPLE) && 5399 (netdev->features & NETIF_F_NTUPLE)) { 5400 ice_vsi_manage_fdir(vsi, false); 5401 ice_clear_arfs(vsi); 5402 } 5403 5404 return ret; 5405 } 5406 5407 /** 5408 * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI 5409 * @vsi: VSI to setup VLAN properties for 5410 */ 5411 static int ice_vsi_vlan_setup(struct ice_vsi *vsi) 5412 { 5413 int ret = 0; 5414 5415 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 5416 ret = ice_vsi_manage_vlan_stripping(vsi, true); 5417 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) 5418 ret = ice_vsi_manage_vlan_insertion(vsi); 5419 5420 return ret; 5421 } 5422 5423 /** 5424 * ice_vsi_cfg - Setup the VSI 5425 * @vsi: the VSI being configured 5426 * 5427 * Return 0 on success and negative value on error 5428 */ 5429 int ice_vsi_cfg(struct ice_vsi *vsi) 5430 { 5431 int err; 5432 5433 if (vsi->netdev) { 5434 ice_set_rx_mode(vsi->netdev); 5435 5436 err = ice_vsi_vlan_setup(vsi); 5437 5438 if (err) 5439 return err; 5440 } 5441 ice_vsi_cfg_dcb_rings(vsi); 5442 5443 err = ice_vsi_cfg_lan_txqs(vsi); 5444 if (!err && ice_is_xdp_ena_vsi(vsi)) 5445 err = ice_vsi_cfg_xdp_txqs(vsi); 5446 if (!err) 5447 err = ice_vsi_cfg_rxqs(vsi); 5448 5449 return err; 5450 } 5451 5452 /* THEORY OF MODERATION: 5453 * The below code creates custom DIM profiles for use by this driver, because 5454 * the ice driver hardware works differently than the hardware that DIMLIB was 5455 * originally made for. ice hardware doesn't have packet count limits that 5456 * can trigger an interrupt, but it *does* have interrupt rate limit support, 5457 * and this code adds that capability to be used by the driver when it's using 5458 * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver 5459 * for how to "respond" to traffic and interrupts, so this driver uses a 5460 * slightly different set of moderation parameters to get best performance. 5461 */ 5462 struct ice_dim { 5463 /* the throttle rate for interrupts, basically worst case delay before 5464 * an initial interrupt fires, value is stored in microseconds. 5465 */ 5466 u16 itr; 5467 /* the rate limit for interrupts, which can cap a delay from a small 5468 * ITR at a certain amount of interrupts per second. f.e. a 2us ITR 5469 * could yield as much as 500,000 interrupts per second, but with a 5470 * 10us rate limit, it limits to 100,000 interrupts per second. Value 5471 * is stored in microseconds. 5472 */ 5473 u16 intrl; 5474 }; 5475 5476 /* Make a different profile for Rx that doesn't allow quite so aggressive 5477 * moderation at the high end (it maxes out at 128us or about 8k interrupts a 5478 * second. The INTRL/rate parameters here are only useful to cap small ITR 5479 * values, which is why for larger ITR's - like 128, which can only generate 5480 * 8k interrupts per second, there is no point to rate limit and the values 5481 * are set to zero. The rate limit values do affect latency, and so must 5482 * be reasonably small so to not impact latency sensitive tests. 5483 */ 5484 static const struct ice_dim rx_profile[] = { 5485 {2, 10}, 5486 {8, 16}, 5487 {32, 0}, 5488 {96, 0}, 5489 {128, 0} 5490 }; 5491 5492 /* The transmit profile, which has the same sorts of values 5493 * as the previous struct 5494 */ 5495 static const struct ice_dim tx_profile[] = { 5496 {2, 10}, 5497 {8, 16}, 5498 {64, 0}, 5499 {128, 0}, 5500 {256, 0} 5501 }; 5502 5503 static void ice_tx_dim_work(struct work_struct *work) 5504 { 5505 struct ice_ring_container *rc; 5506 struct ice_q_vector *q_vector; 5507 struct dim *dim; 5508 u16 itr, intrl; 5509 5510 dim = container_of(work, struct dim, work); 5511 rc = container_of(dim, struct ice_ring_container, dim); 5512 q_vector = container_of(rc, struct ice_q_vector, tx); 5513 5514 if (dim->profile_ix >= ARRAY_SIZE(tx_profile)) 5515 dim->profile_ix = ARRAY_SIZE(tx_profile) - 1; 5516 5517 /* look up the values in our local table */ 5518 itr = tx_profile[dim->profile_ix].itr; 5519 intrl = tx_profile[dim->profile_ix].intrl; 5520 5521 ice_trace(tx_dim_work, q_vector, dim); 5522 ice_write_itr(rc, itr); 5523 ice_write_intrl(q_vector, intrl); 5524 5525 dim->state = DIM_START_MEASURE; 5526 } 5527 5528 static void ice_rx_dim_work(struct work_struct *work) 5529 { 5530 struct ice_ring_container *rc; 5531 struct ice_q_vector *q_vector; 5532 struct dim *dim; 5533 u16 itr, intrl; 5534 5535 dim = container_of(work, struct dim, work); 5536 rc = container_of(dim, struct ice_ring_container, dim); 5537 q_vector = container_of(rc, struct ice_q_vector, rx); 5538 5539 if (dim->profile_ix >= ARRAY_SIZE(rx_profile)) 5540 dim->profile_ix = ARRAY_SIZE(rx_profile) - 1; 5541 5542 /* look up the values in our local table */ 5543 itr = rx_profile[dim->profile_ix].itr; 5544 intrl = rx_profile[dim->profile_ix].intrl; 5545 5546 ice_trace(rx_dim_work, q_vector, dim); 5547 ice_write_itr(rc, itr); 5548 ice_write_intrl(q_vector, intrl); 5549 5550 dim->state = DIM_START_MEASURE; 5551 } 5552 5553 /** 5554 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI 5555 * @vsi: the VSI being configured 5556 */ 5557 static void ice_napi_enable_all(struct ice_vsi *vsi) 5558 { 5559 int q_idx; 5560 5561 if (!vsi->netdev) 5562 return; 5563 5564 ice_for_each_q_vector(vsi, q_idx) { 5565 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 5566 5567 INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work); 5568 q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 5569 5570 INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work); 5571 q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 5572 5573 if (q_vector->rx.ring || q_vector->tx.ring) 5574 napi_enable(&q_vector->napi); 5575 } 5576 } 5577 5578 /** 5579 * ice_up_complete - Finish the last steps of bringing up a connection 5580 * @vsi: The VSI being configured 5581 * 5582 * Return 0 on success and negative value on error 5583 */ 5584 static int ice_up_complete(struct ice_vsi *vsi) 5585 { 5586 struct ice_pf *pf = vsi->back; 5587 int err; 5588 5589 ice_vsi_cfg_msix(vsi); 5590 5591 /* Enable only Rx rings, Tx rings were enabled by the FW when the 5592 * Tx queue group list was configured and the context bits were 5593 * programmed using ice_vsi_cfg_txqs 5594 */ 5595 err = ice_vsi_start_all_rx_rings(vsi); 5596 if (err) 5597 return err; 5598 5599 clear_bit(ICE_VSI_DOWN, vsi->state); 5600 ice_napi_enable_all(vsi); 5601 ice_vsi_ena_irq(vsi); 5602 5603 if (vsi->port_info && 5604 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && 5605 vsi->netdev) { 5606 ice_print_link_msg(vsi, true); 5607 netif_tx_start_all_queues(vsi->netdev); 5608 netif_carrier_on(vsi->netdev); 5609 } 5610 5611 ice_service_task_schedule(pf); 5612 5613 return 0; 5614 } 5615 5616 /** 5617 * ice_up - Bring the connection back up after being down 5618 * @vsi: VSI being configured 5619 */ 5620 int ice_up(struct ice_vsi *vsi) 5621 { 5622 int err; 5623 5624 err = ice_vsi_cfg(vsi); 5625 if (!err) 5626 err = ice_up_complete(vsi); 5627 5628 return err; 5629 } 5630 5631 /** 5632 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring 5633 * @ring: Tx or Rx ring to read stats from 5634 * @pkts: packets stats counter 5635 * @bytes: bytes stats counter 5636 * 5637 * This function fetches stats from the ring considering the atomic operations 5638 * that needs to be performed to read u64 values in 32 bit machine. 5639 */ 5640 static void 5641 ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes) 5642 { 5643 unsigned int start; 5644 *pkts = 0; 5645 *bytes = 0; 5646 5647 if (!ring) 5648 return; 5649 do { 5650 start = u64_stats_fetch_begin_irq(&ring->syncp); 5651 *pkts = ring->stats.pkts; 5652 *bytes = ring->stats.bytes; 5653 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); 5654 } 5655 5656 /** 5657 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters 5658 * @vsi: the VSI to be updated 5659 * @rings: rings to work on 5660 * @count: number of rings 5661 */ 5662 static void 5663 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings, 5664 u16 count) 5665 { 5666 struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; 5667 u16 i; 5668 5669 for (i = 0; i < count; i++) { 5670 struct ice_ring *ring; 5671 u64 pkts, bytes; 5672 5673 ring = READ_ONCE(rings[i]); 5674 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); 5675 vsi_stats->tx_packets += pkts; 5676 vsi_stats->tx_bytes += bytes; 5677 vsi->tx_restart += ring->tx_stats.restart_q; 5678 vsi->tx_busy += ring->tx_stats.tx_busy; 5679 vsi->tx_linearize += ring->tx_stats.tx_linearize; 5680 } 5681 } 5682 5683 /** 5684 * ice_update_vsi_ring_stats - Update VSI stats counters 5685 * @vsi: the VSI to be updated 5686 */ 5687 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) 5688 { 5689 struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; 5690 u64 pkts, bytes; 5691 int i; 5692 5693 /* reset netdev stats */ 5694 vsi_stats->tx_packets = 0; 5695 vsi_stats->tx_bytes = 0; 5696 vsi_stats->rx_packets = 0; 5697 vsi_stats->rx_bytes = 0; 5698 5699 /* reset non-netdev (extended) stats */ 5700 vsi->tx_restart = 0; 5701 vsi->tx_busy = 0; 5702 vsi->tx_linearize = 0; 5703 vsi->rx_buf_failed = 0; 5704 vsi->rx_page_failed = 0; 5705 5706 rcu_read_lock(); 5707 5708 /* update Tx rings counters */ 5709 ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq); 5710 5711 /* update Rx rings counters */ 5712 ice_for_each_rxq(vsi, i) { 5713 struct ice_ring *ring = READ_ONCE(vsi->rx_rings[i]); 5714 5715 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); 5716 vsi_stats->rx_packets += pkts; 5717 vsi_stats->rx_bytes += bytes; 5718 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; 5719 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; 5720 } 5721 5722 /* update XDP Tx rings counters */ 5723 if (ice_is_xdp_ena_vsi(vsi)) 5724 ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings, 5725 vsi->num_xdp_txq); 5726 5727 rcu_read_unlock(); 5728 } 5729 5730 /** 5731 * ice_update_vsi_stats - Update VSI stats counters 5732 * @vsi: the VSI to be updated 5733 */ 5734 void ice_update_vsi_stats(struct ice_vsi *vsi) 5735 { 5736 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; 5737 struct ice_eth_stats *cur_es = &vsi->eth_stats; 5738 struct ice_pf *pf = vsi->back; 5739 5740 if (test_bit(ICE_VSI_DOWN, vsi->state) || 5741 test_bit(ICE_CFG_BUSY, pf->state)) 5742 return; 5743 5744 /* get stats as recorded by Tx/Rx rings */ 5745 ice_update_vsi_ring_stats(vsi); 5746 5747 /* get VSI stats as recorded by the hardware */ 5748 ice_update_eth_stats(vsi); 5749 5750 cur_ns->tx_errors = cur_es->tx_errors; 5751 cur_ns->rx_dropped = cur_es->rx_discards; 5752 cur_ns->tx_dropped = cur_es->tx_discards; 5753 cur_ns->multicast = cur_es->rx_multicast; 5754 5755 /* update some more netdev stats if this is main VSI */ 5756 if (vsi->type == ICE_VSI_PF) { 5757 cur_ns->rx_crc_errors = pf->stats.crc_errors; 5758 cur_ns->rx_errors = pf->stats.crc_errors + 5759 pf->stats.illegal_bytes + 5760 pf->stats.rx_len_errors + 5761 pf->stats.rx_undersize + 5762 pf->hw_csum_rx_error + 5763 pf->stats.rx_jabber + 5764 pf->stats.rx_fragments + 5765 pf->stats.rx_oversize; 5766 cur_ns->rx_length_errors = pf->stats.rx_len_errors; 5767 /* record drops from the port level */ 5768 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; 5769 } 5770 } 5771 5772 /** 5773 * ice_update_pf_stats - Update PF port stats counters 5774 * @pf: PF whose stats needs to be updated 5775 */ 5776 void ice_update_pf_stats(struct ice_pf *pf) 5777 { 5778 struct ice_hw_port_stats *prev_ps, *cur_ps; 5779 struct ice_hw *hw = &pf->hw; 5780 u16 fd_ctr_base; 5781 u8 port; 5782 5783 port = hw->port_info->lport; 5784 prev_ps = &pf->stats_prev; 5785 cur_ps = &pf->stats; 5786 5787 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, 5788 &prev_ps->eth.rx_bytes, 5789 &cur_ps->eth.rx_bytes); 5790 5791 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, 5792 &prev_ps->eth.rx_unicast, 5793 &cur_ps->eth.rx_unicast); 5794 5795 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, 5796 &prev_ps->eth.rx_multicast, 5797 &cur_ps->eth.rx_multicast); 5798 5799 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, 5800 &prev_ps->eth.rx_broadcast, 5801 &cur_ps->eth.rx_broadcast); 5802 5803 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, 5804 &prev_ps->eth.rx_discards, 5805 &cur_ps->eth.rx_discards); 5806 5807 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, 5808 &prev_ps->eth.tx_bytes, 5809 &cur_ps->eth.tx_bytes); 5810 5811 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, 5812 &prev_ps->eth.tx_unicast, 5813 &cur_ps->eth.tx_unicast); 5814 5815 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, 5816 &prev_ps->eth.tx_multicast, 5817 &cur_ps->eth.tx_multicast); 5818 5819 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, 5820 &prev_ps->eth.tx_broadcast, 5821 &cur_ps->eth.tx_broadcast); 5822 5823 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, 5824 &prev_ps->tx_dropped_link_down, 5825 &cur_ps->tx_dropped_link_down); 5826 5827 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, 5828 &prev_ps->rx_size_64, &cur_ps->rx_size_64); 5829 5830 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, 5831 &prev_ps->rx_size_127, &cur_ps->rx_size_127); 5832 5833 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, 5834 &prev_ps->rx_size_255, &cur_ps->rx_size_255); 5835 5836 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, 5837 &prev_ps->rx_size_511, &cur_ps->rx_size_511); 5838 5839 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, 5840 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); 5841 5842 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, 5843 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); 5844 5845 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, 5846 &prev_ps->rx_size_big, &cur_ps->rx_size_big); 5847 5848 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, 5849 &prev_ps->tx_size_64, &cur_ps->tx_size_64); 5850 5851 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, 5852 &prev_ps->tx_size_127, &cur_ps->tx_size_127); 5853 5854 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, 5855 &prev_ps->tx_size_255, &cur_ps->tx_size_255); 5856 5857 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, 5858 &prev_ps->tx_size_511, &cur_ps->tx_size_511); 5859 5860 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, 5861 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); 5862 5863 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, 5864 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); 5865 5866 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, 5867 &prev_ps->tx_size_big, &cur_ps->tx_size_big); 5868 5869 fd_ctr_base = hw->fd_ctr_base; 5870 5871 ice_stat_update40(hw, 5872 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), 5873 pf->stat_prev_loaded, &prev_ps->fd_sb_match, 5874 &cur_ps->fd_sb_match); 5875 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, 5876 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); 5877 5878 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, 5879 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); 5880 5881 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, 5882 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); 5883 5884 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, 5885 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); 5886 5887 ice_update_dcb_stats(pf); 5888 5889 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, 5890 &prev_ps->crc_errors, &cur_ps->crc_errors); 5891 5892 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, 5893 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); 5894 5895 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, 5896 &prev_ps->mac_local_faults, 5897 &cur_ps->mac_local_faults); 5898 5899 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, 5900 &prev_ps->mac_remote_faults, 5901 &cur_ps->mac_remote_faults); 5902 5903 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, 5904 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); 5905 5906 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, 5907 &prev_ps->rx_undersize, &cur_ps->rx_undersize); 5908 5909 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, 5910 &prev_ps->rx_fragments, &cur_ps->rx_fragments); 5911 5912 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, 5913 &prev_ps->rx_oversize, &cur_ps->rx_oversize); 5914 5915 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, 5916 &prev_ps->rx_jabber, &cur_ps->rx_jabber); 5917 5918 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; 5919 5920 pf->stat_prev_loaded = true; 5921 } 5922 5923 /** 5924 * ice_get_stats64 - get statistics for network device structure 5925 * @netdev: network interface device structure 5926 * @stats: main device statistics structure 5927 */ 5928 static 5929 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) 5930 { 5931 struct ice_netdev_priv *np = netdev_priv(netdev); 5932 struct rtnl_link_stats64 *vsi_stats; 5933 struct ice_vsi *vsi = np->vsi; 5934 5935 vsi_stats = &vsi->net_stats; 5936 5937 if (!vsi->num_txq || !vsi->num_rxq) 5938 return; 5939 5940 /* netdev packet/byte stats come from ring counter. These are obtained 5941 * by summing up ring counters (done by ice_update_vsi_ring_stats). 5942 * But, only call the update routine and read the registers if VSI is 5943 * not down. 5944 */ 5945 if (!test_bit(ICE_VSI_DOWN, vsi->state)) 5946 ice_update_vsi_ring_stats(vsi); 5947 stats->tx_packets = vsi_stats->tx_packets; 5948 stats->tx_bytes = vsi_stats->tx_bytes; 5949 stats->rx_packets = vsi_stats->rx_packets; 5950 stats->rx_bytes = vsi_stats->rx_bytes; 5951 5952 /* The rest of the stats can be read from the hardware but instead we 5953 * just return values that the watchdog task has already obtained from 5954 * the hardware. 5955 */ 5956 stats->multicast = vsi_stats->multicast; 5957 stats->tx_errors = vsi_stats->tx_errors; 5958 stats->tx_dropped = vsi_stats->tx_dropped; 5959 stats->rx_errors = vsi_stats->rx_errors; 5960 stats->rx_dropped = vsi_stats->rx_dropped; 5961 stats->rx_crc_errors = vsi_stats->rx_crc_errors; 5962 stats->rx_length_errors = vsi_stats->rx_length_errors; 5963 } 5964 5965 /** 5966 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI 5967 * @vsi: VSI having NAPI disabled 5968 */ 5969 static void ice_napi_disable_all(struct ice_vsi *vsi) 5970 { 5971 int q_idx; 5972 5973 if (!vsi->netdev) 5974 return; 5975 5976 ice_for_each_q_vector(vsi, q_idx) { 5977 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 5978 5979 if (q_vector->rx.ring || q_vector->tx.ring) 5980 napi_disable(&q_vector->napi); 5981 5982 cancel_work_sync(&q_vector->tx.dim.work); 5983 cancel_work_sync(&q_vector->rx.dim.work); 5984 } 5985 } 5986 5987 /** 5988 * ice_down - Shutdown the connection 5989 * @vsi: The VSI being stopped 5990 */ 5991 int ice_down(struct ice_vsi *vsi) 5992 { 5993 int i, tx_err, rx_err, link_err = 0; 5994 5995 /* Caller of this function is expected to set the 5996 * vsi->state ICE_DOWN bit 5997 */ 5998 if (vsi->netdev) { 5999 netif_carrier_off(vsi->netdev); 6000 netif_tx_disable(vsi->netdev); 6001 } 6002 6003 ice_vsi_dis_irq(vsi); 6004 6005 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); 6006 if (tx_err) 6007 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", 6008 vsi->vsi_num, tx_err); 6009 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) { 6010 tx_err = ice_vsi_stop_xdp_tx_rings(vsi); 6011 if (tx_err) 6012 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", 6013 vsi->vsi_num, tx_err); 6014 } 6015 6016 rx_err = ice_vsi_stop_all_rx_rings(vsi); 6017 if (rx_err) 6018 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n", 6019 vsi->vsi_num, rx_err); 6020 6021 ice_napi_disable_all(vsi); 6022 6023 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { 6024 link_err = ice_force_phys_link_state(vsi, false); 6025 if (link_err) 6026 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", 6027 vsi->vsi_num, link_err); 6028 } 6029 6030 ice_for_each_txq(vsi, i) 6031 ice_clean_tx_ring(vsi->tx_rings[i]); 6032 6033 ice_for_each_rxq(vsi, i) 6034 ice_clean_rx_ring(vsi->rx_rings[i]); 6035 6036 if (tx_err || rx_err || link_err) { 6037 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", 6038 vsi->vsi_num, vsi->vsw->sw_id); 6039 return -EIO; 6040 } 6041 6042 return 0; 6043 } 6044 6045 /** 6046 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources 6047 * @vsi: VSI having resources allocated 6048 * 6049 * Return 0 on success, negative on failure 6050 */ 6051 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) 6052 { 6053 int i, err = 0; 6054 6055 if (!vsi->num_txq) { 6056 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", 6057 vsi->vsi_num); 6058 return -EINVAL; 6059 } 6060 6061 ice_for_each_txq(vsi, i) { 6062 struct ice_ring *ring = vsi->tx_rings[i]; 6063 6064 if (!ring) 6065 return -EINVAL; 6066 6067 ring->netdev = vsi->netdev; 6068 err = ice_setup_tx_ring(ring); 6069 if (err) 6070 break; 6071 } 6072 6073 return err; 6074 } 6075 6076 /** 6077 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources 6078 * @vsi: VSI having resources allocated 6079 * 6080 * Return 0 on success, negative on failure 6081 */ 6082 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) 6083 { 6084 int i, err = 0; 6085 6086 if (!vsi->num_rxq) { 6087 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", 6088 vsi->vsi_num); 6089 return -EINVAL; 6090 } 6091 6092 ice_for_each_rxq(vsi, i) { 6093 struct ice_ring *ring = vsi->rx_rings[i]; 6094 6095 if (!ring) 6096 return -EINVAL; 6097 6098 ring->netdev = vsi->netdev; 6099 err = ice_setup_rx_ring(ring); 6100 if (err) 6101 break; 6102 } 6103 6104 return err; 6105 } 6106 6107 /** 6108 * ice_vsi_open_ctrl - open control VSI for use 6109 * @vsi: the VSI to open 6110 * 6111 * Initialization of the Control VSI 6112 * 6113 * Returns 0 on success, negative value on error 6114 */ 6115 int ice_vsi_open_ctrl(struct ice_vsi *vsi) 6116 { 6117 char int_name[ICE_INT_NAME_STR_LEN]; 6118 struct ice_pf *pf = vsi->back; 6119 struct device *dev; 6120 int err; 6121 6122 dev = ice_pf_to_dev(pf); 6123 /* allocate descriptors */ 6124 err = ice_vsi_setup_tx_rings(vsi); 6125 if (err) 6126 goto err_setup_tx; 6127 6128 err = ice_vsi_setup_rx_rings(vsi); 6129 if (err) 6130 goto err_setup_rx; 6131 6132 err = ice_vsi_cfg(vsi); 6133 if (err) 6134 goto err_setup_rx; 6135 6136 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", 6137 dev_driver_string(dev), dev_name(dev)); 6138 err = ice_vsi_req_irq_msix(vsi, int_name); 6139 if (err) 6140 goto err_setup_rx; 6141 6142 ice_vsi_cfg_msix(vsi); 6143 6144 err = ice_vsi_start_all_rx_rings(vsi); 6145 if (err) 6146 goto err_up_complete; 6147 6148 clear_bit(ICE_VSI_DOWN, vsi->state); 6149 ice_vsi_ena_irq(vsi); 6150 6151 return 0; 6152 6153 err_up_complete: 6154 ice_down(vsi); 6155 err_setup_rx: 6156 ice_vsi_free_rx_rings(vsi); 6157 err_setup_tx: 6158 ice_vsi_free_tx_rings(vsi); 6159 6160 return err; 6161 } 6162 6163 /** 6164 * ice_vsi_open - Called when a network interface is made active 6165 * @vsi: the VSI to open 6166 * 6167 * Initialization of the VSI 6168 * 6169 * Returns 0 on success, negative value on error 6170 */ 6171 static int ice_vsi_open(struct ice_vsi *vsi) 6172 { 6173 char int_name[ICE_INT_NAME_STR_LEN]; 6174 struct ice_pf *pf = vsi->back; 6175 int err; 6176 6177 /* allocate descriptors */ 6178 err = ice_vsi_setup_tx_rings(vsi); 6179 if (err) 6180 goto err_setup_tx; 6181 6182 err = ice_vsi_setup_rx_rings(vsi); 6183 if (err) 6184 goto err_setup_rx; 6185 6186 err = ice_vsi_cfg(vsi); 6187 if (err) 6188 goto err_setup_rx; 6189 6190 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 6191 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); 6192 err = ice_vsi_req_irq_msix(vsi, int_name); 6193 if (err) 6194 goto err_setup_rx; 6195 6196 /* Notify the stack of the actual queue counts. */ 6197 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); 6198 if (err) 6199 goto err_set_qs; 6200 6201 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); 6202 if (err) 6203 goto err_set_qs; 6204 6205 err = ice_up_complete(vsi); 6206 if (err) 6207 goto err_up_complete; 6208 6209 return 0; 6210 6211 err_up_complete: 6212 ice_down(vsi); 6213 err_set_qs: 6214 ice_vsi_free_irq(vsi); 6215 err_setup_rx: 6216 ice_vsi_free_rx_rings(vsi); 6217 err_setup_tx: 6218 ice_vsi_free_tx_rings(vsi); 6219 6220 return err; 6221 } 6222 6223 /** 6224 * ice_vsi_release_all - Delete all VSIs 6225 * @pf: PF from which all VSIs are being removed 6226 */ 6227 static void ice_vsi_release_all(struct ice_pf *pf) 6228 { 6229 int err, i; 6230 6231 if (!pf->vsi) 6232 return; 6233 6234 ice_for_each_vsi(pf, i) { 6235 if (!pf->vsi[i]) 6236 continue; 6237 6238 err = ice_vsi_release(pf->vsi[i]); 6239 if (err) 6240 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", 6241 i, err, pf->vsi[i]->vsi_num); 6242 } 6243 } 6244 6245 /** 6246 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type 6247 * @pf: pointer to the PF instance 6248 * @type: VSI type to rebuild 6249 * 6250 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type 6251 */ 6252 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) 6253 { 6254 struct device *dev = ice_pf_to_dev(pf); 6255 enum ice_status status; 6256 int i, err; 6257 6258 ice_for_each_vsi(pf, i) { 6259 struct ice_vsi *vsi = pf->vsi[i]; 6260 6261 if (!vsi || vsi->type != type) 6262 continue; 6263 6264 /* rebuild the VSI */ 6265 err = ice_vsi_rebuild(vsi, true); 6266 if (err) { 6267 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", 6268 err, vsi->idx, ice_vsi_type_str(type)); 6269 return err; 6270 } 6271 6272 /* replay filters for the VSI */ 6273 status = ice_replay_vsi(&pf->hw, vsi->idx); 6274 if (status) { 6275 dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n", 6276 ice_stat_str(status), vsi->idx, 6277 ice_vsi_type_str(type)); 6278 return -EIO; 6279 } 6280 6281 /* Re-map HW VSI number, using VSI handle that has been 6282 * previously validated in ice_replay_vsi() call above 6283 */ 6284 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); 6285 6286 /* enable the VSI */ 6287 err = ice_ena_vsi(vsi, false); 6288 if (err) { 6289 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n", 6290 err, vsi->idx, ice_vsi_type_str(type)); 6291 return err; 6292 } 6293 6294 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx, 6295 ice_vsi_type_str(type)); 6296 } 6297 6298 return 0; 6299 } 6300 6301 /** 6302 * ice_update_pf_netdev_link - Update PF netdev link status 6303 * @pf: pointer to the PF instance 6304 */ 6305 static void ice_update_pf_netdev_link(struct ice_pf *pf) 6306 { 6307 bool link_up; 6308 int i; 6309 6310 ice_for_each_vsi(pf, i) { 6311 struct ice_vsi *vsi = pf->vsi[i]; 6312 6313 if (!vsi || vsi->type != ICE_VSI_PF) 6314 return; 6315 6316 ice_get_link_status(pf->vsi[i]->port_info, &link_up); 6317 if (link_up) { 6318 netif_carrier_on(pf->vsi[i]->netdev); 6319 netif_tx_wake_all_queues(pf->vsi[i]->netdev); 6320 } else { 6321 netif_carrier_off(pf->vsi[i]->netdev); 6322 netif_tx_stop_all_queues(pf->vsi[i]->netdev); 6323 } 6324 } 6325 } 6326 6327 /** 6328 * ice_rebuild - rebuild after reset 6329 * @pf: PF to rebuild 6330 * @reset_type: type of reset 6331 * 6332 * Do not rebuild VF VSI in this flow because that is already handled via 6333 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a 6334 * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want 6335 * to reset/rebuild all the VF VSI twice. 6336 */ 6337 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) 6338 { 6339 struct device *dev = ice_pf_to_dev(pf); 6340 struct ice_hw *hw = &pf->hw; 6341 enum ice_status ret; 6342 int err; 6343 6344 if (test_bit(ICE_DOWN, pf->state)) 6345 goto clear_recovery; 6346 6347 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); 6348 6349 ret = ice_init_all_ctrlq(hw); 6350 if (ret) { 6351 dev_err(dev, "control queues init failed %s\n", 6352 ice_stat_str(ret)); 6353 goto err_init_ctrlq; 6354 } 6355 6356 /* if DDP was previously loaded successfully */ 6357 if (!ice_is_safe_mode(pf)) { 6358 /* reload the SW DB of filter tables */ 6359 if (reset_type == ICE_RESET_PFR) 6360 ice_fill_blk_tbls(hw); 6361 else 6362 /* Reload DDP Package after CORER/GLOBR reset */ 6363 ice_load_pkg(NULL, pf); 6364 } 6365 6366 ret = ice_clear_pf_cfg(hw); 6367 if (ret) { 6368 dev_err(dev, "clear PF configuration failed %s\n", 6369 ice_stat_str(ret)); 6370 goto err_init_ctrlq; 6371 } 6372 6373 if (pf->first_sw->dflt_vsi_ena) 6374 dev_info(dev, "Clearing default VSI, re-enable after reset completes\n"); 6375 /* clear the default VSI configuration if it exists */ 6376 pf->first_sw->dflt_vsi = NULL; 6377 pf->first_sw->dflt_vsi_ena = false; 6378 6379 ice_clear_pxe_mode(hw); 6380 6381 ret = ice_init_nvm(hw); 6382 if (ret) { 6383 dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret)); 6384 goto err_init_ctrlq; 6385 } 6386 6387 ret = ice_get_caps(hw); 6388 if (ret) { 6389 dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret)); 6390 goto err_init_ctrlq; 6391 } 6392 6393 ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); 6394 if (ret) { 6395 dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret)); 6396 goto err_init_ctrlq; 6397 } 6398 6399 err = ice_sched_init_port(hw->port_info); 6400 if (err) 6401 goto err_sched_init_port; 6402 6403 /* start misc vector */ 6404 err = ice_req_irq_msix_misc(pf); 6405 if (err) { 6406 dev_err(dev, "misc vector setup failed: %d\n", err); 6407 goto err_sched_init_port; 6408 } 6409 6410 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 6411 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); 6412 if (!rd32(hw, PFQF_FD_SIZE)) { 6413 u16 unused, guar, b_effort; 6414 6415 guar = hw->func_caps.fd_fltr_guar; 6416 b_effort = hw->func_caps.fd_fltr_best_effort; 6417 6418 /* force guaranteed filter pool for PF */ 6419 ice_alloc_fd_guar_item(hw, &unused, guar); 6420 /* force shared filter pool for PF */ 6421 ice_alloc_fd_shrd_item(hw, &unused, b_effort); 6422 } 6423 } 6424 6425 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) 6426 ice_dcb_rebuild(pf); 6427 6428 /* If the PF previously had enabled PTP, PTP init needs to happen before 6429 * the VSI rebuild. If not, this causes the PTP link status events to 6430 * fail. 6431 */ 6432 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 6433 ice_ptp_init(pf); 6434 6435 /* rebuild PF VSI */ 6436 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); 6437 if (err) { 6438 dev_err(dev, "PF VSI rebuild failed: %d\n", err); 6439 goto err_vsi_rebuild; 6440 } 6441 6442 /* If Flow Director is active */ 6443 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 6444 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); 6445 if (err) { 6446 dev_err(dev, "control VSI rebuild failed: %d\n", err); 6447 goto err_vsi_rebuild; 6448 } 6449 6450 /* replay HW Flow Director recipes */ 6451 if (hw->fdir_prof) 6452 ice_fdir_replay_flows(hw); 6453 6454 /* replay Flow Director filters */ 6455 ice_fdir_replay_fltrs(pf); 6456 6457 ice_rebuild_arfs(pf); 6458 } 6459 6460 ice_update_pf_netdev_link(pf); 6461 6462 /* tell the firmware we are up */ 6463 ret = ice_send_version(pf); 6464 if (ret) { 6465 dev_err(dev, "Rebuild failed due to error sending driver version: %s\n", 6466 ice_stat_str(ret)); 6467 goto err_vsi_rebuild; 6468 } 6469 6470 ice_replay_post(hw); 6471 6472 /* if we get here, reset flow is successful */ 6473 clear_bit(ICE_RESET_FAILED, pf->state); 6474 6475 ice_plug_aux_dev(pf); 6476 return; 6477 6478 err_vsi_rebuild: 6479 err_sched_init_port: 6480 ice_sched_cleanup_all(hw); 6481 err_init_ctrlq: 6482 ice_shutdown_all_ctrlq(hw); 6483 set_bit(ICE_RESET_FAILED, pf->state); 6484 clear_recovery: 6485 /* set this bit in PF state to control service task scheduling */ 6486 set_bit(ICE_NEEDS_RESTART, pf->state); 6487 dev_err(dev, "Rebuild failed, unload and reload driver\n"); 6488 } 6489 6490 /** 6491 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP 6492 * @vsi: Pointer to VSI structure 6493 */ 6494 static int ice_max_xdp_frame_size(struct ice_vsi *vsi) 6495 { 6496 if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) 6497 return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM; 6498 else 6499 return ICE_RXBUF_3072; 6500 } 6501 6502 /** 6503 * ice_change_mtu - NDO callback to change the MTU 6504 * @netdev: network interface device structure 6505 * @new_mtu: new value for maximum frame size 6506 * 6507 * Returns 0 on success, negative on failure 6508 */ 6509 static int ice_change_mtu(struct net_device *netdev, int new_mtu) 6510 { 6511 struct ice_netdev_priv *np = netdev_priv(netdev); 6512 struct ice_vsi *vsi = np->vsi; 6513 struct ice_pf *pf = vsi->back; 6514 struct iidc_event *event; 6515 u8 count = 0; 6516 int err = 0; 6517 6518 if (new_mtu == (int)netdev->mtu) { 6519 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); 6520 return 0; 6521 } 6522 6523 if (ice_is_xdp_ena_vsi(vsi)) { 6524 int frame_size = ice_max_xdp_frame_size(vsi); 6525 6526 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { 6527 netdev_err(netdev, "max MTU for XDP usage is %d\n", 6528 frame_size - ICE_ETH_PKT_HDR_PAD); 6529 return -EINVAL; 6530 } 6531 } 6532 6533 /* if a reset is in progress, wait for some time for it to complete */ 6534 do { 6535 if (ice_is_reset_in_progress(pf->state)) { 6536 count++; 6537 usleep_range(1000, 2000); 6538 } else { 6539 break; 6540 } 6541 6542 } while (count < 100); 6543 6544 if (count == 100) { 6545 netdev_err(netdev, "can't change MTU. Device is busy\n"); 6546 return -EBUSY; 6547 } 6548 6549 event = kzalloc(sizeof(*event), GFP_KERNEL); 6550 if (!event) 6551 return -ENOMEM; 6552 6553 set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type); 6554 ice_send_event_to_aux(pf, event); 6555 clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type); 6556 6557 netdev->mtu = (unsigned int)new_mtu; 6558 6559 /* if VSI is up, bring it down and then back up */ 6560 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { 6561 err = ice_down(vsi); 6562 if (err) { 6563 netdev_err(netdev, "change MTU if_down err %d\n", err); 6564 goto event_after; 6565 } 6566 6567 err = ice_up(vsi); 6568 if (err) { 6569 netdev_err(netdev, "change MTU if_up err %d\n", err); 6570 goto event_after; 6571 } 6572 } 6573 6574 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); 6575 event_after: 6576 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); 6577 ice_send_event_to_aux(pf, event); 6578 kfree(event); 6579 6580 return err; 6581 } 6582 6583 /** 6584 * ice_eth_ioctl - Access the hwtstamp interface 6585 * @netdev: network interface device structure 6586 * @ifr: interface request data 6587 * @cmd: ioctl command 6588 */ 6589 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 6590 { 6591 struct ice_netdev_priv *np = netdev_priv(netdev); 6592 struct ice_pf *pf = np->vsi->back; 6593 6594 switch (cmd) { 6595 case SIOCGHWTSTAMP: 6596 return ice_ptp_get_ts_config(pf, ifr); 6597 case SIOCSHWTSTAMP: 6598 return ice_ptp_set_ts_config(pf, ifr); 6599 default: 6600 return -EOPNOTSUPP; 6601 } 6602 } 6603 6604 /** 6605 * ice_aq_str - convert AQ err code to a string 6606 * @aq_err: the AQ error code to convert 6607 */ 6608 const char *ice_aq_str(enum ice_aq_err aq_err) 6609 { 6610 switch (aq_err) { 6611 case ICE_AQ_RC_OK: 6612 return "OK"; 6613 case ICE_AQ_RC_EPERM: 6614 return "ICE_AQ_RC_EPERM"; 6615 case ICE_AQ_RC_ENOENT: 6616 return "ICE_AQ_RC_ENOENT"; 6617 case ICE_AQ_RC_ENOMEM: 6618 return "ICE_AQ_RC_ENOMEM"; 6619 case ICE_AQ_RC_EBUSY: 6620 return "ICE_AQ_RC_EBUSY"; 6621 case ICE_AQ_RC_EEXIST: 6622 return "ICE_AQ_RC_EEXIST"; 6623 case ICE_AQ_RC_EINVAL: 6624 return "ICE_AQ_RC_EINVAL"; 6625 case ICE_AQ_RC_ENOSPC: 6626 return "ICE_AQ_RC_ENOSPC"; 6627 case ICE_AQ_RC_ENOSYS: 6628 return "ICE_AQ_RC_ENOSYS"; 6629 case ICE_AQ_RC_EMODE: 6630 return "ICE_AQ_RC_EMODE"; 6631 case ICE_AQ_RC_ENOSEC: 6632 return "ICE_AQ_RC_ENOSEC"; 6633 case ICE_AQ_RC_EBADSIG: 6634 return "ICE_AQ_RC_EBADSIG"; 6635 case ICE_AQ_RC_ESVN: 6636 return "ICE_AQ_RC_ESVN"; 6637 case ICE_AQ_RC_EBADMAN: 6638 return "ICE_AQ_RC_EBADMAN"; 6639 case ICE_AQ_RC_EBADBUF: 6640 return "ICE_AQ_RC_EBADBUF"; 6641 } 6642 6643 return "ICE_AQ_RC_UNKNOWN"; 6644 } 6645 6646 /** 6647 * ice_stat_str - convert status err code to a string 6648 * @stat_err: the status error code to convert 6649 */ 6650 const char *ice_stat_str(enum ice_status stat_err) 6651 { 6652 switch (stat_err) { 6653 case ICE_SUCCESS: 6654 return "OK"; 6655 case ICE_ERR_PARAM: 6656 return "ICE_ERR_PARAM"; 6657 case ICE_ERR_NOT_IMPL: 6658 return "ICE_ERR_NOT_IMPL"; 6659 case ICE_ERR_NOT_READY: 6660 return "ICE_ERR_NOT_READY"; 6661 case ICE_ERR_NOT_SUPPORTED: 6662 return "ICE_ERR_NOT_SUPPORTED"; 6663 case ICE_ERR_BAD_PTR: 6664 return "ICE_ERR_BAD_PTR"; 6665 case ICE_ERR_INVAL_SIZE: 6666 return "ICE_ERR_INVAL_SIZE"; 6667 case ICE_ERR_DEVICE_NOT_SUPPORTED: 6668 return "ICE_ERR_DEVICE_NOT_SUPPORTED"; 6669 case ICE_ERR_RESET_FAILED: 6670 return "ICE_ERR_RESET_FAILED"; 6671 case ICE_ERR_FW_API_VER: 6672 return "ICE_ERR_FW_API_VER"; 6673 case ICE_ERR_NO_MEMORY: 6674 return "ICE_ERR_NO_MEMORY"; 6675 case ICE_ERR_CFG: 6676 return "ICE_ERR_CFG"; 6677 case ICE_ERR_OUT_OF_RANGE: 6678 return "ICE_ERR_OUT_OF_RANGE"; 6679 case ICE_ERR_ALREADY_EXISTS: 6680 return "ICE_ERR_ALREADY_EXISTS"; 6681 case ICE_ERR_NVM: 6682 return "ICE_ERR_NVM"; 6683 case ICE_ERR_NVM_CHECKSUM: 6684 return "ICE_ERR_NVM_CHECKSUM"; 6685 case ICE_ERR_BUF_TOO_SHORT: 6686 return "ICE_ERR_BUF_TOO_SHORT"; 6687 case ICE_ERR_NVM_BLANK_MODE: 6688 return "ICE_ERR_NVM_BLANK_MODE"; 6689 case ICE_ERR_IN_USE: 6690 return "ICE_ERR_IN_USE"; 6691 case ICE_ERR_MAX_LIMIT: 6692 return "ICE_ERR_MAX_LIMIT"; 6693 case ICE_ERR_RESET_ONGOING: 6694 return "ICE_ERR_RESET_ONGOING"; 6695 case ICE_ERR_HW_TABLE: 6696 return "ICE_ERR_HW_TABLE"; 6697 case ICE_ERR_DOES_NOT_EXIST: 6698 return "ICE_ERR_DOES_NOT_EXIST"; 6699 case ICE_ERR_FW_DDP_MISMATCH: 6700 return "ICE_ERR_FW_DDP_MISMATCH"; 6701 case ICE_ERR_AQ_ERROR: 6702 return "ICE_ERR_AQ_ERROR"; 6703 case ICE_ERR_AQ_TIMEOUT: 6704 return "ICE_ERR_AQ_TIMEOUT"; 6705 case ICE_ERR_AQ_FULL: 6706 return "ICE_ERR_AQ_FULL"; 6707 case ICE_ERR_AQ_NO_WORK: 6708 return "ICE_ERR_AQ_NO_WORK"; 6709 case ICE_ERR_AQ_EMPTY: 6710 return "ICE_ERR_AQ_EMPTY"; 6711 case ICE_ERR_AQ_FW_CRITICAL: 6712 return "ICE_ERR_AQ_FW_CRITICAL"; 6713 } 6714 6715 return "ICE_ERR_UNKNOWN"; 6716 } 6717 6718 /** 6719 * ice_set_rss_lut - Set RSS LUT 6720 * @vsi: Pointer to VSI structure 6721 * @lut: Lookup table 6722 * @lut_size: Lookup table size 6723 * 6724 * Returns 0 on success, negative on failure 6725 */ 6726 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) 6727 { 6728 struct ice_aq_get_set_rss_lut_params params = {}; 6729 struct ice_hw *hw = &vsi->back->hw; 6730 enum ice_status status; 6731 6732 if (!lut) 6733 return -EINVAL; 6734 6735 params.vsi_handle = vsi->idx; 6736 params.lut_size = lut_size; 6737 params.lut_type = vsi->rss_lut_type; 6738 params.lut = lut; 6739 6740 status = ice_aq_set_rss_lut(hw, ¶ms); 6741 if (status) { 6742 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n", 6743 ice_stat_str(status), 6744 ice_aq_str(hw->adminq.sq_last_status)); 6745 return -EIO; 6746 } 6747 6748 return 0; 6749 } 6750 6751 /** 6752 * ice_set_rss_key - Set RSS key 6753 * @vsi: Pointer to the VSI structure 6754 * @seed: RSS hash seed 6755 * 6756 * Returns 0 on success, negative on failure 6757 */ 6758 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) 6759 { 6760 struct ice_hw *hw = &vsi->back->hw; 6761 enum ice_status status; 6762 6763 if (!seed) 6764 return -EINVAL; 6765 6766 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); 6767 if (status) { 6768 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n", 6769 ice_stat_str(status), 6770 ice_aq_str(hw->adminq.sq_last_status)); 6771 return -EIO; 6772 } 6773 6774 return 0; 6775 } 6776 6777 /** 6778 * ice_get_rss_lut - Get RSS LUT 6779 * @vsi: Pointer to VSI structure 6780 * @lut: Buffer to store the lookup table entries 6781 * @lut_size: Size of buffer to store the lookup table entries 6782 * 6783 * Returns 0 on success, negative on failure 6784 */ 6785 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) 6786 { 6787 struct ice_aq_get_set_rss_lut_params params = {}; 6788 struct ice_hw *hw = &vsi->back->hw; 6789 enum ice_status status; 6790 6791 if (!lut) 6792 return -EINVAL; 6793 6794 params.vsi_handle = vsi->idx; 6795 params.lut_size = lut_size; 6796 params.lut_type = vsi->rss_lut_type; 6797 params.lut = lut; 6798 6799 status = ice_aq_get_rss_lut(hw, ¶ms); 6800 if (status) { 6801 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n", 6802 ice_stat_str(status), 6803 ice_aq_str(hw->adminq.sq_last_status)); 6804 return -EIO; 6805 } 6806 6807 return 0; 6808 } 6809 6810 /** 6811 * ice_get_rss_key - Get RSS key 6812 * @vsi: Pointer to VSI structure 6813 * @seed: Buffer to store the key in 6814 * 6815 * Returns 0 on success, negative on failure 6816 */ 6817 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) 6818 { 6819 struct ice_hw *hw = &vsi->back->hw; 6820 enum ice_status status; 6821 6822 if (!seed) 6823 return -EINVAL; 6824 6825 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); 6826 if (status) { 6827 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n", 6828 ice_stat_str(status), 6829 ice_aq_str(hw->adminq.sq_last_status)); 6830 return -EIO; 6831 } 6832 6833 return 0; 6834 } 6835 6836 /** 6837 * ice_bridge_getlink - Get the hardware bridge mode 6838 * @skb: skb buff 6839 * @pid: process ID 6840 * @seq: RTNL message seq 6841 * @dev: the netdev being configured 6842 * @filter_mask: filter mask passed in 6843 * @nlflags: netlink flags passed in 6844 * 6845 * Return the bridge mode (VEB/VEPA) 6846 */ 6847 static int 6848 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 6849 struct net_device *dev, u32 filter_mask, int nlflags) 6850 { 6851 struct ice_netdev_priv *np = netdev_priv(dev); 6852 struct ice_vsi *vsi = np->vsi; 6853 struct ice_pf *pf = vsi->back; 6854 u16 bmode; 6855 6856 bmode = pf->first_sw->bridge_mode; 6857 6858 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, 6859 filter_mask, NULL); 6860 } 6861 6862 /** 6863 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) 6864 * @vsi: Pointer to VSI structure 6865 * @bmode: Hardware bridge mode (VEB/VEPA) 6866 * 6867 * Returns 0 on success, negative on failure 6868 */ 6869 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) 6870 { 6871 struct ice_aqc_vsi_props *vsi_props; 6872 struct ice_hw *hw = &vsi->back->hw; 6873 struct ice_vsi_ctx *ctxt; 6874 enum ice_status status; 6875 int ret = 0; 6876 6877 vsi_props = &vsi->info; 6878 6879 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 6880 if (!ctxt) 6881 return -ENOMEM; 6882 6883 ctxt->info = vsi->info; 6884 6885 if (bmode == BRIDGE_MODE_VEB) 6886 /* change from VEPA to VEB mode */ 6887 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 6888 else 6889 /* change from VEB to VEPA mode */ 6890 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 6891 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); 6892 6893 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 6894 if (status) { 6895 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n", 6896 bmode, ice_stat_str(status), 6897 ice_aq_str(hw->adminq.sq_last_status)); 6898 ret = -EIO; 6899 goto out; 6900 } 6901 /* Update sw flags for book keeping */ 6902 vsi_props->sw_flags = ctxt->info.sw_flags; 6903 6904 out: 6905 kfree(ctxt); 6906 return ret; 6907 } 6908 6909 /** 6910 * ice_bridge_setlink - Set the hardware bridge mode 6911 * @dev: the netdev being configured 6912 * @nlh: RTNL message 6913 * @flags: bridge setlink flags 6914 * @extack: netlink extended ack 6915 * 6916 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is 6917 * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if 6918 * not already set for all VSIs connected to this switch. And also update the 6919 * unicast switch filter rules for the corresponding switch of the netdev. 6920 */ 6921 static int 6922 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 6923 u16 __always_unused flags, 6924 struct netlink_ext_ack __always_unused *extack) 6925 { 6926 struct ice_netdev_priv *np = netdev_priv(dev); 6927 struct ice_pf *pf = np->vsi->back; 6928 struct nlattr *attr, *br_spec; 6929 struct ice_hw *hw = &pf->hw; 6930 enum ice_status status; 6931 struct ice_sw *pf_sw; 6932 int rem, v, err = 0; 6933 6934 pf_sw = pf->first_sw; 6935 /* find the attribute in the netlink message */ 6936 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 6937 6938 nla_for_each_nested(attr, br_spec, rem) { 6939 __u16 mode; 6940 6941 if (nla_type(attr) != IFLA_BRIDGE_MODE) 6942 continue; 6943 mode = nla_get_u16(attr); 6944 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) 6945 return -EINVAL; 6946 /* Continue if bridge mode is not being flipped */ 6947 if (mode == pf_sw->bridge_mode) 6948 continue; 6949 /* Iterates through the PF VSI list and update the loopback 6950 * mode of the VSI 6951 */ 6952 ice_for_each_vsi(pf, v) { 6953 if (!pf->vsi[v]) 6954 continue; 6955 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); 6956 if (err) 6957 return err; 6958 } 6959 6960 hw->evb_veb = (mode == BRIDGE_MODE_VEB); 6961 /* Update the unicast switch filter rules for the corresponding 6962 * switch of the netdev 6963 */ 6964 status = ice_update_sw_rule_bridge_mode(hw); 6965 if (status) { 6966 netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n", 6967 mode, ice_stat_str(status), 6968 ice_aq_str(hw->adminq.sq_last_status)); 6969 /* revert hw->evb_veb */ 6970 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); 6971 return -EIO; 6972 } 6973 6974 pf_sw->bridge_mode = mode; 6975 } 6976 6977 return 0; 6978 } 6979 6980 /** 6981 * ice_tx_timeout - Respond to a Tx Hang 6982 * @netdev: network interface device structure 6983 * @txqueue: Tx queue 6984 */ 6985 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) 6986 { 6987 struct ice_netdev_priv *np = netdev_priv(netdev); 6988 struct ice_ring *tx_ring = NULL; 6989 struct ice_vsi *vsi = np->vsi; 6990 struct ice_pf *pf = vsi->back; 6991 u32 i; 6992 6993 pf->tx_timeout_count++; 6994 6995 /* Check if PFC is enabled for the TC to which the queue belongs 6996 * to. If yes then Tx timeout is not caused by a hung queue, no 6997 * need to reset and rebuild 6998 */ 6999 if (ice_is_pfc_causing_hung_q(pf, txqueue)) { 7000 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n", 7001 txqueue); 7002 return; 7003 } 7004 7005 /* now that we have an index, find the tx_ring struct */ 7006 for (i = 0; i < vsi->num_txq; i++) 7007 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 7008 if (txqueue == vsi->tx_rings[i]->q_index) { 7009 tx_ring = vsi->tx_rings[i]; 7010 break; 7011 } 7012 7013 /* Reset recovery level if enough time has elapsed after last timeout. 7014 * Also ensure no new reset action happens before next timeout period. 7015 */ 7016 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) 7017 pf->tx_timeout_recovery_level = 1; 7018 else if (time_before(jiffies, (pf->tx_timeout_last_recovery + 7019 netdev->watchdog_timeo))) 7020 return; 7021 7022 if (tx_ring) { 7023 struct ice_hw *hw = &pf->hw; 7024 u32 head, val = 0; 7025 7026 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) & 7027 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; 7028 /* Read interrupt register */ 7029 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); 7030 7031 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", 7032 vsi->vsi_num, txqueue, tx_ring->next_to_clean, 7033 head, tx_ring->next_to_use, val); 7034 } 7035 7036 pf->tx_timeout_last_recovery = jiffies; 7037 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n", 7038 pf->tx_timeout_recovery_level, txqueue); 7039 7040 switch (pf->tx_timeout_recovery_level) { 7041 case 1: 7042 set_bit(ICE_PFR_REQ, pf->state); 7043 break; 7044 case 2: 7045 set_bit(ICE_CORER_REQ, pf->state); 7046 break; 7047 case 3: 7048 set_bit(ICE_GLOBR_REQ, pf->state); 7049 break; 7050 default: 7051 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); 7052 set_bit(ICE_DOWN, pf->state); 7053 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); 7054 set_bit(ICE_SERVICE_DIS, pf->state); 7055 break; 7056 } 7057 7058 ice_service_task_schedule(pf); 7059 pf->tx_timeout_recovery_level++; 7060 } 7061 7062 /** 7063 * ice_open - Called when a network interface becomes active 7064 * @netdev: network interface device structure 7065 * 7066 * The open entry point is called when a network interface is made 7067 * active by the system (IFF_UP). At this point all resources needed 7068 * for transmit and receive operations are allocated, the interrupt 7069 * handler is registered with the OS, the netdev watchdog is enabled, 7070 * and the stack is notified that the interface is ready. 7071 * 7072 * Returns 0 on success, negative value on failure 7073 */ 7074 int ice_open(struct net_device *netdev) 7075 { 7076 struct ice_netdev_priv *np = netdev_priv(netdev); 7077 struct ice_pf *pf = np->vsi->back; 7078 7079 if (ice_is_reset_in_progress(pf->state)) { 7080 netdev_err(netdev, "can't open net device while reset is in progress"); 7081 return -EBUSY; 7082 } 7083 7084 return ice_open_internal(netdev); 7085 } 7086 7087 /** 7088 * ice_open_internal - Called when a network interface becomes active 7089 * @netdev: network interface device structure 7090 * 7091 * Internal ice_open implementation. Should not be used directly except for ice_open and reset 7092 * handling routine 7093 * 7094 * Returns 0 on success, negative value on failure 7095 */ 7096 int ice_open_internal(struct net_device *netdev) 7097 { 7098 struct ice_netdev_priv *np = netdev_priv(netdev); 7099 struct ice_vsi *vsi = np->vsi; 7100 struct ice_pf *pf = vsi->back; 7101 struct ice_port_info *pi; 7102 enum ice_status status; 7103 int err; 7104 7105 if (test_bit(ICE_NEEDS_RESTART, pf->state)) { 7106 netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); 7107 return -EIO; 7108 } 7109 7110 netif_carrier_off(netdev); 7111 7112 pi = vsi->port_info; 7113 status = ice_update_link_info(pi); 7114 if (status) { 7115 netdev_err(netdev, "Failed to get link info, error %s\n", 7116 ice_stat_str(status)); 7117 return -EIO; 7118 } 7119 7120 ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); 7121 7122 /* Set PHY if there is media, otherwise, turn off PHY */ 7123 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 7124 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); 7125 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) { 7126 err = ice_init_phy_user_cfg(pi); 7127 if (err) { 7128 netdev_err(netdev, "Failed to initialize PHY settings, error %d\n", 7129 err); 7130 return err; 7131 } 7132 } 7133 7134 err = ice_configure_phy(vsi); 7135 if (err) { 7136 netdev_err(netdev, "Failed to set physical link up, error %d\n", 7137 err); 7138 return err; 7139 } 7140 } else { 7141 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); 7142 ice_set_link(vsi, false); 7143 } 7144 7145 err = ice_vsi_open(vsi); 7146 if (err) 7147 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", 7148 vsi->vsi_num, vsi->vsw->sw_id); 7149 7150 /* Update existing tunnels information */ 7151 udp_tunnel_get_rx_info(netdev); 7152 7153 return err; 7154 } 7155 7156 /** 7157 * ice_stop - Disables a network interface 7158 * @netdev: network interface device structure 7159 * 7160 * The stop entry point is called when an interface is de-activated by the OS, 7161 * and the netdevice enters the DOWN state. The hardware is still under the 7162 * driver's control, but the netdev interface is disabled. 7163 * 7164 * Returns success only - not allowed to fail 7165 */ 7166 int ice_stop(struct net_device *netdev) 7167 { 7168 struct ice_netdev_priv *np = netdev_priv(netdev); 7169 struct ice_vsi *vsi = np->vsi; 7170 struct ice_pf *pf = vsi->back; 7171 7172 if (ice_is_reset_in_progress(pf->state)) { 7173 netdev_err(netdev, "can't stop net device while reset is in progress"); 7174 return -EBUSY; 7175 } 7176 7177 ice_vsi_close(vsi); 7178 7179 return 0; 7180 } 7181 7182 /** 7183 * ice_features_check - Validate encapsulated packet conforms to limits 7184 * @skb: skb buffer 7185 * @netdev: This port's netdev 7186 * @features: Offload features that the stack believes apply 7187 */ 7188 static netdev_features_t 7189 ice_features_check(struct sk_buff *skb, 7190 struct net_device __always_unused *netdev, 7191 netdev_features_t features) 7192 { 7193 size_t len; 7194 7195 /* No point in doing any of this if neither checksum nor GSO are 7196 * being requested for this frame. We can rule out both by just 7197 * checking for CHECKSUM_PARTIAL 7198 */ 7199 if (skb->ip_summed != CHECKSUM_PARTIAL) 7200 return features; 7201 7202 /* We cannot support GSO if the MSS is going to be less than 7203 * 64 bytes. If it is then we need to drop support for GSO. 7204 */ 7205 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) 7206 features &= ~NETIF_F_GSO_MASK; 7207 7208 len = skb_network_header(skb) - skb->data; 7209 if (len > ICE_TXD_MACLEN_MAX || len & 0x1) 7210 goto out_rm_features; 7211 7212 len = skb_transport_header(skb) - skb_network_header(skb); 7213 if (len > ICE_TXD_IPLEN_MAX || len & 0x1) 7214 goto out_rm_features; 7215 7216 if (skb->encapsulation) { 7217 len = skb_inner_network_header(skb) - skb_transport_header(skb); 7218 if (len > ICE_TXD_L4LEN_MAX || len & 0x1) 7219 goto out_rm_features; 7220 7221 len = skb_inner_transport_header(skb) - 7222 skb_inner_network_header(skb); 7223 if (len > ICE_TXD_IPLEN_MAX || len & 0x1) 7224 goto out_rm_features; 7225 } 7226 7227 return features; 7228 out_rm_features: 7229 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 7230 } 7231 7232 static const struct net_device_ops ice_netdev_safe_mode_ops = { 7233 .ndo_open = ice_open, 7234 .ndo_stop = ice_stop, 7235 .ndo_start_xmit = ice_start_xmit, 7236 .ndo_set_mac_address = ice_set_mac_address, 7237 .ndo_validate_addr = eth_validate_addr, 7238 .ndo_change_mtu = ice_change_mtu, 7239 .ndo_get_stats64 = ice_get_stats64, 7240 .ndo_tx_timeout = ice_tx_timeout, 7241 .ndo_bpf = ice_xdp_safe_mode, 7242 }; 7243 7244 static const struct net_device_ops ice_netdev_ops = { 7245 .ndo_open = ice_open, 7246 .ndo_stop = ice_stop, 7247 .ndo_start_xmit = ice_start_xmit, 7248 .ndo_features_check = ice_features_check, 7249 .ndo_set_rx_mode = ice_set_rx_mode, 7250 .ndo_set_mac_address = ice_set_mac_address, 7251 .ndo_validate_addr = eth_validate_addr, 7252 .ndo_change_mtu = ice_change_mtu, 7253 .ndo_get_stats64 = ice_get_stats64, 7254 .ndo_set_tx_maxrate = ice_set_tx_maxrate, 7255 .ndo_eth_ioctl = ice_eth_ioctl, 7256 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, 7257 .ndo_set_vf_mac = ice_set_vf_mac, 7258 .ndo_get_vf_config = ice_get_vf_cfg, 7259 .ndo_set_vf_trust = ice_set_vf_trust, 7260 .ndo_set_vf_vlan = ice_set_vf_port_vlan, 7261 .ndo_set_vf_link_state = ice_set_vf_link_state, 7262 .ndo_get_vf_stats = ice_get_vf_stats, 7263 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, 7264 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, 7265 .ndo_set_features = ice_set_features, 7266 .ndo_bridge_getlink = ice_bridge_getlink, 7267 .ndo_bridge_setlink = ice_bridge_setlink, 7268 .ndo_fdb_add = ice_fdb_add, 7269 .ndo_fdb_del = ice_fdb_del, 7270 #ifdef CONFIG_RFS_ACCEL 7271 .ndo_rx_flow_steer = ice_rx_flow_steer, 7272 #endif 7273 .ndo_tx_timeout = ice_tx_timeout, 7274 .ndo_bpf = ice_xdp, 7275 .ndo_xdp_xmit = ice_xdp_xmit, 7276 .ndo_xsk_wakeup = ice_xsk_wakeup, 7277 }; 7278