// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
#include "devlink/port.h"
#include "ice_sf_eth.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
#include <net/xdp_sock_drv.h>

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS("LIBIE");
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);

/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}

static struct workqueue_struct *ice_wq;
struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(const struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops ||
		       dev->netdev_ops == &ice_netdev_safe_mode_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
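/* Example: with ring->count = 256, next_to_clean = 250 and
 * next_to_use = 10 the ring has wrapped, so ice_get_tx_pending()
 * returns 10 + 256 - 250 = 16 descriptors still awaiting cleanup;
 * head == tail means the ring is idle and 0 is returned.
 */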
/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in-kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in-kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}
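/* Note on the two callbacks above: ice_vsi_sync_fltr() below hands them to
 * __dev_uc_sync()/__dev_mc_sync(), which walk the netdev's address lists
 * under addr_list_lock and invoke the sync callback for each address not
 * yet pushed to the driver and the unsync callback for each address that
 * was removed, so tmp_sync_list/tmp_unsync_list accumulate exactly the
 * delta that ice_vsi_sync_fltr() then applies to the hardware.
 */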
/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;

	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return 0;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}

	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return status;
}
/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If filter is added successfully or already exists, do not go into
	 * 'if' condition and report it as error. Instead continue processing
	 * rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);

				/* promiscuous mode implies allmulticast so
				 * that VSIs that are in promiscuous mode are
				 * subscribed to multicast packets coming to
				 * the port
				 */
				err = ice_set_promisc(vsi,
						      ICE_MCAST_PROMISC_BITS);
				if (err)
					goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}

			/* disable allmulti here, but only if allmulti is not
			 * still enabled for the netdev
			 */
			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
				err = ice_clear_promisc(vsi,
							ICE_MCAST_PROMISC_BITS);
				if (err) {
					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
						   err, vsi->vsi_num);
				}
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}
/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	synchronize_irq(pf->oicr_irq.virq);

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		rtnl_lock();
		ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge);
		rtnl_unlock();
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}

	if (vsi->netdev)
		netif_device_detach(vsi->netdev);
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf, reset_type);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw, false);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}
/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
		reset_type = ICE_RESET_CORER;
	}

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}
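/* A minimal sketch of requesting a PF reset from elsewhere in the driver
 * (illustrative): set the request bit and kick the service task, which
 * runs ice_reset_subtask() below and ultimately calls ice_do_reset():
 *
 *	set_bit(ICE_PFR_REQ, pf->state);
 *	ice_service_task_schedule(pf);
 */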
/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state)) {
		reset_type = ICE_RESET_PFR;
		if (pf->lag && pf->lag->bonded) {
			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
			reset_type = ICE_RESET_CORER;
		}
	}
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel(R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}
/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_200GB:
		speed = "200 G";
		break;
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";
"On" : "Off"; 887 888 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ || 889 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) 890 fec_req = "RS-FEC"; 891 else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ || 892 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ) 893 fec_req = "FC-FEC/BASE-R"; 894 else 895 fec_req = "NONE"; 896 897 kfree(caps); 898 899 done: 900 netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n", 901 speed, fec_req, fec, an_advertised, an, fc); 902 ice_print_topo_conflict(vsi); 903 } 904 905 /** 906 * ice_vsi_link_event - update the VSI's netdev 907 * @vsi: the VSI on which the link event occurred 908 * @link_up: whether or not the VSI needs to be set up or down 909 */ 910 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) 911 { 912 if (!vsi) 913 return; 914 915 if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev) 916 return; 917 918 if (vsi->type == ICE_VSI_PF) { 919 if (link_up == netif_carrier_ok(vsi->netdev)) 920 return; 921 922 if (link_up) { 923 netif_carrier_on(vsi->netdev); 924 netif_tx_wake_all_queues(vsi->netdev); 925 } else { 926 netif_carrier_off(vsi->netdev); 927 netif_tx_stop_all_queues(vsi->netdev); 928 } 929 } 930 } 931 932 /** 933 * ice_set_dflt_mib - send a default config MIB to the FW 934 * @pf: private PF struct 935 * 936 * This function sends a default configuration MIB to the FW. 937 * 938 * If this function errors out at any point, the driver is still able to 939 * function. The main impact is that LFC may not operate as expected. 940 * Therefore an error state in this function should be treated with a DBG 941 * message and continue on with driver rebuild/reenable. 942 */ 943 static void ice_set_dflt_mib(struct ice_pf *pf) 944 { 945 struct device *dev = ice_pf_to_dev(pf); 946 u8 mib_type, *buf, *lldpmib = NULL; 947 u16 len, typelen, offset = 0; 948 struct ice_lldp_org_tlv *tlv; 949 struct ice_hw *hw = &pf->hw; 950 u32 ouisubtype; 951 952 mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; 953 lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL); 954 if (!lldpmib) { 955 dev_dbg(dev, "%s Failed to allocate MIB memory\n", 956 __func__); 957 return; 958 } 959 960 /* Add ETS CFG TLV */ 961 tlv = (struct ice_lldp_org_tlv *)lldpmib; 962 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | 963 ICE_IEEE_ETS_TLV_LEN); 964 tlv->typelen = htons(typelen); 965 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | 966 ICE_IEEE_SUBTYPE_ETS_CFG); 967 tlv->ouisubtype = htonl(ouisubtype); 968 969 buf = tlv->tlvinfo; 970 buf[0] = 0; 971 972 /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0. 973 * Octets 5 - 12 are BW values, set octet 5 to 100% BW. 974 * Octets 13 - 20 are TSA values - leave as zeros 975 */ 976 buf[5] = 0x64; 977 len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); 978 offset += len + 2; 979 tlv = (struct ice_lldp_org_tlv *) 980 ((char *)tlv + sizeof(tlv->typelen) + len); 981 982 /* Add ETS REC TLV */ 983 buf = tlv->tlvinfo; 984 tlv->typelen = htons(typelen); 985 986 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | 987 ICE_IEEE_SUBTYPE_ETS_REC); 988 tlv->ouisubtype = htonl(ouisubtype); 989 990 /* First octet of buf is reserved 991 * Octets 1 - 4 map UP to TC - all UPs map to zero 992 * Octets 5 - 12 are BW values - set TC 0 to 100%. 
/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message and continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}
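/* Worked example of the TLV header math used above (standard LLDP
 * encoding: a 7-bit type in the upper bits and a 9-bit length in the low
 * nine bits of the 16-bit typelen field; the constants are assumed to
 * follow the driver's ice_dcb.h definitions):
 *
 *	typelen = (ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
 *		  ICE_IEEE_ETS_TLV_LEN;
 *	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
 *
 * recovers len == ICE_IEEE_ETS_TLV_LEN, so each TLV advances offset by
 * len + 2 (payload plus the two-octet typelen header).
 */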
/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power - check module power level
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}
/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other bookkeeping needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	ice_ptp_link_change(pf, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	/* bits set in the mask disable reporting of the corresponding event,
	 * so invert the set of events we want to keep enabled
	 */
	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}
/**
 * ice_get_fwlog_data - copy the FW log data from ARQ event
 * @pf: PF that the FW log event is associated with
 * @event: event structure containing FW log data
 */
static void
ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_fwlog_data *fwlog;
	struct ice_hw *hw = &pf->hw;

	fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];

	memset(fwlog->data, 0, PAGE_SIZE);
	fwlog->data_size = le16_to_cpu(event->desc.datalen);

	memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
	ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);

	if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
		/* the rings are full so bump the head to create room */
		ice_fwlog_ring_increment(&hw->fwlog_ring.head,
					 hw->fwlog_ring.size);
	}
}

/**
 * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: intermediate helper storage and identifier for waiting
 * @opcode: the opcode to wait for
 *
 * Prepares to wait for a specific AdminQ completion event on the ARQ for
 * a given PF. Actual wait would be done by a call to ice_aq_wait_for_event().
 *
 * Calls are separated to allow caller registering for event before sending
 * the command, which mitigates a race between registering and FW responding.
 *
 * To obtain only the descriptor contents, pass a task->event with null
 * msg_buf. If the complete data buffer is desired, allocate the
 * task->event.msg_buf with enough space ahead of time.
 */
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode)
{
	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);
}
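/* A minimal usage sketch for the prep/wait pair (illustrative only; the
 * opcode and descriptor setup stand in for whatever command the caller
 * actually issues):
 *
 *	struct ice_aq_task task = {};
 *	struct ice_aq_desc desc;
 *	int err;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
 *	ice_aq_prep_for_event(pf, &task, ice_aqc_opc_get_link_status);
 *	err = ice_aq_send_cmd(&pf->hw, &desc, NULL, 0, NULL);
 *	if (!err)
 *		err = ice_aq_wait_for_event(pf, &task, HZ);
 *
 * Registering before the send closes the race described above: a fast
 * firmware response still finds the task on pf->aq_wait_list.
 */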
/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: ptr prepared by ice_aq_prep_for_event()
 * @timeout: how long to wait, in jiffies
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout)
{
	enum ice_aq_task_state *state = &task->state;
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long start = jiffies;
	long ret;
	int err;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
					       *state != ICE_AQ_TASK_WAITING,
					       timeout);
	switch (*state) {
	case ICE_AQ_TASK_NOT_PREPARED:
		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
		err = -EINVAL;
		break;
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", *state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		task->opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);

	return err;
}

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_rq_event_info *task_ev;
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state != ICE_AQ_TASK_WAITING)
			continue;
		if (task->opcode != opcode)
			continue;

		task_ev = &task->event;
		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
		task_ev->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
			memcpy(task_ev->msg_buf, event->msg_buf,
			       event->buf_len);
			task_ev->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}
/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

#define ICE_MBX_OVERFLOW_WATERMARK 64

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 *
 * Returns non-zero when the work budget was exhausted with events still
 * pending, zero otherwise.
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		struct ice_mbx_data data = {};
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) {
				ice_vc_process_vf_msg(pf, &event, NULL);
				ice_mbx_vf_dec_trig_e830(hw, &event);
			} else {
				u16 val = hw->mailboxq.num_rq_entries;

				data.max_num_msgs_mbx = val;
				val = ICE_MBX_OVERFLOW_WATERMARK;
				data.async_watermark_val = val;
				data.num_msg_proc = i;
				data.num_pending_arq = pending;

				ice_vc_process_vf_msg(pf, &event, &data);
			}
			break;
		case ice_aqc_opc_fw_logs_event:
			ice_get_fwlog_data(pf, &event);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		case ice_aqc_opc_get_health_status:
			ice_process_health_status_event(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}
/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* if mac_type is not generic, sideband is not supported
	 * and there's nothing to do here
	 */
	if (!ice_is_generic_mac(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}
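/* Lifecycle sketch for the two helpers above: a trigger (for example the
 * ice_service_timer() callback below) calls ice_service_task_schedule(),
 * which queues pf->serv_task only if ICE_SERVICE_SCHED was previously
 * clear; the work function runs its subtasks and finishes with
 * ice_service_task_complete(), clearing ICE_SERVICE_SCHED so the next
 * schedule request can queue the work again.
 */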
/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume works (e.g WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}

/**
 * ice_mdd_maybe_reset_vf - reset VF after MDD event
 * @pf: pointer to the PF structure
 * @vf: pointer to the VF structure
 * @reset_vf_tx: whether Tx MDD has occurred
 * @reset_vf_rx: whether Rx MDD has occurred
 *
 * Since the queue can get stuck on VF MDD events, the PF can be configured to
 * automatically reset the VF by enabling the private ethtool flag
 * mdd-auto-reset-vf.
 */
static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf,
				   bool reset_vf_tx, bool reset_vf_rx)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
		return;

	/* VF MDD event counters will be cleared by reset, so print the event
	 * prior to reset.
	 */
	if (reset_vf_tx)
		ice_print_vf_tx_mdd_event(vf);

	if (reset_vf_rx)
		ice_print_vf_rx_mdd_event(vf);

	dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n",
		 pf->hw.pf_id, vf->vf_id);
	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
}
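/* Example of enabling the auto-reset behavior described above from
 * userspace, using the standard ethtool private-flags syntax (the
 * interface name is illustrative):
 *
 *	ethtool --set-priv-flags eth0 mdd-auto-reset-vf on
 */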
/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
		u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
		u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
		u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num,
				     event, queue);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg);
		u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg);
		u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg);
		u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num,
				     event, queue);
		wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg);
		u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg);
		u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg);
		u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event,
				     queue);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}

	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw));
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		bool reset_vf_tx = false, reset_vf_rx = false;

		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 vf->vf_id);

			reset_vf_tx = true;
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 vf->vf_id);

			reset_vf_tx = true;
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
					 vf->vf_id);

			reset_vf_tx = true;
		}

		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
			vf->mdd_rx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_rx_err(pf))
				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
					 vf->vf_id);

			reset_vf_rx = true;
		}

		if (reset_vf_tx || reset_vf_rx)
			ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx,
					       reset_vf_rx);
	}
	mutex_unlock(&pf->vfs.table_lock);

	ice_print_vfs_mdd_events(pf);
}
vf->mdd_tx_events.count++; 1902 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); 1903 if (netif_msg_tx_err(pf)) 1904 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", 1905 vf->vf_id); 1906 1907 reset_vf_tx = true; 1908 } 1909 1910 reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id)); 1911 if (reg & VP_MDET_TX_TDPU_VALID_M) { 1912 wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF); 1913 vf->mdd_tx_events.count++; 1914 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); 1915 if (netif_msg_tx_err(pf)) 1916 dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", 1917 vf->vf_id); 1918 1919 reset_vf_tx = true; 1920 } 1921 1922 reg = rd32(hw, VP_MDET_RX(vf->vf_id)); 1923 if (reg & VP_MDET_RX_VALID_M) { 1924 wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF); 1925 vf->mdd_rx_events.count++; 1926 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); 1927 if (netif_msg_rx_err(pf)) 1928 dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", 1929 vf->vf_id); 1930 1931 reset_vf_rx = true; 1932 } 1933 1934 if (reset_vf_tx || reset_vf_rx) 1935 ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx, 1936 reset_vf_rx); 1937 } 1938 mutex_unlock(&pf->vfs.table_lock); 1939 1940 ice_print_vfs_mdd_events(pf); 1941 } 1942 1943 /** 1944 * ice_force_phys_link_state - Force the physical link state 1945 * @vsi: VSI to force the physical link state to up/down 1946 * @link_up: true/false indicates to set the physical link to up/down 1947 * 1948 * Force the physical link state by getting the current PHY capabilities from 1949 * hardware and setting the PHY config based on the determined capabilities. If 1950 * link changes a link event will be triggered because both the Enable Automatic 1951 * Link Update and LESM Enable bits are set when setting the PHY capabilities. 1952 * 1953 * Returns 0 on success, negative on failure 1954 */ 1955 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) 1956 { 1957 struct ice_aqc_get_phy_caps_data *pcaps; 1958 struct ice_aqc_set_phy_cfg_data *cfg; 1959 struct ice_port_info *pi; 1960 struct device *dev; 1961 int retcode; 1962 1963 if (!vsi || !vsi->port_info || !vsi->back) 1964 return -EINVAL; 1965 if (vsi->type != ICE_VSI_PF) 1966 return 0; 1967 1968 dev = ice_pf_to_dev(vsi->back); 1969 1970 pi = vsi->port_info; 1971 1972 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 1973 if (!pcaps) 1974 return -ENOMEM; 1975 1976 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, 1977 NULL); 1978 if (retcode) { 1979 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n", 1980 vsi->vsi_num, retcode); 1981 retcode = -EIO; 1982 goto out; 1983 } 1984 1985 /* No change in link */ 1986 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && 1987 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) 1988 goto out; 1989 1990 /* Use the current user PHY configuration. The current user PHY 1991 * configuration is initialized during probe from PHY capabilities 1992 * software mode, and updated on set PHY configuration. 
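	 * (kmemdup() below takes a private copy of curr_user_phy_cfg, so the
	 * saved user configuration is not modified while the enable-link
	 * caps are toggled for this one call.)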
1993 */ 1994 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL); 1995 if (!cfg) { 1996 retcode = -ENOMEM; 1997 goto out; 1998 } 1999 2000 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 2001 if (link_up) 2002 cfg->caps |= ICE_AQ_PHY_ENA_LINK; 2003 else 2004 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; 2005 2006 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); 2007 if (retcode) { 2008 dev_err(dev, "Failed to set phy config, VSI %d error %d\n", 2009 vsi->vsi_num, retcode); 2010 retcode = -EIO; 2011 } 2012 2013 kfree(cfg); 2014 out: 2015 kfree(pcaps); 2016 return retcode; 2017 } 2018 2019 /** 2020 * ice_init_nvm_phy_type - Initialize the NVM PHY type 2021 * @pi: port info structure 2022 * 2023 * Initialize nvm_phy_type_[low|high] for link lenient mode support 2024 */ 2025 static int ice_init_nvm_phy_type(struct ice_port_info *pi) 2026 { 2027 struct ice_aqc_get_phy_caps_data *pcaps; 2028 struct ice_pf *pf = pi->hw->back; 2029 int err; 2030 2031 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 2032 if (!pcaps) 2033 return -ENOMEM; 2034 2035 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, 2036 pcaps, NULL); 2037 2038 if (err) { 2039 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); 2040 goto out; 2041 } 2042 2043 pf->nvm_phy_type_hi = pcaps->phy_type_high; 2044 pf->nvm_phy_type_lo = pcaps->phy_type_low; 2045 2046 out: 2047 kfree(pcaps); 2048 return err; 2049 } 2050 2051 /** 2052 * ice_init_link_dflt_override - Initialize link default override 2053 * @pi: port info structure 2054 * 2055 * Initialize link default override and PHY total port shutdown during probe 2056 */ 2057 static void ice_init_link_dflt_override(struct ice_port_info *pi) 2058 { 2059 struct ice_link_default_override_tlv *ldo; 2060 struct ice_pf *pf = pi->hw->back; 2061 2062 ldo = &pf->link_dflt_override; 2063 if (ice_get_link_default_override(ldo, pi)) 2064 return; 2065 2066 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) 2067 return; 2068 2069 /* Enable Total Port Shutdown (override/replace link-down-on-close 2070 * ethtool private flag) for ports with Port Disable bit set. 2071 */ 2072 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); 2073 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); 2074 } 2075 2076 /** 2077 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings 2078 * @pi: port info structure 2079 * 2080 * If default override is enabled, initialize the user PHY cfg speed and FEC 2081 * settings using the default override mask from the NVM. 2082 * 2083 * The PHY should only be configured with the default override settings the 2084 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state 2085 * is used to indicate that the user PHY cfg default override is initialized 2086 * and the PHY has not been configured with the default override settings. The 2087 * state is set here, and cleared in ice_configure_phy the first time the PHY is 2088 * configured. 2089 * 2090 * This function should be called only if the FW doesn't support default 2091 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg. 
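 *
 * As a purely hypothetical example: if the NVM reports both 25G and 10G PHY
 * types but the override TLV lists only the 10G bits, the masking in this
 * function leaves just the 10G types in curr_user_phy_cfg, and
 * ldo->fec_options likewise replaces the NVM FEC default.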
2092  */
2093 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2094 {
2095 	struct ice_link_default_override_tlv *ldo;
2096 	struct ice_aqc_set_phy_cfg_data *cfg;
2097 	struct ice_phy_info *phy = &pi->phy;
2098 	struct ice_pf *pf = pi->hw->back;
2099 
2100 	ldo = &pf->link_dflt_override;
2101 
2102 	/* If link default override is enabled, use it to mask NVM PHY
2103 	 * capabilities for speed and FEC default configuration.
2104 	 */
2105 	cfg = &phy->curr_user_phy_cfg;
2106 
2107 	if (ldo->phy_type_low || ldo->phy_type_high) {
2108 		cfg->phy_type_low = pf->nvm_phy_type_lo &
2109 				    cpu_to_le64(ldo->phy_type_low);
2110 		cfg->phy_type_high = pf->nvm_phy_type_hi &
2111 				     cpu_to_le64(ldo->phy_type_high);
2112 	}
2113 	cfg->link_fec_opt = ldo->fec_options;
2114 	phy->curr_user_fec_req = ICE_FEC_AUTO;
2115 
2116 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2117 }
2118 
2119 /**
2120  * ice_init_phy_user_cfg - Initialize the PHY user configuration
2121  * @pi: port info structure
2122  *
2123  * Initialize the current user PHY configuration, speed, FEC, and FC requested
2124  * mode to default. The PHY defaults are from get PHY capabilities topology
2125  * with media so call when media is first available. An error is returned if
2126  * called when media is not available. The PHY initialization completed state is
2127  * set here.
2128  *
2129  * These configurations are used when setting PHY
2130  * configuration. The user PHY configuration is updated on set PHY
2131  * configuration. Returns 0 on success, negative on failure
2132  */
2133 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2134 {
2135 	struct ice_aqc_get_phy_caps_data *pcaps;
2136 	struct ice_phy_info *phy = &pi->phy;
2137 	struct ice_pf *pf = pi->hw->back;
2138 	int err;
2139 
2140 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2141 		return -EIO;
2142 
2143 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2144 	if (!pcaps)
2145 		return -ENOMEM;
2146 
2147 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2148 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2149 					  pcaps, NULL);
2150 	else
2151 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2152 					  pcaps, NULL);
2153 	if (err) {
2154 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2155 		goto err_out;
2156 	}
2157 
2158 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2159 
2160 	/* check if lenient mode is supported and enabled */
2161 	if (ice_fw_supports_link_override(pi->hw) &&
2162 	    !(pcaps->module_compliance_enforcement &
2163 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2164 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2165 
2166 		/* if the FW supports default PHY configuration mode, then the driver
2167 		 * does not have to apply link override settings.
If not,
2168 		 * initialize user PHY configuration with link override values
2169 		 */
2170 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2171 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2172 			ice_init_phy_cfg_dflt_override(pi);
2173 			goto out;
2174 		}
2175 	}
2176 
2177 	/* if link default override is not enabled, set user flow control and
2178 	 * FEC settings based on what get_phy_caps returned
2179 	 */
2180 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2181 						      pcaps->link_fec_options);
2182 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2183 
2184 out:
2185 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2186 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2187 err_out:
2188 	kfree(pcaps);
2189 	return err;
2190 }
2191 
2192 /**
2193  * ice_configure_phy - configure PHY
2194  * @vsi: VSI of PHY
2195  *
2196  * Set the PHY configuration. If the current PHY configuration is the same as
2197  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise,
2198  * configure the PHY based on the get PHY capabilities for topology with media.
2199  */
2200 static int ice_configure_phy(struct ice_vsi *vsi)
2201 {
2202 	struct device *dev = ice_pf_to_dev(vsi->back);
2203 	struct ice_port_info *pi = vsi->port_info;
2204 	struct ice_aqc_get_phy_caps_data *pcaps;
2205 	struct ice_aqc_set_phy_cfg_data *cfg;
2206 	struct ice_phy_info *phy = &pi->phy;
2207 	struct ice_pf *pf = vsi->back;
2208 	int err;
2209 
2210 	/* Ensure we have media as we cannot configure a medialess port */
2211 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2212 		return -ENOMEDIUM;
2213 
2214 	ice_print_topo_conflict(vsi);
2215 
2216 	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2217 	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2218 		return -EPERM;
2219 
2220 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2221 		return ice_force_phys_link_state(vsi, true);
2222 
2223 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2224 	if (!pcaps)
2225 		return -ENOMEM;
2226 
2227 	/* Get current PHY config */
2228 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2229 				  NULL);
2230 	if (err) {
2231 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2232 			vsi->vsi_num, err);
2233 		goto done;
2234 	}
2235 
2236 	/* If PHY enable link is configured and configuration has not changed,
2237 	 * there's nothing to do
2238 	 */
2239 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2240 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2241 		goto done;
2242 
2243 	/* Use PHY topology as baseline for configuration */
2244 	memset(pcaps, 0, sizeof(*pcaps));
2245 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2246 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2247 					  pcaps, NULL);
2248 	else
2249 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2250 					  pcaps, NULL);
2251 	if (err) {
2252 		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2253 			vsi->vsi_num, err);
2254 		goto done;
2255 	}
2256 
2257 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2258 	if (!cfg) {
2259 		err = -ENOMEM;
2260 		goto done;
2261 	}
2262 
2263 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2264 
2265 	/* Speed - If default override pending, use curr_user_phy_cfg set in
2266 	 * ice_init_phy_cfg_dflt_override.
2267 	 */
2268 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2269 			       vsi->back->state)) {
2270 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2271 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2272 	} else {
2273 		u64 phy_low = 0, phy_high = 0;
2274 
2275 		ice_update_phy_type(&phy_low, &phy_high,
2276 				    pi->phy.curr_user_speed_req);
2277 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2278 		cfg->phy_type_high = pcaps->phy_type_high &
2279 				     cpu_to_le64(phy_high);
2280 	}
2281 
2282 	/* Can't provide what was requested; use PHY capabilities */
2283 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2284 		cfg->phy_type_low = pcaps->phy_type_low;
2285 		cfg->phy_type_high = pcaps->phy_type_high;
2286 	}
2287 
2288 	/* FEC */
2289 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2290 
2291 	/* Can't provide what was requested; use PHY capabilities */
2292 	if (cfg->link_fec_opt !=
2293 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2294 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2295 		cfg->link_fec_opt = pcaps->link_fec_options;
2296 	}
2297 
2298 	/* Flow Control - always supported; no need to check against
2299 	 * capabilities
2300 	 */
2301 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2302 
2303 	/* Enable link and link update */
2304 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2305 
2306 	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2307 	if (err)
2308 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2309 			vsi->vsi_num, err);
2310 
2311 	kfree(cfg);
2312 done:
2313 	kfree(pcaps);
2314 	return err;
2315 }
2316 
2317 /**
2318  * ice_check_media_subtask - Check for media
2319  * @pf: pointer to PF struct
2320  *
2321  * If media is available, then initialize the PHY user configuration if it
2322  * has not been initialized, and configure the PHY if the interface is up.
2323  */
2324 static void ice_check_media_subtask(struct ice_pf *pf)
2325 {
2326 	struct ice_port_info *pi;
2327 	struct ice_vsi *vsi;
2328 	int err;
2329 
2330 	/* No need to check for media if it's already present */
2331 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2332 		return;
2333 
2334 	vsi = ice_get_main_vsi(pf);
2335 	if (!vsi)
2336 		return;
2337 
2338 	/* Refresh link info and check if media is present */
2339 	pi = vsi->port_info;
2340 	err = ice_update_link_info(pi);
2341 	if (err)
2342 		return;
2343 
2344 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2345 
2346 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2347 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2348 			ice_init_phy_user_cfg(pi);
2349 
2350 		/* PHY settings are reset on media insertion, reconfigure
2351 		 * PHY to preserve settings.
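		 * (This is why the user PHY configuration is cached in
		 * software: after a module is re-plugged, ice_configure_phy()
		 * below re-applies the user's speed/FEC/FC requests rather
		 * than leaving the module defaults in place.)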
2352 */ 2353 if (test_bit(ICE_VSI_DOWN, vsi->state) && 2354 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) 2355 return; 2356 2357 err = ice_configure_phy(vsi); 2358 if (!err) 2359 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); 2360 2361 /* A Link Status Event will be generated; the event handler 2362 * will complete bringing the interface up 2363 */ 2364 } 2365 } 2366 2367 static void ice_service_task_recovery_mode(struct work_struct *work) 2368 { 2369 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); 2370 2371 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); 2372 ice_clean_adminq_subtask(pf); 2373 2374 ice_service_task_complete(pf); 2375 2376 mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100)); 2377 } 2378 2379 /** 2380 * ice_service_task - manage and run subtasks 2381 * @work: pointer to work_struct contained by the PF struct 2382 */ 2383 static void ice_service_task(struct work_struct *work) 2384 { 2385 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); 2386 unsigned long start_time = jiffies; 2387 2388 if (pf->health_reporters.tx_hang_buf.tx_ring) { 2389 ice_report_tx_hang(pf); 2390 pf->health_reporters.tx_hang_buf.tx_ring = NULL; 2391 } 2392 2393 ice_reset_subtask(pf); 2394 2395 /* bail if a reset/recovery cycle is pending or rebuild failed */ 2396 if (ice_is_reset_in_progress(pf->state) || 2397 test_bit(ICE_SUSPENDED, pf->state) || 2398 test_bit(ICE_NEEDS_RESTART, pf->state)) { 2399 ice_service_task_complete(pf); 2400 return; 2401 } 2402 2403 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) { 2404 struct iidc_event *event; 2405 2406 event = kzalloc(sizeof(*event), GFP_KERNEL); 2407 if (event) { 2408 set_bit(IIDC_EVENT_CRIT_ERR, event->type); 2409 /* report the entire OICR value to AUX driver */ 2410 swap(event->reg, pf->oicr_err_reg); 2411 ice_send_event_to_aux(pf, event); 2412 kfree(event); 2413 } 2414 } 2415 2416 /* unplug aux dev per request, if an unplug request came in 2417 * while processing a plug request, this will handle it 2418 */ 2419 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags)) 2420 ice_unplug_aux_dev(pf); 2421 2422 /* Plug aux device per request */ 2423 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) 2424 ice_plug_aux_dev(pf); 2425 2426 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { 2427 struct iidc_event *event; 2428 2429 event = kzalloc(sizeof(*event), GFP_KERNEL); 2430 if (event) { 2431 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); 2432 ice_send_event_to_aux(pf, event); 2433 kfree(event); 2434 } 2435 } 2436 2437 ice_clean_adminq_subtask(pf); 2438 ice_check_media_subtask(pf); 2439 ice_check_for_hang_subtask(pf); 2440 ice_sync_fltr_subtask(pf); 2441 ice_handle_mdd_event(pf); 2442 ice_watchdog_subtask(pf); 2443 2444 if (ice_is_safe_mode(pf)) { 2445 ice_service_task_complete(pf); 2446 return; 2447 } 2448 2449 ice_process_vflr_event(pf); 2450 ice_clean_mailboxq_subtask(pf); 2451 ice_clean_sbq_subtask(pf); 2452 ice_sync_arfs_fltrs(pf); 2453 ice_flush_fdir_ctx(pf); 2454 2455 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */ 2456 ice_service_task_complete(pf); 2457 2458 /* If the tasks have taken longer than one service timer period 2459 * or there is more work to be done, reset the service timer to 2460 * schedule the service task now. 
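	 * (Passing "jiffies" as the expiry makes the timer fire on the next
	 * tick; ice_service_timer() then calls ice_service_task_schedule()
	 * to re-queue this work item, so the remaining work is picked up
	 * almost immediately.)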
2461 */ 2462 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || 2463 test_bit(ICE_MDD_EVENT_PENDING, pf->state) || 2464 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || 2465 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || 2466 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || 2467 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || 2468 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) 2469 mod_timer(&pf->serv_tmr, jiffies); 2470 } 2471 2472 /** 2473 * ice_set_ctrlq_len - helper function to set controlq length 2474 * @hw: pointer to the HW instance 2475 */ 2476 static void ice_set_ctrlq_len(struct ice_hw *hw) 2477 { 2478 hw->adminq.num_rq_entries = ICE_AQ_LEN; 2479 hw->adminq.num_sq_entries = ICE_AQ_LEN; 2480 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; 2481 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; 2482 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; 2483 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; 2484 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2485 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2486 hw->sbq.num_rq_entries = ICE_SBQ_LEN; 2487 hw->sbq.num_sq_entries = ICE_SBQ_LEN; 2488 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2489 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2490 } 2491 2492 /** 2493 * ice_schedule_reset - schedule a reset 2494 * @pf: board private structure 2495 * @reset: reset being requested 2496 */ 2497 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) 2498 { 2499 struct device *dev = ice_pf_to_dev(pf); 2500 2501 /* bail out if earlier reset has failed */ 2502 if (test_bit(ICE_RESET_FAILED, pf->state)) { 2503 dev_dbg(dev, "earlier reset has failed\n"); 2504 return -EIO; 2505 } 2506 /* bail if reset/recovery already in progress */ 2507 if (ice_is_reset_in_progress(pf->state)) { 2508 dev_dbg(dev, "Reset already in progress\n"); 2509 return -EBUSY; 2510 } 2511 2512 switch (reset) { 2513 case ICE_RESET_PFR: 2514 set_bit(ICE_PFR_REQ, pf->state); 2515 break; 2516 case ICE_RESET_CORER: 2517 set_bit(ICE_CORER_REQ, pf->state); 2518 break; 2519 case ICE_RESET_GLOBR: 2520 set_bit(ICE_GLOBR_REQ, pf->state); 2521 break; 2522 default: 2523 return -EINVAL; 2524 } 2525 2526 ice_service_task_schedule(pf); 2527 return 0; 2528 } 2529 2530 /** 2531 * ice_irq_affinity_notify - Callback for affinity changes 2532 * @notify: context as to what irq was changed 2533 * @mask: the new affinity mask 2534 * 2535 * This is a callback function used by the irq_set_affinity_notifier function 2536 * so that we may register to receive changes to the irq affinity masks. 2537 */ 2538 static void 2539 ice_irq_affinity_notify(struct irq_affinity_notify *notify, 2540 const cpumask_t *mask) 2541 { 2542 struct ice_q_vector *q_vector = 2543 container_of(notify, struct ice_q_vector, affinity_notify); 2544 2545 cpumask_copy(&q_vector->affinity_mask, mask); 2546 } 2547 2548 /** 2549 * ice_irq_affinity_release - Callback for affinity notifier release 2550 * @ref: internal core kernel usage 2551 * 2552 * This is a callback function used by the irq_set_affinity_notifier function 2553 * to inform the current notification subscriber that they will no longer 2554 * receive notifications. 
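 *
 * A release hook must still be supplied even though there is nothing to
 * free here: the core drops the notifier via kref_put(&notify->kref,
 * notify->release), so leaving it NULL is not an option (this rationale
 * is an assumption based on the kernel's irq_affinity_notify teardown).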
2555  */
2556 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2557 
2558 /**
2559  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2560  * @vsi: the VSI being configured
2561  */
2562 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2563 {
2564 	struct ice_hw *hw = &vsi->back->hw;
2565 	int i;
2566 
2567 	ice_for_each_q_vector(vsi, i)
2568 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2569 
2570 	ice_flush(hw);
2571 	return 0;
2572 }
2573 
2574 /**
2575  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2576  * @vsi: the VSI being configured
2577  * @basename: name for the vector
2578  */
2579 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2580 {
2581 	int q_vectors = vsi->num_q_vectors;
2582 	struct ice_pf *pf = vsi->back;
2583 	struct device *dev;
2584 	int rx_int_idx = 0;
2585 	int tx_int_idx = 0;
2586 	int vector, err;
2587 	int irq_num;
2588 
2589 	dev = ice_pf_to_dev(pf);
2590 	for (vector = 0; vector < q_vectors; vector++) {
2591 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2592 
2593 		irq_num = q_vector->irq.virq;
2594 
2595 		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2596 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2597 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2598 			tx_int_idx++;
2599 		} else if (q_vector->rx.rx_ring) {
2600 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2601 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2602 		} else if (q_vector->tx.tx_ring) {
2603 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2604 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2605 		} else {
2606 			/* skip this unused q_vector */
2607 			continue;
2608 		}
2609 		if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2610 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2611 					       IRQF_SHARED, q_vector->name,
2612 					       q_vector);
2613 		else
2614 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2615 					       0, q_vector->name, q_vector);
2616 		if (err) {
2617 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2618 				   err);
2619 			goto free_q_irqs;
2620 		}
2621 
2622 		/* register for affinity change notifications */
2623 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2624 			struct irq_affinity_notify *affinity_notify;
2625 
2626 			affinity_notify = &q_vector->affinity_notify;
2627 			affinity_notify->notify = ice_irq_affinity_notify;
2628 			affinity_notify->release = ice_irq_affinity_release;
2629 			irq_set_affinity_notifier(irq_num, affinity_notify);
2630 		}
2631 
2632 		/* assign the mask for this irq */
2633 		irq_update_affinity_hint(irq_num, &q_vector->affinity_mask);
2634 	}
2635 
2636 	err = ice_set_cpu_rx_rmap(vsi);
2637 	if (err) {
2638 		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2639 			   vsi->vsi_num, ERR_PTR(err));
2640 		goto free_q_irqs;
2641 	}
2642 
2643 	vsi->irqs_ready = true;
2644 	return 0;
2645 
2646 free_q_irqs:
2647 	while (vector--) {
2648 		irq_num = vsi->q_vectors[vector]->irq.virq;
2649 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2650 			irq_set_affinity_notifier(irq_num, NULL);
2651 		irq_update_affinity_hint(irq_num, NULL);
2652 		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]); /* same dev_id cookie as devm_request_irq() above */
2653 	}
2654 	return err;
2655 }
2656 
2657 /**
2658  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2659  * @vsi: VSI to setup Tx rings used by XDP
2660  *
2661  * Return 0 on success and negative value on error
2662  */
2663 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2664 {
2665 	struct device *dev = ice_pf_to_dev(vsi->back);
2666 	struct ice_tx_desc *tx_desc;
2667 	int i, j;
2668 
2669 	ice_for_each_xdp_txq(vsi, i) {
2670 		u16 xdp_q_idx = vsi->alloc_txq + i;
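		/* The index above lands in the tail of txq_map: XDP rings sit
		 * right after the vsi->alloc_txq regular Tx queues reserved
		 * by __ice_vsi_get_qs() with vsi_map_offset = alloc_txq.
		 */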
2671 struct ice_ring_stats *ring_stats; 2672 struct ice_tx_ring *xdp_ring; 2673 2674 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); 2675 if (!xdp_ring) 2676 goto free_xdp_rings; 2677 2678 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); 2679 if (!ring_stats) { 2680 ice_free_tx_ring(xdp_ring); 2681 goto free_xdp_rings; 2682 } 2683 2684 xdp_ring->ring_stats = ring_stats; 2685 xdp_ring->q_index = xdp_q_idx; 2686 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; 2687 xdp_ring->vsi = vsi; 2688 xdp_ring->netdev = NULL; 2689 xdp_ring->dev = dev; 2690 xdp_ring->count = vsi->num_tx_desc; 2691 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); 2692 if (ice_setup_tx_ring(xdp_ring)) 2693 goto free_xdp_rings; 2694 ice_set_ring_xdp(xdp_ring); 2695 spin_lock_init(&xdp_ring->tx_lock); 2696 for (j = 0; j < xdp_ring->count; j++) { 2697 tx_desc = ICE_TX_DESC(xdp_ring, j); 2698 tx_desc->cmd_type_offset_bsz = 0; 2699 } 2700 } 2701 2702 return 0; 2703 2704 free_xdp_rings: 2705 for (; i >= 0; i--) { 2706 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) { 2707 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); 2708 vsi->xdp_rings[i]->ring_stats = NULL; 2709 ice_free_tx_ring(vsi->xdp_rings[i]); 2710 } 2711 } 2712 return -ENOMEM; 2713 } 2714 2715 /** 2716 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI 2717 * @vsi: VSI to set the bpf prog on 2718 * @prog: the bpf prog pointer 2719 */ 2720 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) 2721 { 2722 struct bpf_prog *old_prog; 2723 int i; 2724 2725 old_prog = xchg(&vsi->xdp_prog, prog); 2726 ice_for_each_rxq(vsi, i) 2727 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); 2728 2729 if (old_prog) 2730 bpf_prog_put(old_prog); 2731 } 2732 2733 static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid) 2734 { 2735 struct ice_q_vector *q_vector; 2736 struct ice_tx_ring *ring; 2737 2738 if (static_key_enabled(&ice_xdp_locking_key)) 2739 return vsi->xdp_rings[qid % vsi->num_xdp_txq]; 2740 2741 q_vector = vsi->rx_rings[qid]->q_vector; 2742 ice_for_each_tx_ring(ring, q_vector->tx) 2743 if (ice_ring_is_xdp(ring)) 2744 return ring; 2745 2746 return NULL; 2747 } 2748 2749 /** 2750 * ice_map_xdp_rings - Map XDP rings to interrupt vectors 2751 * @vsi: the VSI with XDP rings being configured 2752 * 2753 * Map XDP rings to interrupt vectors and perform the configuration steps 2754 * dependent on the mapping. 
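 *
 * The split mirrors ice_vsi_map_rings_to_vectors(): DIV_ROUND_UP of the
 * remaining rings is assigned per vector, so e.g. 8 XDP rings over 3
 * vectors yields a 3/3/2 distribution.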
2755 */ 2756 void ice_map_xdp_rings(struct ice_vsi *vsi) 2757 { 2758 int xdp_rings_rem = vsi->num_xdp_txq; 2759 int v_idx, q_idx; 2760 2761 /* follow the logic from ice_vsi_map_rings_to_vectors */ 2762 ice_for_each_q_vector(vsi, v_idx) { 2763 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2764 int xdp_rings_per_v, q_id, q_base; 2765 2766 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, 2767 vsi->num_q_vectors - v_idx); 2768 q_base = vsi->num_xdp_txq - xdp_rings_rem; 2769 2770 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { 2771 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; 2772 2773 xdp_ring->q_vector = q_vector; 2774 xdp_ring->next = q_vector->tx.tx_ring; 2775 q_vector->tx.tx_ring = xdp_ring; 2776 } 2777 xdp_rings_rem -= xdp_rings_per_v; 2778 } 2779 2780 ice_for_each_rxq(vsi, q_idx) { 2781 vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi, 2782 q_idx); 2783 ice_tx_xsk_pool(vsi, q_idx); 2784 } 2785 } 2786 2787 /** 2788 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP 2789 * @vsi: VSI to bring up Tx rings used by XDP 2790 * @prog: bpf program that will be assigned to VSI 2791 * @cfg_type: create from scratch or restore the existing configuration 2792 * 2793 * Return 0 on success and negative value on error 2794 */ 2795 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, 2796 enum ice_xdp_cfg cfg_type) 2797 { 2798 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2799 struct ice_pf *pf = vsi->back; 2800 struct ice_qs_cfg xdp_qs_cfg = { 2801 .qs_mutex = &pf->avail_q_mutex, 2802 .pf_map = pf->avail_txqs, 2803 .pf_map_size = pf->max_pf_txqs, 2804 .q_count = vsi->num_xdp_txq, 2805 .scatter_count = ICE_MAX_SCATTER_TXQS, 2806 .vsi_map = vsi->txq_map, 2807 .vsi_map_offset = vsi->alloc_txq, 2808 .mapping_mode = ICE_VSI_MAP_CONTIG 2809 }; 2810 struct device *dev; 2811 int status, i; 2812 2813 dev = ice_pf_to_dev(pf); 2814 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, 2815 sizeof(*vsi->xdp_rings), GFP_KERNEL); 2816 if (!vsi->xdp_rings) 2817 return -ENOMEM; 2818 2819 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; 2820 if (__ice_vsi_get_qs(&xdp_qs_cfg)) 2821 goto err_map_xdp; 2822 2823 if (static_key_enabled(&ice_xdp_locking_key)) 2824 netdev_warn(vsi->netdev, 2825 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n"); 2826 2827 if (ice_xdp_alloc_setup_rings(vsi)) 2828 goto clear_xdp_rings; 2829 2830 /* omit the scheduler update if in reset path; XDP queues will be 2831 * taken into account at the end of ice_vsi_rebuild, where 2832 * ice_cfg_vsi_lan is being called 2833 */ 2834 if (cfg_type == ICE_XDP_CFG_PART) 2835 return 0; 2836 2837 ice_map_xdp_rings(vsi); 2838 2839 /* tell the Tx scheduler that right now we have 2840 * additional queues 2841 */ 2842 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2843 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; 2844 2845 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2846 max_txqs); 2847 if (status) { 2848 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n", 2849 status); 2850 goto clear_xdp_rings; 2851 } 2852 2853 /* assign the prog only when it's not already present on VSI; 2854 * this flow is a subject of both ethtool -L and ndo_bpf flows; 2855 * VSI rebuild that happens under ethtool -L can expose us to 2856 * the bpf_prog refcount issues as we would be swapping same 2857 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put 2858 * on it as it would be treated as an 'old_prog'; for ndo_bpf 2859 * this 
is not harmful as dev_xdp_install bumps the refcount 2860 * before calling the op exposed by the driver; 2861 */ 2862 if (!ice_is_xdp_ena_vsi(vsi)) 2863 ice_vsi_assign_bpf_prog(vsi, prog); 2864 2865 return 0; 2866 clear_xdp_rings: 2867 ice_for_each_xdp_txq(vsi, i) 2868 if (vsi->xdp_rings[i]) { 2869 kfree_rcu(vsi->xdp_rings[i], rcu); 2870 vsi->xdp_rings[i] = NULL; 2871 } 2872 2873 err_map_xdp: 2874 mutex_lock(&pf->avail_q_mutex); 2875 ice_for_each_xdp_txq(vsi, i) { 2876 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); 2877 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; 2878 } 2879 mutex_unlock(&pf->avail_q_mutex); 2880 2881 devm_kfree(dev, vsi->xdp_rings); 2882 return -ENOMEM; 2883 } 2884 2885 /** 2886 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings 2887 * @vsi: VSI to remove XDP rings 2888 * @cfg_type: disable XDP permanently or allow it to be restored later 2889 * 2890 * Detach XDP rings from irq vectors, clean up the PF bitmap and free 2891 * resources 2892 */ 2893 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type) 2894 { 2895 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2896 struct ice_pf *pf = vsi->back; 2897 int i, v_idx; 2898 2899 /* q_vectors are freed in reset path so there's no point in detaching 2900 * rings 2901 */ 2902 if (cfg_type == ICE_XDP_CFG_PART) 2903 goto free_qmap; 2904 2905 ice_for_each_q_vector(vsi, v_idx) { 2906 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2907 struct ice_tx_ring *ring; 2908 2909 ice_for_each_tx_ring(ring, q_vector->tx) 2910 if (!ring->tx_buf || !ice_ring_is_xdp(ring)) 2911 break; 2912 2913 /* restore the value of last node prior to XDP setup */ 2914 q_vector->tx.tx_ring = ring; 2915 } 2916 2917 free_qmap: 2918 mutex_lock(&pf->avail_q_mutex); 2919 ice_for_each_xdp_txq(vsi, i) { 2920 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); 2921 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; 2922 } 2923 mutex_unlock(&pf->avail_q_mutex); 2924 2925 ice_for_each_xdp_txq(vsi, i) 2926 if (vsi->xdp_rings[i]) { 2927 if (vsi->xdp_rings[i]->desc) { 2928 synchronize_rcu(); 2929 ice_free_tx_ring(vsi->xdp_rings[i]); 2930 } 2931 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); 2932 vsi->xdp_rings[i]->ring_stats = NULL; 2933 kfree_rcu(vsi->xdp_rings[i], rcu); 2934 vsi->xdp_rings[i] = NULL; 2935 } 2936 2937 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); 2938 vsi->xdp_rings = NULL; 2939 2940 if (static_key_enabled(&ice_xdp_locking_key)) 2941 static_branch_dec(&ice_xdp_locking_key); 2942 2943 if (cfg_type == ICE_XDP_CFG_PART) 2944 return 0; 2945 2946 ice_vsi_assign_bpf_prog(vsi, NULL); 2947 2948 /* notify Tx scheduler that we destroyed XDP queues and bring 2949 * back the old number of child nodes 2950 */ 2951 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2952 max_txqs[i] = vsi->num_txq; 2953 2954 /* change number of XDP Tx queues to 0 */ 2955 vsi->num_xdp_txq = 0; 2956 2957 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2958 max_txqs); 2959 } 2960 2961 /** 2962 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI 2963 * @vsi: VSI to schedule napi on 2964 */ 2965 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi) 2966 { 2967 int i; 2968 2969 ice_for_each_rxq(vsi, i) { 2970 struct ice_rx_ring *rx_ring = vsi->rx_rings[i]; 2971 2972 if (READ_ONCE(rx_ring->xsk_pool)) 2973 napi_schedule(&rx_ring->q_vector->napi); 2974 } 2975 } 2976 2977 /** 2978 * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have 2979 * @vsi: VSI to determine the 
count of XDP Tx qs
2980  *
2981  * Returns 0 if the available Tx queue count is at least half of the CPU
2982  * count, -ENOMEM otherwise
2983  */
2984 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2985 {
2986 	u16 avail = ice_get_avail_txq_count(vsi->back);
2987 	u16 cpus = num_possible_cpus();
2988 
2989 	if (avail < cpus / 2)
2990 		return -ENOMEM;
2991 
2992 	if (vsi->type == ICE_VSI_SF)
2993 		avail = vsi->alloc_txq;
2994 
2995 	vsi->num_xdp_txq = min_t(u16, avail, cpus);
2996 
2997 	if (vsi->num_xdp_txq < cpus)
2998 		static_branch_inc(&ice_xdp_locking_key);
2999 
3000 	return 0;
3001 }
3002 
3003 /**
3004  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
3005  * @vsi: Pointer to VSI structure
3006  */
3007 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
3008 {
3009 	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
3010 		return ICE_RXBUF_1664;
3011 	else
3012 		return ICE_RXBUF_3072;
3013 }
3014 
3015 /**
3016  * ice_xdp_setup_prog - Add or remove XDP eBPF program
3017  * @vsi: VSI to setup XDP for
3018  * @prog: XDP program
3019  * @extack: netlink extended ack
3020  */
3021 static int
3022 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
3023 		   struct netlink_ext_ack *extack)
3024 {
3025 	unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
3026 	int ret = 0, xdp_ring_err = 0;
3027 	bool if_running;
3028 
3029 	if (prog && !prog->aux->xdp_has_frags) {
3030 		if (frame_size > ice_max_xdp_frame_size(vsi)) {
3031 			NL_SET_ERR_MSG_MOD(extack,
3032 					   "MTU is too large for linear frames and XDP prog does not support frags");
3033 			return -EOPNOTSUPP;
3034 		}
3035 	}
3036 
3037 	/* hot swap progs and avoid toggling link */
3038 	if (ice_is_xdp_ena_vsi(vsi) == !!prog ||
3039 	    test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) {
3040 		ice_vsi_assign_bpf_prog(vsi, prog);
3041 		return 0;
3042 	}
3043 
3044 	if_running = netif_running(vsi->netdev) &&
3045 		     !test_and_set_bit(ICE_VSI_DOWN, vsi->state);
3046 
3047 	/* need to stop netdev while setting up the program for Rx rings */
3048 	if (if_running) {
3049 		ret = ice_down(vsi);
3050 		if (ret) {
3051 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
3052 			return ret;
3053 		}
3054 	}
3055 
3056 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
3057 		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
3058 		if (xdp_ring_err) {
3059 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
3060 		} else {
3061 			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
3062 							     ICE_XDP_CFG_FULL);
3063 			if (xdp_ring_err)
3064 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
3065 		}
3066 		xdp_features_set_redirect_target(vsi->netdev, true);
3067 		/* reallocate Rx queues that are used for zero-copy */
3068 		xdp_ring_err = ice_realloc_zc_buf(vsi, true);
3069 		if (xdp_ring_err)
3070 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
3071 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
3072 		xdp_features_clear_redirect_target(vsi->netdev);
3073 		xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
3074 		if (xdp_ring_err)
3075 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
3076 		/* reallocate Rx queues that were used for zero-copy */
3077 		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
3078 		if (xdp_ring_err)
3079 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
3080 	}
3081 
3082 	if (if_running)
3083 		ret = ice_up(vsi);
3084 
3085 	if (!ret && prog)
3086 		ice_vsi_rx_napi_schedule(vsi);
3087 
3088 	return (ret || xdp_ring_err) ?
-ENOMEM : 0;
3089 }
3090 
3091 /**
3092  * ice_xdp_safe_mode - XDP handler for safe mode
3093  * @dev: netdevice
3094  * @xdp: XDP command
3095  */
3096 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
3097 			     struct netdev_bpf *xdp)
3098 {
3099 	NL_SET_ERR_MSG_MOD(xdp->extack,
3100 			   "Please provide working DDP firmware package in order to use XDP\n"
3101 			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
3102 	return -EOPNOTSUPP;
3103 }
3104 
3105 /**
3106  * ice_xdp - implements XDP handler
3107  * @dev: netdevice
3108  * @xdp: XDP command
3109  */
3110 int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3111 {
3112 	struct ice_netdev_priv *np = netdev_priv(dev);
3113 	struct ice_vsi *vsi = np->vsi;
3114 	int ret;
3115 
3116 	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) {
3117 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI");
3118 		return -EINVAL;
3119 	}
3120 
3121 	mutex_lock(&vsi->xdp_state_lock);
3122 
3123 	switch (xdp->command) {
3124 	case XDP_SETUP_PROG:
3125 		ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3126 		break;
3127 	case XDP_SETUP_XSK_POOL:
3128 		ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);
3129 		break;
3130 	default:
3131 		ret = -EINVAL;
3132 	}
3133 
3134 	mutex_unlock(&vsi->xdp_state_lock);
3135 	return ret;
3136 }
3137 
3138 /**
3139  * ice_ena_misc_vector - enable the non-queue interrupts
3140  * @pf: board private structure
3141  */
3142 static void ice_ena_misc_vector(struct ice_pf *pf)
3143 {
3144 	struct ice_hw *hw = &pf->hw;
3145 	u32 pf_intr_start_offset;
3146 	u32 val;
3147 
3148 	/* Disable anti-spoof detection interrupt to prevent spurious event
3149 	 * interrupts during a function reset. Anti-spoof functionality is
3150 	 * still supported.
3151 	 */
3152 	val = rd32(hw, GL_MDCK_TX_TDPU);
3153 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3154 	wr32(hw, GL_MDCK_TX_TDPU, val);
3155 
3156 	/* clear things first */
3157 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
3158 	rd32(hw, PFINT_OICR);		/* read to clear */
3159 
3160 	val = (PFINT_OICR_ECC_ERR_M |
3161 	       PFINT_OICR_MAL_DETECT_M |
3162 	       PFINT_OICR_GRST_M |
3163 	       PFINT_OICR_PCI_EXCEPTION_M |
3164 	       PFINT_OICR_VFLR_M |
3165 	       PFINT_OICR_HMC_ERR_M |
3166 	       PFINT_OICR_PE_PUSH_M |
3167 	       PFINT_OICR_PE_CRITERR_M);
3168 
3169 	wr32(hw, PFINT_OICR_ENA, val);
3170 
3171 	/* SW_ITR_IDX = 0, but don't change INTENA */
3172 	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3173 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3174 
3175 	if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3176 		return;
3177 	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3178 	wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3179 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3180 }
3181 
3182 /**
3183  * ice_ll_ts_intr - ll_ts interrupt handler
3184  * @irq: interrupt number
3185  * @data: pointer to the PF structure
3186  */
3187 static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
3188 {
3189 	struct ice_pf *pf = data;
3190 	u32 pf_intr_start_offset;
3191 	struct ice_ptp_tx *tx;
3192 	unsigned long flags;
3193 	struct ice_hw *hw;
3194 	u32 val;
3195 	u8 idx;
3196 
3197 	hw = &pf->hw;
3198 	tx = &pf->ptp.port.tx;
3199 	spin_lock_irqsave(&tx->lock, flags);
3200 	ice_ptp_complete_tx_single_tstamp(tx);
3201 
3202 	idx = find_next_bit_wrap(tx->in_use, tx->len,
3203 				 tx->last_ll_ts_idx_read + 1);
3204 	if (idx != tx->len)
3205 		ice_ptp_req_tx_single_tstamp(tx, idx);
3206 	spin_unlock_irqrestore(&tx->lock, flags);
3207 
3208 	val = GLINT_DYN_CTL_INTENA_M |
GLINT_DYN_CTL_CLEARPBA_M |
3209 	      (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
3210 	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3211 	wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3212 	     val);
3213 
3214 	return IRQ_HANDLED;
3215 }
3216 
3217 /**
3218  * ice_misc_intr - misc interrupt handler
3219  * @irq: interrupt number
3220  * @data: pointer to the PF structure
3221  */
3222 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3223 {
3224 	struct ice_pf *pf = (struct ice_pf *)data;
3225 	irqreturn_t ret = IRQ_HANDLED;
3226 	struct ice_hw *hw = &pf->hw;
3227 	struct device *dev;
3228 	u32 oicr, ena_mask;
3229 
3230 	dev = ice_pf_to_dev(pf);
3231 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3232 	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3233 	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3234 
3235 	oicr = rd32(hw, PFINT_OICR);
3236 	ena_mask = rd32(hw, PFINT_OICR_ENA);
3237 
3238 	if (oicr & PFINT_OICR_SWINT_M) {
3239 		ena_mask &= ~PFINT_OICR_SWINT_M;
3240 		pf->sw_int_count++;
3241 	}
3242 
3243 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
3244 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3245 		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3246 	}
3247 	if (oicr & PFINT_OICR_VFLR_M) {
3248 		/* disable any further VFLR event notifications */
3249 		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3250 			u32 reg = rd32(hw, PFINT_OICR_ENA);
3251 
3252 			reg &= ~PFINT_OICR_VFLR_M;
3253 			wr32(hw, PFINT_OICR_ENA, reg);
3254 		} else {
3255 			ena_mask &= ~PFINT_OICR_VFLR_M;
3256 			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3257 		}
3258 	}
3259 
3260 	if (oicr & PFINT_OICR_GRST_M) {
3261 		u32 reset;
3262 
3263 		/* we have a reset warning */
3264 		ena_mask &= ~PFINT_OICR_GRST_M;
3265 		reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M,
3266 				  rd32(hw, GLGEN_RSTAT));
3267 
3268 		if (reset == ICE_RESET_CORER)
3269 			pf->corer_count++;
3270 		else if (reset == ICE_RESET_GLOBR)
3271 			pf->globr_count++;
3272 		else if (reset == ICE_RESET_EMPR)
3273 			pf->empr_count++;
3274 		else
3275 			dev_dbg(dev, "Invalid reset type %d\n", reset);
3276 
3277 		/* If a reset cycle isn't already in progress, we set a bit in
3278 		 * pf->state so that the service task can start a reset/rebuild.
3279 		 */
3280 		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3281 			if (reset == ICE_RESET_CORER)
3282 				set_bit(ICE_CORER_RECV, pf->state);
3283 			else if (reset == ICE_RESET_GLOBR)
3284 				set_bit(ICE_GLOBR_RECV, pf->state);
3285 			else
3286 				set_bit(ICE_EMPR_RECV, pf->state);
3287 
3288 			/* There are a couple of different bits at play here.
3289 			 * hw->reset_ongoing indicates whether the hardware is
3290 			 * in reset. This is set to true when a reset interrupt
3291 			 * is received and set back to false after the driver
3292 			 * has determined that the hardware is out of reset.
3293 			 *
3294 			 * ICE_RESET_OICR_RECV in pf->state indicates
3295 			 * that a post reset rebuild is required before the
3296 			 * driver is operational again. This is set above.
3297 			 *
3298 			 * As this is the start of the reset/rebuild cycle, set
3299 			 * both to indicate that.
3300 			 */
3301 			hw->reset_ongoing = true;
3302 		}
3303 	}
3304 
3305 	if (oicr & PFINT_OICR_TSYN_TX_M) {
3306 		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3307 		if (ice_pf_state_is_nominal(pf) &&
3308 		    pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
3309 			struct ice_ptp_tx *tx = &pf->ptp.port.tx;
3310 			unsigned long flags;
3311 			u8 idx;
3312 
3313 			spin_lock_irqsave(&tx->lock, flags);
3314 			idx = find_next_bit_wrap(tx->in_use, tx->len,
3315 						 tx->last_ll_ts_idx_read + 1);
3316 			if (idx != tx->len)
3317 				ice_ptp_req_tx_single_tstamp(tx, idx);
3318 			spin_unlock_irqrestore(&tx->lock, flags);
3319 		} else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
3320 			set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
3321 			ret = IRQ_WAKE_THREAD;
3322 		}
3323 	}
3324 
3325 	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3326 		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3327 		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3328 
3329 		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3330 
3331 		if (ice_pf_src_tmr_owned(pf)) {
3332 			/* Save EVENTs from GLTSYN register */
3333 			pf->ptp.ext_ts_irq |= gltsyn_stat &
3334 					      (GLTSYN_STAT_EVENT0_M |
3335 					       GLTSYN_STAT_EVENT1_M |
3336 					       GLTSYN_STAT_EVENT2_M);
3337 
3338 			ice_ptp_extts_event(pf);
3339 		}
3340 	}
3341 
3342 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3343 	if (oicr & ICE_AUX_CRIT_ERR) {
3344 		pf->oicr_err_reg |= oicr;
3345 		set_bit(ICE_AUX_ERR_PENDING, pf->state);
3346 		ena_mask &= ~ICE_AUX_CRIT_ERR;
3347 	}
3348 
3349 	/* Report any remaining unexpected interrupts */
3350 	oicr &= ena_mask;
3351 	if (oicr) {
3352 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3353 		/* If a critical error is pending there is no choice but to
3354 		 * reset the device.
3355 		 */
3356 		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3357 			    PFINT_OICR_ECC_ERR_M)) {
3358 			set_bit(ICE_PFR_REQ, pf->state);
3359 		}
3360 	}
3361 	ice_service_task_schedule(pf);
3362 	if (ret == IRQ_HANDLED)
3363 		ice_irq_dynamic_ena(hw, NULL, NULL);
3364 
3365 	return ret;
3366 }
3367 
3368 /**
3369  * ice_misc_intr_thread_fn - misc interrupt thread function
3370  * @irq: interrupt number
3371  * @data: pointer to the PF structure
3372  */
3373 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3374 {
3375 	struct ice_pf *pf = data;
3376 	struct ice_hw *hw;
3377 
3378 	hw = &pf->hw;
3379 
3380 	if (ice_is_reset_in_progress(pf->state))
3381 		goto skip_irq;
3382 
3383 	if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
3384 		/* Process outstanding Tx timestamps. If there is more work,
3385 		 * re-arm the interrupt to trigger again.
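		 * (The re-arm below works by writing the Tx-timestamp cause
		 * bit back into PFINT_OICR; the cause then fires again once
		 * the interrupt is re-enabled via ice_irq_dynamic_ena().)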
3386 		 */
3387 		if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
3388 			wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
3389 			ice_flush(hw);
3390 		}
3391 	}
3392 
3393 skip_irq:
3394 	ice_irq_dynamic_ena(hw, NULL, NULL);
3395 
3396 	return IRQ_HANDLED;
3397 }
3398 
3399 /**
3400  * ice_dis_ctrlq_interrupts - disable control queue interrupts
3401  * @hw: pointer to HW structure
3402  */
3403 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3404 {
3405 	/* disable Admin queue Interrupt causes */
3406 	wr32(hw, PFINT_FW_CTL,
3407 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3408 
3409 	/* disable Mailbox queue Interrupt causes */
3410 	wr32(hw, PFINT_MBX_CTL,
3411 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3412 	/* disable Sideband queue Interrupt causes */
3413 	wr32(hw, PFINT_SB_CTL,
3414 	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3415 
3416 	/* disable Other Interrupt Cause (OICR) Interrupt causes */
3417 	wr32(hw, PFINT_OICR_CTL,
3418 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3419 
3420 	ice_flush(hw);
3421 }
3422 
3423 /**
3424  * ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup
3425  * @pf: board private structure
3426  */
3427 static void ice_free_irq_msix_ll_ts(struct ice_pf *pf)
3428 {
3429 	int irq_num = pf->ll_ts_irq.virq;
3430 
3431 	synchronize_irq(irq_num);
3432 	devm_free_irq(ice_pf_to_dev(pf), irq_num, pf);
3433 
3434 	ice_free_irq(pf, pf->ll_ts_irq);
3435 }
3436 
3437 /**
3438  * ice_free_irq_msix_misc - Unroll misc vector setup
3439  * @pf: board private structure
3440  */
3441 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3442 {
3443 	int misc_irq_num = pf->oicr_irq.virq;
3444 	struct ice_hw *hw = &pf->hw;
3445 
3446 	ice_dis_ctrlq_interrupts(hw);
3447 
3448 	/* disable OICR interrupt */
3449 	wr32(hw, PFINT_OICR_ENA, 0);
3450 	ice_flush(hw);
3451 
3452 	synchronize_irq(misc_irq_num);
3453 	devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3454 
3455 	ice_free_irq(pf, pf->oicr_irq);
3456 	if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3457 		ice_free_irq_msix_ll_ts(pf);
3458 }
3459 
3460 /**
3461  * ice_ena_ctrlq_interrupts - enable control queue interrupts
3462  * @hw: pointer to HW structure
3463  * @reg_idx: HW vector index to associate the control queue interrupts with
3464  */
3465 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3466 {
3467 	u32 val;
3468 
3469 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3470 	       PFINT_OICR_CTL_CAUSE_ENA_M);
3471 	wr32(hw, PFINT_OICR_CTL, val);
3472 
3473 	/* enable Admin queue Interrupt causes */
3474 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3475 	       PFINT_FW_CTL_CAUSE_ENA_M);
3476 	wr32(hw, PFINT_FW_CTL, val);
3477 
3478 	/* enable Mailbox queue Interrupt causes */
3479 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3480 	       PFINT_MBX_CTL_CAUSE_ENA_M);
3481 	wr32(hw, PFINT_MBX_CTL, val);
3482 
3483 	if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) {
3484 		/* enable Sideband queue Interrupt causes */
3485 		val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3486 		       PFINT_SB_CTL_CAUSE_ENA_M);
3487 		wr32(hw, PFINT_SB_CTL, val);
3488 	}
3489 
3490 	ice_flush(hw);
3491 }
3492 
3493 /**
3494  * ice_req_irq_msix_misc - Setup the misc vector to handle non-queue events
3495  * @pf: board private structure
3496  *
3497  * This sets up the handler for MSIX 0, which is used to manage the
3498  * non-queue interrupts, e.g. AdminQ and errors. This is not used
3499  * when in MSI or Legacy interrupt mode.
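 *
 * The vector is requested with devm_request_threaded_irq(): the hard
 * handler ice_misc_intr() latches pending causes and returns
 * IRQ_WAKE_THREAD when Tx timestamp work remains, and
 * ice_misc_intr_thread_fn() then finishes that work in sleepable context.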
3500  */
3501 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3502 {
3503 	struct device *dev = ice_pf_to_dev(pf);
3504 	struct ice_hw *hw = &pf->hw;
3505 	u32 pf_intr_start_offset;
3506 	struct msi_map irq;
3507 	int err = 0;
3508 
3509 	if (!pf->int_name[0])
3510 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3511 			 dev_driver_string(dev), dev_name(dev));
3512 
3513 	if (!pf->int_name_ll_ts[0])
3514 		snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1,
3515 			 "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev));
3516 	/* Do not request IRQ but do enable OICR interrupt since settings are
3517 	 * lost during reset. Note that this function is called only during
3518 	 * rebuild path and not while reset is in progress.
3519 	 */
3520 	if (ice_is_reset_in_progress(pf->state))
3521 		goto skip_req_irq;
3522 
3523 	/* reserve one vector in irq_tracker for misc interrupts */
3524 	irq = ice_alloc_irq(pf, false);
3525 	if (irq.index < 0)
3526 		return irq.index;
3527 
3528 	pf->oicr_irq = irq;
3529 	err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3530 					ice_misc_intr_thread_fn, 0,
3531 					pf->int_name, pf);
3532 	if (err) {
3533 		dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3534 			pf->int_name, err);
3535 		ice_free_irq(pf, pf->oicr_irq);
3536 		return err;
3537 	}
3538 
3539 	/* reserve one vector in irq_tracker for ll_ts interrupt */
3540 	if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3541 		goto skip_req_irq;
3542 
3543 	irq = ice_alloc_irq(pf, false);
3544 	if (irq.index < 0)
3545 		return irq.index;
3546 
3547 	pf->ll_ts_irq = irq;
3548 	err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0,
3549 			       pf->int_name_ll_ts, pf);
3550 	if (err) {
3551 		dev_err(dev, "devm_request_irq for %s failed: %d\n",
3552 			pf->int_name_ll_ts, err);
3553 		ice_free_irq(pf, pf->ll_ts_irq);
3554 		return err;
3555 	}
3556 
3557 skip_req_irq:
3558 	ice_ena_misc_vector(pf);
3559 
3560 	ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3561 	/* This enables LL TS interrupt */
3562 	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3563 	if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3564 		wr32(hw, PFINT_SB_CTL,
3565 		     ((pf->ll_ts_irq.index + pf_intr_start_offset) &
3566 		      PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M);
3567 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3568 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3569 
3570 	ice_flush(hw);
3571 	ice_irq_dynamic_ena(hw, NULL, NULL);
3572 
3573 	return 0;
3574 }
3575 
3576 /**
3577  * ice_set_ops - set netdev and ethtool ops for the given netdev
3578  * @vsi: the VSI associated with the new netdev
3579  */
3580 static void ice_set_ops(struct ice_vsi *vsi)
3581 {
3582 	struct net_device *netdev = vsi->netdev;
3583 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3584 
3585 	if (ice_is_safe_mode(pf)) {
3586 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3587 		ice_set_ethtool_safe_mode_ops(netdev);
3588 		return;
3589 	}
3590 
3591 	netdev->netdev_ops = &ice_netdev_ops;
3592 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3593 	netdev->xdp_metadata_ops = &ice_xdp_md_ops;
3594 	ice_set_ethtool_ops(netdev);
3595 
3596 	if (vsi->type != ICE_VSI_PF)
3597 		return;
3598 
3599 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
3600 			       NETDEV_XDP_ACT_XSK_ZEROCOPY |
3601 			       NETDEV_XDP_ACT_RX_SG;
3602 	netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
3603 }
3604 
3605 /**
3606  * ice_set_netdev_features - set features for the given netdev
3607  * @netdev: netdev instance
3608  */
3609 void ice_set_netdev_features(struct net_device *netdev)
3610 {
3611 	struct
ice_pf *pf = ice_netdev_to_pf(netdev); 3612 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw); 3613 netdev_features_t csumo_features; 3614 netdev_features_t vlano_features; 3615 netdev_features_t dflt_features; 3616 netdev_features_t tso_features; 3617 3618 if (ice_is_safe_mode(pf)) { 3619 /* safe mode */ 3620 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; 3621 netdev->hw_features = netdev->features; 3622 return; 3623 } 3624 3625 dflt_features = NETIF_F_SG | 3626 NETIF_F_HIGHDMA | 3627 NETIF_F_NTUPLE | 3628 NETIF_F_RXHASH; 3629 3630 csumo_features = NETIF_F_RXCSUM | 3631 NETIF_F_IP_CSUM | 3632 NETIF_F_SCTP_CRC | 3633 NETIF_F_IPV6_CSUM; 3634 3635 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | 3636 NETIF_F_HW_VLAN_CTAG_TX | 3637 NETIF_F_HW_VLAN_CTAG_RX; 3638 3639 /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */ 3640 if (is_dvm_ena) 3641 vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER; 3642 3643 tso_features = NETIF_F_TSO | 3644 NETIF_F_TSO_ECN | 3645 NETIF_F_TSO6 | 3646 NETIF_F_GSO_GRE | 3647 NETIF_F_GSO_UDP_TUNNEL | 3648 NETIF_F_GSO_GRE_CSUM | 3649 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3650 NETIF_F_GSO_PARTIAL | 3651 NETIF_F_GSO_IPXIP4 | 3652 NETIF_F_GSO_IPXIP6 | 3653 NETIF_F_GSO_UDP_L4; 3654 3655 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | 3656 NETIF_F_GSO_GRE_CSUM; 3657 /* set features that user can change */ 3658 netdev->hw_features = dflt_features | csumo_features | 3659 vlano_features | tso_features; 3660 3661 /* add support for HW_CSUM on packets with MPLS header */ 3662 netdev->mpls_features = NETIF_F_HW_CSUM | 3663 NETIF_F_TSO | 3664 NETIF_F_TSO6; 3665 3666 /* enable features */ 3667 netdev->features |= netdev->hw_features; 3668 3669 netdev->hw_features |= NETIF_F_HW_TC; 3670 netdev->hw_features |= NETIF_F_LOOPBACK; 3671 3672 /* encap and VLAN devices inherit default, csumo and tso features */ 3673 netdev->hw_enc_features |= dflt_features | csumo_features | 3674 tso_features; 3675 netdev->vlan_features |= dflt_features | csumo_features | 3676 tso_features; 3677 3678 /* advertise support but don't enable by default since only one type of 3679 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one 3680 * type turns on the other has to be turned off. This is enforced by the 3681 * ice_fix_features() ndo callback. 3682 */ 3683 if (is_dvm_ena) 3684 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | 3685 NETIF_F_HW_VLAN_STAG_TX; 3686 3687 /* Leave CRC / FCS stripping enabled by default, but allow the value to 3688 * be changed at runtime 3689 */ 3690 netdev->hw_features |= NETIF_F_RXFCS; 3691 3692 netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE); 3693 } 3694 3695 /** 3696 * ice_fill_rss_lut - Fill the RSS lookup table with default values 3697 * @lut: Lookup table 3698 * @rss_table_size: Lookup table size 3699 * @rss_size: Range of queue number for hashing 3700 */ 3701 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) 3702 { 3703 u16 i; 3704 3705 for (i = 0; i < rss_table_size; i++) 3706 lut[i] = i % rss_size; 3707 } 3708 3709 /** 3710 * ice_pf_vsi_setup - Set up a PF VSI 3711 * @pf: board private structure 3712 * @pi: pointer to the port_info instance 3713 * 3714 * Returns pointer to the successfully allocated VSI software struct 3715 * on success, otherwise returns NULL on failure. 
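 *
 * A minimal, hypothetical call-site sketch (error handling elided):
 *
 *	struct ice_vsi *vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
 *
 *	if (!vsi)
 *		return -ENOMEM;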
3716 */ 3717 static struct ice_vsi * 3718 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3719 { 3720 struct ice_vsi_cfg_params params = {}; 3721 3722 params.type = ICE_VSI_PF; 3723 params.port_info = pi; 3724 params.flags = ICE_VSI_FLAG_INIT; 3725 3726 return ice_vsi_setup(pf, &params); 3727 } 3728 3729 static struct ice_vsi * 3730 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, 3731 struct ice_channel *ch) 3732 { 3733 struct ice_vsi_cfg_params params = {}; 3734 3735 params.type = ICE_VSI_CHNL; 3736 params.port_info = pi; 3737 params.ch = ch; 3738 params.flags = ICE_VSI_FLAG_INIT; 3739 3740 return ice_vsi_setup(pf, &params); 3741 } 3742 3743 /** 3744 * ice_ctrl_vsi_setup - Set up a control VSI 3745 * @pf: board private structure 3746 * @pi: pointer to the port_info instance 3747 * 3748 * Returns pointer to the successfully allocated VSI software struct 3749 * on success, otherwise returns NULL on failure. 3750 */ 3751 static struct ice_vsi * 3752 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3753 { 3754 struct ice_vsi_cfg_params params = {}; 3755 3756 params.type = ICE_VSI_CTRL; 3757 params.port_info = pi; 3758 params.flags = ICE_VSI_FLAG_INIT; 3759 3760 return ice_vsi_setup(pf, &params); 3761 } 3762 3763 /** 3764 * ice_lb_vsi_setup - Set up a loopback VSI 3765 * @pf: board private structure 3766 * @pi: pointer to the port_info instance 3767 * 3768 * Returns pointer to the successfully allocated VSI software struct 3769 * on success, otherwise returns NULL on failure. 3770 */ 3771 struct ice_vsi * 3772 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3773 { 3774 struct ice_vsi_cfg_params params = {}; 3775 3776 params.type = ICE_VSI_LB; 3777 params.port_info = pi; 3778 params.flags = ICE_VSI_FLAG_INIT; 3779 3780 return ice_vsi_setup(pf, &params); 3781 } 3782 3783 /** 3784 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload 3785 * @netdev: network interface to be adjusted 3786 * @proto: VLAN TPID 3787 * @vid: VLAN ID to be added 3788 * 3789 * net_device_ops implementation for adding VLAN IDs 3790 */ 3791 int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) 3792 { 3793 struct ice_netdev_priv *np = netdev_priv(netdev); 3794 struct ice_vsi_vlan_ops *vlan_ops; 3795 struct ice_vsi *vsi = np->vsi; 3796 struct ice_vlan vlan; 3797 int ret; 3798 3799 /* VLAN 0 is added by default during load/reset */ 3800 if (!vid) 3801 return 0; 3802 3803 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) 3804 usleep_range(1000, 2000); 3805 3806 /* Add multicast promisc rule for the VLAN ID to be added if 3807 * all-multicast is currently enabled. 3808 */ 3809 if (vsi->current_netdev_flags & IFF_ALLMULTI) { 3810 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, 3811 ICE_MCAST_VLAN_PROMISC_BITS, 3812 vid); 3813 if (ret) 3814 goto finish; 3815 } 3816 3817 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 3818 3819 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged 3820 * packets aren't pruned by the device's internal switch on Rx 3821 */ 3822 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0); 3823 ret = vlan_ops->add_vlan(vsi, &vlan); 3824 if (ret) 3825 goto finish; 3826 3827 /* If all-multicast is currently enabled and this VLAN ID is the only 3828 * one besides VLAN-0 we have to update look-up type of multicast promisc 3829 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3830 */ 3831 if ((vsi->current_netdev_flags & IFF_ALLMULTI) && 3832 ice_vsi_num_non_zero_vlans(vsi) == 1) { 3833 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, 3834 ICE_MCAST_PROMISC_BITS, 0); 3835 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, 3836 ICE_MCAST_VLAN_PROMISC_BITS, 0); 3837 } 3838 3839 finish: 3840 clear_bit(ICE_CFG_BUSY, vsi->state); 3841 3842 return ret; 3843 } 3844 3845 /** 3846 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload 3847 * @netdev: network interface to be adjusted 3848 * @proto: VLAN TPID 3849 * @vid: VLAN ID to be removed 3850 * 3851 * net_device_ops implementation for removing VLAN IDs 3852 */ 3853 int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) 3854 { 3855 struct ice_netdev_priv *np = netdev_priv(netdev); 3856 struct ice_vsi_vlan_ops *vlan_ops; 3857 struct ice_vsi *vsi = np->vsi; 3858 struct ice_vlan vlan; 3859 int ret; 3860 3861 /* don't allow removal of VLAN 0 */ 3862 if (!vid) 3863 return 0; 3864 3865 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) 3866 usleep_range(1000, 2000); 3867 3868 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, 3869 ICE_MCAST_VLAN_PROMISC_BITS, vid); 3870 if (ret) { 3871 netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n", 3872 vsi->vsi_num); 3873 vsi->current_netdev_flags |= IFF_ALLMULTI; 3874 } 3875 3876 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 3877 3878 /* Make sure VLAN delete is successful before updating VLAN 3879 * information 3880 */ 3881 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0); 3882 ret = vlan_ops->del_vlan(vsi, &vlan); 3883 if (ret) 3884 goto finish; 3885 3886 /* Remove multicast promisc rule for the removed VLAN ID if 3887 * all-multicast is enabled. 3888 */ 3889 if (vsi->current_netdev_flags & IFF_ALLMULTI) 3890 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, 3891 ICE_MCAST_VLAN_PROMISC_BITS, vid); 3892 3893 if (!ice_vsi_has_non_zero_vlans(vsi)) { 3894 /* Update look-up type of multicast promisc rule for VLAN 0 3895 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when 3896 * all-multicast is enabled and VLAN 0 is the only VLAN rule. 
3897 */ 3898 if (vsi->current_netdev_flags & IFF_ALLMULTI) { 3899 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, 3900 ICE_MCAST_VLAN_PROMISC_BITS, 3901 0); 3902 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, 3903 ICE_MCAST_PROMISC_BITS, 0); 3904 } 3905 } 3906 3907 finish: 3908 clear_bit(ICE_CFG_BUSY, vsi->state); 3909 3910 return ret; 3911 } 3912 3913 /** 3914 * ice_rep_indr_tc_block_unbind - unbind an indirect block and free its private data 3915 * @cb_priv: indirection block private data 3916 */ 3917 static void ice_rep_indr_tc_block_unbind(void *cb_priv) 3918 { 3919 struct ice_indr_block_priv *indr_priv = cb_priv; 3920 3921 list_del(&indr_priv->list); 3922 kfree(indr_priv); 3923 } 3924 3925 /** 3926 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications 3927 * @vsi: VSI struct which has the netdev 3928 */ 3929 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi) 3930 { 3931 struct ice_netdev_priv *np = netdev_priv(vsi->netdev); 3932 3933 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np, 3934 ice_rep_indr_tc_block_unbind); 3935 } 3936 3937 /** 3938 * ice_tc_indir_block_register - Register TC indirect block notifications 3939 * @vsi: VSI struct which has the netdev 3940 * 3941 * Returns 0 on success, negative value on failure 3942 */ 3943 static int ice_tc_indir_block_register(struct ice_vsi *vsi) 3944 { 3945 struct ice_netdev_priv *np; 3946 3947 if (!vsi || !vsi->netdev) 3948 return -EINVAL; 3949 3950 np = netdev_priv(vsi->netdev); 3951 3952 INIT_LIST_HEAD(&np->tc_indr_block_priv_list); 3953 return flow_indr_dev_register(ice_indr_setup_tc_cb, np); 3954 } 3955 3956 /** 3957 * ice_get_avail_q_count - Get count of available (unused) queues 3958 * @pf_qmap: bitmap to get queue use count from 3959 * @lock: pointer to a mutex that protects access to pf_qmap 3960 * @size: size of the bitmap 3961 */ 3962 static u16 3963 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) 3964 { 3965 unsigned long bit; 3966 u16 count = 0; 3967 3968 mutex_lock(lock); 3969 for_each_clear_bit(bit, pf_qmap, size) 3970 count++; 3971 mutex_unlock(lock); 3972 3973 return count; 3974 } 3975 3976 /** 3977 * ice_get_avail_txq_count - Get count of available Tx queues 3978 * @pf: pointer to an ice_pf instance 3979 */ 3980 u16 ice_get_avail_txq_count(struct ice_pf *pf) 3981 { 3982 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, 3983 pf->max_pf_txqs); 3984 } 3985 3986 /** 3987 * ice_get_avail_rxq_count - Get count of available Rx queues 3988 * @pf: pointer to an ice_pf instance 3989 */ 3990 u16 ice_get_avail_rxq_count(struct ice_pf *pf) 3991 { 3992 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, 3993 pf->max_pf_rxqs); 3994 } 3995 3996 /** 3997 * ice_deinit_pf - Unrolls initializations done by ice_init_pf 3998 * @pf: board private structure to de-initialize 3999 */ 4000 static void ice_deinit_pf(struct ice_pf *pf) 4001 { 4002 ice_service_task_stop(pf); 4003 mutex_destroy(&pf->lag_mutex); 4004 mutex_destroy(&pf->adev_mutex); 4005 mutex_destroy(&pf->sw_mutex); 4006 mutex_destroy(&pf->tc_mutex); 4007 mutex_destroy(&pf->avail_q_mutex); 4008 mutex_destroy(&pf->vfs.table_lock); 4009 4010 if (pf->avail_txqs) { 4011 bitmap_free(pf->avail_txqs); 4012 pf->avail_txqs = NULL; 4013 } 4014 4015 if (pf->avail_rxqs) { 4016 bitmap_free(pf->avail_rxqs); 4017 pf->avail_rxqs = NULL; 4018 } 4019 4020 if (pf->ptp.clock) 4021 ptp_clock_unregister(pf->ptp.clock); 4022 4023 xa_destroy(&pf->dyn_ports); 4024 xa_destroy(&pf->sf_nums); 4025 } 4026 4027 /** 4028 * ice_set_pf_caps - set PF's capability flags 4029 * @pf: pointer to
the PF instance 4030 */ 4031 static void ice_set_pf_caps(struct ice_pf *pf) 4032 { 4033 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; 4034 4035 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); 4036 if (func_caps->common_cap.rdma) 4037 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); 4038 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 4039 if (func_caps->common_cap.dcb) 4040 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 4041 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 4042 if (func_caps->common_cap.sr_iov_1_1) { 4043 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 4044 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs, 4045 ICE_MAX_SRIOV_VFS); 4046 } 4047 clear_bit(ICE_FLAG_RSS_ENA, pf->flags); 4048 if (func_caps->common_cap.rss_table_size) 4049 set_bit(ICE_FLAG_RSS_ENA, pf->flags); 4050 4051 clear_bit(ICE_FLAG_FD_ENA, pf->flags); 4052 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { 4053 u16 unused; 4054 4055 /* ctrl_vsi_idx will be set to a valid value when flow director 4056 * is set up by ice_init_fdir 4057 */ 4058 pf->ctrl_vsi_idx = ICE_NO_VSI; 4059 set_bit(ICE_FLAG_FD_ENA, pf->flags); 4060 /* force guaranteed filter pool for PF */ 4061 ice_alloc_fd_guar_item(&pf->hw, &unused, 4062 func_caps->fd_fltr_guar); 4063 /* force shared filter pool for PF */ 4064 ice_alloc_fd_shrd_item(&pf->hw, &unused, 4065 func_caps->fd_fltr_best_effort); 4066 } 4067 4068 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 4069 if (func_caps->common_cap.ieee_1588 && 4070 pf->hw.mac_type != ICE_MAC_E830) 4071 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 4072 4073 pf->max_pf_txqs = func_caps->common_cap.num_txq; 4074 pf->max_pf_rxqs = func_caps->common_cap.num_rxq; 4075 } 4076 4077 /** 4078 * ice_init_pf - Initialize general software structures (struct ice_pf) 4079 * @pf: board private structure to initialize 4080 */ 4081 static int ice_init_pf(struct ice_pf *pf) 4082 { 4083 ice_set_pf_caps(pf); 4084 4085 mutex_init(&pf->sw_mutex); 4086 mutex_init(&pf->tc_mutex); 4087 mutex_init(&pf->adev_mutex); 4088 mutex_init(&pf->lag_mutex); 4089 4090 INIT_HLIST_HEAD(&pf->aq_wait_list); 4091 spin_lock_init(&pf->aq_wait_lock); 4092 init_waitqueue_head(&pf->aq_wait_queue); 4093 4094 init_waitqueue_head(&pf->reset_wait_queue); 4095 4096 /* setup service timer and periodic service task */ 4097 timer_setup(&pf->serv_tmr, ice_service_timer, 0); 4098 pf->serv_tmr_period = HZ; 4099 INIT_WORK(&pf->serv_task, ice_service_task); 4100 clear_bit(ICE_SERVICE_SCHED, pf->state); 4101 4102 mutex_init(&pf->avail_q_mutex); 4103 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); 4104 if (!pf->avail_txqs) 4105 return -ENOMEM; 4106 4107 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); 4108 if (!pf->avail_rxqs) { 4109 bitmap_free(pf->avail_txqs); 4110 pf->avail_txqs = NULL; 4111 return -ENOMEM; 4112 } 4113 4114 mutex_init(&pf->vfs.table_lock); 4115 hash_init(pf->vfs.table); 4116 if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) 4117 wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH, 4118 ICE_MBX_OVERFLOW_WATERMARK); 4119 else 4120 ice_mbx_init_snapshot(&pf->hw); 4121 4122 xa_init(&pf->dyn_ports); 4123 xa_init(&pf->sf_nums); 4124 4125 return 0; 4126 } 4127 4128 /** 4129 * ice_is_wol_supported - check if WoL is supported 4130 * @hw: pointer to hardware info 4131 * 4132 * Check if WoL is supported based on the HW configuration.
4133 * Returns true if NVM supports and enables WoL for this port, false otherwise 4134 */ 4135 bool ice_is_wol_supported(struct ice_hw *hw) 4136 { 4137 u16 wol_ctrl; 4138 4139 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control 4140 * word) indicates WoL is not supported on the corresponding PF ID. 4141 */ 4142 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) 4143 return false; 4144 4145 return !(BIT(hw->port_info->lport) & wol_ctrl); 4146 } 4147 4148 /** 4149 * ice_vsi_recfg_qs - Change the number of queues on a VSI 4150 * @vsi: VSI being changed 4151 * @new_rx: new number of Rx queues 4152 * @new_tx: new number of Tx queues 4153 * @locked: is adev device_lock held 4154 * 4155 * Only change the number of queues if new_tx or new_rx is non-zero. 4156 * 4157 * Returns 0 on success, negative value on failure. 4158 */ 4159 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) 4160 { 4161 struct ice_pf *pf = vsi->back; 4162 int i, err = 0, timeout = 50; 4163 4164 if (!new_rx && !new_tx) 4165 return -EINVAL; 4166 4167 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { 4168 timeout--; 4169 if (!timeout) 4170 return -EBUSY; 4171 usleep_range(1000, 2000); 4172 } 4173 4174 if (new_tx) 4175 vsi->req_txq = (u16)new_tx; 4176 if (new_rx) 4177 vsi->req_rxq = (u16)new_rx; 4178 4179 /* set for the next time the netdev is started */ 4180 if (!netif_running(vsi->netdev)) { 4181 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); 4182 if (err) 4183 goto rebuild_err; 4184 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); 4185 goto done; 4186 } 4187 4188 ice_vsi_close(vsi); 4189 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); 4190 if (err) 4191 goto rebuild_err; 4192 4193 ice_for_each_traffic_class(i) { 4194 if (vsi->tc_cfg.ena_tc & BIT(i)) 4195 netdev_set_tc_queue(vsi->netdev, 4196 vsi->tc_cfg.tc_info[i].netdev_tc, 4197 vsi->tc_cfg.tc_info[i].qcount_tx, 4198 vsi->tc_cfg.tc_info[i].qoffset); 4199 } 4200 ice_pf_dcb_recfg(pf, locked); 4201 ice_vsi_open(vsi); 4202 goto done; 4203 4204 rebuild_err: 4205 dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n", 4206 err); 4207 done: 4208 clear_bit(ICE_CFG_BUSY, pf->state); 4209 return err; 4210 } 4211 4212 /** 4213 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode 4214 * @pf: PF to configure 4215 * 4216 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF 4217 * VSI can still Tx/Rx VLAN tagged packets.
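 * This is done by disabling VLAN pruning and anti-spoof, allowing all VLANs
 * on Tx and leaving Rx VLAN tags unstripped, as the function body below shows.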
4218 */ 4219 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) 4220 { 4221 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4222 struct ice_vsi_ctx *ctxt; 4223 struct ice_hw *hw; 4224 int status; 4225 4226 if (!vsi) 4227 return; 4228 4229 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 4230 if (!ctxt) 4231 return; 4232 4233 hw = &pf->hw; 4234 ctxt->info = vsi->info; 4235 4236 ctxt->info.valid_sections = 4237 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | 4238 ICE_AQ_VSI_PROP_SECURITY_VALID | 4239 ICE_AQ_VSI_PROP_SW_VALID); 4240 4241 /* disable VLAN anti-spoof */ 4242 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 4243 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 4244 4245 /* disable VLAN pruning and keep all other settings */ 4246 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 4247 4248 /* allow all VLANs on Tx and don't strip on Rx */ 4249 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL | 4250 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING; 4251 4252 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 4253 if (status) { 4254 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", 4255 status, ice_aq_str(hw->adminq.sq_last_status)); 4256 } else { 4257 vsi->info.sec_flags = ctxt->info.sec_flags; 4258 vsi->info.sw_flags2 = ctxt->info.sw_flags2; 4259 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags; 4260 } 4261 4262 kfree(ctxt); 4263 } 4264 4265 /** 4266 * ice_log_pkg_init - log result of DDP package load 4267 * @hw: pointer to hardware info 4268 * @state: state of package load 4269 */ 4270 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state) 4271 { 4272 struct ice_pf *pf = hw->back; 4273 struct device *dev; 4274 4275 dev = ice_pf_to_dev(pf); 4276 4277 switch (state) { 4278 case ICE_DDP_PKG_SUCCESS: 4279 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", 4280 hw->active_pkg_name, 4281 hw->active_pkg_ver.major, 4282 hw->active_pkg_ver.minor, 4283 hw->active_pkg_ver.update, 4284 hw->active_pkg_ver.draft); 4285 break; 4286 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: 4287 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", 4288 hw->active_pkg_name, 4289 hw->active_pkg_ver.major, 4290 hw->active_pkg_ver.minor, 4291 hw->active_pkg_ver.update, 4292 hw->active_pkg_ver.draft); 4293 break; 4294 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED: 4295 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 4296 hw->active_pkg_name, 4297 hw->active_pkg_ver.major, 4298 hw->active_pkg_ver.minor, 4299 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 4300 break; 4301 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: 4302 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", 4303 hw->active_pkg_name, 4304 hw->active_pkg_ver.major, 4305 hw->active_pkg_ver.minor, 4306 hw->active_pkg_ver.update, 4307 hw->active_pkg_ver.draft, 4308 hw->pkg_name, 4309 hw->pkg_ver.major, 4310 hw->pkg_ver.minor, 4311 hw->pkg_ver.update, 4312 hw->pkg_ver.draft); 4313 break; 4314 case ICE_DDP_PKG_FW_MISMATCH: 4315 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. 
Entering safe mode.\n"); 4316 break; 4317 case ICE_DDP_PKG_INVALID_FILE: 4318 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); 4319 break; 4320 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH: 4321 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); 4322 break; 4323 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW: 4324 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", 4325 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 4326 break; 4327 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID: 4328 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); 4329 break; 4330 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW: 4331 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); 4332 break; 4333 case ICE_DDP_PKG_LOAD_ERROR: 4334 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); 4335 /* poll for reset to complete */ 4336 if (ice_check_reset(hw)) 4337 dev_err(dev, "Error resetting device. Please reload the driver\n"); 4338 break; 4339 case ICE_DDP_PKG_ERR: 4340 default: 4341 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n"); 4342 break; 4343 } 4344 } 4345 4346 /** 4347 * ice_load_pkg - load/reload the DDP Package file 4348 * @firmware: firmware structure when firmware requested or NULL for reload 4349 * @pf: pointer to the PF instance 4350 * 4351 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and 4352 * initialize HW tables. 4353 */ 4354 static void 4355 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) 4356 { 4357 enum ice_ddp_state state = ICE_DDP_PKG_ERR; 4358 struct device *dev = ice_pf_to_dev(pf); 4359 struct ice_hw *hw = &pf->hw; 4360 4361 /* Load DDP Package */ 4362 if (firmware && !hw->pkg_copy) { 4363 state = ice_copy_and_init_pkg(hw, firmware->data, 4364 firmware->size); 4365 ice_log_pkg_init(hw, state); 4366 } else if (!firmware && hw->pkg_copy) { 4367 /* Reload package during rebuild after CORER/GLOBR reset */ 4368 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); 4369 ice_log_pkg_init(hw, state); 4370 } else { 4371 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); 4372 } 4373 4374 if (!ice_is_init_pkg_successful(state)) { 4375 /* Safe Mode */ 4376 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 4377 return; 4378 } 4379 4380 /* Successful download package is the precondition for advanced 4381 * features, hence setting the ICE_FLAG_ADV_FEATURES flag 4382 */ 4383 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 4384 } 4385 4386 /** 4387 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines 4388 * @pf: pointer to the PF structure 4389 * 4390 * There is no error returned here because the driver should be able to handle 4391 * 128 Byte cache lines, so we only print a warning in case issues are seen, 4392 * specifically with Tx. 
4393 */ 4394 static void ice_verify_cacheline_size(struct ice_pf *pf) 4395 { 4396 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) 4397 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", 4398 ICE_CACHE_LINE_BYTES); 4399 } 4400 4401 /** 4402 * ice_send_version - update firmware with driver version 4403 * @pf: PF struct 4404 * 4405 * Returns 0 on success, else error code 4406 */ 4407 static int ice_send_version(struct ice_pf *pf) 4408 { 4409 struct ice_driver_ver dv; 4410 4411 dv.major_ver = 0xff; 4412 dv.minor_ver = 0xff; 4413 dv.build_ver = 0xff; 4414 dv.subbuild_ver = 0; 4415 strscpy((char *)dv.driver_string, UTS_RELEASE, 4416 sizeof(dv.driver_string)); 4417 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); 4418 } 4419 4420 /** 4421 * ice_init_fdir - Initialize flow director VSI and configuration 4422 * @pf: pointer to the PF instance 4423 * 4424 * returns 0 on success, negative on error 4425 */ 4426 static int ice_init_fdir(struct ice_pf *pf) 4427 { 4428 struct device *dev = ice_pf_to_dev(pf); 4429 struct ice_vsi *ctrl_vsi; 4430 int err; 4431 4432 /* Side Band Flow Director needs to have a control VSI. 4433 * Allocate it and store it in the PF. 4434 */ 4435 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); 4436 if (!ctrl_vsi) { 4437 dev_dbg(dev, "could not create control VSI\n"); 4438 return -ENOMEM; 4439 } 4440 4441 err = ice_vsi_open_ctrl(ctrl_vsi); 4442 if (err) { 4443 dev_dbg(dev, "could not open control VSI\n"); 4444 goto err_vsi_open; 4445 } 4446 4447 mutex_init(&pf->hw.fdir_fltr_lock); 4448 4449 err = ice_fdir_create_dflt_rules(pf); 4450 if (err) 4451 goto err_fdir_rule; 4452 4453 return 0; 4454 4455 err_fdir_rule: 4456 ice_fdir_release_flows(&pf->hw); 4457 ice_vsi_close(ctrl_vsi); 4458 err_vsi_open: 4459 ice_vsi_release(ctrl_vsi); 4460 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { 4461 pf->vsi[pf->ctrl_vsi_idx] = NULL; 4462 pf->ctrl_vsi_idx = ICE_NO_VSI; 4463 } 4464 return err; 4465 } 4466 4467 static void ice_deinit_fdir(struct ice_pf *pf) 4468 { 4469 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf); 4470 4471 if (!vsi) 4472 return; 4473 4474 ice_vsi_manage_fdir(vsi, false); 4475 ice_vsi_release(vsi); 4476 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { 4477 pf->vsi[pf->ctrl_vsi_idx] = NULL; 4478 pf->ctrl_vsi_idx = ICE_NO_VSI; 4479 } 4480 4481 mutex_destroy(&pf->hw.fdir_fltr_lock); 4482 } 4483 4484 /** 4485 * ice_get_opt_fw_name - return optional firmware file name or NULL 4486 * @pf: pointer to the PF instance 4487 */ 4488 static char *ice_get_opt_fw_name(struct ice_pf *pf) 4489 { 4490 /* Optional firmware name same as default with additional dash 4491 * followed by an EUI-64 identifier (PCIe Device Serial Number) 4492 */ 4493 struct pci_dev *pdev = pf->pdev; 4494 char *opt_fw_filename; 4495 u64 dsn; 4496 4497 /* Determine the name of the optional file using the DSN (two 4498 * dwords following the start of the DSN Capability). 4499 */ 4500 dsn = pci_get_dsn(pdev); 4501 if (!dsn) 4502 return NULL; 4503 4504 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); 4505 if (!opt_fw_filename) 4506 return NULL; 4507 4508 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg", 4509 ICE_DDP_PKG_PATH, dsn); 4510 4511 return opt_fw_filename; 4512 } 4513 4514 /** 4515 * ice_request_fw - Request the DDP package firmware file 4516 * @pf: pointer to the PF instance 4517 * @firmware: double pointer to firmware struct 4518 * 4519 * Return: zero when successful, negative values otherwise.
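 * On success, the caller is responsible for releasing the acquired firmware
 * with release_firmware().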
4520 */ 4521 static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware) 4522 { 4523 char *opt_fw_filename = ice_get_opt_fw_name(pf); 4524 struct device *dev = ice_pf_to_dev(pf); 4525 int err = 0; 4526 4527 /* The optional device-specific DDP package (if present) overrides the 4528 * default DDP package file. The kernel logs a debug message if the file 4529 * doesn't exist, and warning messages for other errors. 4530 */ 4531 if (opt_fw_filename) { 4532 err = firmware_request_nowarn(firmware, opt_fw_filename, dev); 4533 kfree(opt_fw_filename); 4534 if (!err) 4535 return err; 4536 } 4537 err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev); 4538 if (err) 4539 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); 4540 4541 return err; 4542 } 4543 4544 /** 4545 * ice_init_tx_topology - performs Tx topology initialization 4546 * @hw: pointer to the hardware structure 4547 * @firmware: pointer to firmware structure 4548 * 4549 * Return: zero when init was successful, negative values otherwise. 4550 */ 4551 static int 4552 ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware) 4553 { 4554 u8 num_tx_sched_layers = hw->num_tx_sched_layers; 4555 struct ice_pf *pf = hw->back; 4556 struct device *dev; 4557 int err; 4558 4559 dev = ice_pf_to_dev(pf); 4560 err = ice_cfg_tx_topo(hw, firmware->data, firmware->size); 4561 if (!err) { 4562 if (hw->num_tx_sched_layers > num_tx_sched_layers) 4563 dev_info(dev, "Tx scheduling layers switching feature disabled\n"); 4564 else 4565 dev_info(dev, "Tx scheduling layers switching feature enabled\n"); 4566 /* if there was a change in topology, ice_cfg_tx_topo triggered 4567 * a CORER and we need to re-init hw 4568 */ 4569 ice_deinit_hw(hw); 4570 err = ice_init_hw(hw); 4571 4572 return err; 4573 } else if (err == -EIO) { 4574 dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n"); 4575 } 4576 4577 return 0; 4578 } 4579 4580 /** 4581 * ice_init_supported_rxdids - Initialize supported Rx descriptor IDs 4582 * @hw: pointer to the hardware structure 4583 * @pf: pointer to pf structure 4584 * 4585 * The pf->supported_rxdids bitmap is used to indicate to VFs which descriptor 4586 * formats the PF hardware supports. The exact list of supported RXDIDs 4587 * depends on the loaded DDP package. The IDs can be determined by reading the 4588 * GLFLXP_RXDID_FLAGS register after the DDP package is loaded. 4589 * 4590 * Note that the legacy 32-byte RXDID 1 (ICE_RXDID_LEGACY_1) is always 4591 * supported but is not listed in the DDP package. The 16-byte legacy 4592 * descriptor is never supported by VFs. 4593 */ 4594 static void ice_init_supported_rxdids(struct ice_hw *hw, struct ice_pf *pf) 4595 { 4596 pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1); 4597 4598 for (int i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) { 4599 u32 regval; 4600 4601 regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0)); 4602 if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) 4603 & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) 4604 pf->supported_rxdids |= BIT(i); 4605 } 4606 } 4607 4608 /** 4609 * ice_init_ddp_config - DDP related configuration 4610 * @hw: pointer to the hardware structure 4611 * @pf: pointer to pf structure 4612 * 4613 * This function loads the DDP file from disk, then initializes the Tx 4614 * topology. At the end, the DDP package is loaded onto the card. 4615 * 4616 * Return: zero when init was successful, negative values otherwise.
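 * Note that a package load failure inside ice_load_pkg() is not propagated
 * here; the driver continues in Safe Mode instead.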
4617 */ 4618 static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf) 4619 { 4620 struct device *dev = ice_pf_to_dev(pf); 4621 const struct firmware *firmware = NULL; 4622 int err; 4623 4624 err = ice_request_fw(pf, &firmware); 4625 if (err) { 4626 dev_err(dev, "Failed to request FW: %d\n", err); 4627 return err; 4628 } 4629 4630 err = ice_init_tx_topology(hw, firmware); 4631 if (err) { 4632 dev_err(dev, "Failed to initialize Tx topology: %d\n", 4633 err); 4634 release_firmware(firmware); 4635 return err; 4636 } 4637 4638 /* Download firmware to device */ 4639 ice_load_pkg(firmware, pf); 4640 release_firmware(firmware); 4641 4642 /* Initialize the supported Rx descriptor IDs after loading DDP */ 4643 ice_init_supported_rxdids(hw, pf); 4644 4645 return 0; 4646 } 4647 4648 /** 4649 * ice_print_wake_reason - show the wake up cause in the log 4650 * @pf: pointer to the PF struct 4651 */ 4652 static void ice_print_wake_reason(struct ice_pf *pf) 4653 { 4654 u32 wus = pf->wakeup_reason; 4655 const char *wake_str; 4656 4657 /* if no wake event, nothing to print */ 4658 if (!wus) 4659 return; 4660 4661 if (wus & PFPM_WUS_LNKC_M) 4662 wake_str = "Link\n"; 4663 else if (wus & PFPM_WUS_MAG_M) 4664 wake_str = "Magic Packet\n"; 4665 else if (wus & PFPM_WUS_MNG_M) 4666 wake_str = "Management\n"; 4667 else if (wus & PFPM_WUS_FW_RST_WK_M) 4668 wake_str = "Firmware Reset\n"; 4669 else 4670 wake_str = "Unknown\n"; 4671 4672 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); 4673 } 4674 4675 /** 4676 * ice_pf_fwlog_update_module - update the log level of one module 4677 * @pf: pointer to the PF struct 4678 * @log_level: log level to use for @module 4679 * @module: module to update 4680 */ 4681 void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module) 4682 { 4683 struct ice_hw *hw = &pf->hw; 4684 4685 hw->fwlog_cfg.module_entries[module].log_level = log_level; 4686 } 4687 4688 /** 4689 * ice_register_netdev - register netdev 4690 * @vsi: pointer to the VSI struct 4691 */ 4692 static int ice_register_netdev(struct ice_vsi *vsi) 4693 { 4694 int err; 4695 4696 if (!vsi || !vsi->netdev) 4697 return -EIO; 4698 4699 err = register_netdev(vsi->netdev); 4700 if (err) 4701 return err; 4702 4703 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 4704 netif_carrier_off(vsi->netdev); 4705 netif_tx_stop_all_queues(vsi->netdev); 4706 4707 return 0; 4708 } 4709 4710 static void ice_unregister_netdev(struct ice_vsi *vsi) 4711 { 4712 if (!vsi || !vsi->netdev) 4713 return; 4714 4715 unregister_netdev(vsi->netdev); 4716 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 4717 } 4718 4719 /** 4720 * ice_cfg_netdev - Allocate, configure and register a netdev 4721 * @vsi: the VSI associated with the new netdev 4722 * 4723 * Returns 0 on success, negative value on failure 4724 */ 4725 static int ice_cfg_netdev(struct ice_vsi *vsi) 4726 { 4727 struct ice_netdev_priv *np; 4728 struct net_device *netdev; 4729 u8 mac_addr[ETH_ALEN]; 4730 4731 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, 4732 vsi->alloc_rxq); 4733 if (!netdev) 4734 return -ENOMEM; 4735 4736 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 4737 vsi->netdev = netdev; 4738 np = netdev_priv(netdev); 4739 np->vsi = vsi; 4740 4741 ice_set_netdev_features(netdev); 4742 ice_set_ops(vsi); 4743 4744 if (vsi->type == ICE_VSI_PF) { 4745 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); 4746 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 4747 eth_hw_addr_set(netdev, mac_addr); 4748 } 4749 4750 netdev->priv_flags |= IFF_UNICAST_FLT;
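/* IFF_UNICAST_FLT set above tells the core that the device can filter
 * additional unicast addresses in HW, so the stack does not have to fall
 * back to promiscuous mode when secondary unicast addresses are added.
 */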
4751 4752 /* Setup netdev TC information */ 4753 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); 4754 4755 netdev->max_mtu = ICE_MAX_MTU; 4756 4757 return 0; 4758 } 4759 4760 static void ice_decfg_netdev(struct ice_vsi *vsi) 4761 { 4762 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 4763 free_netdev(vsi->netdev); 4764 vsi->netdev = NULL; 4765 } 4766 4767 int ice_init_dev(struct ice_pf *pf) 4768 { 4769 struct device *dev = ice_pf_to_dev(pf); 4770 struct ice_hw *hw = &pf->hw; 4771 int err; 4772 4773 ice_init_feature_support(pf); 4774 4775 err = ice_init_ddp_config(hw, pf); 4776 4777 /* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be 4778 * set in pf->flags, which will cause ice_is_safe_mode to return 4779 * true 4780 */ 4781 if (err || ice_is_safe_mode(pf)) { 4782 /* we already got function/device capabilities but these don't 4783 * reflect what the driver needs to do in safe mode. Instead of 4784 * adding conditional logic everywhere to ignore these 4785 * device/function capabilities, override them. 4786 */ 4787 ice_set_safe_mode_caps(hw); 4788 } 4789 4790 err = ice_init_pf(pf); 4791 if (err) { 4792 dev_err(dev, "ice_init_pf failed: %d\n", err); 4793 return err; 4794 } 4795 4796 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; 4797 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; 4798 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; 4799 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; 4800 if (pf->hw.tnl.valid_count[TNL_VXLAN]) { 4801 pf->hw.udp_tunnel_nic.tables[0].n_entries = 4802 pf->hw.tnl.valid_count[TNL_VXLAN]; 4803 pf->hw.udp_tunnel_nic.tables[0].tunnel_types = 4804 UDP_TUNNEL_TYPE_VXLAN; 4805 } 4806 if (pf->hw.tnl.valid_count[TNL_GENEVE]) { 4807 pf->hw.udp_tunnel_nic.tables[1].n_entries = 4808 pf->hw.tnl.valid_count[TNL_GENEVE]; 4809 pf->hw.udp_tunnel_nic.tables[1].tunnel_types = 4810 UDP_TUNNEL_TYPE_GENEVE; 4811 } 4812 4813 err = ice_init_interrupt_scheme(pf); 4814 if (err) { 4815 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); 4816 err = -EIO; 4817 goto unroll_pf_init; 4818 } 4819 4820 /* In case of MSIX we are going to set up the misc vector right here 4821 * to handle admin queue events etc. In case of legacy and MSI 4822 * the misc functionality and queue processing are combined in 4823 * the same vector and that gets set up at open. 4824 */ 4825 err = ice_req_irq_msix_misc(pf); 4826 if (err) { 4827 dev_err(dev, "setup of misc vector failed: %d\n", err); 4828 goto unroll_irq_scheme_init; 4829 } 4830 4831 return 0; 4832 4833 unroll_irq_scheme_init: 4834 ice_clear_interrupt_scheme(pf); 4835 unroll_pf_init: 4836 ice_deinit_pf(pf); 4837 return err; 4838 } 4839 4840 void ice_deinit_dev(struct ice_pf *pf) 4841 { 4842 ice_free_irq_msix_misc(pf); 4843 ice_deinit_pf(pf); 4844 ice_deinit_hw(&pf->hw); 4845 4846 /* Service task is already stopped, so call reset directly.
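 * rather than scheduling it for the service task to handle.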
*/ 4847 ice_reset(&pf->hw, ICE_RESET_PFR); 4848 pci_wait_for_pending_transaction(pf->pdev); 4849 ice_clear_interrupt_scheme(pf); 4850 } 4851 4852 static void ice_init_features(struct ice_pf *pf) 4853 { 4854 struct device *dev = ice_pf_to_dev(pf); 4855 4856 if (ice_is_safe_mode(pf)) 4857 return; 4858 4859 /* initialize DDP driven features */ 4860 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 4861 ice_ptp_init(pf); 4862 4863 if (ice_is_feature_supported(pf, ICE_F_GNSS)) 4864 ice_gnss_init(pf); 4865 4866 if (ice_is_feature_supported(pf, ICE_F_CGU) || 4867 ice_is_feature_supported(pf, ICE_F_PHY_RCLK)) 4868 ice_dpll_init(pf); 4869 4870 /* Note: Flow director init failure is non-fatal to load */ 4871 if (ice_init_fdir(pf)) 4872 dev_err(dev, "could not initialize flow director\n"); 4873 4874 /* Note: DCB init failure is non-fatal to load */ 4875 if (ice_init_pf_dcb(pf, false)) { 4876 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 4877 clear_bit(ICE_FLAG_DCB_ENA, pf->flags); 4878 } else { 4879 ice_cfg_lldp_mib_change(&pf->hw, true); 4880 } 4881 4882 if (ice_init_lag(pf)) 4883 dev_warn(dev, "Failed to init link aggregation support\n"); 4884 4885 ice_hwmon_init(pf); 4886 } 4887 4888 static void ice_deinit_features(struct ice_pf *pf) 4889 { 4890 if (ice_is_safe_mode(pf)) 4891 return; 4892 4893 ice_deinit_lag(pf); 4894 if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) 4895 ice_cfg_lldp_mib_change(&pf->hw, false); 4896 ice_deinit_fdir(pf); 4897 if (ice_is_feature_supported(pf, ICE_F_GNSS)) 4898 ice_gnss_exit(pf); 4899 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 4900 ice_ptp_release(pf); 4901 if (test_bit(ICE_FLAG_DPLL, pf->flags)) 4902 ice_dpll_deinit(pf); 4903 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) 4904 xa_destroy(&pf->eswitch.reprs); 4905 } 4906 4907 static void ice_init_wakeup(struct ice_pf *pf) 4908 { 4909 /* Save wakeup reason register for later use */ 4910 pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS); 4911 4912 /* check for a power management event */ 4913 ice_print_wake_reason(pf); 4914 4915 /* clear wake status, all bits */ 4916 wr32(&pf->hw, PFPM_WUS, U32_MAX); 4917 4918 /* Disable WoL at init, wait for user to enable */ 4919 device_set_wakeup_enable(ice_pf_to_dev(pf), false); 4920 } 4921 4922 static int ice_init_link(struct ice_pf *pf) 4923 { 4924 struct device *dev = ice_pf_to_dev(pf); 4925 int err; 4926 4927 err = ice_init_link_events(pf->hw.port_info); 4928 if (err) { 4929 dev_err(dev, "ice_init_link_events failed: %d\n", err); 4930 return err; 4931 } 4932 4933 /* not a fatal error if this fails */ 4934 err = ice_init_nvm_phy_type(pf->hw.port_info); 4935 if (err) 4936 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); 4937 4938 /* not a fatal error if this fails */ 4939 err = ice_update_link_info(pf->hw.port_info); 4940 if (err) 4941 dev_err(dev, "ice_update_link_info failed: %d\n", err); 4942 4943 ice_init_link_dflt_override(pf->hw.port_info); 4944 4945 ice_check_link_cfg_err(pf, 4946 pf->hw.port_info->phy.link_info.link_cfg_err); 4947 4948 /* if media available, initialize PHY settings */ 4949 if (pf->hw.port_info->phy.link_info.link_info & 4950 ICE_AQ_MEDIA_AVAILABLE) { 4951 /* not a fatal error if this fails */ 4952 err = ice_init_phy_user_cfg(pf->hw.port_info); 4953 if (err) 4954 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); 4955 4956 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { 4957 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4958 4959 if (vsi) 4960 ice_configure_phy(vsi); 4961 } 4962 } else { 4963 set_bit(ICE_FLAG_NO_MEDIA, 
pf->flags); 4964 } 4965 4966 return err; 4967 } 4968 4969 static int ice_init_pf_sw(struct ice_pf *pf) 4970 { 4971 bool dvm = ice_is_dvm_ena(&pf->hw); 4972 struct ice_vsi *vsi; 4973 int err; 4974 4975 /* create switch struct for the switch element created by FW on boot */ 4976 pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL); 4977 if (!pf->first_sw) 4978 return -ENOMEM; 4979 4980 if (pf->hw.evb_veb) 4981 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; 4982 else 4983 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; 4984 4985 pf->first_sw->pf = pf; 4986 4987 /* record the sw_id available for later use */ 4988 pf->first_sw->sw_id = pf->hw.port_info->sw_id; 4989 4990 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); 4991 if (err) 4992 goto err_aq_set_port_params; 4993 4994 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); 4995 if (!vsi) { 4996 err = -ENOMEM; 4997 goto err_pf_vsi_setup; 4998 } 4999 5000 return 0; 5001 5002 err_pf_vsi_setup: 5003 err_aq_set_port_params: 5004 kfree(pf->first_sw); 5005 return err; 5006 } 5007 5008 static void ice_deinit_pf_sw(struct ice_pf *pf) 5009 { 5010 struct ice_vsi *vsi = ice_get_main_vsi(pf); 5011 5012 if (!vsi) 5013 return; 5014 5015 ice_vsi_release(vsi); 5016 kfree(pf->first_sw); 5017 } 5018 5019 static int ice_alloc_vsis(struct ice_pf *pf) 5020 { 5021 struct device *dev = ice_pf_to_dev(pf); 5022 5023 pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi; 5024 if (!pf->num_alloc_vsi) 5025 return -EIO; 5026 5027 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { 5028 dev_warn(dev, 5029 "limiting the VSI count due to UDP tunnel limitation %d > %d\n", 5030 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); 5031 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; 5032 } 5033 5034 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), 5035 GFP_KERNEL); 5036 if (!pf->vsi) 5037 return -ENOMEM; 5038 5039 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi, 5040 sizeof(*pf->vsi_stats), GFP_KERNEL); 5041 if (!pf->vsi_stats) { 5042 devm_kfree(dev, pf->vsi); 5043 return -ENOMEM; 5044 } 5045 5046 return 0; 5047 } 5048 5049 static void ice_dealloc_vsis(struct ice_pf *pf) 5050 { 5051 devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats); 5052 pf->vsi_stats = NULL; 5053 5054 pf->num_alloc_vsi = 0; 5055 devm_kfree(ice_pf_to_dev(pf), pf->vsi); 5056 pf->vsi = NULL; 5057 } 5058 5059 static int ice_init_devlink(struct ice_pf *pf) 5060 { 5061 int err; 5062 5063 err = ice_devlink_register_params(pf); 5064 if (err) 5065 return err; 5066 5067 ice_devlink_init_regions(pf); 5068 ice_health_init(pf); 5069 ice_devlink_register(pf); 5070 5071 return 0; 5072 } 5073 5074 static void ice_deinit_devlink(struct ice_pf *pf) 5075 { 5076 ice_devlink_unregister(pf); 5077 ice_health_deinit(pf); 5078 ice_devlink_destroy_regions(pf); 5079 ice_devlink_unregister_params(pf); 5080 } 5081 5082 static int ice_init(struct ice_pf *pf) 5083 { 5084 int err; 5085 5086 err = ice_init_dev(pf); 5087 if (err) 5088 return err; 5089 5090 err = ice_alloc_vsis(pf); 5091 if (err) 5092 goto err_alloc_vsis; 5093 5094 err = ice_init_pf_sw(pf); 5095 if (err) 5096 goto err_init_pf_sw; 5097 5098 ice_init_wakeup(pf); 5099 5100 err = ice_init_link(pf); 5101 if (err) 5102 goto err_init_link; 5103 5104 err = ice_send_version(pf); 5105 if (err) 5106 goto err_init_link; 5107 5108 ice_verify_cacheline_size(pf); 5109 5110 if (ice_is_safe_mode(pf)) 5111 ice_set_safe_mode_vlan_cfg(pf); 5112 else 5113 /* print PCI link speed and width */ 5114 pcie_print_link_status(pf->pdev); 5115 5116 /* ready to go, so 
clear down state bit */ 5117 clear_bit(ICE_DOWN, pf->state); 5118 clear_bit(ICE_SERVICE_DIS, pf->state); 5119 5120 /* since everything is good, start the service timer */ 5121 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 5122 5123 return 0; 5124 5125 err_init_link: 5126 ice_deinit_pf_sw(pf); 5127 err_init_pf_sw: 5128 ice_dealloc_vsis(pf); 5129 err_alloc_vsis: 5130 ice_deinit_dev(pf); 5131 return err; 5132 } 5133 5134 static void ice_deinit(struct ice_pf *pf) 5135 { 5136 set_bit(ICE_SERVICE_DIS, pf->state); 5137 set_bit(ICE_DOWN, pf->state); 5138 5139 ice_deinit_pf_sw(pf); 5140 ice_dealloc_vsis(pf); 5141 ice_deinit_dev(pf); 5142 } 5143 5144 /** 5145 * ice_load - load the PF by initializing HW and starting the VSI 5146 * @pf: pointer to the PF instance 5147 * 5148 * This function has to be called under devl_lock. 5149 */ 5150 int ice_load(struct ice_pf *pf) 5151 { 5152 struct ice_vsi *vsi; 5153 int err; 5154 5155 devl_assert_locked(priv_to_devlink(pf)); 5156 5157 vsi = ice_get_main_vsi(pf); 5158 5159 /* init channel list */ 5160 INIT_LIST_HEAD(&vsi->ch_list); 5161 5162 err = ice_cfg_netdev(vsi); 5163 if (err) 5164 return err; 5165 5166 /* Setup DCB netlink interface */ 5167 ice_dcbnl_setup(vsi); 5168 5169 err = ice_init_mac_fltr(pf); 5170 if (err) 5171 goto err_init_mac_fltr; 5172 5173 err = ice_devlink_create_pf_port(pf); 5174 if (err) 5175 goto err_devlink_create_pf_port; 5176 5177 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); 5178 5179 err = ice_register_netdev(vsi); 5180 if (err) 5181 goto err_register_netdev; 5182 5183 err = ice_tc_indir_block_register(vsi); 5184 if (err) 5185 goto err_tc_indir_block_register; 5186 5187 ice_napi_add(vsi); 5188 5189 ice_init_features(pf); 5190 5191 err = ice_init_rdma(pf); 5192 if (err) 5193 goto err_init_rdma; 5194 5195 ice_service_task_restart(pf); 5196 5197 clear_bit(ICE_DOWN, pf->state); 5198 5199 return 0; 5200 5201 err_init_rdma: 5202 ice_deinit_features(pf); 5203 ice_tc_indir_block_unregister(vsi); 5204 err_tc_indir_block_register: 5205 ice_unregister_netdev(vsi); 5206 err_register_netdev: 5207 ice_devlink_destroy_pf_port(pf); 5208 err_devlink_create_pf_port: 5209 err_init_mac_fltr: 5210 ice_decfg_netdev(vsi); 5211 return err; 5212 } 5213 5214 /** 5215 * ice_unload - unload the PF by stopping the VSI and deinitializing HW 5216 * @pf: pointer to the PF instance 5217 * 5218 * This function has to be called under devl_lock. 5219 */ 5220 void ice_unload(struct ice_pf *pf) 5221 { 5222 struct ice_vsi *vsi = ice_get_main_vsi(pf); 5223 5224 devl_assert_locked(priv_to_devlink(pf)); 5225 5226 ice_deinit_rdma(pf); 5227 ice_deinit_features(pf); 5228 ice_tc_indir_block_unregister(vsi); 5229 ice_unregister_netdev(vsi); 5230 ice_devlink_destroy_pf_port(pf); 5231 ice_decfg_netdev(vsi); 5232 } 5233 5234 static int ice_probe_recovery_mode(struct ice_pf *pf) 5235 { 5236 struct device *dev = ice_pf_to_dev(pf); 5237 int err; 5238 5239 dev_err(dev, "Firmware recovery mode detected. Limiting functionality.
Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode\n"); 5240 5241 INIT_HLIST_HEAD(&pf->aq_wait_list); 5242 spin_lock_init(&pf->aq_wait_lock); 5243 init_waitqueue_head(&pf->aq_wait_queue); 5244 5245 timer_setup(&pf->serv_tmr, ice_service_timer, 0); 5246 pf->serv_tmr_period = HZ; 5247 INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode); 5248 clear_bit(ICE_SERVICE_SCHED, pf->state); 5249 err = ice_create_all_ctrlq(&pf->hw); 5250 if (err) 5251 return err; 5252 5253 scoped_guard(devl, priv_to_devlink(pf)) { 5254 err = ice_init_devlink(pf); 5255 if (err) 5256 return err; 5257 } 5258 5259 ice_service_task_restart(pf); 5260 5261 return 0; 5262 } 5263 5264 /** 5265 * ice_probe - Device initialization routine 5266 * @pdev: PCI device information struct 5267 * @ent: entry in ice_pci_tbl 5268 * 5269 * Returns 0 on success, negative on failure 5270 */ 5271 static int 5272 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) 5273 { 5274 struct device *dev = &pdev->dev; 5275 struct ice_adapter *adapter; 5276 struct ice_pf *pf; 5277 struct ice_hw *hw; 5278 int err; 5279 5280 if (pdev->is_virtfn) { 5281 dev_err(dev, "can't probe a virtual function\n"); 5282 return -EINVAL; 5283 } 5284 5285 /* when under a kdump kernel initiate a reset before enabling the 5286 * device in order to clear out any pending DMA transactions. These 5287 * transactions can cause some systems to machine check when doing 5288 * the pcim_enable_device() below. 5289 */ 5290 if (is_kdump_kernel()) { 5291 pci_save_state(pdev); 5292 pci_clear_master(pdev); 5293 err = pcie_flr(pdev); 5294 if (err) 5295 return err; 5296 pci_restore_state(pdev); 5297 } 5298 5299 /* this driver uses devres, see 5300 * Documentation/driver-api/driver-model/devres.rst 5301 */ 5302 err = pcim_enable_device(pdev); 5303 if (err) 5304 return err; 5305 5306 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); 5307 if (err) { 5308 dev_err(dev, "BAR0 I/O map error %d\n", err); 5309 return err; 5310 } 5311 5312 pf = ice_allocate_pf(dev); 5313 if (!pf) 5314 return -ENOMEM; 5315 5316 /* initialize Auxiliary index to invalid value */ 5317 pf->aux_idx = -1; 5318 5319 /* set up for high or low DMA */ 5320 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 5321 if (err) { 5322 dev_err(dev, "DMA configuration failed: 0x%x\n", err); 5323 return err; 5324 } 5325 5326 pci_set_master(pdev); 5327 pf->pdev = pdev; 5328 pci_set_drvdata(pdev, pf); 5329 set_bit(ICE_DOWN, pf->state); 5330 /* Disable service task until DOWN bit is cleared */ 5331 set_bit(ICE_SERVICE_DIS, pf->state); 5332 5333 hw = &pf->hw; 5334 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; 5335 pci_save_state(pdev); 5336 5337 hw->back = pf; 5338 hw->port_info = NULL; 5339 hw->vendor_id = pdev->vendor; 5340 hw->device_id = pdev->device; 5341 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 5342 hw->subsystem_vendor_id = pdev->subsystem_vendor; 5343 hw->subsystem_device_id = pdev->subsystem_device; 5344 hw->bus.device = PCI_SLOT(pdev->devfn); 5345 hw->bus.func = PCI_FUNC(pdev->devfn); 5346 ice_set_ctrlq_len(hw); 5347 5348 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); 5349 5350 #ifndef CONFIG_DYNAMIC_DEBUG 5351 if (debug < -1) 5352 hw->debug_mask = debug; 5353 #endif 5354 5355 if (ice_is_recovery_mode(hw)) 5356 return ice_probe_recovery_mode(pf); 5357 5358 err = ice_init_hw(hw); 5359 if (err) { 5360 dev_err(dev, "ice_init_hw failed: %d\n", err); 5361 return err; 5362 } 5363 5364 adapter = 
ice_adapter_get(pdev); 5365 if (IS_ERR(adapter)) { 5366 err = PTR_ERR(adapter); 5367 goto unroll_hw_init; 5368 } 5369 pf->adapter = adapter; 5370 5371 err = ice_init(pf); 5372 if (err) 5373 goto unroll_adapter; 5374 5375 devl_lock(priv_to_devlink(pf)); 5376 err = ice_load(pf); 5377 if (err) 5378 goto unroll_init; 5379 5380 err = ice_init_devlink(pf); 5381 if (err) 5382 goto unroll_load; 5383 devl_unlock(priv_to_devlink(pf)); 5384 5385 return 0; 5386 5387 unroll_load: 5388 ice_unload(pf); 5389 unroll_init: 5390 devl_unlock(priv_to_devlink(pf)); 5391 ice_deinit(pf); 5392 unroll_adapter: 5393 ice_adapter_put(pdev); 5394 unroll_hw_init: 5395 ice_deinit_hw(hw); 5396 return err; 5397 } 5398 5399 /** 5400 * ice_set_wake - enable or disable Wake on LAN 5401 * @pf: pointer to the PF struct 5402 * 5403 * Simple helper for WoL control 5404 */ 5405 static void ice_set_wake(struct ice_pf *pf) 5406 { 5407 struct ice_hw *hw = &pf->hw; 5408 bool wol = pf->wol_ena; 5409 5410 /* clear wake state, otherwise new wake events won't fire */ 5411 wr32(hw, PFPM_WUS, U32_MAX); 5412 5413 /* enable / disable APM wake up, no RMW needed */ 5414 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); 5415 5416 /* set magic packet filter enabled */ 5417 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0); 5418 } 5419 5420 /** 5421 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet 5422 * @pf: pointer to the PF struct 5423 * 5424 * Issue firmware command to enable multicast magic wake, making 5425 * sure that any locally administered address (LAA) is used for 5426 * wake, and that PF reset doesn't undo the LAA. 5427 */ 5428 static void ice_setup_mc_magic_wake(struct ice_pf *pf) 5429 { 5430 struct device *dev = ice_pf_to_dev(pf); 5431 struct ice_hw *hw = &pf->hw; 5432 u8 mac_addr[ETH_ALEN]; 5433 struct ice_vsi *vsi; 5434 int status; 5435 u8 flags; 5436 5437 if (!pf->wol_ena) 5438 return; 5439 5440 vsi = ice_get_main_vsi(pf); 5441 if (!vsi) 5442 return; 5443 5444 /* Get current MAC address in case it's an LAA */ 5445 if (vsi->netdev) 5446 ether_addr_copy(mac_addr, vsi->netdev->dev_addr); 5447 else 5448 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 5449 5450 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN | 5451 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL | 5452 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP; 5453 5454 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); 5455 if (status) 5456 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n", 5457 status, ice_aq_str(hw->adminq.sq_last_status)); 5458 } 5459 5460 /** 5461 * ice_remove - Device removal routine 5462 * @pdev: PCI device information struct 5463 */ 5464 static void ice_remove(struct pci_dev *pdev) 5465 { 5466 struct ice_pf *pf = pci_get_drvdata(pdev); 5467 int i; 5468 5469 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { 5470 if (!ice_is_reset_in_progress(pf->state)) 5471 break; 5472 msleep(100); 5473 } 5474 5475 if (ice_is_recovery_mode(&pf->hw)) { 5476 ice_service_task_stop(pf); 5477 scoped_guard(devl, priv_to_devlink(pf)) { 5478 ice_deinit_devlink(pf); 5479 } 5480 return; 5481 } 5482 5483 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { 5484 set_bit(ICE_VF_RESETS_DISABLED, pf->state); 5485 ice_free_vfs(pf); 5486 } 5487 5488 ice_hwmon_exit(pf); 5489 5490 ice_service_task_stop(pf); 5491 ice_aq_cancel_waiting_tasks(pf); 5492 set_bit(ICE_DOWN, pf->state); 5493 5494 if (!ice_is_safe_mode(pf)) 5495 ice_remove_arfs(pf); 5496 5497 devl_lock(priv_to_devlink(pf)); 5498 ice_dealloc_all_dynamic_ports(pf); 5499 ice_deinit_devlink(pf); 5500 5501 
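/* ice_unload() has to run under devl_lock, which is held here and
 * released just below.
 */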
ice_unload(pf); 5502 devl_unlock(priv_to_devlink(pf)); 5503 5504 ice_deinit(pf); 5505 ice_vsi_release_all(pf); 5506 5507 ice_setup_mc_magic_wake(pf); 5508 ice_set_wake(pf); 5509 5510 ice_adapter_put(pdev); 5511 } 5512 5513 /** 5514 * ice_shutdown - PCI callback for shutting down device 5515 * @pdev: PCI device information struct 5516 */ 5517 static void ice_shutdown(struct pci_dev *pdev) 5518 { 5519 struct ice_pf *pf = pci_get_drvdata(pdev); 5520 5521 ice_remove(pdev); 5522 5523 if (system_state == SYSTEM_POWER_OFF) { 5524 pci_wake_from_d3(pdev, pf->wol_ena); 5525 pci_set_power_state(pdev, PCI_D3hot); 5526 } 5527 } 5528 5529 /** 5530 * ice_prepare_for_shutdown - prep for PCI shutdown 5531 * @pf: board private structure 5532 * 5533 * Inform or close all dependent features in prep for PCI device shutdown 5534 */ 5535 static void ice_prepare_for_shutdown(struct ice_pf *pf) 5536 { 5537 struct ice_hw *hw = &pf->hw; 5538 u32 v; 5539 5540 /* Notify VFs of impending reset */ 5541 if (ice_check_sq_alive(hw, &hw->mailboxq)) 5542 ice_vc_notify_reset(pf); 5543 5544 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); 5545 5546 /* disable the VSIs and their queues that are not already DOWN */ 5547 ice_pf_dis_all_vsi(pf, false); 5548 5549 ice_for_each_vsi(pf, v) 5550 if (pf->vsi[v]) 5551 pf->vsi[v]->vsi_num = 0; 5552 5553 ice_shutdown_all_ctrlq(hw, true); 5554 } 5555 5556 /** 5557 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme 5558 * @pf: board private structure to reinitialize 5559 * 5560 * This routine reinitializes the interrupt scheme that was cleared during 5561 * the power management suspend callback. 5562 * 5563 * This should be called during resume routine to re-allocate the q_vectors 5564 * and reacquire interrupts. 5565 */ 5566 static int ice_reinit_interrupt_scheme(struct ice_pf *pf) 5567 { 5568 struct device *dev = ice_pf_to_dev(pf); 5569 int ret, v; 5570 5571 /* Since we clear MSIX flag during suspend, we need to 5572 * set it back during resume... 5573 */ 5574 5575 ret = ice_init_interrupt_scheme(pf); 5576 if (ret) { 5577 dev_err(dev, "Failed to re-initialize interrupt scheme: %d\n", ret); 5578 return ret; 5579 } 5580 5581 /* Remap vectors and rings, after successful re-init interrupts */ 5582 ice_for_each_vsi(pf, v) { 5583 if (!pf->vsi[v]) 5584 continue; 5585 5586 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); 5587 if (ret) 5588 goto err_reinit; 5589 ice_vsi_map_rings_to_vectors(pf->vsi[v]); 5590 rtnl_lock(); 5591 ice_vsi_set_napi_queues(pf->vsi[v]); 5592 rtnl_unlock(); 5593 } 5594 5595 ret = ice_req_irq_msix_misc(pf); 5596 if (ret) { 5597 dev_err(dev, "Setting up misc vector failed after device suspend %d\n", 5598 ret); 5599 goto err_reinit; 5600 } 5601 5602 return 0; 5603 5604 err_reinit: 5605 while (v--) 5606 if (pf->vsi[v]) { 5607 rtnl_lock(); 5608 ice_vsi_clear_napi_queues(pf->vsi[v]); 5609 rtnl_unlock(); 5610 ice_vsi_free_q_vectors(pf->vsi[v]); 5611 } 5612 5613 return ret; 5614 } 5615 5616 /** 5617 * ice_suspend - PM callback to quiesce the device for D3 transition 5618 * @dev: generic device information structure 5619 * 5620 * Power Management callback to quiesce the device and prepare 5621 * for D3 transition. 5622 */ 5623 static int ice_suspend(struct device *dev) 5624 { 5625 struct pci_dev *pdev = to_pci_dev(dev); 5626 struct ice_pf *pf; 5627 int disabled, v; 5628 5629 pf = pci_get_drvdata(pdev); 5630 5631 if (!ice_pf_state_is_nominal(pf)) { 5632 dev_err(dev, "Device is not ready, no need to suspend it\n"); 5633 return -EBUSY; 5634 } 5635 5636 /* Stop watchdog tasks until resume completion.
5637 * Even though it is most likely that the service task is 5638 * disabled if the device is suspended or down, the service task's 5639 * state is controlled by a different state bit, and we should 5640 * store and honor whatever state that bit is in at this point. 5641 */ 5642 disabled = ice_service_task_stop(pf); 5643 5644 ice_deinit_rdma(pf); 5645 5646 /* Already suspended? Then there is nothing to do */ 5647 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { 5648 if (!disabled) 5649 ice_service_task_restart(pf); 5650 return 0; 5651 } 5652 5653 if (test_bit(ICE_DOWN, pf->state) || 5654 ice_is_reset_in_progress(pf->state)) { 5655 dev_err(dev, "can't suspend device in reset or already down\n"); 5656 if (!disabled) 5657 ice_service_task_restart(pf); 5658 return 0; 5659 } 5660 5661 ice_setup_mc_magic_wake(pf); 5662 5663 ice_prepare_for_shutdown(pf); 5664 5665 ice_set_wake(pf); 5666 5667 /* Free vectors, clear the interrupt scheme and release IRQs 5668 * for proper hibernation, especially with a large number of CPUs. 5669 * Otherwise hibernation might fail when mapping all the vectors back 5670 * to CPU0. 5671 */ 5672 ice_free_irq_msix_misc(pf); 5673 ice_for_each_vsi(pf, v) { 5674 if (!pf->vsi[v]) 5675 continue; 5676 rtnl_lock(); 5677 ice_vsi_clear_napi_queues(pf->vsi[v]); 5678 rtnl_unlock(); 5679 ice_vsi_free_q_vectors(pf->vsi[v]); 5680 } 5681 ice_clear_interrupt_scheme(pf); 5682 5683 pci_save_state(pdev); 5684 pci_wake_from_d3(pdev, pf->wol_ena); 5685 pci_set_power_state(pdev, PCI_D3hot); 5686 return 0; 5687 } 5688 5689 /** 5690 * ice_resume - PM callback for waking up from D3 5691 * @dev: generic device information structure 5692 */ 5693 static int ice_resume(struct device *dev) 5694 { 5695 struct pci_dev *pdev = to_pci_dev(dev); 5696 enum ice_reset_req reset_type; 5697 struct ice_pf *pf; 5698 struct ice_hw *hw; 5699 int ret; 5700 5701 pci_set_power_state(pdev, PCI_D0); 5702 pci_restore_state(pdev); 5703 pci_save_state(pdev); 5704 5705 if (!pci_device_is_present(pdev)) 5706 return -ENODEV; 5707 5708 ret = pci_enable_device_mem(pdev); 5709 if (ret) { 5710 dev_err(dev, "Cannot enable device after suspend\n"); 5711 return ret; 5712 } 5713 5714 pf = pci_get_drvdata(pdev); 5715 hw = &pf->hw; 5716 5717 pf->wakeup_reason = rd32(hw, PFPM_WUS); 5718 ice_print_wake_reason(pf); 5719 5720 /* We cleared the interrupt scheme when we suspended, so we need to 5721 * restore it now to resume device functionality. 5722 */ 5723 ret = ice_reinit_interrupt_scheme(pf); 5724 if (ret) 5725 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); 5726 5727 ret = ice_init_rdma(pf); 5728 if (ret) 5729 dev_err(dev, "Failed to reinitialize RDMA during resume: %d\n", 5730 ret); 5731 5732 clear_bit(ICE_DOWN, pf->state); 5733 /* Now perform PF reset and rebuild */ 5734 reset_type = ICE_RESET_PFR; 5735 /* re-enable service task for reset, but allow reset to schedule it */ 5736 clear_bit(ICE_SERVICE_DIS, pf->state); 5737 5738 if (ice_schedule_reset(pf, reset_type)) 5739 dev_err(dev, "Reset during resume failed.\n"); 5740 5741 clear_bit(ICE_SUSPENDED, pf->state); 5742 ice_service_task_restart(pf); 5743 5744 /* Restart the service timer */ 5745 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 5746 5747 return 0; 5748 } 5749 5750 /** 5751 * ice_pci_err_detected - warning that PCI error has been detected 5752 * @pdev: PCI device information struct 5753 * @err: the type of PCI error 5754 * 5755 * Called to warn that something happened on the PCI bus and the error handling 5756 * is in progress.
Allows the driver to gracefully prepare/handle PCI errors. 5757 */ 5758 static pci_ers_result_t 5759 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) 5760 { 5761 struct ice_pf *pf = pci_get_drvdata(pdev); 5762 5763 if (!pf) { 5764 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", 5765 __func__, err); 5766 return PCI_ERS_RESULT_DISCONNECT; 5767 } 5768 5769 if (!test_bit(ICE_SUSPENDED, pf->state)) { 5770 ice_service_task_stop(pf); 5771 5772 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { 5773 set_bit(ICE_PFR_REQ, pf->state); 5774 ice_prepare_for_reset(pf, ICE_RESET_PFR); 5775 } 5776 } 5777 5778 return PCI_ERS_RESULT_NEED_RESET; 5779 } 5780 5781 /** 5782 * ice_pci_err_slot_reset - a PCI slot reset has just happened 5783 * @pdev: PCI device information struct 5784 * 5785 * Called to determine if the driver can recover from the PCI slot reset by 5786 * using a register read to determine if the device is recoverable. 5787 */ 5788 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) 5789 { 5790 struct ice_pf *pf = pci_get_drvdata(pdev); 5791 pci_ers_result_t result; 5792 int err; 5793 u32 reg; 5794 5795 err = pci_enable_device_mem(pdev); 5796 if (err) { 5797 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", 5798 err); 5799 result = PCI_ERS_RESULT_DISCONNECT; 5800 } else { 5801 pci_set_master(pdev); 5802 pci_restore_state(pdev); 5803 pci_save_state(pdev); 5804 pci_wake_from_d3(pdev, false); 5805 5806 /* Check for life */ 5807 reg = rd32(&pf->hw, GLGEN_RTRIG); 5808 if (!reg) 5809 result = PCI_ERS_RESULT_RECOVERED; 5810 else 5811 result = PCI_ERS_RESULT_DISCONNECT; 5812 } 5813 5814 return result; 5815 } 5816 5817 /** 5818 * ice_pci_err_resume - restart operations after PCI error recovery 5819 * @pdev: PCI device information struct 5820 * 5821 * Called to allow the driver to bring things back up after PCI error and/or 5822 * reset recovery have finished 5823 */ 5824 static void ice_pci_err_resume(struct pci_dev *pdev) 5825 { 5826 struct ice_pf *pf = pci_get_drvdata(pdev); 5827 5828 if (!pf) { 5829 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", 5830 __func__); 5831 return; 5832 } 5833 5834 if (test_bit(ICE_SUSPENDED, pf->state)) { 5835 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", 5836 __func__); 5837 return; 5838 } 5839 5840 ice_restore_all_vfs_msi_state(pf); 5841 5842 ice_do_reset(pf, ICE_RESET_PFR); 5843 ice_service_task_restart(pf); 5844 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 5845 } 5846 5847 /** 5848 * ice_pci_err_reset_prepare - prepare device driver for PCI reset 5849 * @pdev: PCI device information struct 5850 */ 5851 static void ice_pci_err_reset_prepare(struct pci_dev *pdev) 5852 { 5853 struct ice_pf *pf = pci_get_drvdata(pdev); 5854 5855 if (!test_bit(ICE_SUSPENDED, pf->state)) { 5856 ice_service_task_stop(pf); 5857 5858 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { 5859 set_bit(ICE_PFR_REQ, pf->state); 5860 ice_prepare_for_reset(pf, ICE_RESET_PFR); 5861 } 5862 } 5863 } 5864 5865 /** 5866 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin 5867 * @pdev: PCI device information struct 5868 */ 5869 static void ice_pci_err_reset_done(struct pci_dev *pdev) 5870 { 5871 ice_pci_err_resume(pdev); 5872 } 5873 5874 /* ice_pci_tbl - PCI Device ID Table 5875 * 5876 * Wildcard entries (PCI_ANY_ID) should come last 5877 * Last entry must be all 0s 5878 * 5879 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 5880 * Class, Class Mask, 
private data (not used) } 5881 */ 5882 static const struct pci_device_id ice_pci_tbl[] = { 5883 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) }, 5884 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) }, 5885 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) }, 5886 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) }, 5887 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) }, 5888 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) }, 5889 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) }, 5890 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) }, 5891 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) }, 5892 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) }, 5893 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) }, 5894 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) }, 5895 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) }, 5896 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) }, 5897 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) }, 5898 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) }, 5899 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) }, 5900 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) }, 5901 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) }, 5902 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) }, 5903 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) }, 5904 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) }, 5905 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) }, 5906 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) }, 5907 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) }, 5908 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) }, 5909 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), }, 5910 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), }, 5911 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), }, 5912 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), }, 5913 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) }, 5914 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) }, 5915 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) }, 5916 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) }, 5917 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), }, 5918 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), }, 5919 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), }, 5920 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), }, 5921 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), }, 5922 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), }, 5923 /* required last entry */ 5924 {} 5925 }; 5926 MODULE_DEVICE_TABLE(pci, ice_pci_tbl); 5927 5928 static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); 5929 5930 static const struct pci_error_handlers ice_pci_err_handler = { 5931 .error_detected = ice_pci_err_detected, 5932 .slot_reset = ice_pci_err_slot_reset, 5933 .reset_prepare = ice_pci_err_reset_prepare, 5934 .reset_done = ice_pci_err_reset_done, 5935 .resume = ice_pci_err_resume 5936 }; 5937 5938 static struct pci_driver ice_driver = { 5939 .name = KBUILD_MODNAME, 5940 .id_table = ice_pci_tbl, 5941 .probe = ice_probe, 5942 .remove = ice_remove, 5943 .driver.pm = pm_sleep_ptr(&ice_pm_ops), 5944 .shutdown = ice_shutdown, 5945 .sriov_configure = ice_sriov_configure, 5946 .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix, 5947 .sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count, 5948 .err_handler = &ice_pci_err_handler 5949 }; 5950 5951 /** 5952 * ice_module_init - Driver registration routine 5953 * 5954 * ice_module_init is the first routine called when the driver is 5955 * loaded. All it does is register with the PCI subsystem. 
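 *
 * Note that the error unwind below releases resources in the reverse order
 * of their setup: the SF driver is unregistered before the PCI driver, and
 * the LAG workqueue, debugfs entries and main workqueue are torn down last.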
5956 */ 5957 static int __init ice_module_init(void) 5958 { 5959 int status = -ENOMEM; 5960 5961 pr_info("%s\n", ice_driver_string); 5962 pr_info("%s\n", ice_copyright); 5963 5964 ice_adv_lnk_speed_maps_init(); 5965 5966 ice_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, KBUILD_MODNAME); 5967 if (!ice_wq) { 5968 pr_err("Failed to create workqueue\n"); 5969 return status; 5970 } 5971 5972 ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0); 5973 if (!ice_lag_wq) { 5974 pr_err("Failed to create LAG workqueue\n"); 5975 goto err_dest_wq; 5976 } 5977 5978 ice_debugfs_init(); 5979 5980 status = pci_register_driver(&ice_driver); 5981 if (status) { 5982 pr_err("failed to register PCI driver, err %d\n", status); 5983 goto err_dest_lag_wq; 5984 } 5985 5986 status = ice_sf_driver_register(); 5987 if (status) { 5988 pr_err("Failed to register SF driver, err %d\n", status); 5989 goto err_sf_driver; 5990 } 5991 5992 return 0; 5993 5994 err_sf_driver: 5995 pci_unregister_driver(&ice_driver); 5996 err_dest_lag_wq: 5997 destroy_workqueue(ice_lag_wq); 5998 ice_debugfs_exit(); 5999 err_dest_wq: 6000 destroy_workqueue(ice_wq); 6001 return status; 6002 } 6003 module_init(ice_module_init); 6004 6005 /** 6006 * ice_module_exit - Driver exit cleanup routine 6007 * 6008 * ice_module_exit is called just before the driver is removed 6009 * from memory. 6010 */ 6011 static void __exit ice_module_exit(void) 6012 { 6013 ice_sf_driver_unregister(); 6014 pci_unregister_driver(&ice_driver); 6015 ice_debugfs_exit(); 6016 destroy_workqueue(ice_wq); 6017 destroy_workqueue(ice_lag_wq); 6018 pr_info("module unloaded\n"); 6019 } 6020 module_exit(ice_module_exit); 6021 6022 /** 6023 * ice_set_mac_address - NDO callback to set MAC address 6024 * @netdev: network interface device structure 6025 * @pi: pointer to an address structure 6026 * 6027 * Returns 0 on success, negative on failure 6028 */ 6029 static int ice_set_mac_address(struct net_device *netdev, void *pi) 6030 { 6031 struct ice_netdev_priv *np = netdev_priv(netdev); 6032 struct ice_vsi *vsi = np->vsi; 6033 struct ice_pf *pf = vsi->back; 6034 struct ice_hw *hw = &pf->hw; 6035 struct sockaddr *addr = pi; 6036 u8 old_mac[ETH_ALEN]; 6037 u8 flags = 0; 6038 u8 *mac; 6039 int err; 6040 6041 mac = (u8 *)addr->sa_data; 6042 6043 if (!is_valid_ether_addr(mac)) 6044 return -EADDRNOTAVAIL; 6045 6046 if (test_bit(ICE_DOWN, pf->state) || 6047 ice_is_reset_in_progress(pf->state)) { 6048 netdev_err(netdev, "can't set mac %pM. device not ready\n", 6049 mac); 6050 return -EBUSY; 6051 } 6052 6053 if (ice_chnl_dmac_fltr_cnt(pf)) { 6054 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n", 6055 mac); 6056 return -EAGAIN; 6057 } 6058 6059 netif_addr_lock_bh(netdev); 6060 ether_addr_copy(old_mac, netdev->dev_addr); 6061 /* change the netdev's MAC address */ 6062 eth_hw_addr_set(netdev, mac); 6063 netif_addr_unlock_bh(netdev); 6064 6065 /* Clean up old MAC filter. Not an error if old filter doesn't exist */ 6066 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); 6067 if (err && err != -ENOENT) { 6068 err = -EADDRNOTAVAIL; 6069 goto err_update_filters; 6070 } 6071 6072 /* Add filter for new MAC. If filter exists, return success */ 6073 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); 6074 if (err == -EEXIST) { 6075 /* Although this MAC filter is already present in hardware it's 6076 * possible in some cases (e.g. bonding) that dev_addr was 6077 * modified outside of the driver and needs to be restored back 6078 * to this value. 
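 * (One concrete case, for illustration only: a failover bond may write its
 * own MAC into a slave's dev_addr behind the driver's back; the filter that
 * is already programmed in hardware is still the right one, so returning
 * success here is safe.)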
6079 */ 6080 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); 6081 6082 return 0; 6083 } else if (err) { 6084 /* error if the new filter addition failed */ 6085 err = -EADDRNOTAVAIL; 6086 } 6087 6088 err_update_filters: 6089 if (err) { 6090 netdev_err(netdev, "can't set MAC %pM. filter update failed\n", 6091 mac); 6092 netif_addr_lock_bh(netdev); 6093 eth_hw_addr_set(netdev, old_mac); 6094 netif_addr_unlock_bh(netdev); 6095 return err; 6096 } 6097 6098 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", 6099 netdev->dev_addr); 6100 6101 /* write new MAC address to the firmware */ 6102 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 6103 err = ice_aq_manage_mac_write(hw, mac, flags, NULL); 6104 if (err) { 6105 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n", 6106 mac, err); 6107 } 6108 return 0; 6109 } 6110 6111 /** 6112 * ice_set_rx_mode - NDO callback to set the netdev filters 6113 * @netdev: network interface device structure 6114 */ 6115 static void ice_set_rx_mode(struct net_device *netdev) 6116 { 6117 struct ice_netdev_priv *np = netdev_priv(netdev); 6118 struct ice_vsi *vsi = np->vsi; 6119 6120 if (!vsi || ice_is_switchdev_running(vsi->back)) 6121 return; 6122 6123 /* Set the flags to synchronize filters 6124 * ndo_set_rx_mode may be triggered even without a change in netdev 6125 * flags 6126 */ 6127 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); 6128 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); 6129 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); 6130 6131 /* schedule our worker thread which will take care of 6132 * applying the new filter changes 6133 */ 6134 ice_service_task_schedule(vsi->back); 6135 } 6136 6137 /** 6138 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate 6139 * @netdev: network interface device structure 6140 * @queue_index: Queue ID 6141 * @maxrate: maximum bandwidth in Mbps 6142 */ 6143 static int 6144 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) 6145 { 6146 struct ice_netdev_priv *np = netdev_priv(netdev); 6147 struct ice_vsi *vsi = np->vsi; 6148 u16 q_handle; 6149 int status; 6150 u8 tc; 6151 6152 /* Validate maxrate requested is within permitted range */ 6153 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) { 6154 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n", 6155 maxrate, queue_index); 6156 return -EINVAL; 6157 } 6158 6159 q_handle = vsi->tx_rings[queue_index]->q_handle; 6160 tc = ice_dcb_get_tc(vsi, queue_index); 6161 6162 vsi = ice_locate_vsi_using_queue(vsi, queue_index); 6163 if (!vsi) { 6164 netdev_err(netdev, "Invalid VSI for given queue %d\n", 6165 queue_index); 6166 return -EINVAL; 6167 } 6168 6169 /* Set BW back to default, when user set maxrate to 0 */ 6170 if (!maxrate) 6171 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, 6172 q_handle, ICE_MAX_BW); 6173 else 6174 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, 6175 q_handle, ICE_MAX_BW, maxrate * 1000); 6176 if (status) 6177 netdev_err(netdev, "Unable to set Tx max rate, error %d\n", 6178 status); 6179 6180 return status; 6181 } 6182 6183 /** 6184 * ice_fdb_add - add an entry to the hardware database 6185 * @ndm: the input from the stack 6186 * @tb: pointer to array of nladdr (unused) 6187 * @dev: the net device pointer 6188 * @addr: the MAC address entry being added 6189 * @vid: VLAN ID 6190 * @flags: instructions from stack about fdb operation 6191 * @notified: whether notification was emitted 6192 * @extack: netlink extended ack 6193 */ 6194 static 
int 6195 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], 6196 struct net_device *dev, const unsigned char *addr, u16 vid, 6197 u16 flags, bool *notified, 6198 struct netlink_ext_ack __always_unused *extack) 6199 { 6200 int err; 6201 6202 if (vid) { 6203 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); 6204 return -EINVAL; 6205 } 6206 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 6207 netdev_err(dev, "FDB only supports static addresses\n"); 6208 return -EINVAL; 6209 } 6210 6211 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 6212 err = dev_uc_add_excl(dev, addr); 6213 else if (is_multicast_ether_addr(addr)) 6214 err = dev_mc_add_excl(dev, addr); 6215 else 6216 err = -EINVAL; 6217 6218 /* Only return duplicate errors if NLM_F_EXCL is set */ 6219 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 6220 err = 0; 6221 6222 return err; 6223 } 6224 6225 /** 6226 * ice_fdb_del - delete an entry from the hardware database 6227 * @ndm: the input from the stack 6228 * @tb: pointer to array of nladdr (unused) 6229 * @dev: the net device pointer 6230 * @addr: the MAC address entry being removed 6231 * @vid: VLAN ID 6232 * @notified: whether notification was emitted 6233 * @extack: netlink extended ack 6234 */ 6235 static int 6236 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], 6237 struct net_device *dev, const unsigned char *addr, 6238 __always_unused u16 vid, bool *notified, 6239 struct netlink_ext_ack *extack) 6240 { 6241 int err; 6242 6243 if (ndm->ndm_state & NUD_PERMANENT) { 6244 netdev_err(dev, "FDB only supports static addresses\n"); 6245 return -EINVAL; 6246 } 6247 6248 if (is_unicast_ether_addr(addr)) 6249 err = dev_uc_del(dev, addr); 6250 else if (is_multicast_ether_addr(addr)) 6251 err = dev_mc_del(dev, addr); 6252 else 6253 err = -EINVAL; 6254 6255 return err; 6256 } 6257 6258 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ 6259 NETIF_F_HW_VLAN_CTAG_TX | \ 6260 NETIF_F_HW_VLAN_STAG_RX | \ 6261 NETIF_F_HW_VLAN_STAG_TX) 6262 6263 #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ 6264 NETIF_F_HW_VLAN_STAG_RX) 6265 6266 #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ 6267 NETIF_F_HW_VLAN_STAG_FILTER) 6268 6269 /** 6270 * ice_fix_features - fix the netdev features flags based on device limitations 6271 * @netdev: ptr to the netdev that flags are being fixed on 6272 * @features: features that need to be checked and possibly fixed 6273 * 6274 * Make sure any fixups are made to features in this callback. This enables the 6275 * driver to not have to check unsupported configurations throughout the driver 6276 * because that's the responsibility of this callback. 6277 * 6278 * Single VLAN Mode (SVM) Supported Features: 6279 * NETIF_F_HW_VLAN_CTAG_FILTER 6280 * NETIF_F_HW_VLAN_CTAG_RX 6281 * NETIF_F_HW_VLAN_CTAG_TX 6282 * 6283 * Double VLAN Mode (DVM) Supported Features: 6284 * NETIF_F_HW_VLAN_CTAG_FILTER 6285 * NETIF_F_HW_VLAN_CTAG_RX 6286 * NETIF_F_HW_VLAN_CTAG_TX 6287 * 6288 * NETIF_F_HW_VLAN_STAG_FILTER 6289 * NETIF_F_HW_VLAN_STAG_RX 6290 * NETIF_F_HW_VLAN_STAG_TX 6291 * 6292 * Features that need fixing: 6293 * Cannot simultaneously enable CTAG and STAG stripping and/or insertion. 6294 * These are mutually exclusive as the VSI context cannot support multiple 6295 * VLAN ethertypes simultaneously for stripping and/or insertion. If this 6296 * is not done, then default to clearing the requested STAG offload 6297 * settings.
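 * For example (illustrative): if CTAG stripping is currently enabled and a
 * request additionally sets NETIF_F_HW_VLAN_STAG_RX, the fixup below keeps
 * the CTAG bits and clears the STAG bits instead of failing the request.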
6298 * 6299 * All supported filtering has to be enabled or disabled together. For 6300 * example, in DVM, CTAG and STAG filtering have to be enabled and disabled 6301 * together. If this is not done, then default to VLAN filtering disabled. 6302 * These are mutually exclusive as there is currently no way to 6303 * enable/disable VLAN filtering based on VLAN ethertype when using VLAN 6304 * prune rules. 6305 */ 6306 static netdev_features_t 6307 ice_fix_features(struct net_device *netdev, netdev_features_t features) 6308 { 6309 struct ice_netdev_priv *np = netdev_priv(netdev); 6310 netdev_features_t req_vlan_fltr, cur_vlan_fltr; 6311 bool cur_ctag, cur_stag, req_ctag, req_stag; 6312 6313 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES; 6314 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; 6315 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; 6316 6317 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES; 6318 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; 6319 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; 6320 6321 if (req_vlan_fltr != cur_vlan_fltr) { 6322 if (ice_is_dvm_ena(&np->vsi->back->hw)) { 6323 if (req_ctag && req_stag) { 6324 features |= NETIF_VLAN_FILTERING_FEATURES; 6325 } else if (!req_ctag && !req_stag) { 6326 features &= ~NETIF_VLAN_FILTERING_FEATURES; 6327 } else if ((!cur_ctag && req_ctag && !cur_stag) || 6328 (!cur_stag && req_stag && !cur_ctag)) { 6329 features |= NETIF_VLAN_FILTERING_FEATURES; 6330 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n"); 6331 } else if ((cur_ctag && !req_ctag && cur_stag) || 6332 (cur_stag && !req_stag && cur_ctag)) { 6333 features &= ~NETIF_VLAN_FILTERING_FEATURES; 6334 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n"); 6335 } 6336 } else { 6337 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER) 6338 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n"); 6339 6340 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER) 6341 features |= NETIF_F_HW_VLAN_CTAG_FILTER; 6342 } 6343 } 6344 6345 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) && 6346 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) { 6347 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n"); 6348 features &= ~(NETIF_F_HW_VLAN_STAG_RX | 6349 NETIF_F_HW_VLAN_STAG_TX); 6350 } 6351 6352 if (!(netdev->features & NETIF_F_RXFCS) && 6353 (features & NETIF_F_RXFCS) && 6354 (features & NETIF_VLAN_STRIPPING_FEATURES) && 6355 !ice_vsi_has_non_zero_vlans(np->vsi)) { 6356 netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n"); 6357 features &= ~NETIF_VLAN_STRIPPING_FEATURES; 6358 } 6359 6360 return features; 6361 } 6362 6363 /** 6364 * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto 6365 * @vsi: PF's VSI 6366 * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order 6367 * 6368 * Store current stripped VLAN proto in ring packet context, 6369 * so it can be accessed more efficiently by packet processing code. 
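 * (The packet context is data the Rx hot path already touches for every
 * frame, so reading the cached ethertype there is presumably cheaper than
 * walking back to the VSI VLAN configuration per packet.)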
6370 */ 6371 static void 6372 ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype) 6373 { 6374 u16 i; 6375 6376 ice_for_each_alloc_rxq(vsi, i) 6377 vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype; 6378 } 6379 6380 /** 6381 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI 6382 * @vsi: PF's VSI 6383 * @features: features used to determine VLAN offload settings 6384 * 6385 * First, determine the vlan_ethertype based on the VLAN offload bits in 6386 * features. Then determine if stripping and insertion should be enabled or 6387 * disabled. Finally enable or disable VLAN stripping and insertion. 6388 */ 6389 static int 6390 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features) 6391 { 6392 bool enable_stripping = true, enable_insertion = true; 6393 struct ice_vsi_vlan_ops *vlan_ops; 6394 int strip_err = 0, insert_err = 0; 6395 u16 vlan_ethertype = 0; 6396 6397 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 6398 6399 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) 6400 vlan_ethertype = ETH_P_8021AD; 6401 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) 6402 vlan_ethertype = ETH_P_8021Q; 6403 6404 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX))) 6405 enable_stripping = false; 6406 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX))) 6407 enable_insertion = false; 6408 6409 if (enable_stripping) 6410 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype); 6411 else 6412 strip_err = vlan_ops->dis_stripping(vsi); 6413 6414 if (enable_insertion) 6415 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype); 6416 else 6417 insert_err = vlan_ops->dis_insertion(vsi); 6418 6419 if (strip_err || insert_err) 6420 return -EIO; 6421 6422 ice_set_rx_rings_vlan_proto(vsi, enable_stripping ? 6423 htons(vlan_ethertype) : 0); 6424 6425 return 0; 6426 } 6427 6428 /** 6429 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI 6430 * @vsi: PF's VSI 6431 * @features: features used to determine VLAN filtering settings 6432 * 6433 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the 6434 * features. 6435 */ 6436 static int 6437 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features) 6438 { 6439 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 6440 int err = 0; 6441 6442 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking 6443 * if either bit is set. In switchdev mode Rx filtering should never be 6444 * enabled. 6445 */ 6446 if ((features & 6447 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) && 6448 !ice_is_eswitch_mode_switchdev(vsi->back)) 6449 err = vlan_ops->ena_rx_filtering(vsi); 6450 else 6451 err = vlan_ops->dis_rx_filtering(vsi); 6452 6453 return err; 6454 } 6455 6456 /** 6457 * ice_set_vlan_features - set VLAN settings based on suggested feature set 6458 * @netdev: ptr to the netdev being adjusted 6459 * @features: the feature set that the stack is suggesting 6460 * 6461 * Only update VLAN settings if the requested_vlan_features are different than 6462 * the current_vlan_features. 
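 *
 * A minimal sketch of the comparison used below (illustrative only):
 *
 *	netdev_features_t cur = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
 *	netdev_features_t req = features & NETIF_VLAN_OFFLOAD_FEATURES;
 *
 *	if (cur ^ req)
 *		update stripping/insertion to match req;
 *
 * The same pattern is repeated for NETIF_VLAN_FILTERING_FEATURES.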
6463 */ 6464 static int 6465 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features) 6466 { 6467 netdev_features_t current_vlan_features, requested_vlan_features; 6468 struct ice_netdev_priv *np = netdev_priv(netdev); 6469 struct ice_vsi *vsi = np->vsi; 6470 int err; 6471 6472 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES; 6473 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES; 6474 if (current_vlan_features ^ requested_vlan_features) { 6475 if ((features & NETIF_F_RXFCS) && 6476 (features & NETIF_VLAN_STRIPPING_FEATURES)) { 6477 dev_err(ice_pf_to_dev(vsi->back), 6478 "To enable VLAN stripping, you must first enable FCS/CRC stripping\n"); 6479 return -EIO; 6480 } 6481 6482 err = ice_set_vlan_offload_features(vsi, features); 6483 if (err) 6484 return err; 6485 } 6486 6487 current_vlan_features = netdev->features & 6488 NETIF_VLAN_FILTERING_FEATURES; 6489 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES; 6490 if (current_vlan_features ^ requested_vlan_features) { 6491 err = ice_set_vlan_filtering_features(vsi, features); 6492 if (err) 6493 return err; 6494 } 6495 6496 return 0; 6497 } 6498 6499 /** 6500 * ice_set_loopback - turn on/off loopback mode on underlying PF 6501 * @vsi: ptr to VSI 6502 * @ena: flag to indicate the on/off setting 6503 */ 6504 static int ice_set_loopback(struct ice_vsi *vsi, bool ena) 6505 { 6506 bool if_running = netif_running(vsi->netdev); 6507 int ret; 6508 6509 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { 6510 ret = ice_down(vsi); 6511 if (ret) { 6512 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n"); 6513 return ret; 6514 } 6515 } 6516 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); 6517 if (ret) 6518 netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); 6519 if (if_running) 6520 ret = ice_up(vsi); 6521 6522 return ret; 6523 } 6524 6525 /** 6526 * ice_set_features - set the netdev feature flags 6527 * @netdev: ptr to the netdev being adjusted 6528 * @features: the feature set that the stack is suggesting 6529 */ 6530 static int 6531 ice_set_features(struct net_device *netdev, netdev_features_t features) 6532 { 6533 netdev_features_t changed = netdev->features ^ features; 6534 struct ice_netdev_priv *np = netdev_priv(netdev); 6535 struct ice_vsi *vsi = np->vsi; 6536 struct ice_pf *pf = vsi->back; 6537 int ret = 0; 6538 6539 /* Don't set any netdev advanced features with device in Safe Mode */ 6540 if (ice_is_safe_mode(pf)) { 6541 dev_err(ice_pf_to_dev(pf), 6542 "Device is in Safe Mode - not enabling advanced netdev features\n"); 6543 return ret; 6544 } 6545 6546 /* Do not change setting during reset */ 6547 if (ice_is_reset_in_progress(pf->state)) { 6548 dev_err(ice_pf_to_dev(pf), 6549 "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); 6550 return -EBUSY; 6551 } 6552 6553 /* Multiple features can be changed in one call so keep features in 6554 * separate if/else statements to guarantee each feature is checked 6555 */ 6556 if (changed & NETIF_F_RXHASH) 6557 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH)); 6558 6559 ret = ice_set_vlan_features(netdev, features); 6560 if (ret) 6561 return ret; 6562 6563 /* Turn on receive of FCS aka CRC, and after setting this 6564 * flag the packet data will have the 4 byte CRC appended 6565 */ 6566 if (changed & NETIF_F_RXFCS) { 6567 if ((features & NETIF_F_RXFCS) && 6568 (features & NETIF_VLAN_STRIPPING_FEATURES)) { 6569 
dev_err(ice_pf_to_dev(vsi->back), 6570 "To disable FCS/CRC stripping, you must first disable VLAN stripping\n"); 6571 return -EIO; 6572 } 6573 6574 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS)); 6575 ret = ice_down_up(vsi); 6576 if (ret) 6577 return ret; 6578 } 6579 6580 if (changed & NETIF_F_NTUPLE) { 6581 bool ena = !!(features & NETIF_F_NTUPLE); 6582 6583 ice_vsi_manage_fdir(vsi, ena); 6584 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi); 6585 } 6586 6587 /* don't turn off hw_tc_offload when ADQ is already enabled */ 6588 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) { 6589 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n"); 6590 return -EACCES; 6591 } 6592 6593 if (changed & NETIF_F_HW_TC) { 6594 bool ena = !!(features & NETIF_F_HW_TC); 6595 6596 assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena); 6597 } 6598 6599 if (changed & NETIF_F_LOOPBACK) 6600 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK)); 6601 6602 return ret; 6603 } 6604 6605 /** 6606 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI 6607 * @vsi: VSI to setup VLAN properties for 6608 */ 6609 static int ice_vsi_vlan_setup(struct ice_vsi *vsi) 6610 { 6611 int err; 6612 6613 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features); 6614 if (err) 6615 return err; 6616 6617 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features); 6618 if (err) 6619 return err; 6620 6621 return ice_vsi_add_vlan_zero(vsi); 6622 } 6623 6624 /** 6625 * ice_vsi_cfg_lan - Setup the VSI lan related config 6626 * @vsi: the VSI being configured 6627 * 6628 * Return 0 on success and negative value on error 6629 */ 6630 int ice_vsi_cfg_lan(struct ice_vsi *vsi) 6631 { 6632 int err; 6633 6634 if (vsi->netdev && vsi->type == ICE_VSI_PF) { 6635 ice_set_rx_mode(vsi->netdev); 6636 6637 err = ice_vsi_vlan_setup(vsi); 6638 if (err) 6639 return err; 6640 } 6641 ice_vsi_cfg_dcb_rings(vsi); 6642 6643 err = ice_vsi_cfg_lan_txqs(vsi); 6644 if (!err && ice_is_xdp_ena_vsi(vsi)) 6645 err = ice_vsi_cfg_xdp_txqs(vsi); 6646 if (!err) 6647 err = ice_vsi_cfg_rxqs(vsi); 6648 6649 return err; 6650 } 6651 6652 /* THEORY OF MODERATION: 6653 * The ice driver hardware works differently than the hardware that DIMLIB was 6654 * originally made for. ice hardware doesn't have packet count limits that 6655 * can trigger an interrupt, but it *does* have interrupt rate limit support, 6656 * which is hard-coded to a limit of 250,000 ints/second. 6657 * If not using dynamic moderation, the INTRL value can be modified 6658 * by ethtool rx-usecs-high. 6659 */ 6660 struct ice_dim { 6661 /* the throttle rate for interrupts, basically worst case delay before 6662 * an initial interrupt fires, value is stored in microseconds. 6663 */ 6664 u16 itr; 6665 }; 6666 6667 /* Make a different profile for Rx that doesn't allow quite so aggressive 6668 * moderation at the high end (it maxes out at 126 us or about 8k interrupts a 6669 * second).
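 * (For reference: an ITR value is the minimum spacing between interrupts in
 * microseconds, so 126 us allows at most 1,000,000 / 126, roughly 7,936
 * interrupts per second.)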
6670 */ 6671 static const struct ice_dim rx_profile[] = { 6672 {2}, /* 500,000 ints/s, capped at 250K by INTRL */ 6673 {8}, /* 125,000 ints/s */ 6674 {16}, /* 62,500 ints/s */ 6675 {62}, /* 16,129 ints/s */ 6676 {126} /* 7,936 ints/s */ 6677 }; 6678 6679 /* The transmit profile, which has the same sorts of values 6680 * as the previous struct 6681 */ 6682 static const struct ice_dim tx_profile[] = { 6683 {2}, /* 500,000 ints/s, capped at 250K by INTRL */ 6684 {8}, /* 125,000 ints/s */ 6685 {40}, /* 25,000 ints/s */ 6686 {128}, /* 7,812 ints/s */ 6687 {256} /* 3,906 ints/s */ 6688 }; 6689 6690 static void ice_tx_dim_work(struct work_struct *work) 6691 { 6692 struct ice_ring_container *rc; 6693 struct dim *dim; 6694 u16 itr; 6695 6696 dim = container_of(work, struct dim, work); 6697 rc = dim->priv; 6698 6699 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile)); 6700 6701 /* look up the values in our local table */ 6702 itr = tx_profile[dim->profile_ix].itr; 6703 6704 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim); 6705 ice_write_itr(rc, itr); 6706 6707 dim->state = DIM_START_MEASURE; 6708 } 6709 6710 static void ice_rx_dim_work(struct work_struct *work) 6711 { 6712 struct ice_ring_container *rc; 6713 struct dim *dim; 6714 u16 itr; 6715 6716 dim = container_of(work, struct dim, work); 6717 rc = dim->priv; 6718 6719 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile)); 6720 6721 /* look up the values in our local table */ 6722 itr = rx_profile[dim->profile_ix].itr; 6723 6724 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim); 6725 ice_write_itr(rc, itr); 6726 6727 dim->state = DIM_START_MEASURE; 6728 } 6729 6730 #define ICE_DIM_DEFAULT_PROFILE_IX 1 6731 6732 /** 6733 * ice_init_moderation - set up interrupt moderation 6734 * @q_vector: the vector containing rings to be configured 6735 * 6736 * Set up interrupt moderation registers, with the intent to do the right thing 6737 * when called from reset or from probe, whether or not dynamic moderation 6738 * is enabled. Take special care to write all the registers in both 6739 * the dynamic and non-dynamic cases in order to make sure hardware is in a 6740 * known state. 6741 */ 6742 static void ice_init_moderation(struct ice_q_vector *q_vector) 6743 { 6744 struct ice_ring_container *rc; 6745 bool tx_dynamic, rx_dynamic; 6746 6747 rc = &q_vector->tx; 6748 INIT_WORK(&rc->dim.work, ice_tx_dim_work); 6749 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 6750 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; 6751 rc->dim.priv = rc; 6752 tx_dynamic = ITR_IS_DYNAMIC(rc); 6753 6754 /* set the initial TX ITR to match the above */ 6755 ice_write_itr(rc, tx_dynamic ? 6756 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting); 6757 6758 rc = &q_vector->rx; 6759 INIT_WORK(&rc->dim.work, ice_rx_dim_work); 6760 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 6761 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; 6762 rc->dim.priv = rc; 6763 rx_dynamic = ITR_IS_DYNAMIC(rc); 6764 6765 /* set the initial RX ITR to match the above */ 6766 ice_write_itr(rc, rx_dynamic ?
rx_profile[rc->dim.profile_ix].itr : 6767 rc->itr_setting); 6768 6769 ice_set_q_vector_intrl(q_vector); 6770 } 6771 6772 /** 6773 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI 6774 * @vsi: the VSI being configured 6775 */ 6776 static void ice_napi_enable_all(struct ice_vsi *vsi) 6777 { 6778 int q_idx; 6779 6780 if (!vsi->netdev) 6781 return; 6782 6783 ice_for_each_q_vector(vsi, q_idx) { 6784 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 6785 6786 ice_init_moderation(q_vector); 6787 6788 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) 6789 napi_enable(&q_vector->napi); 6790 } 6791 } 6792 6793 /** 6794 * ice_up_complete - Finish the last steps of bringing up a connection 6795 * @vsi: The VSI being configured 6796 * 6797 * Return 0 on success and negative value on error 6798 */ 6799 static int ice_up_complete(struct ice_vsi *vsi) 6800 { 6801 struct ice_pf *pf = vsi->back; 6802 int err; 6803 6804 ice_vsi_cfg_msix(vsi); 6805 6806 /* Enable only Rx rings, Tx rings were enabled by the FW when the 6807 * Tx queue group list was configured and the context bits were 6808 * programmed using ice_vsi_cfg_txqs 6809 */ 6810 err = ice_vsi_start_all_rx_rings(vsi); 6811 if (err) 6812 return err; 6813 6814 clear_bit(ICE_VSI_DOWN, vsi->state); 6815 ice_napi_enable_all(vsi); 6816 ice_vsi_ena_irq(vsi); 6817 6818 if (vsi->port_info && 6819 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && 6820 ((vsi->netdev && (vsi->type == ICE_VSI_PF || 6821 vsi->type == ICE_VSI_SF)))) { 6822 ice_print_link_msg(vsi, true); 6823 netif_tx_start_all_queues(vsi->netdev); 6824 netif_carrier_on(vsi->netdev); 6825 ice_ptp_link_change(pf, true); 6826 } 6827 6828 /* Perform an initial read of the statistics registers now to 6829 * set the baseline so counters are ready when interface is up 6830 */ 6831 ice_update_eth_stats(vsi); 6832 6833 if (vsi->type == ICE_VSI_PF) 6834 ice_service_task_schedule(pf); 6835 6836 return 0; 6837 } 6838 6839 /** 6840 * ice_up - Bring the connection back up after being down 6841 * @vsi: VSI being configured 6842 */ 6843 int ice_up(struct ice_vsi *vsi) 6844 { 6845 int err; 6846 6847 err = ice_vsi_cfg_lan(vsi); 6848 if (!err) 6849 err = ice_up_complete(vsi); 6850 6851 return err; 6852 } 6853 6854 /** 6855 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring 6856 * @syncp: pointer to u64_stats_sync 6857 * @stats: stats that pkts and bytes count will be taken from 6858 * @pkts: packets stats counter 6859 * @bytes: bytes stats counter 6860 * 6861 * This function fetches stats from the ring considering the atomic operations 6862 * that need to be performed to read u64 values on a 32-bit machine.
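 *
 * Typical usage, matching the callers below (shown for illustration):
 *
 *	u64 pkts, bytes;
 *
 *	ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
 *				     ring->ring_stats->stats, &pkts, &bytes);
 *
 * On 64-bit kernels the fetch/retry pair effectively reduces to plain loads.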
6863 */ 6864 void 6865 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, 6866 struct ice_q_stats stats, u64 *pkts, u64 *bytes) 6867 { 6868 unsigned int start; 6869 6870 do { 6871 start = u64_stats_fetch_begin(syncp); 6872 *pkts = stats.pkts; 6873 *bytes = stats.bytes; 6874 } while (u64_stats_fetch_retry(syncp, start)); 6875 } 6876 6877 /** 6878 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters 6879 * @vsi: the VSI to be updated 6880 * @vsi_stats: the stats struct to be updated 6881 * @rings: rings to work on 6882 * @count: number of rings 6883 */ 6884 static void 6885 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, 6886 struct rtnl_link_stats64 *vsi_stats, 6887 struct ice_tx_ring **rings, u16 count) 6888 { 6889 u16 i; 6890 6891 for (i = 0; i < count; i++) { 6892 struct ice_tx_ring *ring; 6893 u64 pkts = 0, bytes = 0; 6894 6895 ring = READ_ONCE(rings[i]); 6896 if (!ring || !ring->ring_stats) 6897 continue; 6898 ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp, 6899 ring->ring_stats->stats, &pkts, 6900 &bytes); 6901 vsi_stats->tx_packets += pkts; 6902 vsi_stats->tx_bytes += bytes; 6903 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q; 6904 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy; 6905 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize; 6906 } 6907 } 6908 6909 /** 6910 * ice_update_vsi_ring_stats - Update VSI stats counters 6911 * @vsi: the VSI to be updated 6912 */ 6913 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) 6914 { 6915 struct rtnl_link_stats64 *net_stats, *stats_prev; 6916 struct rtnl_link_stats64 *vsi_stats; 6917 struct ice_pf *pf = vsi->back; 6918 u64 pkts, bytes; 6919 int i; 6920 6921 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC); 6922 if (!vsi_stats) 6923 return; 6924 6925 /* reset non-netdev (extended) stats */ 6926 vsi->tx_restart = 0; 6927 vsi->tx_busy = 0; 6928 vsi->tx_linearize = 0; 6929 vsi->rx_buf_failed = 0; 6930 vsi->rx_page_failed = 0; 6931 6932 rcu_read_lock(); 6933 6934 /* update Tx rings counters */ 6935 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, 6936 vsi->num_txq); 6937 6938 /* update Rx rings counters */ 6939 ice_for_each_rxq(vsi, i) { 6940 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]); 6941 struct ice_ring_stats *ring_stats; 6942 6943 ring_stats = ring->ring_stats; 6944 ice_fetch_u64_stats_per_ring(&ring_stats->syncp, 6945 ring_stats->stats, &pkts, 6946 &bytes); 6947 vsi_stats->rx_packets += pkts; 6948 vsi_stats->rx_bytes += bytes; 6949 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed; 6950 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed; 6951 } 6952 6953 /* update XDP Tx rings counters */ 6954 if (ice_is_xdp_ena_vsi(vsi)) 6955 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, 6956 vsi->num_xdp_txq); 6957 6958 rcu_read_unlock(); 6959 6960 net_stats = &vsi->net_stats; 6961 stats_prev = &vsi->net_stats_prev; 6962 6963 /* Update netdev counters, but keep in mind that values could start at 6964 * random value after PF reset. And as we increase the reported stat by 6965 * diff of Cur - Prev, we need to be sure that Prev is valid. If it's not, 6966 * let's skip this round.
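 * For example: if a ring reports Cur = 1000 and the stored Prev = 900, the
 * netdev counter advances by 100. Right after a PF reset Prev is stale, so
 * the increment is skipped for one round while Prev is resynchronized.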
6967 */ 6968 if (likely(pf->stat_prev_loaded)) { 6969 net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets; 6970 net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes; 6971 net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets; 6972 net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes; 6973 } 6974 6975 stats_prev->tx_packets = vsi_stats->tx_packets; 6976 stats_prev->tx_bytes = vsi_stats->tx_bytes; 6977 stats_prev->rx_packets = vsi_stats->rx_packets; 6978 stats_prev->rx_bytes = vsi_stats->rx_bytes; 6979 6980 kfree(vsi_stats); 6981 } 6982 6983 /** 6984 * ice_update_vsi_stats - Update VSI stats counters 6985 * @vsi: the VSI to be updated 6986 */ 6987 void ice_update_vsi_stats(struct ice_vsi *vsi) 6988 { 6989 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; 6990 struct ice_eth_stats *cur_es = &vsi->eth_stats; 6991 struct ice_pf *pf = vsi->back; 6992 6993 if (test_bit(ICE_VSI_DOWN, vsi->state) || 6994 test_bit(ICE_CFG_BUSY, pf->state)) 6995 return; 6996 6997 /* get stats as recorded by Tx/Rx rings */ 6998 ice_update_vsi_ring_stats(vsi); 6999 7000 /* get VSI stats as recorded by the hardware */ 7001 ice_update_eth_stats(vsi); 7002 7003 cur_ns->tx_errors = cur_es->tx_errors; 7004 cur_ns->rx_dropped = cur_es->rx_discards; 7005 cur_ns->tx_dropped = cur_es->tx_discards; 7006 cur_ns->multicast = cur_es->rx_multicast; 7007 7008 /* update some more netdev stats if this is main VSI */ 7009 if (vsi->type == ICE_VSI_PF) { 7010 cur_ns->rx_crc_errors = pf->stats.crc_errors; 7011 cur_ns->rx_errors = pf->stats.crc_errors + 7012 pf->stats.illegal_bytes + 7013 pf->stats.rx_undersize + 7014 pf->hw_csum_rx_error + 7015 pf->stats.rx_jabber + 7016 pf->stats.rx_fragments + 7017 pf->stats.rx_oversize; 7018 /* record drops from the port level */ 7019 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; 7020 } 7021 } 7022 7023 /** 7024 * ice_update_pf_stats - Update PF port stats counters 7025 * @pf: PF whose stats needs to be updated 7026 */ 7027 void ice_update_pf_stats(struct ice_pf *pf) 7028 { 7029 struct ice_hw_port_stats *prev_ps, *cur_ps; 7030 struct ice_hw *hw = &pf->hw; 7031 u16 fd_ctr_base; 7032 u8 port; 7033 7034 port = hw->port_info->lport; 7035 prev_ps = &pf->stats_prev; 7036 cur_ps = &pf->stats; 7037 7038 if (ice_is_reset_in_progress(pf->state)) 7039 pf->stat_prev_loaded = false; 7040 7041 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, 7042 &prev_ps->eth.rx_bytes, 7043 &cur_ps->eth.rx_bytes); 7044 7045 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, 7046 &prev_ps->eth.rx_unicast, 7047 &cur_ps->eth.rx_unicast); 7048 7049 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, 7050 &prev_ps->eth.rx_multicast, 7051 &cur_ps->eth.rx_multicast); 7052 7053 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, 7054 &prev_ps->eth.rx_broadcast, 7055 &cur_ps->eth.rx_broadcast); 7056 7057 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, 7058 &prev_ps->eth.rx_discards, 7059 &cur_ps->eth.rx_discards); 7060 7061 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, 7062 &prev_ps->eth.tx_bytes, 7063 &cur_ps->eth.tx_bytes); 7064 7065 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, 7066 &prev_ps->eth.tx_unicast, 7067 &cur_ps->eth.tx_unicast); 7068 7069 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, 7070 &prev_ps->eth.tx_multicast, 7071 &cur_ps->eth.tx_multicast); 7072 7073 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, 7074 
&prev_ps->eth.tx_broadcast, 7075 &cur_ps->eth.tx_broadcast); 7076 7077 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, 7078 &prev_ps->tx_dropped_link_down, 7079 &cur_ps->tx_dropped_link_down); 7080 7081 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, 7082 &prev_ps->rx_size_64, &cur_ps->rx_size_64); 7083 7084 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, 7085 &prev_ps->rx_size_127, &cur_ps->rx_size_127); 7086 7087 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, 7088 &prev_ps->rx_size_255, &cur_ps->rx_size_255); 7089 7090 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, 7091 &prev_ps->rx_size_511, &cur_ps->rx_size_511); 7092 7093 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, 7094 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); 7095 7096 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, 7097 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); 7098 7099 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, 7100 &prev_ps->rx_size_big, &cur_ps->rx_size_big); 7101 7102 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, 7103 &prev_ps->tx_size_64, &cur_ps->tx_size_64); 7104 7105 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, 7106 &prev_ps->tx_size_127, &cur_ps->tx_size_127); 7107 7108 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, 7109 &prev_ps->tx_size_255, &cur_ps->tx_size_255); 7110 7111 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, 7112 &prev_ps->tx_size_511, &cur_ps->tx_size_511); 7113 7114 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, 7115 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); 7116 7117 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, 7118 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); 7119 7120 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, 7121 &prev_ps->tx_size_big, &cur_ps->tx_size_big); 7122 7123 fd_ctr_base = hw->fd_ctr_base; 7124 7125 ice_stat_update40(hw, 7126 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), 7127 pf->stat_prev_loaded, &prev_ps->fd_sb_match, 7128 &cur_ps->fd_sb_match); 7129 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, 7130 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); 7131 7132 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, 7133 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); 7134 7135 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, 7136 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); 7137 7138 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, 7139 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); 7140 7141 ice_update_dcb_stats(pf); 7142 7143 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, 7144 &prev_ps->crc_errors, &cur_ps->crc_errors); 7145 7146 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, 7147 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); 7148 7149 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, 7150 &prev_ps->mac_local_faults, 7151 &cur_ps->mac_local_faults); 7152 7153 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, 7154 &prev_ps->mac_remote_faults, 7155 &cur_ps->mac_remote_faults); 7156 7157 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, 7158 &prev_ps->rx_undersize, &cur_ps->rx_undersize); 7159 7160 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, 7161 &prev_ps->rx_fragments, &cur_ps->rx_fragments); 7162 7163 ice_stat_update32(hw, 
GLPRT_ROC(port), pf->stat_prev_loaded, 7164 &prev_ps->rx_oversize, &cur_ps->rx_oversize); 7165 7166 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, 7167 &prev_ps->rx_jabber, &cur_ps->rx_jabber); 7168 7169 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; 7170 7171 pf->stat_prev_loaded = true; 7172 } 7173 7174 /** 7175 * ice_get_stats64 - get statistics for network device structure 7176 * @netdev: network interface device structure 7177 * @stats: main device statistics structure 7178 */ 7179 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) 7180 { 7181 struct ice_netdev_priv *np = netdev_priv(netdev); 7182 struct rtnl_link_stats64 *vsi_stats; 7183 struct ice_vsi *vsi = np->vsi; 7184 7185 vsi_stats = &vsi->net_stats; 7186 7187 if (!vsi->num_txq || !vsi->num_rxq) 7188 return; 7189 7190 /* netdev packet/byte stats come from ring counters. These are obtained 7191 * by summing up ring counters (done by ice_update_vsi_ring_stats). 7192 * But, only call the update routine and read the registers if VSI is 7193 * not down. 7194 */ 7195 if (!test_bit(ICE_VSI_DOWN, vsi->state)) 7196 ice_update_vsi_ring_stats(vsi); 7197 stats->tx_packets = vsi_stats->tx_packets; 7198 stats->tx_bytes = vsi_stats->tx_bytes; 7199 stats->rx_packets = vsi_stats->rx_packets; 7200 stats->rx_bytes = vsi_stats->rx_bytes; 7201 7202 /* The rest of the stats can be read from the hardware but instead we 7203 * just return values that the watchdog task has already obtained from 7204 * the hardware. 7205 */ 7206 stats->multicast = vsi_stats->multicast; 7207 stats->tx_errors = vsi_stats->tx_errors; 7208 stats->tx_dropped = vsi_stats->tx_dropped; 7209 stats->rx_errors = vsi_stats->rx_errors; 7210 stats->rx_dropped = vsi_stats->rx_dropped; 7211 stats->rx_crc_errors = vsi_stats->rx_crc_errors; 7212 stats->rx_length_errors = vsi_stats->rx_length_errors; 7213 } 7214 7215 /** 7216 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI 7217 * @vsi: VSI having NAPI disabled 7218 */ 7219 static void ice_napi_disable_all(struct ice_vsi *vsi) 7220 { 7221 int q_idx; 7222 7223 if (!vsi->netdev) 7224 return; 7225 7226 ice_for_each_q_vector(vsi, q_idx) { 7227 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 7228 7229 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) 7230 napi_disable(&q_vector->napi); 7231 7232 cancel_work_sync(&q_vector->tx.dim.work); 7233 cancel_work_sync(&q_vector->rx.dim.work); 7234 } 7235 } 7236 7237 /** 7238 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI 7239 * @vsi: the VSI being un-configured 7240 */ 7241 static void ice_vsi_dis_irq(struct ice_vsi *vsi) 7242 { 7243 struct ice_pf *pf = vsi->back; 7244 struct ice_hw *hw = &pf->hw; 7245 u32 val; 7246 int i; 7247 7248 /* disable interrupt causation from each Rx queue; Tx queues are 7249 * handled in ice_vsi_stop_tx_ring() 7250 */ 7251 if (vsi->rx_rings) { 7252 ice_for_each_rxq(vsi, i) { 7253 if (vsi->rx_rings[i]) { 7254 u16 reg; 7255 7256 reg = vsi->rx_rings[i]->reg_idx; 7257 val = rd32(hw, QINT_RQCTL(reg)); 7258 val &= ~QINT_RQCTL_CAUSE_ENA_M; 7259 wr32(hw, QINT_RQCTL(reg), val); 7260 } 7261 } 7262 } 7263 7264 /* disable each interrupt */ 7265 ice_for_each_q_vector(vsi, i) { 7266 if (!vsi->q_vectors[i]) 7267 continue; 7268 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); 7269 } 7270 7271 ice_flush(hw); 7272 7273 /* don't call synchronize_irq() for VFs from the host */ 7274 if (vsi->type == ICE_VSI_VF) 7275 return; 7276 7277 ice_for_each_q_vector(vsi, i) 7278
synchronize_irq(vsi->q_vectors[i]->irq.virq); 7279 } 7280 7281 /** 7282 * ice_down - Shutdown the connection 7283 * @vsi: The VSI being stopped 7284 * 7285 * Caller of this function is expected to set the vsi->state ICE_DOWN bit 7286 */ 7287 int ice_down(struct ice_vsi *vsi) 7288 { 7289 int i, tx_err, rx_err, vlan_err = 0; 7290 7291 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); 7292 7293 if (vsi->netdev) { 7294 vlan_err = ice_vsi_del_vlan_zero(vsi); 7295 ice_ptp_link_change(vsi->back, false); 7296 netif_carrier_off(vsi->netdev); 7297 netif_tx_disable(vsi->netdev); 7298 } 7299 7300 ice_vsi_dis_irq(vsi); 7301 7302 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); 7303 if (tx_err) 7304 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", 7305 vsi->vsi_num, tx_err); 7306 if (!tx_err && vsi->xdp_rings) { 7307 tx_err = ice_vsi_stop_xdp_tx_rings(vsi); 7308 if (tx_err) 7309 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", 7310 vsi->vsi_num, tx_err); 7311 } 7312 7313 rx_err = ice_vsi_stop_all_rx_rings(vsi); 7314 if (rx_err) 7315 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n", 7316 vsi->vsi_num, rx_err); 7317 7318 ice_napi_disable_all(vsi); 7319 7320 ice_for_each_txq(vsi, i) 7321 ice_clean_tx_ring(vsi->tx_rings[i]); 7322 7323 if (vsi->xdp_rings) 7324 ice_for_each_xdp_txq(vsi, i) 7325 ice_clean_tx_ring(vsi->xdp_rings[i]); 7326 7327 ice_for_each_rxq(vsi, i) 7328 ice_clean_rx_ring(vsi->rx_rings[i]); 7329 7330 if (tx_err || rx_err || vlan_err) { 7331 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", 7332 vsi->vsi_num, vsi->vsw->sw_id); 7333 return -EIO; 7334 } 7335 7336 return 0; 7337 } 7338 7339 /** 7340 * ice_down_up - shutdown the VSI connection and bring it up 7341 * @vsi: the VSI to be reconnected 7342 */ 7343 int ice_down_up(struct ice_vsi *vsi) 7344 { 7345 int ret; 7346 7347 /* if DOWN already set, nothing to do */ 7348 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state)) 7349 return 0; 7350 7351 ret = ice_down(vsi); 7352 if (ret) 7353 return ret; 7354 7355 ret = ice_up(vsi); 7356 if (ret) { 7357 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n"); 7358 return ret; 7359 } 7360 7361 return 0; 7362 } 7363 7364 /** 7365 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources 7366 * @vsi: VSI having resources allocated 7367 * 7368 * Return 0 on success, negative on failure 7369 */ 7370 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) 7371 { 7372 int i, err = 0; 7373 7374 if (!vsi->num_txq) { 7375 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", 7376 vsi->vsi_num); 7377 return -EINVAL; 7378 } 7379 7380 ice_for_each_txq(vsi, i) { 7381 struct ice_tx_ring *ring = vsi->tx_rings[i]; 7382 7383 if (!ring) 7384 return -EINVAL; 7385 7386 if (vsi->netdev) 7387 ring->netdev = vsi->netdev; 7388 err = ice_setup_tx_ring(ring); 7389 if (err) 7390 break; 7391 } 7392 7393 return err; 7394 } 7395 7396 /** 7397 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources 7398 * @vsi: VSI having resources allocated 7399 * 7400 * Return 0 on success, negative on failure 7401 */ 7402 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) 7403 { 7404 int i, err = 0; 7405 7406 if (!vsi->num_rxq) { 7407 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", 7408 vsi->vsi_num); 7409 return -EINVAL; 7410 } 7411 7412 ice_for_each_rxq(vsi, i) { 7413 struct ice_rx_ring *ring = vsi->rx_rings[i]; 7414 7415 if (!ring) 7416 return -EINVAL; 7417 7418 if (vsi->netdev) 7419 
ring->netdev = vsi->netdev; 7420 err = ice_setup_rx_ring(ring); 7421 if (err) 7422 break; 7423 } 7424 7425 return err; 7426 } 7427 7428 /** 7429 * ice_vsi_open_ctrl - open control VSI for use 7430 * @vsi: the VSI to open 7431 * 7432 * Initialization of the Control VSI 7433 * 7434 * Returns 0 on success, negative value on error 7435 */ 7436 int ice_vsi_open_ctrl(struct ice_vsi *vsi) 7437 { 7438 char int_name[ICE_INT_NAME_STR_LEN]; 7439 struct ice_pf *pf = vsi->back; 7440 struct device *dev; 7441 int err; 7442 7443 dev = ice_pf_to_dev(pf); 7444 /* allocate descriptors */ 7445 err = ice_vsi_setup_tx_rings(vsi); 7446 if (err) 7447 goto err_setup_tx; 7448 7449 err = ice_vsi_setup_rx_rings(vsi); 7450 if (err) 7451 goto err_setup_rx; 7452 7453 err = ice_vsi_cfg_lan(vsi); 7454 if (err) 7455 goto err_setup_rx; 7456 7457 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", 7458 dev_driver_string(dev), dev_name(dev)); 7459 err = ice_vsi_req_irq_msix(vsi, int_name); 7460 if (err) 7461 goto err_setup_rx; 7462 7463 ice_vsi_cfg_msix(vsi); 7464 7465 err = ice_vsi_start_all_rx_rings(vsi); 7466 if (err) 7467 goto err_up_complete; 7468 7469 clear_bit(ICE_VSI_DOWN, vsi->state); 7470 ice_vsi_ena_irq(vsi); 7471 7472 return 0; 7473 7474 err_up_complete: 7475 ice_down(vsi); 7476 err_setup_rx: 7477 ice_vsi_free_rx_rings(vsi); 7478 err_setup_tx: 7479 ice_vsi_free_tx_rings(vsi); 7480 7481 return err; 7482 } 7483 7484 /** 7485 * ice_vsi_open - Called when a network interface is made active 7486 * @vsi: the VSI to open 7487 * 7488 * Initialization of the VSI 7489 * 7490 * Returns 0 on success, negative value on error 7491 */ 7492 int ice_vsi_open(struct ice_vsi *vsi) 7493 { 7494 char int_name[ICE_INT_NAME_STR_LEN]; 7495 struct ice_pf *pf = vsi->back; 7496 int err; 7497 7498 /* allocate descriptors */ 7499 err = ice_vsi_setup_tx_rings(vsi); 7500 if (err) 7501 goto err_setup_tx; 7502 7503 err = ice_vsi_setup_rx_rings(vsi); 7504 if (err) 7505 goto err_setup_rx; 7506 7507 err = ice_vsi_cfg_lan(vsi); 7508 if (err) 7509 goto err_setup_rx; 7510 7511 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 7512 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); 7513 err = ice_vsi_req_irq_msix(vsi, int_name); 7514 if (err) 7515 goto err_setup_rx; 7516 7517 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); 7518 7519 if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) { 7520 /* Notify the stack of the actual queue counts. 
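 * The usable queue counts may be smaller than what was allocated, e.g.
 * after a queue-count reconfiguration, so tell the stack how many Tx/Rx
 * queues are actually in service before traffic starts.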
*/ 7521 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); 7522 if (err) 7523 goto err_set_qs; 7524 7525 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); 7526 if (err) 7527 goto err_set_qs; 7528 7529 ice_vsi_set_napi_queues(vsi); 7530 } 7531 7532 err = ice_up_complete(vsi); 7533 if (err) 7534 goto err_up_complete; 7535 7536 return 0; 7537 7538 err_up_complete: 7539 ice_down(vsi); 7540 err_set_qs: 7541 ice_vsi_free_irq(vsi); 7542 err_setup_rx: 7543 ice_vsi_free_rx_rings(vsi); 7544 err_setup_tx: 7545 ice_vsi_free_tx_rings(vsi); 7546 7547 return err; 7548 } 7549 7550 /** 7551 * ice_vsi_release_all - Delete all VSIs 7552 * @pf: PF from which all VSIs are being removed 7553 */ 7554 static void ice_vsi_release_all(struct ice_pf *pf) 7555 { 7556 int err, i; 7557 7558 if (!pf->vsi) 7559 return; 7560 7561 ice_for_each_vsi(pf, i) { 7562 if (!pf->vsi[i]) 7563 continue; 7564 7565 if (pf->vsi[i]->type == ICE_VSI_CHNL) 7566 continue; 7567 7568 err = ice_vsi_release(pf->vsi[i]); 7569 if (err) 7570 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", 7571 i, err, pf->vsi[i]->vsi_num); 7572 } 7573 } 7574 7575 /** 7576 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type 7577 * @pf: pointer to the PF instance 7578 * @type: VSI type to rebuild 7579 * 7580 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type 7581 */ 7582 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) 7583 { 7584 struct device *dev = ice_pf_to_dev(pf); 7585 int i, err; 7586 7587 ice_for_each_vsi(pf, i) { 7588 struct ice_vsi *vsi = pf->vsi[i]; 7589 7590 if (!vsi || vsi->type != type) 7591 continue; 7592 7593 /* rebuild the VSI */ 7594 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT); 7595 if (err) { 7596 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", 7597 err, vsi->idx, ice_vsi_type_str(type)); 7598 return err; 7599 } 7600 7601 /* replay filters for the VSI */ 7602 err = ice_replay_vsi(&pf->hw, vsi->idx); 7603 if (err) { 7604 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n", 7605 err, vsi->idx, ice_vsi_type_str(type)); 7606 return err; 7607 } 7608 7609 /* Re-map HW VSI number, using VSI handle that has been 7610 * previously validated in ice_replay_vsi() call above 7611 */ 7612 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); 7613 7614 /* enable the VSI */ 7615 err = ice_ena_vsi(vsi, false); 7616 if (err) { 7617 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n", 7618 err, vsi->idx, ice_vsi_type_str(type)); 7619 return err; 7620 } 7621 7622 dev_info(dev, "VSI rebuilt. 
VSI index %d, type %s\n", vsi->idx, 7623 ice_vsi_type_str(type)); 7624 } 7625 7626 return 0; 7627 } 7628 7629 /** 7630 * ice_update_pf_netdev_link - Update PF netdev link status 7631 * @pf: pointer to the PF instance 7632 */ 7633 static void ice_update_pf_netdev_link(struct ice_pf *pf) 7634 { 7635 bool link_up; 7636 int i; 7637 7638 ice_for_each_vsi(pf, i) { 7639 struct ice_vsi *vsi = pf->vsi[i]; 7640 7641 if (!vsi || vsi->type != ICE_VSI_PF) 7642 return; 7643 7644 ice_get_link_status(pf->vsi[i]->port_info, &link_up); 7645 if (link_up) { 7646 netif_carrier_on(pf->vsi[i]->netdev); 7647 netif_tx_wake_all_queues(pf->vsi[i]->netdev); 7648 } else { 7649 netif_carrier_off(pf->vsi[i]->netdev); 7650 netif_tx_stop_all_queues(pf->vsi[i]->netdev); 7651 } 7652 } 7653 } 7654 7655 /** 7656 * ice_rebuild - rebuild after reset 7657 * @pf: PF to rebuild 7658 * @reset_type: type of reset 7659 * 7660 * Do not rebuild VF VSIs in this flow because that is already handled via 7661 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a 7662 * PFR/CORER/GLOBR/etc. are different than the normal flow. Also, we don't want 7663 * to reset/rebuild all the VF VSIs twice. 7664 */ 7665 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) 7666 { 7667 struct ice_vsi *vsi = ice_get_main_vsi(pf); 7668 struct device *dev = ice_pf_to_dev(pf); 7669 struct ice_hw *hw = &pf->hw; 7670 bool dvm; 7671 int err; 7672 7673 if (test_bit(ICE_DOWN, pf->state)) 7674 goto clear_recovery; 7675 7676 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); 7677 7678 #define ICE_EMP_RESET_SLEEP_MS 5000 7679 if (reset_type == ICE_RESET_EMPR) { 7680 /* If an EMP reset has occurred, any previously pending flash 7681 * update will have completed. We no longer know whether or 7682 * not the NVM update EMP reset is restricted.
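 * Sleep below to give the firmware time to settle after the EMP
 * reset before the control queues are reinitialized.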
7683 */ 7684 pf->fw_emp_reset_disabled = false; 7685 7686 msleep(ICE_EMP_RESET_SLEEP_MS); 7687 } 7688 7689 err = ice_init_all_ctrlq(hw); 7690 if (err) { 7691 dev_err(dev, "control queues init failed %d\n", err); 7692 goto err_init_ctrlq; 7693 } 7694 7695 /* if DDP was previously loaded successfully */ 7696 if (!ice_is_safe_mode(pf)) { 7697 /* reload the SW DB of filter tables */ 7698 if (reset_type == ICE_RESET_PFR) 7699 ice_fill_blk_tbls(hw); 7700 else 7701 /* Reload DDP Package after CORER/GLOBR reset */ 7702 ice_load_pkg(NULL, pf); 7703 } 7704 7705 err = ice_clear_pf_cfg(hw); 7706 if (err) { 7707 dev_err(dev, "clear PF configuration failed %d\n", err); 7708 goto err_init_ctrlq; 7709 } 7710 7711 ice_clear_pxe_mode(hw); 7712 7713 err = ice_init_nvm(hw); 7714 if (err) { 7715 dev_err(dev, "ice_init_nvm failed %d\n", err); 7716 goto err_init_ctrlq; 7717 } 7718 7719 err = ice_get_caps(hw); 7720 if (err) { 7721 dev_err(dev, "ice_get_caps failed %d\n", err); 7722 goto err_init_ctrlq; 7723 } 7724 7725 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); 7726 if (err) { 7727 dev_err(dev, "set_mac_cfg failed %d\n", err); 7728 goto err_init_ctrlq; 7729 } 7730 7731 dvm = ice_is_dvm_ena(hw); 7732 7733 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); 7734 if (err) 7735 goto err_init_ctrlq; 7736 7737 err = ice_sched_init_port(hw->port_info); 7738 if (err) 7739 goto err_sched_init_port; 7740 7741 /* start misc vector */ 7742 err = ice_req_irq_msix_misc(pf); 7743 if (err) { 7744 dev_err(dev, "misc vector setup failed: %d\n", err); 7745 goto err_sched_init_port; 7746 } 7747 7748 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 7749 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); 7750 if (!rd32(hw, PFQF_FD_SIZE)) { 7751 u16 unused, guar, b_effort; 7752 7753 guar = hw->func_caps.fd_fltr_guar; 7754 b_effort = hw->func_caps.fd_fltr_best_effort; 7755 7756 /* force guaranteed filter pool for PF */ 7757 ice_alloc_fd_guar_item(hw, &unused, guar); 7758 /* force shared filter pool for PF */ 7759 ice_alloc_fd_shrd_item(hw, &unused, b_effort); 7760 } 7761 } 7762 7763 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) 7764 ice_dcb_rebuild(pf); 7765 7766 /* If the PF previously had enabled PTP, PTP init needs to happen before 7767 * the VSI rebuild. If not, this causes the PTP link status events to 7768 * fail. 
7769 */ 7770 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 7771 ice_ptp_rebuild(pf, reset_type); 7772 7773 if (ice_is_feature_supported(pf, ICE_F_GNSS)) 7774 ice_gnss_init(pf); 7775 7776 /* rebuild PF VSI */ 7777 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); 7778 if (err) { 7779 dev_err(dev, "PF VSI rebuild failed: %d\n", err); 7780 goto err_vsi_rebuild; 7781 } 7782 7783 if (reset_type == ICE_RESET_PFR) { 7784 err = ice_rebuild_channels(pf); 7785 if (err) { 7786 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n", 7787 err); 7788 goto err_vsi_rebuild; 7789 } 7790 } 7791 7792 /* If Flow Director is active */ 7793 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 7794 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); 7795 if (err) { 7796 dev_err(dev, "control VSI rebuild failed: %d\n", err); 7797 goto err_vsi_rebuild; 7798 } 7799 7800 /* replay HW Flow Director recipes */ 7801 if (hw->fdir_prof) 7802 ice_fdir_replay_flows(hw); 7803 7804 /* replay Flow Director filters */ 7805 ice_fdir_replay_fltrs(pf); 7806 7807 ice_rebuild_arfs(pf); 7808 } 7809 7810 if (vsi && vsi->netdev) 7811 netif_device_attach(vsi->netdev); 7812 7813 ice_update_pf_netdev_link(pf); 7814 7815 /* tell the firmware we are up */ 7816 err = ice_send_version(pf); 7817 if (err) { 7818 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n", 7819 err); 7820 goto err_vsi_rebuild; 7821 } 7822 7823 ice_replay_post(hw); 7824 7825 /* if we get here, reset flow is successful */ 7826 clear_bit(ICE_RESET_FAILED, pf->state); 7827 7828 ice_health_clear(pf); 7829 7830 ice_plug_aux_dev(pf); 7831 if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) 7832 ice_lag_rebuild(pf); 7833 7834 /* Restore timestamp mode settings after VSI rebuild */ 7835 ice_ptp_restore_timestamp_mode(pf); 7836 return; 7837 7838 err_vsi_rebuild: 7839 err_sched_init_port: 7840 ice_sched_cleanup_all(hw); 7841 err_init_ctrlq: 7842 ice_shutdown_all_ctrlq(hw, false); 7843 set_bit(ICE_RESET_FAILED, pf->state); 7844 clear_recovery: 7845 /* set this bit in PF state to control service task scheduling */ 7846 set_bit(ICE_NEEDS_RESTART, pf->state); 7847 dev_err(dev, "Rebuild failed, unload and reload driver\n"); 7848 } 7849 7850 /** 7851 * ice_change_mtu - NDO callback to change the MTU 7852 * @netdev: network interface device structure 7853 * @new_mtu: new value for maximum frame size 7854 * 7855 * Returns 0 on success, negative on failure 7856 */ 7857 int ice_change_mtu(struct net_device *netdev, int new_mtu) 7858 { 7859 struct ice_netdev_priv *np = netdev_priv(netdev); 7860 struct ice_vsi *vsi = np->vsi; 7861 struct ice_pf *pf = vsi->back; 7862 struct bpf_prog *prog; 7863 u8 count = 0; 7864 int err = 0; 7865 7866 if (new_mtu == (int)netdev->mtu) { 7867 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); 7868 return 0; 7869 } 7870 7871 prog = vsi->xdp_prog; 7872 if (prog && !prog->aux->xdp_has_frags) { 7873 int frame_size = ice_max_xdp_frame_size(vsi); 7874 7875 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { 7876 netdev_err(netdev, "max MTU for XDP usage is %d\n", 7877 frame_size - ICE_ETH_PKT_HDR_PAD); 7878 return -EINVAL; 7879 } 7880 } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) { 7881 if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) { 7882 netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n", 7883 ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD); 7884 return -EINVAL; 7885 } 7886 } 7887 7888 /* if a reset is in progress, wait for some time for it to complete */ 7889 do { 7890 if (ice_is_reset_in_progress(pf->state)) { 7891 
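/* poll every 1-2 ms; with the 100-iteration bound below this waits at most ~200 ms */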
count++; 7892 usleep_range(1000, 2000); 7893 } else { 7894 break; 7895 } 7896 7897 } while (count < 100); 7898 7899 if (count == 100) { 7900 netdev_err(netdev, "can't change MTU. Device is busy\n"); 7901 return -EBUSY; 7902 } 7903 7904 WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu); 7905 err = ice_down_up(vsi); 7906 if (err) 7907 return err; 7908 7909 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); 7910 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); 7911 7912 return err; 7913 } 7914 7915 /** 7916 * ice_eth_ioctl - Access the hwtstamp interface 7917 * @netdev: network interface device structure 7918 * @ifr: interface request data 7919 * @cmd: ioctl command 7920 */ 7921 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 7922 { 7923 struct ice_netdev_priv *np = netdev_priv(netdev); 7924 struct ice_pf *pf = np->vsi->back; 7925 7926 switch (cmd) { 7927 case SIOCGHWTSTAMP: 7928 return ice_ptp_get_ts_config(pf, ifr); 7929 case SIOCSHWTSTAMP: 7930 return ice_ptp_set_ts_config(pf, ifr); 7931 default: 7932 return -EOPNOTSUPP; 7933 } 7934 } 7935 7936 /** 7937 * ice_aq_str - convert AQ err code to a string 7938 * @aq_err: the AQ error code to convert 7939 */ 7940 const char *ice_aq_str(enum ice_aq_err aq_err) 7941 { 7942 switch (aq_err) { 7943 case ICE_AQ_RC_OK: 7944 return "OK"; 7945 case ICE_AQ_RC_EPERM: 7946 return "ICE_AQ_RC_EPERM"; 7947 case ICE_AQ_RC_ENOENT: 7948 return "ICE_AQ_RC_ENOENT"; 7949 case ICE_AQ_RC_ENOMEM: 7950 return "ICE_AQ_RC_ENOMEM"; 7951 case ICE_AQ_RC_EBUSY: 7952 return "ICE_AQ_RC_EBUSY"; 7953 case ICE_AQ_RC_EEXIST: 7954 return "ICE_AQ_RC_EEXIST"; 7955 case ICE_AQ_RC_EINVAL: 7956 return "ICE_AQ_RC_EINVAL"; 7957 case ICE_AQ_RC_ENOSPC: 7958 return "ICE_AQ_RC_ENOSPC"; 7959 case ICE_AQ_RC_ENOSYS: 7960 return "ICE_AQ_RC_ENOSYS"; 7961 case ICE_AQ_RC_EMODE: 7962 return "ICE_AQ_RC_EMODE"; 7963 case ICE_AQ_RC_ENOSEC: 7964 return "ICE_AQ_RC_ENOSEC"; 7965 case ICE_AQ_RC_EBADSIG: 7966 return "ICE_AQ_RC_EBADSIG"; 7967 case ICE_AQ_RC_ESVN: 7968 return "ICE_AQ_RC_ESVN"; 7969 case ICE_AQ_RC_EBADMAN: 7970 return "ICE_AQ_RC_EBADMAN"; 7971 case ICE_AQ_RC_EBADBUF: 7972 return "ICE_AQ_RC_EBADBUF"; 7973 } 7974 7975 return "ICE_AQ_RC_UNKNOWN"; 7976 } 7977 7978 /** 7979 * ice_set_rss_lut - Set RSS LUT 7980 * @vsi: Pointer to VSI structure 7981 * @lut: Lookup table 7982 * @lut_size: Lookup table size 7983 * 7984 * Returns 0 on success, negative on failure 7985 */ 7986 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) 7987 { 7988 struct ice_aq_get_set_rss_lut_params params = {}; 7989 struct ice_hw *hw = &vsi->back->hw; 7990 int status; 7991 7992 if (!lut) 7993 return -EINVAL; 7994 7995 params.vsi_handle = vsi->idx; 7996 params.lut_size = lut_size; 7997 params.lut_type = vsi->rss_lut_type; 7998 params.lut = lut; 7999 8000 status = ice_aq_set_rss_lut(hw, &params); 8001 if (status) 8002 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n", 8003 status, ice_aq_str(hw->adminq.sq_last_status)); 8004 8005 return status; 8006 } 8007 8008 /** 8009 * ice_set_rss_key - Set RSS key 8010 * @vsi: Pointer to the VSI structure 8011 * @seed: RSS hash seed 8012 * 8013 * Returns 0 on success, negative on failure 8014 */ 8015 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) 8016 { 8017 struct ice_hw *hw = &vsi->back->hw; 8018 int status; 8019 8020 if (!seed) 8021 return -EINVAL; 8022 8023 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); 8024 if (status) 8025 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err
%s\n", 8026 status, ice_aq_str(hw->adminq.sq_last_status)); 8027 8028 return status; 8029 } 8030 8031 /** 8032 * ice_get_rss_lut - Get RSS LUT 8033 * @vsi: Pointer to VSI structure 8034 * @lut: Buffer to store the lookup table entries 8035 * @lut_size: Size of buffer to store the lookup table entries 8036 * 8037 * Returns 0 on success, negative on failure 8038 */ 8039 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) 8040 { 8041 struct ice_aq_get_set_rss_lut_params params = {}; 8042 struct ice_hw *hw = &vsi->back->hw; 8043 int status; 8044 8045 if (!lut) 8046 return -EINVAL; 8047 8048 params.vsi_handle = vsi->idx; 8049 params.lut_size = lut_size; 8050 params.lut_type = vsi->rss_lut_type; 8051 params.lut = lut; 8052 8053 status = ice_aq_get_rss_lut(hw, ¶ms); 8054 if (status) 8055 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n", 8056 status, ice_aq_str(hw->adminq.sq_last_status)); 8057 8058 return status; 8059 } 8060 8061 /** 8062 * ice_get_rss_key - Get RSS key 8063 * @vsi: Pointer to VSI structure 8064 * @seed: Buffer to store the key in 8065 * 8066 * Returns 0 on success, negative on failure 8067 */ 8068 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) 8069 { 8070 struct ice_hw *hw = &vsi->back->hw; 8071 int status; 8072 8073 if (!seed) 8074 return -EINVAL; 8075 8076 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); 8077 if (status) 8078 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n", 8079 status, ice_aq_str(hw->adminq.sq_last_status)); 8080 8081 return status; 8082 } 8083 8084 /** 8085 * ice_set_rss_hfunc - Set RSS HASH function 8086 * @vsi: Pointer to VSI structure 8087 * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*) 8088 * 8089 * Returns 0 on success, negative on failure 8090 */ 8091 int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc) 8092 { 8093 struct ice_hw *hw = &vsi->back->hw; 8094 struct ice_vsi_ctx *ctx; 8095 bool symm; 8096 int err; 8097 8098 if (hfunc == vsi->rss_hfunc) 8099 return 0; 8100 8101 if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ && 8102 hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ) 8103 return -EOPNOTSUPP; 8104 8105 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 8106 if (!ctx) 8107 return -ENOMEM; 8108 8109 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); 8110 ctx->info.q_opt_rss = vsi->info.q_opt_rss; 8111 ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M; 8112 ctx->info.q_opt_rss |= 8113 FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc); 8114 ctx->info.q_opt_tc = vsi->info.q_opt_tc; 8115 ctx->info.q_opt_flags = vsi->info.q_opt_rss; 8116 8117 err = ice_update_vsi(hw, vsi->idx, ctx, NULL); 8118 if (err) { 8119 dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n", 8120 vsi->vsi_num, err); 8121 } else { 8122 vsi->info.q_opt_rss = ctx->info.q_opt_rss; 8123 vsi->rss_hfunc = hfunc; 8124 netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n", 8125 hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ? 
8126 "Symmetric " : ""); 8127 } 8128 kfree(ctx); 8129 if (err) 8130 return err; 8131 8132 /* Fix the symmetry setting for all existing RSS configurations */ 8133 symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ); 8134 return ice_set_rss_cfg_symm(hw, vsi, symm); 8135 } 8136 8137 /** 8138 * ice_bridge_getlink - Get the hardware bridge mode 8139 * @skb: skb buff 8140 * @pid: process ID 8141 * @seq: RTNL message seq 8142 * @dev: the netdev being configured 8143 * @filter_mask: filter mask passed in 8144 * @nlflags: netlink flags passed in 8145 * 8146 * Return the bridge mode (VEB/VEPA) 8147 */ 8148 static int 8149 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 8150 struct net_device *dev, u32 filter_mask, int nlflags) 8151 { 8152 struct ice_netdev_priv *np = netdev_priv(dev); 8153 struct ice_vsi *vsi = np->vsi; 8154 struct ice_pf *pf = vsi->back; 8155 u16 bmode; 8156 8157 bmode = pf->first_sw->bridge_mode; 8158 8159 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, 8160 filter_mask, NULL); 8161 } 8162 8163 /** 8164 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) 8165 * @vsi: Pointer to VSI structure 8166 * @bmode: Hardware bridge mode (VEB/VEPA) 8167 * 8168 * Returns 0 on success, negative on failure 8169 */ 8170 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) 8171 { 8172 struct ice_aqc_vsi_props *vsi_props; 8173 struct ice_hw *hw = &vsi->back->hw; 8174 struct ice_vsi_ctx *ctxt; 8175 int ret; 8176 8177 vsi_props = &vsi->info; 8178 8179 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 8180 if (!ctxt) 8181 return -ENOMEM; 8182 8183 ctxt->info = vsi->info; 8184 8185 if (bmode == BRIDGE_MODE_VEB) 8186 /* change from VEPA to VEB mode */ 8187 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 8188 else 8189 /* change from VEB to VEPA mode */ 8190 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 8191 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); 8192 8193 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 8194 if (ret) { 8195 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n", 8196 bmode, ret, ice_aq_str(hw->adminq.sq_last_status)); 8197 goto out; 8198 } 8199 /* Update sw flags for book keeping */ 8200 vsi_props->sw_flags = ctxt->info.sw_flags; 8201 8202 out: 8203 kfree(ctxt); 8204 return ret; 8205 } 8206 8207 /** 8208 * ice_bridge_setlink - Set the hardware bridge mode 8209 * @dev: the netdev being configured 8210 * @nlh: RTNL message 8211 * @flags: bridge setlink flags 8212 * @extack: netlink extended ack 8213 * 8214 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is 8215 * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if 8216 * not already set for all VSIs connected to this switch. And also update the 8217 * unicast switch filter rules for the corresponding switch of the netdev. 
8218 */ 8219 static int 8220 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 8221 u16 __always_unused flags, 8222 struct netlink_ext_ack __always_unused *extack) 8223 { 8224 struct ice_netdev_priv *np = netdev_priv(dev); 8225 struct ice_pf *pf = np->vsi->back; 8226 struct nlattr *attr, *br_spec; 8227 struct ice_hw *hw = &pf->hw; 8228 struct ice_sw *pf_sw; 8229 int rem, v, err = 0; 8230 8231 pf_sw = pf->first_sw; 8232 /* find the attribute in the netlink message */ 8233 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 8234 if (!br_spec) 8235 return -EINVAL; 8236 8237 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { 8238 __u16 mode = nla_get_u16(attr); 8239 8240 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) 8241 return -EINVAL; 8242 /* Continue if bridge mode is not being flipped */ 8243 if (mode == pf_sw->bridge_mode) 8244 continue; 8245 /* Iterate through the PF VSI list and update the loopback 8246 * mode of the VSI 8247 */ 8248 ice_for_each_vsi(pf, v) { 8249 if (!pf->vsi[v]) 8250 continue; 8251 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); 8252 if (err) 8253 return err; 8254 } 8255 8256 hw->evb_veb = (mode == BRIDGE_MODE_VEB); 8257 /* Update the unicast switch filter rules for the corresponding 8258 * switch of the netdev 8259 */ 8260 err = ice_update_sw_rule_bridge_mode(hw); 8261 if (err) { 8262 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n", 8263 mode, err, 8264 ice_aq_str(hw->adminq.sq_last_status)); 8265 /* revert hw->evb_veb */ 8266 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); 8267 return err; 8268 } 8269 8270 pf_sw->bridge_mode = mode; 8271 } 8272 8273 return 0; 8274 } 8275 8276 /** 8277 * ice_tx_timeout - Respond to a Tx Hang 8278 * @netdev: network interface device structure 8279 * @txqueue: Tx queue 8280 */ 8281 void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) 8282 { 8283 struct ice_netdev_priv *np = netdev_priv(netdev); 8284 struct ice_tx_ring *tx_ring = NULL; 8285 struct ice_vsi *vsi = np->vsi; 8286 struct ice_pf *pf = vsi->back; 8287 u32 i; 8288 8289 pf->tx_timeout_count++; 8290 8291 /* Check if PFC is enabled for the TC to which the queue belongs. 8292 * If yes, then the Tx timeout is not caused by a hung queue and 8293 * there is no need to reset and rebuild 8294 */ 8295 if (ice_is_pfc_causing_hung_q(pf, txqueue)) { 8296 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n", 8297 txqueue); 8298 return; 8299 } 8300 8301 /* now that we have an index, find the tx_ring struct */ 8302 ice_for_each_txq(vsi, i) 8303 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 8304 if (txqueue == vsi->tx_rings[i]->q_index) { 8305 tx_ring = vsi->tx_rings[i]; 8306 break; 8307 } 8308 8309 /* Reset recovery level if enough time has elapsed after last timeout. 8310 * Also ensure no new reset action happens before next timeout period.
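 * Recovery escalates one level per timeout: level 1 requests a PFR,
 * level 2 a CORER and level 3 a GLOBR (see the switch statement below).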
8311 */ 8312 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) 8313 pf->tx_timeout_recovery_level = 1; 8314 else if (time_before(jiffies, (pf->tx_timeout_last_recovery + 8315 netdev->watchdog_timeo))) 8316 return; 8317 8318 if (tx_ring) { 8319 struct ice_hw *hw = &pf->hw; 8320 u32 head, intr = 0; 8321 8322 head = FIELD_GET(QTX_COMM_HEAD_HEAD_M, 8323 rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue]))); 8324 /* Read interrupt register */ 8325 intr = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); 8326 8327 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", 8328 vsi->vsi_num, txqueue, tx_ring->next_to_clean, 8329 head, tx_ring->next_to_use, intr); 8330 8331 ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr); 8332 } 8333 8334 pf->tx_timeout_last_recovery = jiffies; 8335 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n", 8336 pf->tx_timeout_recovery_level, txqueue); 8337 8338 switch (pf->tx_timeout_recovery_level) { 8339 case 1: 8340 set_bit(ICE_PFR_REQ, pf->state); 8341 break; 8342 case 2: 8343 set_bit(ICE_CORER_REQ, pf->state); 8344 break; 8345 case 3: 8346 set_bit(ICE_GLOBR_REQ, pf->state); 8347 break; 8348 default: 8349 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); 8350 set_bit(ICE_DOWN, pf->state); 8351 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); 8352 set_bit(ICE_SERVICE_DIS, pf->state); 8353 break; 8354 } 8355 8356 ice_service_task_schedule(pf); 8357 pf->tx_timeout_recovery_level++; 8358 } 8359 8360 /** 8361 * ice_setup_tc_cls_flower - flower classifier offloads 8362 * @np: net device to configure 8363 * @filter_dev: device on which filter is added 8364 * @cls_flower: offload data 8365 */ 8366 static int 8367 ice_setup_tc_cls_flower(struct ice_netdev_priv *np, 8368 struct net_device *filter_dev, 8369 struct flow_cls_offload *cls_flower) 8370 { 8371 struct ice_vsi *vsi = np->vsi; 8372 8373 if (cls_flower->common.chain_index) 8374 return -EOPNOTSUPP; 8375 8376 switch (cls_flower->command) { 8377 case FLOW_CLS_REPLACE: 8378 return ice_add_cls_flower(filter_dev, vsi, cls_flower); 8379 case FLOW_CLS_DESTROY: 8380 return ice_del_cls_flower(vsi, cls_flower); 8381 default: 8382 return -EINVAL; 8383 } 8384 } 8385 8386 /** 8387 * ice_setup_tc_block_cb - callback handler registered for TC block 8388 * @type: TC SETUP type 8389 * @type_data: TC flower offload data that contains user input 8390 * @cb_priv: netdev private data 8391 */ 8392 static int 8393 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) 8394 { 8395 struct ice_netdev_priv *np = cb_priv; 8396 8397 switch (type) { 8398 case TC_SETUP_CLSFLOWER: 8399 return ice_setup_tc_cls_flower(np, np->vsi->netdev, 8400 type_data); 8401 default: 8402 return -EOPNOTSUPP; 8403 } 8404 } 8405 8406 /** 8407 * ice_validate_mqprio_qopt - Validate TCF input parameters 8408 * @vsi: Pointer to VSI 8409 * @mqprio_qopt: input parameters for mqprio queue configuration 8410 * 8411 * This function validates the MQPRIO params, such as qcount (a power of 2 8412 * wherever needed), and makes sure the user doesn't specify a qcount or BW 8413 * rate limit for more TCs than "num_tc" 8414 */ 8415 static int 8416 ice_validate_mqprio_qopt(struct ice_vsi *vsi, 8417 struct tc_mqprio_qopt_offload *mqprio_qopt) 8418 { 8419 int non_power_of_2_qcount = 0; 8420 struct ice_pf *pf = vsi->back; 8421 int max_rss_q_cnt = 0; 8422 u64 sum_min_rate = 0; 8423 struct device *dev; 8424 int i, speed; 8425 u8 num_tc; 8426 8427
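/* mqprio channel offload is supported on the PF VSI only */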
if (vsi->type != ICE_VSI_PF) 8428 return -EINVAL; 8429 8430 if (mqprio_qopt->qopt.offset[0] != 0 || 8431 mqprio_qopt->qopt.num_tc < 1 || 8432 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC) 8433 return -EINVAL; 8434 8435 dev = ice_pf_to_dev(pf); 8436 vsi->ch_rss_size = 0; 8437 num_tc = mqprio_qopt->qopt.num_tc; 8438 speed = ice_get_link_speed_kbps(vsi); 8439 8440 for (i = 0; num_tc; i++) { 8441 int qcount = mqprio_qopt->qopt.count[i]; 8442 u64 max_rate, min_rate, rem; 8443 8444 if (!qcount) 8445 return -EINVAL; 8446 8447 if (is_power_of_2(qcount)) { 8448 if (non_power_of_2_qcount && 8449 qcount > non_power_of_2_qcount) { 8450 dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n", 8451 qcount, non_power_of_2_qcount); 8452 return -EINVAL; 8453 } 8454 if (qcount > max_rss_q_cnt) 8455 max_rss_q_cnt = qcount; 8456 } else { 8457 if (non_power_of_2_qcount && 8458 qcount != non_power_of_2_qcount) { 8459 dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n", 8460 qcount, non_power_of_2_qcount); 8461 return -EINVAL; 8462 } 8463 if (qcount < max_rss_q_cnt) { 8464 dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n", 8465 qcount, max_rss_q_cnt); 8466 return -EINVAL; 8467 } 8468 max_rss_q_cnt = qcount; 8469 non_power_of_2_qcount = qcount; 8470 } 8471 8472 /* TC command takes input in K/M/Gbps or K/M/Gbit etc but 8473 * converts the bandwidth rate limit into Bytes/s when 8474 * passing it down to the driver. So convert input bandwidth 8475 * from Bytes/s to Kbps 8476 */ 8477 max_rate = mqprio_qopt->max_rate[i]; 8478 max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR); 8479 8480 /* min_rate is minimum guaranteed rate and it can't be zero */ 8481 min_rate = mqprio_qopt->min_rate[i]; 8482 min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR); 8483 sum_min_rate += min_rate; 8484 8485 if (min_rate && min_rate < ICE_MIN_BW_LIMIT) { 8486 dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i, 8487 min_rate, ICE_MIN_BW_LIMIT); 8488 return -EINVAL; 8489 } 8490 8491 if (max_rate && max_rate > speed) { 8492 dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n", 8493 i, max_rate, speed); 8494 return -EINVAL; 8495 } 8496 8497 iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem); 8498 if (rem) { 8499 dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps", 8500 i, ICE_MIN_BW_LIMIT); 8501 return -EINVAL; 8502 } 8503 8504 iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem); 8505 if (rem) { 8506 dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps", 8507 i, ICE_MIN_BW_LIMIT); 8508 return -EINVAL; 8509 } 8510 8511 /* min_rate can't be more than max_rate, except when max_rate 8512 * is zero (implies max_rate sought is max line rate). In such 8513 * a case min_rate can be more than max.
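 * e.g. min_rate 50Gbit with max_rate 0 (uncapped) is accepted here,
 * while min_rate 50Gbit with max_rate 25Gbit is rejected below.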
8514 */ 8515 if (max_rate && min_rate > max_rate) { 8516 dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n", 8517 min_rate, max_rate); 8518 return -EINVAL; 8519 } 8520 8521 if (i >= mqprio_qopt->qopt.num_tc - 1) 8522 break; 8523 if (mqprio_qopt->qopt.offset[i + 1] != 8524 (mqprio_qopt->qopt.offset[i] + qcount)) 8525 return -EINVAL; 8526 } 8527 if (vsi->num_rxq < 8528 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) 8529 return -EINVAL; 8530 if (vsi->num_txq < 8531 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) 8532 return -EINVAL; 8533 8534 if (sum_min_rate && sum_min_rate > (u64)speed) { 8535 dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n", 8536 sum_min_rate, speed); 8537 return -EINVAL; 8538 } 8539 8540 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */ 8541 vsi->ch_rss_size = max_rss_q_cnt; 8542 8543 return 0; 8544 } 8545 8546 /** 8547 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF 8548 * @pf: ptr to PF device 8549 * @vsi: ptr to VSI 8550 */ 8551 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) 8552 { 8553 struct device *dev = ice_pf_to_dev(pf); 8554 bool added = false; 8555 struct ice_hw *hw; 8556 int flow; 8557 8558 if (!(vsi->num_gfltr || vsi->num_bfltr)) 8559 return -EINVAL; 8560 8561 hw = &pf->hw; 8562 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) { 8563 struct ice_fd_hw_prof *prof; 8564 int tun, status; 8565 u64 entry_h; 8566 8567 if (!(hw->fdir_prof && hw->fdir_prof[flow] && 8568 hw->fdir_prof[flow]->cnt)) 8569 continue; 8570 8571 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { 8572 enum ice_flow_priority prio; 8573 8574 /* add this VSI to FDir profile for this flow */ 8575 prio = ICE_FLOW_PRIO_NORMAL; 8576 prof = hw->fdir_prof[flow]; 8577 status = ice_flow_add_entry(hw, ICE_BLK_FD, 8578 prof->prof_id[tun], 8579 prof->vsi_h[0], vsi->idx, 8580 prio, prof->fdir_seg[tun], 8581 &entry_h); 8582 if (status) { 8583 dev_err(dev, "channel VSI idx %d, not able to add to group %d\n", 8584 vsi->idx, flow); 8585 continue; 8586 } 8587 8588 prof->entry_h[prof->cnt][tun] = entry_h; 8589 } 8590 8591 /* store VSI for filter replay and delete */ 8592 prof->vsi_h[prof->cnt] = vsi->idx; 8593 prof->cnt++; 8594 8595 added = true; 8596 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx, 8597 flow); 8598 } 8599 8600 if (!added) 8601 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx); 8602 8603 return 0; 8604 } 8605 8606 /** 8607 * ice_add_channel - add a channel by adding VSI 8608 * @pf: ptr to PF device 8609 * @sw_id: underlying HW switching element ID 8610 * @ch: ptr to channel structure 8611 * 8612 * Add a channel (VSI) using add_vsi and queue_map 8613 */ 8614 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch) 8615 { 8616 struct device *dev = ice_pf_to_dev(pf); 8617 struct ice_vsi *vsi; 8618 8619 if (ch->type != ICE_VSI_CHNL) { 8620 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type); 8621 return -EINVAL; 8622 } 8623 8624 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch); 8625 if (!vsi || vsi->type != ICE_VSI_CHNL) { 8626 dev_err(dev, "create chnl VSI failure\n"); 8627 return -EINVAL; 8628 } 8629 8630 ice_add_vsi_to_fdir(pf, vsi); 8631 8632 ch->sw_id = sw_id; 8633 ch->vsi_num = vsi->vsi_num; 8634 ch->info.mapping_flags = vsi->info.mapping_flags; 8635 ch->ch_vsi = vsi; 8636 /* set the back pointer of channel for newly created VSI */ 8637 vsi->ch = ch; 8638 8639 memcpy(&ch->info.q_mapping, 
&vsi->info.q_mapping, 8640 sizeof(vsi->info.q_mapping)); 8641 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping, 8642 sizeof(vsi->info.tc_mapping)); 8643 8644 return 0; 8645 } 8646 8647 /** 8648 * ice_chnl_cfg_res 8649 * @vsi: the VSI being setup 8650 * @ch: ptr to channel structure 8651 * 8652 * Configure channel specific resources such as rings, vector. 8653 */ 8654 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch) 8655 { 8656 int i; 8657 8658 for (i = 0; i < ch->num_txq; i++) { 8659 struct ice_q_vector *tx_q_vector, *rx_q_vector; 8660 struct ice_ring_container *rc; 8661 struct ice_tx_ring *tx_ring; 8662 struct ice_rx_ring *rx_ring; 8663 8664 tx_ring = vsi->tx_rings[ch->base_q + i]; 8665 rx_ring = vsi->rx_rings[ch->base_q + i]; 8666 if (!tx_ring || !rx_ring) 8667 continue; 8668 8669 /* setup ring being channel enabled */ 8670 tx_ring->ch = ch; 8671 rx_ring->ch = ch; 8672 8673 /* following code block sets up vector specific attributes */ 8674 tx_q_vector = tx_ring->q_vector; 8675 rx_q_vector = rx_ring->q_vector; 8676 if (!tx_q_vector && !rx_q_vector) 8677 continue; 8678 8679 if (tx_q_vector) { 8680 tx_q_vector->ch = ch; 8681 /* setup Tx and Rx ITR setting if DIM is off */ 8682 rc = &tx_q_vector->tx; 8683 if (!ITR_IS_DYNAMIC(rc)) 8684 ice_write_itr(rc, rc->itr_setting); 8685 } 8686 if (rx_q_vector) { 8687 rx_q_vector->ch = ch; 8688 /* setup Tx and Rx ITR setting if DIM is off */ 8689 rc = &rx_q_vector->rx; 8690 if (!ITR_IS_DYNAMIC(rc)) 8691 ice_write_itr(rc, rc->itr_setting); 8692 } 8693 } 8694 8695 /* it is safe to assume that, if channel has non-zero num_t[r]xq, then 8696 * GLINT_ITR register would have written to perform in-context 8697 * update, hence perform flush 8698 */ 8699 if (ch->num_txq || ch->num_rxq) 8700 ice_flush(&vsi->back->hw); 8701 } 8702 8703 /** 8704 * ice_cfg_chnl_all_res - configure channel resources 8705 * @vsi: pte to main_vsi 8706 * @ch: ptr to channel structure 8707 * 8708 * This function configures channel specific resources such as flow-director 8709 * counter index, and other resources such as queues, vectors, ITR settings 8710 */ 8711 static void 8712 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch) 8713 { 8714 /* configure channel (aka ADQ) resources such as queues, vectors, 8715 * ITR settings for channel specific vectors and anything else 8716 */ 8717 ice_chnl_cfg_res(vsi, ch); 8718 } 8719 8720 /** 8721 * ice_setup_hw_channel - setup new channel 8722 * @pf: ptr to PF device 8723 * @vsi: the VSI being setup 8724 * @ch: ptr to channel structure 8725 * @sw_id: underlying HW switching element ID 8726 * @type: type of channel to be created (VMDq2/VF) 8727 * 8728 * Setup new channel (VSI) based on specified type (VMDq2/VF) 8729 * and configures Tx rings accordingly 8730 */ 8731 static int 8732 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi, 8733 struct ice_channel *ch, u16 sw_id, u8 type) 8734 { 8735 struct device *dev = ice_pf_to_dev(pf); 8736 int ret; 8737 8738 ch->base_q = vsi->next_base_q; 8739 ch->type = type; 8740 8741 ret = ice_add_channel(pf, sw_id, ch); 8742 if (ret) { 8743 dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id); 8744 return ret; 8745 } 8746 8747 /* configure/setup ADQ specific resources */ 8748 ice_cfg_chnl_all_res(vsi, ch); 8749 8750 /* make sure to update the next_base_q so that subsequent channel's 8751 * (aka ADQ) VSI queue map is correct 8752 */ 8753 vsi->next_base_q = vsi->next_base_q + ch->num_rxq; 8754 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num, 
8755 ch->num_rxq); 8756 8757 return 0; 8758 } 8759 8760 /** 8761 * ice_setup_channel - setup new channel using uplink element 8762 * @pf: ptr to PF device 8763 * @vsi: the VSI being setup 8764 * @ch: ptr to channel structure 8765 * 8766 * Setup new channel (VSI) based on specified type (VMDq2/VF) 8767 * and uplink switching element 8768 */ 8769 static bool 8770 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi, 8771 struct ice_channel *ch) 8772 { 8773 struct device *dev = ice_pf_to_dev(pf); 8774 u16 sw_id; 8775 int ret; 8776 8777 if (vsi->type != ICE_VSI_PF) { 8778 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type); 8779 return false; 8780 } 8781 8782 sw_id = pf->first_sw->sw_id; 8783 8784 /* create channel (VSI) */ 8785 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL); 8786 if (ret) { 8787 dev_err(dev, "failed to setup hw_channel\n"); 8788 return false; 8789 } 8790 dev_dbg(dev, "successfully created channel()\n"); 8791 8792 return ch->ch_vsi ? true : false; 8793 } 8794 8795 /** 8796 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate 8797 * @vsi: VSI to be configured 8798 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit 8799 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit 8800 */ 8801 static int 8802 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate) 8803 { 8804 int err; 8805 8806 err = ice_set_min_bw_limit(vsi, min_tx_rate); 8807 if (err) 8808 return err; 8809 8810 return ice_set_max_bw_limit(vsi, max_tx_rate); 8811 } 8812 8813 /** 8814 * ice_create_q_channel - function to create channel 8815 * @vsi: VSI to be configured 8816 * @ch: ptr to channel (it contains channel specific params) 8817 * 8818 * This function creates a channel (VSI) using the num_queues specified by the 8819 * user and reconfigures RSS if needed.
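 *
 * A typical ADQ setup that reaches this path looks like (illustrative):
 *   # tc qdisc add dev <pf-netdev> root mqprio num_tc 2 map 0 1 \
 *         queues 4@0 4@4 hw 1 mode channel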
8820 */ 8821 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch) 8822 { 8823 struct ice_pf *pf = vsi->back; 8824 struct device *dev; 8825 8826 if (!ch) 8827 return -EINVAL; 8828 8829 dev = ice_pf_to_dev(pf); 8830 if (!ch->num_txq || !ch->num_rxq) { 8831 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq); 8832 return -EINVAL; 8833 } 8834 8835 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) { 8836 dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n", 8837 vsi->cnt_q_avail, ch->num_txq); 8838 return -EINVAL; 8839 } 8840 8841 if (!ice_setup_channel(pf, vsi, ch)) { 8842 dev_info(dev, "Failed to setup channel\n"); 8843 return -EINVAL; 8844 } 8845 /* configure BW rate limit */ 8846 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) { 8847 int ret; 8848 8849 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate, 8850 ch->min_tx_rate); 8851 if (ret) 8852 dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n", 8853 ch->max_tx_rate, ch->ch_vsi->vsi_num); 8854 else 8855 dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n", 8856 ch->max_tx_rate, ch->ch_vsi->vsi_num); 8857 } 8858 8859 vsi->cnt_q_avail -= ch->num_txq; 8860 8861 return 0; 8862 } 8863 8864 /** 8865 * ice_rem_all_chnl_fltrs - removes all channel filters 8866 * @pf: ptr to PF, TC-flower based filters are tracked at PF level 8867 * 8868 * Remove all advanced switch filters only if they are channel specific 8869 * tc-flower based filters 8870 */ 8871 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf) 8872 { 8873 struct ice_tc_flower_fltr *fltr; 8874 struct hlist_node *node; 8875 8876 /* to remove all channel filters, iterate an ordered list of filters */ 8877 hlist_for_each_entry_safe(fltr, node, 8878 &pf->tc_flower_fltr_list, 8879 tc_flower_node) { 8880 struct ice_rule_query_data rule; 8881 int status; 8882 8883 /* for now process only channel specific filters */ 8884 if (!ice_is_chnl_fltr(fltr)) 8885 continue; 8886 8887 rule.rid = fltr->rid; 8888 rule.rule_id = fltr->rule_id; 8889 rule.vsi_handle = fltr->dest_vsi_handle; 8890 status = ice_rem_adv_rule_by_id(&pf->hw, &rule); 8891 if (status) { 8892 if (status == -ENOENT) 8893 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n", 8894 rule.rule_id); 8895 else 8896 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n", 8897 status); 8898 } else if (fltr->dest_vsi) { 8899 /* update advanced switch filter count */ 8900 if (fltr->dest_vsi->type == ICE_VSI_CHNL) { 8901 u32 flags = fltr->flags; 8902 8903 fltr->dest_vsi->num_chnl_fltr--; 8904 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | 8905 ICE_TC_FLWR_FIELD_ENC_DST_MAC)) 8906 pf->num_dmac_chnl_fltrs--; 8907 } 8908 } 8909 8910 hlist_del(&fltr->tc_flower_node); 8911 kfree(fltr); 8912 } 8913 } 8914 8915 /** 8916 * ice_remove_q_channels - Remove queue channels for the TCs 8917 * @vsi: VSI to be configured 8918 * @rem_fltr: delete advanced switch filter or not 8919 * 8920 * Remove queue channels for the TCs 8921 */ 8922 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr) 8923 { 8924 struct ice_channel *ch, *ch_tmp; 8925 struct ice_pf *pf = vsi->back; 8926 int i; 8927 8928 /* remove all tc-flower based filters if they are channel filters only */ 8929 if (rem_fltr) 8930 ice_rem_all_chnl_fltrs(pf); 8931 8932 /* remove ntuple filters since queue configuration is being changed */ 8933 if (vsi->netdev->features & NETIF_F_NTUPLE) { 8934 struct ice_hw *hw = &pf->hw; 8935 8936 mutex_lock(&hw->fdir_fltr_lock); 8937
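/* drop all ntuple filters here; they reference a queue layout that is about to change */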
ice_fdir_del_all_fltrs(vsi); 8938 mutex_unlock(&hw->fdir_fltr_lock); 8939 } 8940 8941 /* perform cleanup for channels if they exist */ 8942 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { 8943 struct ice_vsi *ch_vsi; 8944 8945 list_del(&ch->list); 8946 ch_vsi = ch->ch_vsi; 8947 if (!ch_vsi) { 8948 kfree(ch); 8949 continue; 8950 } 8951 8952 /* Reset queue contexts */ 8953 for (i = 0; i < ch->num_rxq; i++) { 8954 struct ice_tx_ring *tx_ring; 8955 struct ice_rx_ring *rx_ring; 8956 8957 tx_ring = vsi->tx_rings[ch->base_q + i]; 8958 rx_ring = vsi->rx_rings[ch->base_q + i]; 8959 if (tx_ring) { 8960 tx_ring->ch = NULL; 8961 if (tx_ring->q_vector) 8962 tx_ring->q_vector->ch = NULL; 8963 } 8964 if (rx_ring) { 8965 rx_ring->ch = NULL; 8966 if (rx_ring->q_vector) 8967 rx_ring->q_vector->ch = NULL; 8968 } 8969 } 8970 8971 /* Release FD resources for the channel VSI */ 8972 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx); 8973 8974 /* clear the VSI from scheduler tree */ 8975 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx); 8976 8977 /* Delete VSI from FW, PF and HW VSI arrays */ 8978 ice_vsi_delete(ch->ch_vsi); 8979 8980 /* free the channel */ 8981 kfree(ch); 8982 } 8983 8984 /* clear the channel VSI map which is stored in main VSI */ 8985 ice_for_each_chnl_tc(i) 8986 vsi->tc_map_vsi[i] = NULL; 8987 8988 /* reset main VSI's all TC information */ 8989 vsi->all_enatc = 0; 8990 vsi->all_numtc = 0; 8991 } 8992 8993 /** 8994 * ice_rebuild_channels - rebuild channels 8995 * @pf: ptr to PF 8996 * 8997 * Recreate channel VSIs and replay filters 8998 */ 8999 static int ice_rebuild_channels(struct ice_pf *pf) 9000 { 9001 struct device *dev = ice_pf_to_dev(pf); 9002 struct ice_vsi *main_vsi; 9003 bool rem_adv_fltr = true; 9004 struct ice_channel *ch; 9005 struct ice_vsi *vsi; 9006 int tc_idx = 1; 9007 int i, err; 9008 9009 main_vsi = ice_get_main_vsi(pf); 9010 if (!main_vsi) 9011 return 0; 9012 9013 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) || 9014 main_vsi->old_numtc == 1) 9015 return 0; /* nothing to be done */ 9016 9017 /* reconfigure main VSI based on old value of TC and cached values 9018 * for MQPRIO opts 9019 */ 9020 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc); 9021 if (err) { 9022 dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n", 9023 main_vsi->old_ena_tc, main_vsi->vsi_num); 9024 return err; 9025 } 9026 9027 /* rebuild ADQ VSIs */ 9028 ice_for_each_vsi(pf, i) { 9029 enum ice_vsi_type type; 9030 9031 vsi = pf->vsi[i]; 9032 if (!vsi || vsi->type != ICE_VSI_CHNL) 9033 continue; 9034 9035 type = vsi->type; 9036 9037 /* rebuild ADQ VSI */ 9038 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT); 9039 if (err) { 9040 dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n", 9041 ice_vsi_type_str(type), vsi->idx, err); 9042 goto cleanup; 9043 } 9044 9045 /* Re-map HW VSI number, using VSI handle that will be 9046 * validated in the ice_replay_vsi() call below 9047 */ 9048 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); 9049 9050 /* replay filters for the VSI */ 9051 err = ice_replay_vsi(&pf->hw, vsi->idx); 9052 if (err) { 9053 dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n", 9054 ice_vsi_type_str(type), err, vsi->idx); 9055 rem_adv_fltr = false; 9056 goto cleanup; 9057 } 9058 dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n", 9059 ice_vsi_type_str(type), vsi->idx); 9060 9061 /* store ADQ VSI at correct TC index in main VSI's 9062 * map of TC to VSI 9063 */ 9064 main_vsi->tc_map_vsi[tc_idx++] = vsi; 9065 }
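/* tc_map_vsi slots 1 onwards of the main VSI now reference the rebuilt ADQ VSIs */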
9066 9067 /* ADQ VSI(s) have been rebuilt successfully, so set up channels 9068 * for the main VSI's Tx and Rx rings 9069 */ 9070 list_for_each_entry(ch, &main_vsi->ch_list, list) { 9071 struct ice_vsi *ch_vsi; 9072 9073 ch_vsi = ch->ch_vsi; 9074 if (!ch_vsi) 9075 continue; 9076 9077 /* reconfig channel resources */ 9078 ice_cfg_chnl_all_res(main_vsi, ch); 9079 9080 /* replay BW rate limit if it is non-zero */ 9081 if (!ch->max_tx_rate && !ch->min_tx_rate) 9082 continue; 9083 9084 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate, 9085 ch->min_tx_rate); 9086 if (err) 9087 dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n", 9088 err, ch->max_tx_rate, ch->min_tx_rate, 9089 ch_vsi->vsi_num); 9090 else 9091 dev_dbg(dev, "successfully rebuilt BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n", 9092 ch->max_tx_rate, ch->min_tx_rate, 9093 ch_vsi->vsi_num); 9094 } 9095 9096 /* reconfig RSS for main VSI */ 9097 if (main_vsi->ch_rss_size) 9098 ice_vsi_cfg_rss_lut_key(main_vsi); 9099 9100 return 0; 9101 9102 cleanup: 9103 ice_remove_q_channels(main_vsi, rem_adv_fltr); 9104 return err; 9105 } 9106 9107 /** 9108 * ice_create_q_channels - Add queue channel for the given TCs 9109 * @vsi: VSI to be configured 9110 * 9111 * Configures queue channel mapping to the given TCs 9112 */ 9113 static int ice_create_q_channels(struct ice_vsi *vsi) 9114 { 9115 struct ice_pf *pf = vsi->back; 9116 struct ice_channel *ch; 9117 int ret = 0, i; 9118 9119 ice_for_each_chnl_tc(i) { 9120 if (!(vsi->all_enatc & BIT(i))) 9121 continue; 9122 9123 ch = kzalloc(sizeof(*ch), GFP_KERNEL); 9124 if (!ch) { 9125 ret = -ENOMEM; 9126 goto err_free; 9127 } 9128 INIT_LIST_HEAD(&ch->list); 9129 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i]; 9130 ch->num_txq = vsi->mqprio_qopt.qopt.count[i]; 9131 ch->base_q = vsi->mqprio_qopt.qopt.offset[i]; 9132 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i]; 9133 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i]; 9134 9135 /* convert to Kbits/s */ 9136 if (ch->max_tx_rate) 9137 ch->max_tx_rate = div_u64(ch->max_tx_rate, 9138 ICE_BW_KBPS_DIVISOR); 9139 if (ch->min_tx_rate) 9140 ch->min_tx_rate = div_u64(ch->min_tx_rate, 9141 ICE_BW_KBPS_DIVISOR); 9142 9143 ret = ice_create_q_channel(vsi, ch); 9144 if (ret) { 9145 dev_err(ice_pf_to_dev(pf), 9146 "failed creating channel TC:%d\n", i); 9147 kfree(ch); 9148 goto err_free; 9149 } 9150 list_add_tail(&ch->list, &vsi->ch_list); 9151 vsi->tc_map_vsi[i] = ch->ch_vsi; 9152 dev_dbg(ice_pf_to_dev(pf), 9153 "successfully created channel: VSI %pK\n", ch->ch_vsi); 9154 } 9155 return 0; 9156 9157 err_free: 9158 ice_remove_q_channels(vsi, false); 9159 9160 return ret; 9161 } 9162 9163 /** 9164 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes 9165 * @netdev: net device to configure 9166 * @type_data: TC offload data 9167 */ 9168 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data) 9169 { 9170 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 9171 struct ice_netdev_priv *np = netdev_priv(netdev); 9172 struct ice_vsi *vsi = np->vsi; 9173 struct ice_pf *pf = vsi->back; 9174 u16 mode, ena_tc_qdisc = 0; 9175 int cur_txq, cur_rxq; 9176 u8 hw = 0, num_tcf; 9177 struct device *dev; 9178 int ret, i; 9179 9180 dev = ice_pf_to_dev(pf); 9181 num_tcf = mqprio_qopt->qopt.num_tc; 9182 hw = mqprio_qopt->qopt.hw; 9183 mode = mqprio_qopt->mode; 9184 if (!hw) { 9185 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); 9186 vsi->ch_rss_size = 0; 9187 memcpy(&vsi->mqprio_qopt,
mqprio_qopt, sizeof(*mqprio_qopt)); 9188 goto config_tcf; 9189 } 9190 9191 /* Generate queue region map for number of TCF requested */ 9192 for (i = 0; i < num_tcf; i++) 9193 ena_tc_qdisc |= BIT(i); 9194 9195 switch (mode) { 9196 case TC_MQPRIO_MODE_CHANNEL: 9197 9198 if (pf->hw.port_info->is_custom_tx_enabled) { 9199 dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n"); 9200 return -EBUSY; 9201 } 9202 ice_tear_down_devlink_rate_tree(pf); 9203 9204 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt); 9205 if (ret) { 9206 netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n", 9207 ret); 9208 return ret; 9209 } 9210 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); 9211 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags); 9212 /* don't assume state of hw_tc_offload during driver load 9213 * and set the flag for TC flower filter if hw_tc_offload 9214 * already ON 9215 */ 9216 if (vsi->netdev->features & NETIF_F_HW_TC) 9217 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags); 9218 break; 9219 default: 9220 return -EINVAL; 9221 } 9222 9223 config_tcf: 9224 9225 /* Requesting same TCF configuration as already enabled */ 9226 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc && 9227 mode != TC_MQPRIO_MODE_CHANNEL) 9228 return 0; 9229 9230 /* Pause VSI queues */ 9231 ice_dis_vsi(vsi, true); 9232 9233 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 9234 ice_remove_q_channels(vsi, true); 9235 9236 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { 9237 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf), 9238 num_online_cpus()); 9239 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf), 9240 num_online_cpus()); 9241 } else { 9242 /* logic to rebuild VSI, same as ethtool -L */ 9243 u16 offset = 0, qcount_tx = 0, qcount_rx = 0; 9244 9245 for (i = 0; i < num_tcf; i++) { 9246 if (!(ena_tc_qdisc & BIT(i))) 9247 continue; 9248 9249 offset = vsi->mqprio_qopt.qopt.offset[i]; 9250 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; 9251 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; 9252 } 9253 vsi->req_txq = offset + qcount_tx; 9254 vsi->req_rxq = offset + qcount_rx; 9255 9256 /* store away the original rss_size info, so that it gets reused 9257 * from ice_vsi_rebuild during the tc-qdisc delete stage - to 9258 * determine what the rss_size for the main VSI should be 9259 */ 9260 vsi->orig_rss_size = vsi->rss_size; 9261 } 9262 9263 /* save current values of Tx and Rx queues before calling VSI rebuild 9264 * for fallback option 9265 */ 9266 cur_txq = vsi->num_txq; 9267 cur_rxq = vsi->num_rxq; 9268 9269 /* proceed with rebuild main VSI using correct number of queues */ 9270 ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); 9271 if (ret) { 9272 /* fallback to current number of queues */ 9273 dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n"); 9274 vsi->req_txq = cur_txq; 9275 vsi->req_rxq = cur_rxq; 9276 clear_bit(ICE_RESET_FAILED, pf->state); 9277 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) { 9278 dev_err(dev, "Rebuild of main VSI failed again\n"); 9279 return ret; 9280 } 9281 } 9282 9283 vsi->all_numtc = num_tcf; 9284 vsi->all_enatc = ena_tc_qdisc; 9285 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc); 9286 if (ret) { 9287 netdev_err(netdev, "failed configuring TC for VSI id=%d\n", 9288 vsi->vsi_num); 9289 goto exit; 9290 } 9291 9292 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { 9293 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; 9294 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0]; 9295 9296 /* set TC0 rate limit if specified */ 9297 if (max_tx_rate || min_tx_rate) { 9298 /* convert
to Kbits/s */ 9299 if (max_tx_rate) 9300 max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR); 9301 if (min_tx_rate) 9302 min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR); 9303 9304 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate); 9305 if (!ret) { 9306 dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n", 9307 max_tx_rate, min_tx_rate, vsi->vsi_num); 9308 } else { 9309 dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n", 9310 max_tx_rate, min_tx_rate, vsi->vsi_num); 9311 goto exit; 9312 } 9313 } 9314 ret = ice_create_q_channels(vsi); 9315 if (ret) { 9316 netdev_err(netdev, "failed configuring queue channels\n"); 9317 goto exit; 9318 } else { 9319 netdev_dbg(netdev, "successfully configured channels\n"); 9320 } 9321 } 9322 9323 if (vsi->ch_rss_size) 9324 ice_vsi_cfg_rss_lut_key(vsi); 9325 9326 exit: 9327 /* if error, reset the all_numtc and all_enatc */ 9328 if (ret) { 9329 vsi->all_numtc = 0; 9330 vsi->all_enatc = 0; 9331 } 9332 /* resume VSI */ 9333 ice_ena_vsi(vsi, true); 9334 9335 return ret; 9336 } 9337 9338 static LIST_HEAD(ice_block_cb_list); 9339 9340 static int 9341 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type, 9342 void *type_data) 9343 { 9344 struct ice_netdev_priv *np = netdev_priv(netdev); 9345 struct ice_pf *pf = np->vsi->back; 9346 bool locked = false; 9347 int err; 9348 9349 switch (type) { 9350 case TC_SETUP_BLOCK: 9351 return flow_block_cb_setup_simple(type_data, 9352 &ice_block_cb_list, 9353 ice_setup_tc_block_cb, 9354 np, np, true); 9355 case TC_SETUP_QDISC_MQPRIO: 9356 if (ice_is_eswitch_mode_switchdev(pf)) { 9357 netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n"); 9358 return -EOPNOTSUPP; 9359 } 9360 9361 if (pf->adev) { 9362 mutex_lock(&pf->adev_mutex); 9363 device_lock(&pf->adev->dev); 9364 locked = true; 9365 if (pf->adev->dev.driver) { 9366 netdev_err(netdev, "Cannot change qdisc when RDMA is active\n"); 9367 err = -EBUSY; 9368 goto adev_unlock; 9369 } 9370 } 9371 9372 /* setup traffic classifier for receive side */ 9373 mutex_lock(&pf->tc_mutex); 9374 err = ice_setup_tc_mqprio_qdisc(netdev, type_data); 9375 mutex_unlock(&pf->tc_mutex); 9376 9377 adev_unlock: 9378 if (locked) { 9379 device_unlock(&pf->adev->dev); 9380 mutex_unlock(&pf->adev_mutex); 9381 } 9382 return err; 9383 default: 9384 return -EOPNOTSUPP; 9385 } 9386 return -EOPNOTSUPP; 9387 } 9388 9389 static struct ice_indr_block_priv * 9390 ice_indr_block_priv_lookup(struct ice_netdev_priv *np, 9391 struct net_device *netdev) 9392 { 9393 struct ice_indr_block_priv *cb_priv; 9394 9395 list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) { 9396 if (!cb_priv->netdev) 9397 return NULL; 9398 if (cb_priv->netdev == netdev) 9399 return cb_priv; 9400 } 9401 return NULL; 9402 } 9403 9404 static int 9405 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data, 9406 void *indr_priv) 9407 { 9408 struct ice_indr_block_priv *priv = indr_priv; 9409 struct ice_netdev_priv *np = priv->np; 9410 9411 switch (type) { 9412 case TC_SETUP_CLSFLOWER: 9413 return ice_setup_tc_cls_flower(np, priv->netdev, 9414 (struct flow_cls_offload *) 9415 type_data); 9416 default: 9417 return -EOPNOTSUPP; 9418 } 9419 } 9420 9421 static int 9422 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch, 9423 struct ice_netdev_priv *np, 9424 struct flow_block_offload *f, void *data, 9425 void (*cleanup)(struct flow_block_cb *block_cb)) 9426 { 9427 struct ice_indr_block_priv *indr_priv; 9428 struct flow_block_cb *block_cb; 9429 9430 
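/* indirect TC blocks are offloaded only for tunnel devices or for VLAN uppers stacked on this PF's netdev */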
if (!ice_is_tunnel_supported(netdev) && 9431 !(is_vlan_dev(netdev) && 9432 vlan_dev_real_dev(netdev) == np->vsi->netdev)) 9433 return -EOPNOTSUPP; 9434 9435 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 9436 return -EOPNOTSUPP; 9437 9438 switch (f->command) { 9439 case FLOW_BLOCK_BIND: 9440 indr_priv = ice_indr_block_priv_lookup(np, netdev); 9441 if (indr_priv) 9442 return -EEXIST; 9443 9444 indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL); 9445 if (!indr_priv) 9446 return -ENOMEM; 9447 9448 indr_priv->netdev = netdev; 9449 indr_priv->np = np; 9450 list_add(&indr_priv->list, &np->tc_indr_block_priv_list); 9451 9452 block_cb = 9453 flow_indr_block_cb_alloc(ice_indr_setup_block_cb, 9454 indr_priv, indr_priv, 9455 ice_rep_indr_tc_block_unbind, 9456 f, netdev, sch, data, np, 9457 cleanup); 9458 9459 if (IS_ERR(block_cb)) { 9460 list_del(&indr_priv->list); 9461 kfree(indr_priv); 9462 return PTR_ERR(block_cb); 9463 } 9464 flow_block_cb_add(block_cb, f); 9465 list_add_tail(&block_cb->driver_list, &ice_block_cb_list); 9466 break; 9467 case FLOW_BLOCK_UNBIND: 9468 indr_priv = ice_indr_block_priv_lookup(np, netdev); 9469 if (!indr_priv) 9470 return -ENOENT; 9471 9472 block_cb = flow_block_cb_lookup(f->block, 9473 ice_indr_setup_block_cb, 9474 indr_priv); 9475 if (!block_cb) 9476 return -ENOENT; 9477 9478 flow_indr_block_cb_remove(block_cb, f); 9479 9480 list_del(&block_cb->driver_list); 9481 break; 9482 default: 9483 return -EOPNOTSUPP; 9484 } 9485 return 0; 9486 } 9487 9488 static int 9489 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, 9490 void *cb_priv, enum tc_setup_type type, void *type_data, 9491 void *data, 9492 void (*cleanup)(struct flow_block_cb *block_cb)) 9493 { 9494 switch (type) { 9495 case TC_SETUP_BLOCK: 9496 return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data, 9497 data, cleanup); 9498 9499 default: 9500 return -EOPNOTSUPP; 9501 } 9502 } 9503 9504 /** 9505 * ice_open - Called when a network interface becomes active 9506 * @netdev: network interface device structure 9507 * 9508 * The open entry point is called when a network interface is made 9509 * active by the system (IFF_UP). At this point all resources needed 9510 * for transmit and receive operations are allocated, the interrupt 9511 * handler is registered with the OS, the netdev watchdog is enabled, 9512 * and the stack is notified that the interface is ready. 9513 * 9514 * Returns 0 on success, negative value on failure 9515 */ 9516 int ice_open(struct net_device *netdev) 9517 { 9518 struct ice_netdev_priv *np = netdev_priv(netdev); 9519 struct ice_pf *pf = np->vsi->back; 9520 9521 if (ice_is_reset_in_progress(pf->state)) { 9522 netdev_err(netdev, "can't open net device while reset is in progress"); 9523 return -EBUSY; 9524 } 9525 9526 return ice_open_internal(netdev); 9527 } 9528 9529 /** 9530 * ice_open_internal - Called when a network interface becomes active 9531 * @netdev: network interface device structure 9532 * 9533 * Internal ice_open implementation. 
Should not be used directly except by ice_open and the reset 9534 * handling routine 9535 * 9536 * Returns 0 on success, negative value on failure 9537 */ 9538 int ice_open_internal(struct net_device *netdev) 9539 { 9540 struct ice_netdev_priv *np = netdev_priv(netdev); 9541 struct ice_vsi *vsi = np->vsi; 9542 struct ice_pf *pf = vsi->back; 9543 struct ice_port_info *pi; 9544 int err; 9545 9546 if (test_bit(ICE_NEEDS_RESTART, pf->state)) { 9547 netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); 9548 return -EIO; 9549 } 9550 9551 netif_carrier_off(netdev); 9552 9553 pi = vsi->port_info; 9554 err = ice_update_link_info(pi); 9555 if (err) { 9556 netdev_err(netdev, "Failed to get link info, error %d\n", err); 9557 return err; 9558 } 9559 9560 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); 9561 9562 /* Set PHY if there is media, otherwise, turn off PHY */ 9563 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 9564 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); 9565 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) { 9566 err = ice_init_phy_user_cfg(pi); 9567 if (err) { 9568 netdev_err(netdev, "Failed to initialize PHY settings, error %d\n", 9569 err); 9570 return err; 9571 } 9572 } 9573 9574 err = ice_configure_phy(vsi); 9575 if (err) { 9576 netdev_err(netdev, "Failed to set physical link up, error %d\n", 9577 err); 9578 return err; 9579 } 9580 } else { 9581 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); 9582 ice_set_link(vsi, false); 9583 } 9584 9585 err = ice_vsi_open(vsi); 9586 if (err) 9587 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", 9588 vsi->vsi_num, vsi->vsw->sw_id); 9589 9590 /* Update existing tunnels information */ 9591 udp_tunnel_get_rx_info(netdev); 9592 9593 return err; 9594 } 9595 9596 /** 9597 * ice_stop - Disables a network interface 9598 * @netdev: network interface device structure 9599 * 9600 * The stop entry point is called when an interface is de-activated by the OS, 9601 * and the netdevice enters the DOWN state. The hardware is still under the 9602 * driver's control, but the netdev interface is disabled.
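 * When link-down-on-close is enabled, the PHY is forced down before the
 * VSI is closed (see ice_force_phys_link_state() below).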
/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't stop net device while reset is in progress\n");
		return -EBUSY;
	}

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
		int link_err = ice_force_phys_link_state(vsi, false);

		if (link_err) {
			if (link_err == -ENOMEDIUM)
				netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
					    vsi->vsi_num);
			else
				netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
					    vsi->vsi_num, link_err);

			ice_vsi_close(vsi);
			return -EIO;
		}
	}

	ice_vsi_close(vsi);

	return 0;
}
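/* The ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA test in ice_stop() is driven by a
 * private flag that userspace toggles through ethtool (flag name taken
 * from this driver's priv-flags strings; interface name illustrative):
 *
 *	# ethtool --set-priv-flags eth0 link-down-on-close on
 *
 * With the flag set, closing the interface also forces the physical link
 * down, so the link partner sees the port drop; with it clear (the
 * default), the PHY stays up, which makes subsequent opens faster.
 */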
/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	bool gso = skb_is_gso(skb);
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is, then we need to drop support for GSO.
	 */
	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
		features &= ~NETIF_F_GSO_MASK;

	len = skb_network_offset(skb);
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		goto out_rm_features;

	len = skb_network_header_len(skb);
	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
		goto out_rm_features;

	if (skb->encapsulation) {
		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
		 * the case of IPIP frames, the transport header pointer is
		 * after the inner header! So check to make sure that this
		 * is a GRE or UDP_TUNNEL frame before doing that math.
		 */
		if (gso && (skb_shinfo(skb)->gso_type &
			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
			len = skb_inner_network_header(skb) -
			      skb_transport_header(skb);
			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
				goto out_rm_features;
		}

		len = skb_inner_network_header_len(skb);
		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};

static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_select_queue = ice_select_queue,
	.ndo_features_check = ice_features_check,
	.ndo_fix_features = ice_fix_features,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_eth_ioctl = ice_eth_ioctl,
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_set_vf_rate = ice_set_vf_bw,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_setup_tc = ice_setup_tc,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,
};
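/* These two ops tables are attached while the netdev is being set up; the
 * safe-mode table, with its reduced feature set, is used when the DDP
 * package could not be loaded. A minimal sketch of the selection (helper
 * name and shape are illustrative):
 *
 *	static void ice_set_ops_sketch(struct ice_vsi *vsi)
 *	{
 *		struct net_device *netdev = vsi->netdev;
 *
 *		if (ice_is_safe_mode(vsi->back)) {
 *			netdev->netdev_ops = &ice_netdev_safe_mode_ops;
 *			return;
 *		}
 *
 *		netdev->netdev_ops = &ice_netdev_ops;
 *	}
 */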