// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
#include <net/xdp_sock_drv.h>

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);

/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}

static struct workqueue_struct *ice_wq;
struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(const struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
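/* Worked example (illustrative numbers): on a ring with count = 512, if
 * next_to_clean = 500 and next_to_use = 10 the ring has wrapped, so the
 * pending count is tail + count - head = 10 + 512 - 500 = 22 descriptors.
 */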
/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in-kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_fltr_add_mac_list
 * to add the MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in-kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by
 * ice_fltr_remove_mac_list to delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}
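/* Note: the two callbacks above are handed to the core address-list helpers
 * in ice_vsi_sync_fltr() below, e.g.:
 *
 *	__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 *
 * They run under the netdev's addr_list_lock (netif_addr_lock_bh()), so they
 * only stage addresses on the temporary lists and must not sleep.
 */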
/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for a given VSI
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;

	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return 0;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given VSI
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}

	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return status;
}
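/* Note: when the VSI carries VLANs with non-zero IDs, promiscuous mode has
 * to be applied per VLAN (the ICE_PROMISC_VLAN_RX/TX bits above), which is
 * why both helpers branch into the VLAN-aware filter calls in that case.
 */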
/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If a filter was added successfully or already exists, do not treat
	 * it as an error; continue processing the rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, the VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);

				/* promiscuous mode implies allmulticast so
				 * that VSIs that are in promiscuous mode are
				 * subscribed to multicast packets coming to
				 * the port
				 */
				err = ice_set_promisc(vsi,
						      ICE_MCAST_PROMISC_BITS);
				if (err)
					goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}

			/* disable allmulti here, but only if allmulti is not
			 * still enabled for the netdev
			 */
			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
				err = ice_clear_promisc(vsi,
							ICE_MCAST_PROMISC_BITS);
				if (err) {
					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
						   err, vsi->vsi_num);
				}
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}
/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_clear_sw_switch_recipes - clear switch recipes
 * @pf: board private structure
 *
 * Mark switch recipes as not created in sw structures. There are cases
 * (for example, after a reset) where rules, especially advanced rules,
 * need to be restored, either re-read from hardware or added again. The
 * 'recp_created' flag prevents that from happening and needs to be cleared
 * upfront.
 */
static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
{
	struct ice_sw_recipe *recp;
	u8 i;

	recp = pf->hw.switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		recp[i].recp_created = false;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}
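/* Reset flow overview (descriptive note): requesters record a reset by
 * setting ICE_{PFR,CORER,GLOBR}_REQ in pf->state. ice_reset_subtask()
 * later picks the largest pending type and calls ice_do_reset(), which
 * quiesces the device through ice_prepare_for_reset() above before
 * triggering the reset in hardware.
 */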
/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
		reset_type = ICE_RESET_CORER;
	}

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set, prepare
	 * for the pending reset if not already prepared (for PF
	 * software-initiated global resets the software should already be
	 * prepared for it, as indicated by ICE_PREPARED_FOR_RESET; for global
	 * resets initiated by firmware or software on other PFs, that bit is
	 * not set, so prepare for the reset now), poll for reset done, rebuild
	 * and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state)) {
		reset_type = ICE_RESET_PFR;
		if (pf->lag && pf->lag->bonded) {
			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
			reset_type = ICE_RESET_CORER;
		}
	}
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
"On" : "Off"; 892 893 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ || 894 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) 895 fec_req = "RS-FEC"; 896 else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ || 897 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ) 898 fec_req = "FC-FEC/BASE-R"; 899 else 900 fec_req = "NONE"; 901 902 kfree(caps); 903 904 done: 905 netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n", 906 speed, fec_req, fec, an_advertised, an, fc); 907 ice_print_topo_conflict(vsi); 908 } 909 910 /** 911 * ice_vsi_link_event - update the VSI's netdev 912 * @vsi: the VSI on which the link event occurred 913 * @link_up: whether or not the VSI needs to be set up or down 914 */ 915 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) 916 { 917 if (!vsi) 918 return; 919 920 if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev) 921 return; 922 923 if (vsi->type == ICE_VSI_PF) { 924 if (link_up == netif_carrier_ok(vsi->netdev)) 925 return; 926 927 if (link_up) { 928 netif_carrier_on(vsi->netdev); 929 netif_tx_wake_all_queues(vsi->netdev); 930 } else { 931 netif_carrier_off(vsi->netdev); 932 netif_tx_stop_all_queues(vsi->netdev); 933 } 934 } 935 } 936 937 /** 938 * ice_set_dflt_mib - send a default config MIB to the FW 939 * @pf: private PF struct 940 * 941 * This function sends a default configuration MIB to the FW. 942 * 943 * If this function errors out at any point, the driver is still able to 944 * function. The main impact is that LFC may not operate as expected. 945 * Therefore an error state in this function should be treated with a DBG 946 * message and continue on with driver rebuild/reenable. 947 */ 948 static void ice_set_dflt_mib(struct ice_pf *pf) 949 { 950 struct device *dev = ice_pf_to_dev(pf); 951 u8 mib_type, *buf, *lldpmib = NULL; 952 u16 len, typelen, offset = 0; 953 struct ice_lldp_org_tlv *tlv; 954 struct ice_hw *hw = &pf->hw; 955 u32 ouisubtype; 956 957 mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; 958 lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL); 959 if (!lldpmib) { 960 dev_dbg(dev, "%s Failed to allocate MIB memory\n", 961 __func__); 962 return; 963 } 964 965 /* Add ETS CFG TLV */ 966 tlv = (struct ice_lldp_org_tlv *)lldpmib; 967 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | 968 ICE_IEEE_ETS_TLV_LEN); 969 tlv->typelen = htons(typelen); 970 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | 971 ICE_IEEE_SUBTYPE_ETS_CFG); 972 tlv->ouisubtype = htonl(ouisubtype); 973 974 buf = tlv->tlvinfo; 975 buf[0] = 0; 976 977 /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0. 978 * Octets 5 - 12 are BW values, set octet 5 to 100% BW. 979 * Octets 13 - 20 are TSA values - leave as zeros 980 */ 981 buf[5] = 0x64; 982 len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; 983 offset += len + 2; 984 tlv = (struct ice_lldp_org_tlv *) 985 ((char *)tlv + sizeof(tlv->typelen) + len); 986 987 /* Add ETS REC TLV */ 988 buf = tlv->tlvinfo; 989 tlv->typelen = htons(typelen); 990 991 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | 992 ICE_IEEE_SUBTYPE_ETS_REC); 993 tlv->ouisubtype = htonl(ouisubtype); 994 995 /* First octet of buf is reserved 996 * Octets 1 - 4 map UP to TC - all UPs map to zero 997 * Octets 5 - 12 are BW values - set TC 0 to 100%. 
/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message and continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}
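/* Worked example (derived from the comments above): in the ETS CFG TLV,
 * octet 5 of tlvinfo carries the bandwidth share of TC 0, so buf[5] = 0x64
 * (decimal 100) assigns 100% of the bandwidth to TC 0, while the zeroed
 * UP-to-TC octets leave every user priority mapped to TC 0.
 */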
/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other bookkeeping needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}
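/* Note the negative logic in the mask above: a bit set in the link event
 * mask suppresses the corresponding event, so masking the complement leaves
 * only the UPDOWN, MEDIA_NA, MODULE_QUAL_FAIL and PHY_FW_LOAD_FAIL events
 * enabled.
 */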
/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

/**
 * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: intermediate helper storage and identifier for waiting
 * @opcode: the opcode to wait for
 *
 * Prepares to wait for a specific AdminQ completion event on the ARQ for
 * a given PF. The actual wait is done by a call to ice_aq_wait_for_event().
 *
 * Calls are separated to allow the caller to register for the event before
 * sending the command, which mitigates a race between registering and the
 * FW responding.
 *
 * To obtain only the descriptor contents, pass a task->event with a NULL
 * msg_buf. If the complete data buffer is desired, allocate the
 * task->event.msg_buf with enough space ahead of time.
 */
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode)
{
	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);
}

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: ptr prepared by ice_aq_prep_for_event()
 * @timeout: how long to wait, in jiffies
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout)
{
	enum ice_aq_task_state *state = &task->state;
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long start = jiffies;
	long ret;
	int err;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
					       *state != ICE_AQ_TASK_WAITING,
					       timeout);
	switch (*state) {
	case ICE_AQ_TASK_NOT_PREPARED:
		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
		err = -EINVAL;
		break;
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", *state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		task->opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);

	return err;
}
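/* Usage sketch (illustrative, not a fixed driver sequence): register for
 * the completion before issuing the command, then wait for it:
 *
 *	struct ice_aq_task task = {};
 *	int err;
 *
 *	ice_aq_prep_for_event(pf, &task, opcode);
 *	... send the AdminQ command that produces the event ...
 *	err = ice_aq_wait_for_event(pf, &task, HZ);
 *
 * Preparing first closes the window in which firmware could respond before
 * the caller starts waiting.
 */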
/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_rq_event_info *task_ev;
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state != ICE_AQ_TASK_WAITING)
			continue;
		if (task->opcode != opcode)
			continue;

		task_ev = &task->event;
		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
		task_ev->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
			memcpy(task_ev->msg_buf, event->msg_buf,
			       event->buf_len);
			task_ev->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

#define ICE_MBX_OVERFLOW_WATERMARK 64

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		struct ice_mbx_data data = {};
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			data.num_msg_proc = i;
			data.num_pending_arq = pending;
			data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
			data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;

			ice_vc_process_vf_msg(pf, &event, &data);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}
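/* Example (illustrative numbers): if rq.next_to_clean is 5 while the masked
 * head register reads 7, entries 5 and 6 have been written by firmware but
 * not yet processed, so the helper returns true.
 */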
/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* Nothing to do here if sideband queue is not supported */
	if (!ice_is_sbq_supported(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}
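/* Note on the handshake above: ICE_SERVICE_SCHED acts as a single-slot
 * flag. ice_service_task_schedule() only queues work when
 * test_and_set_bit() finds the bit clear, and ice_service_task_complete()
 * clears it again, so concurrent schedule requests collapse into at most
 * one queued run of the service task.
 */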
/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume flows (e.g. the WoL
 * scenario).
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}
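/* Illustrative sketch (setup happens elsewhere in the driver, typically at
 * probe time): the periodic path assumes the timer was armed roughly as
 *
 *	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
 *	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
 *
 * after which ice_service_timer() re-arms itself every period and defers
 * all real work to the service task via ice_service_task_schedule().
 */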
/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}

	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw));
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
pf->state); 1838 if (netif_msg_tx_err(pf)) 1839 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", 1840 vf->vf_id); 1841 } 1842 1843 reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id)); 1844 if (reg & VP_MDET_TX_TDPU_VALID_M) { 1845 wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF); 1846 vf->mdd_tx_events.count++; 1847 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); 1848 if (netif_msg_tx_err(pf)) 1849 dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", 1850 vf->vf_id); 1851 } 1852 1853 reg = rd32(hw, VP_MDET_RX(vf->vf_id)); 1854 if (reg & VP_MDET_RX_VALID_M) { 1855 wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF); 1856 vf->mdd_rx_events.count++; 1857 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); 1858 if (netif_msg_rx_err(pf)) 1859 dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", 1860 vf->vf_id); 1861 1862 /* Since the queue is disabled on VF Rx MDD events, the 1863 * PF can be configured to reset the VF through ethtool 1864 * private flag mdd-auto-reset-vf. 1865 */ 1866 if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { 1867 /* VF MDD event counters will be cleared by 1868 * reset, so print the event prior to reset. 1869 */ 1870 ice_print_vf_rx_mdd_event(vf); 1871 ice_reset_vf(vf, ICE_VF_RESET_LOCK); 1872 } 1873 } 1874 } 1875 mutex_unlock(&pf->vfs.table_lock); 1876 1877 ice_print_vfs_mdd_events(pf); 1878 } 1879 1880 /** 1881 * ice_force_phys_link_state - Force the physical link state 1882 * @vsi: VSI to force the physical link state to up/down 1883 * @link_up: true/false indicates to set the physical link to up/down 1884 * 1885 * Force the physical link state by getting the current PHY capabilities from 1886 * hardware and setting the PHY config based on the determined capabilities. If 1887 * link changes a link event will be triggered because both the Enable Automatic 1888 * Link Update and LESM Enable bits are set when setting the PHY capabilities. 1889 * 1890 * Returns 0 on success, negative on failure 1891 */ 1892 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) 1893 { 1894 struct ice_aqc_get_phy_caps_data *pcaps; 1895 struct ice_aqc_set_phy_cfg_data *cfg; 1896 struct ice_port_info *pi; 1897 struct device *dev; 1898 int retcode; 1899 1900 if (!vsi || !vsi->port_info || !vsi->back) 1901 return -EINVAL; 1902 if (vsi->type != ICE_VSI_PF) 1903 return 0; 1904 1905 dev = ice_pf_to_dev(vsi->back); 1906 1907 pi = vsi->port_info; 1908 1909 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 1910 if (!pcaps) 1911 return -ENOMEM; 1912 1913 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, 1914 NULL); 1915 if (retcode) { 1916 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n", 1917 vsi->vsi_num, retcode); 1918 retcode = -EIO; 1919 goto out; 1920 } 1921 1922 /* No change in link */ 1923 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && 1924 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) 1925 goto out; 1926 1927 /* Use the current user PHY configuration. The current user PHY 1928 * configuration is initialized during probe from PHY capabilities 1929 * software mode, and updated on set PHY configuration. 
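	 * The user configuration is duplicated with kmemdup() below so that
	 * toggling ICE_AQ_PHY_ENA_LINK for this request does not modify the
	 * saved curr_user_phy_cfg itself.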
1930 */ 1931 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL); 1932 if (!cfg) { 1933 retcode = -ENOMEM; 1934 goto out; 1935 } 1936 1937 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 1938 if (link_up) 1939 cfg->caps |= ICE_AQ_PHY_ENA_LINK; 1940 else 1941 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; 1942 1943 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); 1944 if (retcode) { 1945 dev_err(dev, "Failed to set phy config, VSI %d error %d\n", 1946 vsi->vsi_num, retcode); 1947 retcode = -EIO; 1948 } 1949 1950 kfree(cfg); 1951 out: 1952 kfree(pcaps); 1953 return retcode; 1954 } 1955 1956 /** 1957 * ice_init_nvm_phy_type - Initialize the NVM PHY type 1958 * @pi: port info structure 1959 * 1960 * Initialize nvm_phy_type_[low|high] for link lenient mode support 1961 */ 1962 static int ice_init_nvm_phy_type(struct ice_port_info *pi) 1963 { 1964 struct ice_aqc_get_phy_caps_data *pcaps; 1965 struct ice_pf *pf = pi->hw->back; 1966 int err; 1967 1968 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 1969 if (!pcaps) 1970 return -ENOMEM; 1971 1972 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, 1973 pcaps, NULL); 1974 1975 if (err) { 1976 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); 1977 goto out; 1978 } 1979 1980 pf->nvm_phy_type_hi = pcaps->phy_type_high; 1981 pf->nvm_phy_type_lo = pcaps->phy_type_low; 1982 1983 out: 1984 kfree(pcaps); 1985 return err; 1986 } 1987 1988 /** 1989 * ice_init_link_dflt_override - Initialize link default override 1990 * @pi: port info structure 1991 * 1992 * Initialize link default override and PHY total port shutdown during probe 1993 */ 1994 static void ice_init_link_dflt_override(struct ice_port_info *pi) 1995 { 1996 struct ice_link_default_override_tlv *ldo; 1997 struct ice_pf *pf = pi->hw->back; 1998 1999 ldo = &pf->link_dflt_override; 2000 if (ice_get_link_default_override(ldo, pi)) 2001 return; 2002 2003 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) 2004 return; 2005 2006 /* Enable Total Port Shutdown (override/replace link-down-on-close 2007 * ethtool private flag) for ports with Port Disable bit set. 2008 */ 2009 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); 2010 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); 2011 } 2012 2013 /** 2014 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings 2015 * @pi: port info structure 2016 * 2017 * If default override is enabled, initialize the user PHY cfg speed and FEC 2018 * settings using the default override mask from the NVM. 2019 * 2020 * The PHY should only be configured with the default override settings the 2021 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state 2022 * is used to indicate that the user PHY cfg default override is initialized 2023 * and the PHY has not been configured with the default override settings. The 2024 * state is set here, and cleared in ice_configure_phy the first time the PHY is 2025 * configured. 2026 * 2027 * This function should be called only if the FW doesn't support default 2028 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg. 
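 *
 * Note that this only rewrites the software copy of the user PHY
 * configuration; nothing is sent to firmware here. The override acts as an
 * AND-mask: for example, if the NVM reports both 25G and 10G PHY types but
 * the override TLV's phy_type mask contains only 10G, curr_user_phy_cfg is
 * narrowed to 10G. The masked values are applied to hardware by the next
 * call to ice_configure_phy().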
 */
static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv *ldo;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = pi->hw->back;

	ldo = &pf->link_dflt_override;

	/* If link default override is enabled, use it to mask NVM PHY
	 * capabilities for speed and FEC default configuration.
	 */
	cfg = &phy->curr_user_phy_cfg;

	if (ldo->phy_type_low || ldo->phy_type_high) {
		cfg->phy_type_low = pf->nvm_phy_type_lo &
				    cpu_to_le64(ldo->phy_type_low);
		cfg->phy_type_high = pf->nvm_phy_type_hi &
				     cpu_to_le64(ldo->phy_type_high);
	}
	cfg->link_fec_opt = ldo->fec_options;
	phy->curr_user_fec_req = ICE_FEC_AUTO;

	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
}

/**
 * ice_init_phy_user_cfg - Initialize the PHY user configuration
 * @pi: port info structure
 *
 * Initialize the current user PHY configuration and the requested speed,
 * FEC, and FC modes to their defaults. The defaults are taken from the get
 * PHY capabilities (topology with media) response, so call this only once
 * media is first available; an error is returned if media is not available.
 * The PHY initialization completed state is set here.
 *
 * These values are used whenever the PHY configuration is set, and the user
 * PHY configuration is updated on every set PHY configuration.
 *
 * Returns 0 on success, negative on failure
 */
static int ice_init_phy_user_cfg(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = pi->hw->back;
	int err;

	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
		return -EIO;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	if (ice_fw_supports_report_dflt_cfg(pi->hw))
		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
					  pcaps, NULL);
	else
		err = ice_aq_get_phy_caps(pi, false,
					  ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					  pcaps, NULL);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
		goto err_out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);

	/* check if lenient mode is supported and enabled */
	if (ice_fw_supports_link_override(pi->hw) &&
	    !(pcaps->module_compliance_enforcement &
	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);

		/* if the FW supports default PHY configuration mode, then
		 * the driver does not have to apply link override settings.
		 * If not, initialize the user PHY configuration with the
		 * link override values.
		 */
		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
			ice_init_phy_cfg_dflt_override(pi);
			goto out;
		}
	}

	/* if link default override is not enabled, set user flow control and
	 * FEC settings based on what get_phy_caps returned
	 */
	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
						      pcaps->link_fec_options);
	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);

out:
	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
err_out:
	kfree(pcaps);
	return err;
}

/**
 * ice_configure_phy - configure PHY
 * @vsi: VSI of PHY
 *
 * Set the PHY configuration. If the current PHY configuration is the same as
 * curr_user_phy_cfg, do nothing to avoid a link flap. Otherwise, configure
 * the PHY based on the get PHY capabilities (topology with media) response.
 */
static int ice_configure_phy(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_port_info *pi = vsi->port_info;
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = vsi->back;
	int err;

	/* Ensure we have media as we cannot configure a medialess port */
	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
		return -EPERM;

	ice_print_topo_conflict(vsi);

	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
		return -EPERM;

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
		return ice_force_phys_link_state(vsi, true);

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Get current PHY config */
	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
				  NULL);
	if (err) {
		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
			vsi->vsi_num, err);
		goto done;
	}

	/* If PHY enable link is configured and configuration has not changed,
	 * there's nothing to do
	 */
	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
		goto done;

	/* Use PHY topology as baseline for configuration */
	memset(pcaps, 0, sizeof(*pcaps));
	if (ice_fw_supports_report_dflt_cfg(pi->hw))
		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
					  pcaps, NULL);
	else
		err = ice_aq_get_phy_caps(pi, false,
					  ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					  pcaps, NULL);
	if (err) {
		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
			vsi->vsi_num, err);
		goto done;
	}

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg) {
		err = -ENOMEM;
		goto done;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);

	/* Speed - If default override pending, use the curr_user_phy_cfg set
	 * up in ice_init_phy_cfg_dflt_override().
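	 * The pending bit is consumed with test_and_clear_bit() below so the
	 * override values are applied exactly once.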
	 */
	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
			       vsi->back->state)) {
		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
	} else {
		u64 phy_low = 0, phy_high = 0;

		ice_update_phy_type(&phy_low, &phy_high,
				    pi->phy.curr_user_speed_req);
		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
		cfg->phy_type_high = pcaps->phy_type_high &
				     cpu_to_le64(phy_high);
	}

	/* Can't provide what was requested; use PHY capabilities */
	if (!cfg->phy_type_low && !cfg->phy_type_high) {
		cfg->phy_type_low = pcaps->phy_type_low;
		cfg->phy_type_high = pcaps->phy_type_high;
	}

	/* FEC */
	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);

	/* Can't provide what was requested; use PHY capabilities */
	if (cfg->link_fec_opt !=
	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
		cfg->link_fec_opt = pcaps->link_fec_options;
	}

	/* Flow Control - always supported; no need to check against
	 * capabilities
	 */
	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);

	/* Enable link and link update */
	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;

	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
	if (err)
		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
			vsi->vsi_num, err);

	kfree(cfg);
done:
	kfree(pcaps);
	return err;
}

/**
 * ice_check_media_subtask - Check for media
 * @pf: pointer to PF struct
 *
 * If media is available, initialize the PHY user configuration if that has
 * not been done yet, and configure the PHY if the interface is up.
 */
static void ice_check_media_subtask(struct ice_pf *pf)
{
	struct ice_port_info *pi;
	struct ice_vsi *vsi;
	int err;

	/* No need to check for media if it's already present */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
		return;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Refresh link info and check if media is present */
	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err)
		return;

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
			ice_init_phy_user_cfg(pi);

		/* PHY settings are reset on media insertion, reconfigure
		 * PHY to preserve settings.
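		 * If link-down-on-close is enabled and the interface is
		 * down, leave the PHY untouched here; it will be configured
		 * when the interface is brought up.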
2289 */ 2290 if (test_bit(ICE_VSI_DOWN, vsi->state) && 2291 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) 2292 return; 2293 2294 err = ice_configure_phy(vsi); 2295 if (!err) 2296 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); 2297 2298 /* A Link Status Event will be generated; the event handler 2299 * will complete bringing the interface up 2300 */ 2301 } 2302 } 2303 2304 /** 2305 * ice_service_task - manage and run subtasks 2306 * @work: pointer to work_struct contained by the PF struct 2307 */ 2308 static void ice_service_task(struct work_struct *work) 2309 { 2310 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); 2311 unsigned long start_time = jiffies; 2312 2313 /* subtasks */ 2314 2315 /* process reset requests first */ 2316 ice_reset_subtask(pf); 2317 2318 /* bail if a reset/recovery cycle is pending or rebuild failed */ 2319 if (ice_is_reset_in_progress(pf->state) || 2320 test_bit(ICE_SUSPENDED, pf->state) || 2321 test_bit(ICE_NEEDS_RESTART, pf->state)) { 2322 ice_service_task_complete(pf); 2323 return; 2324 } 2325 2326 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) { 2327 struct iidc_event *event; 2328 2329 event = kzalloc(sizeof(*event), GFP_KERNEL); 2330 if (event) { 2331 set_bit(IIDC_EVENT_CRIT_ERR, event->type); 2332 /* report the entire OICR value to AUX driver */ 2333 swap(event->reg, pf->oicr_err_reg); 2334 ice_send_event_to_aux(pf, event); 2335 kfree(event); 2336 } 2337 } 2338 2339 /* unplug aux dev per request, if an unplug request came in 2340 * while processing a plug request, this will handle it 2341 */ 2342 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags)) 2343 ice_unplug_aux_dev(pf); 2344 2345 /* Plug aux device per request */ 2346 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) 2347 ice_plug_aux_dev(pf); 2348 2349 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { 2350 struct iidc_event *event; 2351 2352 event = kzalloc(sizeof(*event), GFP_KERNEL); 2353 if (event) { 2354 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); 2355 ice_send_event_to_aux(pf, event); 2356 kfree(event); 2357 } 2358 } 2359 2360 ice_clean_adminq_subtask(pf); 2361 ice_check_media_subtask(pf); 2362 ice_check_for_hang_subtask(pf); 2363 ice_sync_fltr_subtask(pf); 2364 ice_handle_mdd_event(pf); 2365 ice_watchdog_subtask(pf); 2366 2367 if (ice_is_safe_mode(pf)) { 2368 ice_service_task_complete(pf); 2369 return; 2370 } 2371 2372 ice_process_vflr_event(pf); 2373 ice_clean_mailboxq_subtask(pf); 2374 ice_clean_sbq_subtask(pf); 2375 ice_sync_arfs_fltrs(pf); 2376 ice_flush_fdir_ctx(pf); 2377 2378 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */ 2379 ice_service_task_complete(pf); 2380 2381 /* If the tasks have taken longer than one service timer period 2382 * or there is more work to be done, reset the service timer to 2383 * schedule the service task now. 
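	 * (mod_timer() with an expiry of 'jiffies' arms the timer to fire on
	 * the next tick, effectively re-queueing the service task
	 * immediately.)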
2384 */ 2385 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || 2386 test_bit(ICE_MDD_EVENT_PENDING, pf->state) || 2387 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || 2388 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || 2389 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || 2390 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || 2391 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) 2392 mod_timer(&pf->serv_tmr, jiffies); 2393 } 2394 2395 /** 2396 * ice_set_ctrlq_len - helper function to set controlq length 2397 * @hw: pointer to the HW instance 2398 */ 2399 static void ice_set_ctrlq_len(struct ice_hw *hw) 2400 { 2401 hw->adminq.num_rq_entries = ICE_AQ_LEN; 2402 hw->adminq.num_sq_entries = ICE_AQ_LEN; 2403 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; 2404 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; 2405 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; 2406 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; 2407 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2408 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2409 hw->sbq.num_rq_entries = ICE_SBQ_LEN; 2410 hw->sbq.num_sq_entries = ICE_SBQ_LEN; 2411 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2412 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2413 } 2414 2415 /** 2416 * ice_schedule_reset - schedule a reset 2417 * @pf: board private structure 2418 * @reset: reset being requested 2419 */ 2420 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) 2421 { 2422 struct device *dev = ice_pf_to_dev(pf); 2423 2424 /* bail out if earlier reset has failed */ 2425 if (test_bit(ICE_RESET_FAILED, pf->state)) { 2426 dev_dbg(dev, "earlier reset has failed\n"); 2427 return -EIO; 2428 } 2429 /* bail if reset/recovery already in progress */ 2430 if (ice_is_reset_in_progress(pf->state)) { 2431 dev_dbg(dev, "Reset already in progress\n"); 2432 return -EBUSY; 2433 } 2434 2435 switch (reset) { 2436 case ICE_RESET_PFR: 2437 set_bit(ICE_PFR_REQ, pf->state); 2438 break; 2439 case ICE_RESET_CORER: 2440 set_bit(ICE_CORER_REQ, pf->state); 2441 break; 2442 case ICE_RESET_GLOBR: 2443 set_bit(ICE_GLOBR_REQ, pf->state); 2444 break; 2445 default: 2446 return -EINVAL; 2447 } 2448 2449 ice_service_task_schedule(pf); 2450 return 0; 2451 } 2452 2453 /** 2454 * ice_irq_affinity_notify - Callback for affinity changes 2455 * @notify: context as to what irq was changed 2456 * @mask: the new affinity mask 2457 * 2458 * This is a callback function used by the irq_set_affinity_notifier function 2459 * so that we may register to receive changes to the irq affinity masks. 2460 */ 2461 static void 2462 ice_irq_affinity_notify(struct irq_affinity_notify *notify, 2463 const cpumask_t *mask) 2464 { 2465 struct ice_q_vector *q_vector = 2466 container_of(notify, struct ice_q_vector, affinity_notify); 2467 2468 cpumask_copy(&q_vector->affinity_mask, mask); 2469 } 2470 2471 /** 2472 * ice_irq_affinity_release - Callback for affinity notifier release 2473 * @ref: internal core kernel usage 2474 * 2475 * This is a callback function used by the irq_set_affinity_notifier function 2476 * to inform the current notification subscriber that they will no longer 2477 * receive notifications. 
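 *
 * The body is intentionally empty: the notifier embeds no dynamically
 * allocated state of its own, but a kref release callback must still be
 * provided for irq_set_affinity_notifier() to operate on.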
2478 */ 2479 static void ice_irq_affinity_release(struct kref __always_unused *ref) {} 2480 2481 /** 2482 * ice_vsi_ena_irq - Enable IRQ for the given VSI 2483 * @vsi: the VSI being configured 2484 */ 2485 static int ice_vsi_ena_irq(struct ice_vsi *vsi) 2486 { 2487 struct ice_hw *hw = &vsi->back->hw; 2488 int i; 2489 2490 ice_for_each_q_vector(vsi, i) 2491 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); 2492 2493 ice_flush(hw); 2494 return 0; 2495 } 2496 2497 /** 2498 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI 2499 * @vsi: the VSI being configured 2500 * @basename: name for the vector 2501 */ 2502 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) 2503 { 2504 int q_vectors = vsi->num_q_vectors; 2505 struct ice_pf *pf = vsi->back; 2506 struct device *dev; 2507 int rx_int_idx = 0; 2508 int tx_int_idx = 0; 2509 int vector, err; 2510 int irq_num; 2511 2512 dev = ice_pf_to_dev(pf); 2513 for (vector = 0; vector < q_vectors; vector++) { 2514 struct ice_q_vector *q_vector = vsi->q_vectors[vector]; 2515 2516 irq_num = q_vector->irq.virq; 2517 2518 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) { 2519 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2520 "%s-%s-%d", basename, "TxRx", rx_int_idx++); 2521 tx_int_idx++; 2522 } else if (q_vector->rx.rx_ring) { 2523 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2524 "%s-%s-%d", basename, "rx", rx_int_idx++); 2525 } else if (q_vector->tx.tx_ring) { 2526 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2527 "%s-%s-%d", basename, "tx", tx_int_idx++); 2528 } else { 2529 /* skip this unused q_vector */ 2530 continue; 2531 } 2532 if (vsi->type == ICE_VSI_CTRL && vsi->vf) 2533 err = devm_request_irq(dev, irq_num, vsi->irq_handler, 2534 IRQF_SHARED, q_vector->name, 2535 q_vector); 2536 else 2537 err = devm_request_irq(dev, irq_num, vsi->irq_handler, 2538 0, q_vector->name, q_vector); 2539 if (err) { 2540 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", 2541 err); 2542 goto free_q_irqs; 2543 } 2544 2545 /* register for affinity change notifications */ 2546 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { 2547 struct irq_affinity_notify *affinity_notify; 2548 2549 affinity_notify = &q_vector->affinity_notify; 2550 affinity_notify->notify = ice_irq_affinity_notify; 2551 affinity_notify->release = ice_irq_affinity_release; 2552 irq_set_affinity_notifier(irq_num, affinity_notify); 2553 } 2554 2555 /* assign the mask for this irq */ 2556 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); 2557 } 2558 2559 err = ice_set_cpu_rx_rmap(vsi); 2560 if (err) { 2561 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n", 2562 vsi->vsi_num, ERR_PTR(err)); 2563 goto free_q_irqs; 2564 } 2565 2566 vsi->irqs_ready = true; 2567 return 0; 2568 2569 free_q_irqs: 2570 while (vector--) { 2571 irq_num = vsi->q_vectors[vector]->irq.virq; 2572 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) 2573 irq_set_affinity_notifier(irq_num, NULL); 2574 irq_set_affinity_hint(irq_num, NULL); 2575 devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); 2576 } 2577 return err; 2578 } 2579 2580 /** 2581 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP 2582 * @vsi: VSI to setup Tx rings used by XDP 2583 * 2584 * Return 0 on success and negative value on error 2585 */ 2586 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) 2587 { 2588 struct device *dev = ice_pf_to_dev(vsi->back); 2589 struct ice_tx_desc *tx_desc; 2590 int i, j; 2591 2592 ice_for_each_xdp_txq(vsi, i) { 2593 u16 xdp_q_idx = vsi->alloc_txq + i; 2594 
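		/* XDP Tx queues are appended after the regular Tx queues in
		 * the VSI's queue map, hence the alloc_txq offset in the
		 * queue index here.
		 */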
struct ice_ring_stats *ring_stats; 2595 struct ice_tx_ring *xdp_ring; 2596 2597 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); 2598 if (!xdp_ring) 2599 goto free_xdp_rings; 2600 2601 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); 2602 if (!ring_stats) { 2603 ice_free_tx_ring(xdp_ring); 2604 goto free_xdp_rings; 2605 } 2606 2607 xdp_ring->ring_stats = ring_stats; 2608 xdp_ring->q_index = xdp_q_idx; 2609 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; 2610 xdp_ring->vsi = vsi; 2611 xdp_ring->netdev = NULL; 2612 xdp_ring->dev = dev; 2613 xdp_ring->count = vsi->num_tx_desc; 2614 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); 2615 if (ice_setup_tx_ring(xdp_ring)) 2616 goto free_xdp_rings; 2617 ice_set_ring_xdp(xdp_ring); 2618 spin_lock_init(&xdp_ring->tx_lock); 2619 for (j = 0; j < xdp_ring->count; j++) { 2620 tx_desc = ICE_TX_DESC(xdp_ring, j); 2621 tx_desc->cmd_type_offset_bsz = 0; 2622 } 2623 } 2624 2625 return 0; 2626 2627 free_xdp_rings: 2628 for (; i >= 0; i--) { 2629 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) { 2630 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); 2631 vsi->xdp_rings[i]->ring_stats = NULL; 2632 ice_free_tx_ring(vsi->xdp_rings[i]); 2633 } 2634 } 2635 return -ENOMEM; 2636 } 2637 2638 /** 2639 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI 2640 * @vsi: VSI to set the bpf prog on 2641 * @prog: the bpf prog pointer 2642 */ 2643 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) 2644 { 2645 struct bpf_prog *old_prog; 2646 int i; 2647 2648 old_prog = xchg(&vsi->xdp_prog, prog); 2649 ice_for_each_rxq(vsi, i) 2650 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); 2651 2652 if (old_prog) 2653 bpf_prog_put(old_prog); 2654 } 2655 2656 /** 2657 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP 2658 * @vsi: VSI to bring up Tx rings used by XDP 2659 * @prog: bpf program that will be assigned to VSI 2660 * 2661 * Return 0 on success and negative value on error 2662 */ 2663 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) 2664 { 2665 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2666 int xdp_rings_rem = vsi->num_xdp_txq; 2667 struct ice_pf *pf = vsi->back; 2668 struct ice_qs_cfg xdp_qs_cfg = { 2669 .qs_mutex = &pf->avail_q_mutex, 2670 .pf_map = pf->avail_txqs, 2671 .pf_map_size = pf->max_pf_txqs, 2672 .q_count = vsi->num_xdp_txq, 2673 .scatter_count = ICE_MAX_SCATTER_TXQS, 2674 .vsi_map = vsi->txq_map, 2675 .vsi_map_offset = vsi->alloc_txq, 2676 .mapping_mode = ICE_VSI_MAP_CONTIG 2677 }; 2678 struct device *dev; 2679 int i, v_idx; 2680 int status; 2681 2682 dev = ice_pf_to_dev(pf); 2683 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, 2684 sizeof(*vsi->xdp_rings), GFP_KERNEL); 2685 if (!vsi->xdp_rings) 2686 return -ENOMEM; 2687 2688 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; 2689 if (__ice_vsi_get_qs(&xdp_qs_cfg)) 2690 goto err_map_xdp; 2691 2692 if (static_key_enabled(&ice_xdp_locking_key)) 2693 netdev_warn(vsi->netdev, 2694 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n"); 2695 2696 if (ice_xdp_alloc_setup_rings(vsi)) 2697 goto clear_xdp_rings; 2698 2699 /* follow the logic from ice_vsi_map_rings_to_vectors */ 2700 ice_for_each_q_vector(vsi, v_idx) { 2701 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2702 int xdp_rings_per_v, q_id, q_base; 2703 2704 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, 2705 vsi->num_q_vectors - v_idx); 2706 q_base = vsi->num_xdp_txq - xdp_rings_rem; 2707 2708 for (q_id = q_base; q_id < (q_base + 
		     xdp_rings_per_v); q_id++) {
			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];

			xdp_ring->q_vector = q_vector;
			xdp_ring->next = q_vector->tx.tx_ring;
			q_vector->tx.tx_ring = xdp_ring;
		}
		xdp_rings_rem -= xdp_rings_per_v;
	}

	ice_for_each_rxq(vsi, i) {
		if (static_key_enabled(&ice_xdp_locking_key)) {
			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
		} else {
			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
			struct ice_tx_ring *ring;

			ice_for_each_tx_ring(ring, q_vector->tx) {
				if (ice_ring_is_xdp(ring)) {
					vsi->rx_rings[i]->xdp_ring = ring;
					break;
				}
			}
		}
		ice_tx_xsk_pool(vsi, i);
	}

	/* omit the scheduler update if in reset path; XDP queues will be
	 * taken into account at the end of ice_vsi_rebuild, where
	 * ice_cfg_vsi_lan is being called
	 */
	if (ice_is_reset_in_progress(pf->state))
		return 0;

	/* tell the Tx scheduler that right now we have
	 * additional queues
	 */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
			status);
		goto clear_xdp_rings;
	}

	/* assign the prog only when it's not already present on the VSI;
	 * this flow is triggered by both the ethtool -L and ndo_bpf flows.
	 * A VSI rebuild that happens under ethtool -L can expose us to
	 * bpf_prog refcount issues, as we would be swapping the same
	 * bpf_prog pointer into vsi->xdp_prog and calling bpf_prog_put
	 * on it when it is treated as an 'old_prog'; for ndo_bpf this is
	 * not harmful, as dev_xdp_install bumps the refcount before
	 * calling the op exposed by the driver.
	 */
	if (!ice_is_xdp_ena_vsi(vsi))
		ice_vsi_assign_bpf_prog(vsi, prog);

	return 0;
clear_xdp_rings:
	ice_for_each_xdp_txq(vsi, i)
		if (vsi->xdp_rings[i]) {
			kfree_rcu(vsi->xdp_rings[i], rcu);
			vsi->xdp_rings[i] = NULL;
		}

err_map_xdp:
	mutex_lock(&pf->avail_q_mutex);
	ice_for_each_xdp_txq(vsi, i) {
		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
	}
	mutex_unlock(&pf->avail_q_mutex);

	devm_kfree(dev, vsi->xdp_rings);
	return -ENOMEM;
}

/**
 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
 * @vsi: VSI to remove XDP rings from
 *
 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
 * resources
 */
int ice_destroy_xdp_rings(struct ice_vsi *vsi)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_pf *pf = vsi->back;
	int i, v_idx;

	/* q_vectors are freed in the reset path, so there's no point in
	 * detaching rings. If the rebuild was not triggered by a reset, the
	 * reset bits in pf->state won't be set, so additionally check the
	 * first q_vector against NULL.
	 */
	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
		goto free_qmap;

	ice_for_each_q_vector(vsi, v_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
		struct ice_tx_ring *ring;

		ice_for_each_tx_ring(ring, q_vector->tx)
			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
				break;

		/* restore the value of last node prior to XDP setup */
		q_vector->tx.tx_ring = ring;
	}

free_qmap:
	mutex_lock(&pf->avail_q_mutex);
	ice_for_each_xdp_txq(vsi, i) {
		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
	}
	mutex_unlock(&pf->avail_q_mutex);

	ice_for_each_xdp_txq(vsi, i)
		if (vsi->xdp_rings[i]) {
			if (vsi->xdp_rings[i]->desc) {
				synchronize_rcu();
				ice_free_tx_ring(vsi->xdp_rings[i]);
			}
			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
			vsi->xdp_rings[i]->ring_stats = NULL;
			kfree_rcu(vsi->xdp_rings[i], rcu);
			vsi->xdp_rings[i] = NULL;
		}

	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
	vsi->xdp_rings = NULL;

	if (static_key_enabled(&ice_xdp_locking_key))
		static_branch_dec(&ice_xdp_locking_key);

	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
		return 0;

	ice_vsi_assign_bpf_prog(vsi, NULL);

	/* notify Tx scheduler that we destroyed XDP queues and bring
	 * back the old number of child nodes
	 */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->num_txq;

	/* change number of XDP Tx queues to 0 */
	vsi->num_xdp_txq = 0;

	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
			       max_txqs);
}

/**
 * ice_vsi_rx_napi_schedule - Schedule napi on Rx queues from VSI
 * @vsi: VSI to schedule napi on
 */
static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
{
	int i;

	ice_for_each_rxq(vsi, i) {
		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];

		if (rx_ring->xsk_pool)
			napi_schedule(&rx_ring->q_vector->napi);
	}
}

/**
 * ice_vsi_determine_xdp_res - figure out how many XDP Tx queues the VSI can have
 * @vsi: VSI to determine the count of XDP Tx queues for
 *
 * Returns 0 if the count of available Tx queues is at least half the count
 * of possible CPUs, -ENOMEM otherwise
 */
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
{
	u16 avail = ice_get_avail_txq_count(vsi->back);
	u16 cpus = num_possible_cpus();

	if (avail < cpus / 2)
		return -ENOMEM;

	vsi->num_xdp_txq = min_t(u16, avail, cpus);

	if (vsi->num_xdp_txq < cpus)
		static_branch_inc(&ice_xdp_locking_key);

	return 0;
}

/**
 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: Pointer to VSI structure
 */
static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
{
	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		return ICE_RXBUF_1664;
	else
		return ICE_RXBUF_3072;
}

/**
 * ice_xdp_setup_prog - Add or remove XDP eBPF program
 * @vsi: VSI to setup XDP for
 * @prog: XDP program
 * @extack: netlink extended ack
 */
static int
ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
		   struct netlink_ext_ack *extack)
{
	unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
	bool if_running = netif_running(vsi->netdev);
	int ret = 0, xdp_ring_err = 0;

	if (prog && !prog->aux->xdp_has_frags) {
		if (frame_size > ice_max_xdp_frame_size(vsi)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "MTU is too large for linear frames and XDP prog does not support frags");
			return -EOPNOTSUPP;
		}
	}

	/* hot swap progs and avoid toggling link */
	if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
		ice_vsi_assign_bpf_prog(vsi, prog);
		return 0;
	}

	/* need to stop netdev while setting up the program for Rx rings */
	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
		ret = ice_down(vsi);
		if (ret) {
			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
			return ret;
		}
	}

	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
		if (xdp_ring_err) {
			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
		} else {
			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
			if (xdp_ring_err)
				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
		}
		xdp_features_set_redirect_target(vsi->netdev, true);
		/* reallocate Rx queues that are used for zero-copy */
		xdp_ring_err = ice_realloc_zc_buf(vsi, true);
		if (xdp_ring_err)
			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
		xdp_features_clear_redirect_target(vsi->netdev);
		xdp_ring_err = ice_destroy_xdp_rings(vsi);
		if (xdp_ring_err)
			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
		/* reallocate Rx queues that were used for zero-copy */
		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
		if (xdp_ring_err)
			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
	}

	if (if_running)
		ret = ice_up(vsi);

	if (!ret && prog)
		ice_vsi_rx_napi_schedule(vsi);

	return (ret || xdp_ring_err) ? -ENOMEM : 0;
}

/**
 * ice_xdp_safe_mode - XDP handler for safe mode
 * @dev: netdevice
 * @xdp: XDP command
 */
static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
			     struct netdev_bpf *xdp)
{
	NL_SET_ERR_MSG_MOD(xdp->extack,
			   "Please provide working DDP firmware package in order to use XDP\n"
			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
	return -EOPNOTSUPP;
}

/**
 * ice_xdp - implements XDP handler
 * @dev: netdevice
 * @xdp: XDP command
 */
static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;

	if (vsi->type != ICE_VSI_PF) {
		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
		return -EINVAL;
	}

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
	case XDP_SETUP_XSK_POOL:
		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
					  xdp->xsk.queue_id);
	default:
		return -EINVAL;
	}
}

/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
 */
static void ice_ena_misc_vector(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* Disable the anti-spoof detection interrupt to prevent spurious
	 * event interrupts during a function reset. Anti-spoof functionality
	 * is still supported.
	 */
	val = rd32(hw, GL_MDCK_TX_TDPU);
	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
	wr32(hw, GL_MDCK_TX_TDPU, val);

	/* clear things first */
	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
	rd32(hw, PFINT_OICR);		/* read to clear */

	val = (PFINT_OICR_ECC_ERR_M |
	       PFINT_OICR_MAL_DETECT_M |
	       PFINT_OICR_GRST_M |
	       PFINT_OICR_PCI_EXCEPTION_M |
	       PFINT_OICR_VFLR_M |
	       PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_PUSH_M |
	       PFINT_OICR_PE_CRITERR_M);

	wr32(hw, PFINT_OICR_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}

/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	u32 oicr, ena_mask;

	dev = ice_pf_to_dev(pf);
	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	if (oicr & PFINT_OICR_SWINT_M) {
		ena_mask &= ~PFINT_OICR_SWINT_M;
		pf->sw_int_count++;
	}

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
	}
	if (oicr & PFINT_OICR_VFLR_M) {
		/* disable any further VFLR event notifications */
		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
			u32 reg = rd32(hw, PFINT_OICR_ENA);

			reg &= ~PFINT_OICR_VFLR_M;
			wr32(hw, PFINT_OICR_ENA, reg);
		} else {
			ena_mask &= ~PFINT_OICR_VFLR_M;
			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
		}
	}

	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		/* we have a reset warning */
		ena_mask &= ~PFINT_OICR_GRST_M;
		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
			GLGEN_RSTAT_RESET_TYPE_S;

		if (reset == ICE_RESET_CORER)
			pf->corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			pf->globr_count++;
		else if (reset == ICE_RESET_EMPR)
			pf->empr_count++;
		else
			dev_dbg(dev, "Invalid reset type %d\n", reset);

		/* If a reset cycle isn't already in progress, we set a bit in
		 * pf->state so that the service task can start a reset/rebuild.
		 */
		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(ICE_GLOBR_RECV, pf->state);
			else
				set_bit(ICE_EMPR_RECV, pf->state);

			/* There are a couple of different bits at play here.
			 * hw->reset_ongoing indicates whether the hardware is
			 * in reset. This is set to true when a reset
			 * interrupt is received and set back to false after
			 * the driver has determined that the hardware is out
			 * of reset.
			 *
			 * ICE_RESET_OICR_RECV in pf->state indicates
			 * that a post reset rebuild is required before the
			 * driver is operational again. This is set above.
			 *
			 * As this is the start of the reset/rebuild cycle,
			 * set both to indicate that.
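			 * (The misc IRQ thread then schedules the service
			 * task, whose reset subtask carries out the actual
			 * rebuild.)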
3146 */ 3147 hw->reset_ongoing = true; 3148 } 3149 } 3150 3151 if (oicr & PFINT_OICR_TSYN_TX_M) { 3152 ena_mask &= ~PFINT_OICR_TSYN_TX_M; 3153 if (!hw->reset_ongoing && ice_ptp_pf_handles_tx_interrupt(pf)) 3154 set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); 3155 } 3156 3157 if (oicr & PFINT_OICR_TSYN_EVNT_M) { 3158 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 3159 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx)); 3160 3161 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M; 3162 3163 if (ice_pf_src_tmr_owned(pf)) { 3164 /* Save EVENTs from GLTSYN register */ 3165 pf->ptp.ext_ts_irq |= gltsyn_stat & 3166 (GLTSYN_STAT_EVENT0_M | 3167 GLTSYN_STAT_EVENT1_M | 3168 GLTSYN_STAT_EVENT2_M); 3169 3170 set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread); 3171 } 3172 } 3173 3174 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M) 3175 if (oicr & ICE_AUX_CRIT_ERR) { 3176 pf->oicr_err_reg |= oicr; 3177 set_bit(ICE_AUX_ERR_PENDING, pf->state); 3178 ena_mask &= ~ICE_AUX_CRIT_ERR; 3179 } 3180 3181 /* Report any remaining unexpected interrupts */ 3182 oicr &= ena_mask; 3183 if (oicr) { 3184 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr); 3185 /* If a critical error is pending there is no choice but to 3186 * reset the device. 3187 */ 3188 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M | 3189 PFINT_OICR_ECC_ERR_M)) { 3190 set_bit(ICE_PFR_REQ, pf->state); 3191 } 3192 } 3193 3194 return IRQ_WAKE_THREAD; 3195 } 3196 3197 /** 3198 * ice_misc_intr_thread_fn - misc interrupt thread function 3199 * @irq: interrupt number 3200 * @data: pointer to a q_vector 3201 */ 3202 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) 3203 { 3204 struct ice_pf *pf = data; 3205 struct ice_hw *hw; 3206 3207 hw = &pf->hw; 3208 3209 if (ice_is_reset_in_progress(pf->state)) 3210 return IRQ_HANDLED; 3211 3212 ice_service_task_schedule(pf); 3213 3214 if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread)) 3215 ice_ptp_extts_event(pf); 3216 3217 if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { 3218 /* Process outstanding Tx timestamps. If there is more work, 3219 * re-arm the interrupt to trigger again. 
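		 * Writing the Tx timestamp cause bit back to PFINT_OICR
		 * re-raises the interrupt, so this thread function is
		 * invoked again to finish the remaining timestamp work.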
3220 */ 3221 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { 3222 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); 3223 ice_flush(hw); 3224 } 3225 } 3226 3227 ice_irq_dynamic_ena(hw, NULL, NULL); 3228 3229 return IRQ_HANDLED; 3230 } 3231 3232 /** 3233 * ice_dis_ctrlq_interrupts - disable control queue interrupts 3234 * @hw: pointer to HW structure 3235 */ 3236 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) 3237 { 3238 /* disable Admin queue Interrupt causes */ 3239 wr32(hw, PFINT_FW_CTL, 3240 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M); 3241 3242 /* disable Mailbox queue Interrupt causes */ 3243 wr32(hw, PFINT_MBX_CTL, 3244 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M); 3245 3246 wr32(hw, PFINT_SB_CTL, 3247 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M); 3248 3249 /* disable Control queue Interrupt causes */ 3250 wr32(hw, PFINT_OICR_CTL, 3251 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M); 3252 3253 ice_flush(hw); 3254 } 3255 3256 /** 3257 * ice_free_irq_msix_misc - Unroll misc vector setup 3258 * @pf: board private structure 3259 */ 3260 static void ice_free_irq_msix_misc(struct ice_pf *pf) 3261 { 3262 int misc_irq_num = pf->oicr_irq.virq; 3263 struct ice_hw *hw = &pf->hw; 3264 3265 ice_dis_ctrlq_interrupts(hw); 3266 3267 /* disable OICR interrupt */ 3268 wr32(hw, PFINT_OICR_ENA, 0); 3269 ice_flush(hw); 3270 3271 synchronize_irq(misc_irq_num); 3272 devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf); 3273 3274 ice_free_irq(pf, pf->oicr_irq); 3275 } 3276 3277 /** 3278 * ice_ena_ctrlq_interrupts - enable control queue interrupts 3279 * @hw: pointer to HW structure 3280 * @reg_idx: HW vector index to associate the control queue interrupts with 3281 */ 3282 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) 3283 { 3284 u32 val; 3285 3286 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) | 3287 PFINT_OICR_CTL_CAUSE_ENA_M); 3288 wr32(hw, PFINT_OICR_CTL, val); 3289 3290 /* enable Admin queue Interrupt causes */ 3291 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) | 3292 PFINT_FW_CTL_CAUSE_ENA_M); 3293 wr32(hw, PFINT_FW_CTL, val); 3294 3295 /* enable Mailbox queue Interrupt causes */ 3296 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) | 3297 PFINT_MBX_CTL_CAUSE_ENA_M); 3298 wr32(hw, PFINT_MBX_CTL, val); 3299 3300 /* This enables Sideband queue Interrupt causes */ 3301 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | 3302 PFINT_SB_CTL_CAUSE_ENA_M); 3303 wr32(hw, PFINT_SB_CTL, val); 3304 3305 ice_flush(hw); 3306 } 3307 3308 /** 3309 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events 3310 * @pf: board private structure 3311 * 3312 * This sets up the handler for MSIX 0, which is used to manage the 3313 * non-queue interrupts, e.g. AdminQ and errors. This is not used 3314 * when in MSI or Legacy interrupt mode. 3315 */ 3316 static int ice_req_irq_msix_misc(struct ice_pf *pf) 3317 { 3318 struct device *dev = ice_pf_to_dev(pf); 3319 struct ice_hw *hw = &pf->hw; 3320 struct msi_map oicr_irq; 3321 int err = 0; 3322 3323 if (!pf->int_name[0]) 3324 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", 3325 dev_driver_string(dev), dev_name(dev)); 3326 3327 /* Do not request IRQ but do enable OICR interrupt since settings are 3328 * lost during reset. Note that this function is called only during 3329 * rebuild path and not while reset is in progress. 
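	 * In the rebuild case the IRQ is still requested from the original
	 * probe, so only the hardware interrupt-enable settings need to be
	 * rewritten; that is what the skip_req_irq path below does.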
3330 */ 3331 if (ice_is_reset_in_progress(pf->state)) 3332 goto skip_req_irq; 3333 3334 /* reserve one vector in irq_tracker for misc interrupts */ 3335 oicr_irq = ice_alloc_irq(pf, false); 3336 if (oicr_irq.index < 0) 3337 return oicr_irq.index; 3338 3339 pf->oicr_irq = oicr_irq; 3340 err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr, 3341 ice_misc_intr_thread_fn, 0, 3342 pf->int_name, pf); 3343 if (err) { 3344 dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n", 3345 pf->int_name, err); 3346 ice_free_irq(pf, pf->oicr_irq); 3347 return err; 3348 } 3349 3350 skip_req_irq: 3351 ice_ena_misc_vector(pf); 3352 3353 ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index); 3354 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index), 3355 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); 3356 3357 ice_flush(hw); 3358 ice_irq_dynamic_ena(hw, NULL, NULL); 3359 3360 return 0; 3361 } 3362 3363 /** 3364 * ice_napi_add - register NAPI handler for the VSI 3365 * @vsi: VSI for which NAPI handler is to be registered 3366 * 3367 * This function is only called in the driver's load path. Registering the NAPI 3368 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, 3369 * reset/rebuild, etc.) 3370 */ 3371 static void ice_napi_add(struct ice_vsi *vsi) 3372 { 3373 int v_idx; 3374 3375 if (!vsi->netdev) 3376 return; 3377 3378 ice_for_each_q_vector(vsi, v_idx) { 3379 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, 3380 ice_napi_poll); 3381 ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false); 3382 } 3383 } 3384 3385 /** 3386 * ice_set_ops - set netdev and ethtools ops for the given netdev 3387 * @vsi: the VSI associated with the new netdev 3388 */ 3389 static void ice_set_ops(struct ice_vsi *vsi) 3390 { 3391 struct net_device *netdev = vsi->netdev; 3392 struct ice_pf *pf = ice_netdev_to_pf(netdev); 3393 3394 if (ice_is_safe_mode(pf)) { 3395 netdev->netdev_ops = &ice_netdev_safe_mode_ops; 3396 ice_set_ethtool_safe_mode_ops(netdev); 3397 return; 3398 } 3399 3400 netdev->netdev_ops = &ice_netdev_ops; 3401 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; 3402 ice_set_ethtool_ops(netdev); 3403 3404 if (vsi->type != ICE_VSI_PF) 3405 return; 3406 3407 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 3408 NETDEV_XDP_ACT_XSK_ZEROCOPY | 3409 NETDEV_XDP_ACT_RX_SG; 3410 netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD; 3411 } 3412 3413 /** 3414 * ice_set_netdev_features - set features for the given netdev 3415 * @netdev: netdev instance 3416 */ 3417 static void ice_set_netdev_features(struct net_device *netdev) 3418 { 3419 struct ice_pf *pf = ice_netdev_to_pf(netdev); 3420 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw); 3421 netdev_features_t csumo_features; 3422 netdev_features_t vlano_features; 3423 netdev_features_t dflt_features; 3424 netdev_features_t tso_features; 3425 3426 if (ice_is_safe_mode(pf)) { 3427 /* safe mode */ 3428 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; 3429 netdev->hw_features = netdev->features; 3430 return; 3431 } 3432 3433 dflt_features = NETIF_F_SG | 3434 NETIF_F_HIGHDMA | 3435 NETIF_F_NTUPLE | 3436 NETIF_F_RXHASH; 3437 3438 csumo_features = NETIF_F_RXCSUM | 3439 NETIF_F_IP_CSUM | 3440 NETIF_F_SCTP_CRC | 3441 NETIF_F_IPV6_CSUM; 3442 3443 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | 3444 NETIF_F_HW_VLAN_CTAG_TX | 3445 NETIF_F_HW_VLAN_CTAG_RX; 3446 3447 /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */ 3448 if (is_dvm_ena) 3449 vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER; 3450 3451 tso_features = 
NETIF_F_TSO | 3452 NETIF_F_TSO_ECN | 3453 NETIF_F_TSO6 | 3454 NETIF_F_GSO_GRE | 3455 NETIF_F_GSO_UDP_TUNNEL | 3456 NETIF_F_GSO_GRE_CSUM | 3457 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3458 NETIF_F_GSO_PARTIAL | 3459 NETIF_F_GSO_IPXIP4 | 3460 NETIF_F_GSO_IPXIP6 | 3461 NETIF_F_GSO_UDP_L4; 3462 3463 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | 3464 NETIF_F_GSO_GRE_CSUM; 3465 /* set features that user can change */ 3466 netdev->hw_features = dflt_features | csumo_features | 3467 vlano_features | tso_features; 3468 3469 /* add support for HW_CSUM on packets with MPLS header */ 3470 netdev->mpls_features = NETIF_F_HW_CSUM | 3471 NETIF_F_TSO | 3472 NETIF_F_TSO6; 3473 3474 /* enable features */ 3475 netdev->features |= netdev->hw_features; 3476 3477 netdev->hw_features |= NETIF_F_HW_TC; 3478 netdev->hw_features |= NETIF_F_LOOPBACK; 3479 3480 /* encap and VLAN devices inherit default, csumo and tso features */ 3481 netdev->hw_enc_features |= dflt_features | csumo_features | 3482 tso_features; 3483 netdev->vlan_features |= dflt_features | csumo_features | 3484 tso_features; 3485 3486 /* advertise support but don't enable by default since only one type of 3487 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one 3488 * type turns on the other has to be turned off. This is enforced by the 3489 * ice_fix_features() ndo callback. 3490 */ 3491 if (is_dvm_ena) 3492 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | 3493 NETIF_F_HW_VLAN_STAG_TX; 3494 3495 /* Leave CRC / FCS stripping enabled by default, but allow the value to 3496 * be changed at runtime 3497 */ 3498 netdev->hw_features |= NETIF_F_RXFCS; 3499 3500 netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE); 3501 } 3502 3503 /** 3504 * ice_fill_rss_lut - Fill the RSS lookup table with default values 3505 * @lut: Lookup table 3506 * @rss_table_size: Lookup table size 3507 * @rss_size: Range of queue number for hashing 3508 */ 3509 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) 3510 { 3511 u16 i; 3512 3513 for (i = 0; i < rss_table_size; i++) 3514 lut[i] = i % rss_size; 3515 } 3516 3517 /** 3518 * ice_pf_vsi_setup - Set up a PF VSI 3519 * @pf: board private structure 3520 * @pi: pointer to the port_info instance 3521 * 3522 * Returns pointer to the successfully allocated VSI software struct 3523 * on success, otherwise returns NULL on failure. 3524 */ 3525 static struct ice_vsi * 3526 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3527 { 3528 struct ice_vsi_cfg_params params = {}; 3529 3530 params.type = ICE_VSI_PF; 3531 params.pi = pi; 3532 params.flags = ICE_VSI_FLAG_INIT; 3533 3534 return ice_vsi_setup(pf, ¶ms); 3535 } 3536 3537 static struct ice_vsi * 3538 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, 3539 struct ice_channel *ch) 3540 { 3541 struct ice_vsi_cfg_params params = {}; 3542 3543 params.type = ICE_VSI_CHNL; 3544 params.pi = pi; 3545 params.ch = ch; 3546 params.flags = ICE_VSI_FLAG_INIT; 3547 3548 return ice_vsi_setup(pf, ¶ms); 3549 } 3550 3551 /** 3552 * ice_ctrl_vsi_setup - Set up a control VSI 3553 * @pf: board private structure 3554 * @pi: pointer to the port_info instance 3555 * 3556 * Returns pointer to the successfully allocated VSI software struct 3557 * on success, otherwise returns NULL on failure. 
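 *
 * Like ice_pf_vsi_setup() above, this is a thin wrapper around
 * ice_vsi_setup(); only the VSI type placed in ice_vsi_cfg_params differs.
 * A caller would typically do (sketch):
 *
 *	vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
 *	if (!vsi)
 *		return -ENOMEM;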
3558 */ 3559 static struct ice_vsi * 3560 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3561 { 3562 struct ice_vsi_cfg_params params = {}; 3563 3564 params.type = ICE_VSI_CTRL; 3565 params.pi = pi; 3566 params.flags = ICE_VSI_FLAG_INIT; 3567 3568 return ice_vsi_setup(pf, ¶ms); 3569 } 3570 3571 /** 3572 * ice_lb_vsi_setup - Set up a loopback VSI 3573 * @pf: board private structure 3574 * @pi: pointer to the port_info instance 3575 * 3576 * Returns pointer to the successfully allocated VSI software struct 3577 * on success, otherwise returns NULL on failure. 3578 */ 3579 struct ice_vsi * 3580 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3581 { 3582 struct ice_vsi_cfg_params params = {}; 3583 3584 params.type = ICE_VSI_LB; 3585 params.pi = pi; 3586 params.flags = ICE_VSI_FLAG_INIT; 3587 3588 return ice_vsi_setup(pf, ¶ms); 3589 } 3590 3591 /** 3592 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload 3593 * @netdev: network interface to be adjusted 3594 * @proto: VLAN TPID 3595 * @vid: VLAN ID to be added 3596 * 3597 * net_device_ops implementation for adding VLAN IDs 3598 */ 3599 static int 3600 ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) 3601 { 3602 struct ice_netdev_priv *np = netdev_priv(netdev); 3603 struct ice_vsi_vlan_ops *vlan_ops; 3604 struct ice_vsi *vsi = np->vsi; 3605 struct ice_vlan vlan; 3606 int ret; 3607 3608 /* VLAN 0 is added by default during load/reset */ 3609 if (!vid) 3610 return 0; 3611 3612 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) 3613 usleep_range(1000, 2000); 3614 3615 /* Add multicast promisc rule for the VLAN ID to be added if 3616 * all-multicast is currently enabled. 3617 */ 3618 if (vsi->current_netdev_flags & IFF_ALLMULTI) { 3619 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, 3620 ICE_MCAST_VLAN_PROMISC_BITS, 3621 vid); 3622 if (ret) 3623 goto finish; 3624 } 3625 3626 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 3627 3628 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged 3629 * packets aren't pruned by the device's internal switch on Rx 3630 */ 3631 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0); 3632 ret = vlan_ops->add_vlan(vsi, &vlan); 3633 if (ret) 3634 goto finish; 3635 3636 /* If all-multicast is currently enabled and this VLAN ID is only one 3637 * besides VLAN-0 we have to update look-up type of multicast promisc 3638 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN. 
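	 * That is done below by clearing the VLAN-0 rule installed with
	 * ICE_MCAST_PROMISC_BITS and re-adding it with
	 * ICE_MCAST_VLAN_PROMISC_BITS.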
3639 */ 3640 if ((vsi->current_netdev_flags & IFF_ALLMULTI) && 3641 ice_vsi_num_non_zero_vlans(vsi) == 1) { 3642 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, 3643 ICE_MCAST_PROMISC_BITS, 0); 3644 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, 3645 ICE_MCAST_VLAN_PROMISC_BITS, 0); 3646 } 3647 3648 finish: 3649 clear_bit(ICE_CFG_BUSY, vsi->state); 3650 3651 return ret; 3652 } 3653 3654 /** 3655 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload 3656 * @netdev: network interface to be adjusted 3657 * @proto: VLAN TPID 3658 * @vid: VLAN ID to be removed 3659 * 3660 * net_device_ops implementation for removing VLAN IDs 3661 */ 3662 static int 3663 ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) 3664 { 3665 struct ice_netdev_priv *np = netdev_priv(netdev); 3666 struct ice_vsi_vlan_ops *vlan_ops; 3667 struct ice_vsi *vsi = np->vsi; 3668 struct ice_vlan vlan; 3669 int ret; 3670 3671 /* don't allow removal of VLAN 0 */ 3672 if (!vid) 3673 return 0; 3674 3675 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) 3676 usleep_range(1000, 2000); 3677 3678 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, 3679 ICE_MCAST_VLAN_PROMISC_BITS, vid); 3680 if (ret) { 3681 netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n", 3682 vsi->vsi_num); 3683 vsi->current_netdev_flags |= IFF_ALLMULTI; 3684 } 3685 3686 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 3687 3688 /* Make sure VLAN delete is successful before updating VLAN 3689 * information 3690 */ 3691 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0); 3692 ret = vlan_ops->del_vlan(vsi, &vlan); 3693 if (ret) 3694 goto finish; 3695 3696 /* Remove multicast promisc rule for the removed VLAN ID if 3697 * all-multicast is enabled. 3698 */ 3699 if (vsi->current_netdev_flags & IFF_ALLMULTI) 3700 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, 3701 ICE_MCAST_VLAN_PROMISC_BITS, vid); 3702 3703 if (!ice_vsi_has_non_zero_vlans(vsi)) { 3704 /* Update look-up type of multicast promisc rule for VLAN 0 3705 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when 3706 * all-multicast is enabled and VLAN 0 is the only VLAN rule. 
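		 * This is the inverse of the look-up type swap performed in
		 * ice_vlan_rx_add_vid().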
3707 */ 3708 if (vsi->current_netdev_flags & IFF_ALLMULTI) { 3709 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, 3710 ICE_MCAST_VLAN_PROMISC_BITS, 3711 0); 3712 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, 3713 ICE_MCAST_PROMISC_BITS, 0); 3714 } 3715 } 3716 3717 finish: 3718 clear_bit(ICE_CFG_BUSY, vsi->state); 3719 3720 return ret; 3721 } 3722 3723 /** 3724 * ice_rep_indr_tc_block_unbind - release an indirect block's private data 3725 * @cb_priv: indirection block private data 3726 */ 3727 static void ice_rep_indr_tc_block_unbind(void *cb_priv) 3728 { 3729 struct ice_indr_block_priv *indr_priv = cb_priv; 3730 3731 list_del(&indr_priv->list); 3732 kfree(indr_priv); 3733 } 3734 3735 /** 3736 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications 3737 * @vsi: VSI struct which has the netdev 3738 */ 3739 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi) 3740 { 3741 struct ice_netdev_priv *np = netdev_priv(vsi->netdev); 3742 3743 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np, 3744 ice_rep_indr_tc_block_unbind); 3745 } 3746 3747 /** 3748 * ice_tc_indir_block_register - Register TC indirect block notifications 3749 * @vsi: VSI struct which has the netdev 3750 * 3751 * Returns 0 on success, negative value on failure 3752 */ 3753 static int ice_tc_indir_block_register(struct ice_vsi *vsi) 3754 { 3755 struct ice_netdev_priv *np; 3756 3757 if (!vsi || !vsi->netdev) 3758 return -EINVAL; 3759 3760 np = netdev_priv(vsi->netdev); 3761 3762 INIT_LIST_HEAD(&np->tc_indr_block_priv_list); 3763 return flow_indr_dev_register(ice_indr_setup_tc_cb, np); 3764 } 3765 3766 /** 3767 * ice_get_avail_q_count - Get count of available queues 3768 * @pf_qmap: bitmap to count available queues from 3769 * @lock: pointer to a mutex that protects access to pf_qmap 3770 * @size: size of the bitmap 3771 */ 3772 static u16 3773 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) 3774 { 3775 unsigned long bit; 3776 u16 count = 0; 3777 3778 mutex_lock(lock); 3779 for_each_clear_bit(bit, pf_qmap, size) 3780 count++; 3781 mutex_unlock(lock); 3782 3783 return count; 3784 } 3785 3786 /** 3787 * ice_get_avail_txq_count - Get count of available Tx queues 3788 * @pf: pointer to an ice_pf instance 3789 */ 3790 u16 ice_get_avail_txq_count(struct ice_pf *pf) 3791 { 3792 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, 3793 pf->max_pf_txqs); 3794 } 3795 3796 /** 3797 * ice_get_avail_rxq_count - Get count of available Rx queues 3798 * @pf: pointer to an ice_pf instance 3799 */ 3800 u16 ice_get_avail_rxq_count(struct ice_pf *pf) 3801 { 3802 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, 3803 pf->max_pf_rxqs); 3804 } 3805 3806 /** 3807 * ice_deinit_pf - Unrolls initializations done by ice_init_pf 3808 * @pf: board private structure to deinitialize 3809 */ 3810 static void ice_deinit_pf(struct ice_pf *pf) 3811 { 3812 ice_service_task_stop(pf); 3813 mutex_destroy(&pf->lag_mutex); 3814 mutex_destroy(&pf->adev_mutex); 3815 mutex_destroy(&pf->sw_mutex); 3816 mutex_destroy(&pf->tc_mutex); 3817 mutex_destroy(&pf->avail_q_mutex); 3818 mutex_destroy(&pf->vfs.table_lock); 3819 3820 if (pf->avail_txqs) { 3821 bitmap_free(pf->avail_txqs); 3822 pf->avail_txqs = NULL; 3823 } 3824 3825 if (pf->avail_rxqs) { 3826 bitmap_free(pf->avail_rxqs); 3827 pf->avail_rxqs = NULL; 3828 } 3829 3830 if (pf->ptp.clock) 3831 ptp_clock_unregister(pf->ptp.clock); 3832 } 3833 3834 /** 3835 * ice_set_pf_caps - set PF's capability flags 3836 * @pf: pointer to the PF instance 3837 */ 3838 static void ice_set_pf_caps(struct ice_pf
*pf) 3839 { 3840 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; 3841 3842 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3843 if (func_caps->common_cap.rdma) 3844 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); 3845 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 3846 if (func_caps->common_cap.dcb) 3847 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 3848 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 3849 if (func_caps->common_cap.sr_iov_1_1) { 3850 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); 3851 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs, 3852 ICE_MAX_SRIOV_VFS); 3853 } 3854 clear_bit(ICE_FLAG_RSS_ENA, pf->flags); 3855 if (func_caps->common_cap.rss_table_size) 3856 set_bit(ICE_FLAG_RSS_ENA, pf->flags); 3857 3858 clear_bit(ICE_FLAG_FD_ENA, pf->flags); 3859 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { 3860 u16 unused; 3861 3862 /* ctrl_vsi_idx will be set to a valid value when flow director 3863 * is set up by ice_init_fdir 3864 */ 3865 pf->ctrl_vsi_idx = ICE_NO_VSI; 3866 set_bit(ICE_FLAG_FD_ENA, pf->flags); 3867 /* force guaranteed filter pool for PF */ 3868 ice_alloc_fd_guar_item(&pf->hw, &unused, 3869 func_caps->fd_fltr_guar); 3870 /* force shared filter pool for PF */ 3871 ice_alloc_fd_shrd_item(&pf->hw, &unused, 3872 func_caps->fd_fltr_best_effort); 3873 } 3874 3875 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 3876 if (func_caps->common_cap.ieee_1588 && 3877 !(pf->hw.mac_type == ICE_MAC_E830)) 3878 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 3879 3880 pf->max_pf_txqs = func_caps->common_cap.num_txq; 3881 pf->max_pf_rxqs = func_caps->common_cap.num_rxq; 3882 } 3883 3884 /** 3885 * ice_init_pf - Initialize general software structures (struct ice_pf) 3886 * @pf: board private structure to initialize 3887 */ 3888 static int ice_init_pf(struct ice_pf *pf) 3889 { 3890 ice_set_pf_caps(pf); 3891 3892 mutex_init(&pf->sw_mutex); 3893 mutex_init(&pf->tc_mutex); 3894 mutex_init(&pf->adev_mutex); 3895 mutex_init(&pf->lag_mutex); 3896 3897 INIT_HLIST_HEAD(&pf->aq_wait_list); 3898 spin_lock_init(&pf->aq_wait_lock); 3899 init_waitqueue_head(&pf->aq_wait_queue); 3900 3901 init_waitqueue_head(&pf->reset_wait_queue); 3902 3903 /* set up service timer and periodic service task */ 3904 timer_setup(&pf->serv_tmr, ice_service_timer, 0); 3905 pf->serv_tmr_period = HZ; 3906 INIT_WORK(&pf->serv_task, ice_service_task); 3907 clear_bit(ICE_SERVICE_SCHED, pf->state); 3908 3909 mutex_init(&pf->avail_q_mutex); 3910 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); 3911 if (!pf->avail_txqs) 3912 return -ENOMEM; 3913 3914 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); 3915 if (!pf->avail_rxqs) { 3916 bitmap_free(pf->avail_txqs); 3917 pf->avail_txqs = NULL; 3918 return -ENOMEM; 3919 } 3920 3921 mutex_init(&pf->vfs.table_lock); 3922 hash_init(pf->vfs.table); 3923 ice_mbx_init_snapshot(&pf->hw); 3924 3925 return 0; 3926 } 3927 3928 /** 3929 * ice_is_wol_supported - check if WoL is supported 3930 * @hw: pointer to hardware info 3931 * 3932 * Check if WoL is supported based on the HW configuration. 3933 * Returns true if NVM supports and enables WoL for this port, false otherwise 3934 */ 3935 bool ice_is_wol_supported(struct ice_hw *hw) 3936 { 3937 u16 wol_ctrl; 3938 3939 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control 3940 * word) indicates WoL is not supported on the corresponding PF ID.
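 * For example, with an (illustrative) wol_ctrl value of 0x0005, bits 0
 * and 2 are set, so ports 0 and 2 would report WoL as unsupported while
 * ports 1 and 3 would support it.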
3941 */ 3942 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) 3943 return false; 3944 3945 return !(BIT(hw->port_info->lport) & wol_ctrl); 3946 } 3947 3948 /** 3949 * ice_vsi_recfg_qs - Change the number of queues on a VSI 3950 * @vsi: VSI being changed 3951 * @new_rx: new number of Rx queues 3952 * @new_tx: new number of Tx queues 3953 * @locked: is adev device_lock held 3954 * 3955 * Only change the number of queues if new_tx or new_rx is non-zero. 3956 * 3957 * Returns 0 on success. 3958 */ 3959 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) 3960 { 3961 struct ice_pf *pf = vsi->back; 3962 int err = 0, timeout = 50; 3963 3964 if (!new_rx && !new_tx) 3965 return -EINVAL; 3966 3967 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { 3968 timeout--; 3969 if (!timeout) 3970 return -EBUSY; 3971 usleep_range(1000, 2000); 3972 } 3973 3974 if (new_tx) 3975 vsi->req_txq = (u16)new_tx; 3976 if (new_rx) 3977 vsi->req_rxq = (u16)new_rx; 3978 3979 /* set for the next time the netdev is started */ 3980 if (!netif_running(vsi->netdev)) { 3981 ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); 3982 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); 3983 goto done; 3984 } 3985 3986 ice_vsi_close(vsi); 3987 ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); 3988 ice_pf_dcb_recfg(pf, locked); 3989 ice_vsi_open(vsi); 3990 done: 3991 clear_bit(ICE_CFG_BUSY, pf->state); 3992 return err; 3993 } 3994 3995 /** 3996 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode 3997 * @pf: PF to configure 3998 * 3999 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF 4000 * VSI can still Tx/Rx VLAN tagged packets. 4001 */ 4002 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) 4003 { 4004 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4005 struct ice_vsi_ctx *ctxt; 4006 struct ice_hw *hw; 4007 int status; 4008 4009 if (!vsi) 4010 return; 4011 4012 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 4013 if (!ctxt) 4014 return; 4015 4016 hw = &pf->hw; 4017 ctxt->info = vsi->info; 4018 4019 ctxt->info.valid_sections = 4020 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | 4021 ICE_AQ_VSI_PROP_SECURITY_VALID | 4022 ICE_AQ_VSI_PROP_SW_VALID); 4023 4024 /* disable VLAN anti-spoof */ 4025 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 4026 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 4027 4028 /* disable VLAN pruning and keep all other settings */ 4029 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 4030 4031 /* allow all VLANs on Tx and don't strip on Rx */ 4032 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL | 4033 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING; 4034 4035 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 4036 if (status) { 4037 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", 4038 status, ice_aq_str(hw->adminq.sq_last_status)); 4039 } else { 4040 vsi->info.sec_flags = ctxt->info.sec_flags; 4041 vsi->info.sw_flags2 = ctxt->info.sw_flags2; 4042 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags; 4043 } 4044 4045 kfree(ctxt); 4046 } 4047 4048 /** 4049 * ice_log_pkg_init - log result of DDP package load 4050 * @hw: pointer to hardware info 4051 * @state: state of package load 4052 */ 4053 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state) 4054 { 4055 struct ice_pf *pf = hw->back; 4056 struct device *dev; 4057 4058 dev = ice_pf_to_dev(pf); 4059 4060 switch (state) { 4061 case ICE_DDP_PKG_SUCCESS: 4062
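/* The package was downloaded and is now active; log its name and
 * version exactly as reported by the firmware.
 */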
dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", 4063 hw->active_pkg_name, 4064 hw->active_pkg_ver.major, 4065 hw->active_pkg_ver.minor, 4066 hw->active_pkg_ver.update, 4067 hw->active_pkg_ver.draft); 4068 break; 4069 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: 4070 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", 4071 hw->active_pkg_name, 4072 hw->active_pkg_ver.major, 4073 hw->active_pkg_ver.minor, 4074 hw->active_pkg_ver.update, 4075 hw->active_pkg_ver.draft); 4076 break; 4077 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED: 4078 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 4079 hw->active_pkg_name, 4080 hw->active_pkg_ver.major, 4081 hw->active_pkg_ver.minor, 4082 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 4083 break; 4084 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: 4085 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", 4086 hw->active_pkg_name, 4087 hw->active_pkg_ver.major, 4088 hw->active_pkg_ver.minor, 4089 hw->active_pkg_ver.update, 4090 hw->active_pkg_ver.draft, 4091 hw->pkg_name, 4092 hw->pkg_ver.major, 4093 hw->pkg_ver.minor, 4094 hw->pkg_ver.update, 4095 hw->pkg_ver.draft); 4096 break; 4097 case ICE_DDP_PKG_FW_MISMATCH: 4098 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); 4099 break; 4100 case ICE_DDP_PKG_INVALID_FILE: 4101 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); 4102 break; 4103 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH: 4104 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); 4105 break; 4106 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW: 4107 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", 4108 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 4109 break; 4110 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID: 4111 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); 4112 break; 4113 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW: 4114 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); 4115 break; 4116 case ICE_DDP_PKG_LOAD_ERROR: 4117 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); 4118 /* poll for reset to complete */ 4119 if (ice_check_reset(hw)) 4120 dev_err(dev, "Error resetting device. Please reload the driver\n"); 4121 break; 4122 case ICE_DDP_PKG_ERR: 4123 default: 4124 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n"); 4125 break; 4126 } 4127 } 4128 4129 /** 4130 * ice_load_pkg - load/reload the DDP Package file 4131 * @firmware: firmware structure when firmware requested or NULL for reload 4132 * @pf: pointer to the PF instance 4133 * 4134 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and 4135 * initialize HW tables. 
4136 */ 4137 static void 4138 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) 4139 { 4140 enum ice_ddp_state state = ICE_DDP_PKG_ERR; 4141 struct device *dev = ice_pf_to_dev(pf); 4142 struct ice_hw *hw = &pf->hw; 4143 4144 /* Load DDP Package */ 4145 if (firmware && !hw->pkg_copy) { 4146 state = ice_copy_and_init_pkg(hw, firmware->data, 4147 firmware->size); 4148 ice_log_pkg_init(hw, state); 4149 } else if (!firmware && hw->pkg_copy) { 4150 /* Reload package during rebuild after CORER/GLOBR reset */ 4151 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); 4152 ice_log_pkg_init(hw, state); 4153 } else { 4154 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); 4155 } 4156 4157 if (!ice_is_init_pkg_successful(state)) { 4158 /* Safe Mode */ 4159 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 4160 return; 4161 } 4162 4163 /* Successful download package is the precondition for advanced 4164 * features, hence setting the ICE_FLAG_ADV_FEATURES flag 4165 */ 4166 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 4167 } 4168 4169 /** 4170 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines 4171 * @pf: pointer to the PF structure 4172 * 4173 * There is no error returned here because the driver should be able to handle 4174 * 128 Byte cache lines, so we only print a warning in case issues are seen, 4175 * specifically with Tx. 4176 */ 4177 static void ice_verify_cacheline_size(struct ice_pf *pf) 4178 { 4179 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) 4180 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", 4181 ICE_CACHE_LINE_BYTES); 4182 } 4183 4184 /** 4185 * ice_send_version - update firmware with driver version 4186 * @pf: PF struct 4187 * 4188 * Returns 0 on success, else error code 4189 */ 4190 static int ice_send_version(struct ice_pf *pf) 4191 { 4192 struct ice_driver_ver dv; 4193 4194 dv.major_ver = 0xff; 4195 dv.minor_ver = 0xff; 4196 dv.build_ver = 0xff; 4197 dv.subbuild_ver = 0; 4198 strscpy((char *)dv.driver_string, UTS_RELEASE, 4199 sizeof(dv.driver_string)); 4200 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); 4201 } 4202 4203 /** 4204 * ice_init_fdir - Initialize flow director VSI and configuration 4205 * @pf: pointer to the PF instance 4206 * 4207 * returns 0 on success, negative on error 4208 */ 4209 static int ice_init_fdir(struct ice_pf *pf) 4210 { 4211 struct device *dev = ice_pf_to_dev(pf); 4212 struct ice_vsi *ctrl_vsi; 4213 int err; 4214 4215 /* Side Band Flow Director needs to have a control VSI. 4216 * Allocate it and store it in the PF. 
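 * The control VSI carries no regular network traffic; its Tx queue is
 * used only to post the programming descriptors that install side-band
 * flow director filters.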
4217 */ 4218 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); 4219 if (!ctrl_vsi) { 4220 dev_dbg(dev, "could not create control VSI\n"); 4221 return -ENOMEM; 4222 } 4223 4224 err = ice_vsi_open_ctrl(ctrl_vsi); 4225 if (err) { 4226 dev_dbg(dev, "could not open control VSI\n"); 4227 goto err_vsi_open; 4228 } 4229 4230 mutex_init(&pf->hw.fdir_fltr_lock); 4231 4232 err = ice_fdir_create_dflt_rules(pf); 4233 if (err) 4234 goto err_fdir_rule; 4235 4236 return 0; 4237 4238 err_fdir_rule: 4239 ice_fdir_release_flows(&pf->hw); 4240 ice_vsi_close(ctrl_vsi); 4241 err_vsi_open: 4242 ice_vsi_release(ctrl_vsi); 4243 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { 4244 pf->vsi[pf->ctrl_vsi_idx] = NULL; 4245 pf->ctrl_vsi_idx = ICE_NO_VSI; 4246 } 4247 return err; 4248 } 4249 4250 static void ice_deinit_fdir(struct ice_pf *pf) 4251 { 4252 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf); 4253 4254 if (!vsi) 4255 return; 4256 4257 ice_vsi_manage_fdir(vsi, false); 4258 ice_vsi_release(vsi); 4259 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { 4260 pf->vsi[pf->ctrl_vsi_idx] = NULL; 4261 pf->ctrl_vsi_idx = ICE_NO_VSI; 4262 } 4263 4264 mutex_destroy(&pf->hw.fdir_fltr_lock); 4265 } 4266 4267 /** 4268 * ice_get_opt_fw_name - return optional firmware file name or NULL 4269 * @pf: pointer to the PF instance 4270 */ 4271 static char *ice_get_opt_fw_name(struct ice_pf *pf) 4272 { 4273 /* Optional firmware name same as default with additional dash 4274 * followed by an EUI-64 identifier (PCIe Device Serial Number) 4275 */ 4276 struct pci_dev *pdev = pf->pdev; 4277 char *opt_fw_filename; 4278 u64 dsn; 4279 4280 /* Determine the name of the optional file using the DSN (two 4281 * dwords following the start of the DSN Capability). 4282 */ 4283 dsn = pci_get_dsn(pdev); 4284 if (!dsn) 4285 return NULL; 4286 4287 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); 4288 if (!opt_fw_filename) 4289 return NULL; 4290 4291 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg", 4292 ICE_DDP_PKG_PATH, dsn); 4293 4294 return opt_fw_filename; 4295 } 4296 4297 /** 4298 * ice_request_fw - request and load the DDP package file 4299 * @pf: pointer to the PF instance 4300 */ 4301 static void ice_request_fw(struct ice_pf *pf) 4302 { 4303 char *opt_fw_filename = ice_get_opt_fw_name(pf); 4304 const struct firmware *firmware = NULL; 4305 struct device *dev = ice_pf_to_dev(pf); 4306 int err = 0; 4307 4308 /* An optional device-specific DDP (if present) overrides the default 4309 * DDP package file. The kernel logs a debug message if the file doesn't 4310 * exist and warning messages for other errors. 4311 */ 4312 if (opt_fw_filename) { 4313 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); 4314 if (err) { 4315 kfree(opt_fw_filename); 4316 goto dflt_pkg_load; 4317 } 4318 4319 /* request for firmware was successful. Download to device */ 4320 ice_load_pkg(firmware, pf); 4321 kfree(opt_fw_filename); 4322 release_firmware(firmware); 4323 return; 4324 } 4325 4326 dflt_pkg_load: 4327 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); 4328 if (err) { 4329 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode.\n"); 4330 return; 4331 } 4332 4333 /* request for firmware was successful.
Download to device */ 4334 ice_load_pkg(firmware, pf); 4335 release_firmware(firmware); 4336 } 4337 4338 /** 4339 * ice_print_wake_reason - show the wake up cause in the log 4340 * @pf: pointer to the PF struct 4341 */ 4342 static void ice_print_wake_reason(struct ice_pf *pf) 4343 { 4344 u32 wus = pf->wakeup_reason; 4345 const char *wake_str; 4346 4347 /* if no wake event, nothing to print */ 4348 if (!wus) 4349 return; 4350 4351 if (wus & PFPM_WUS_LNKC_M) 4352 wake_str = "Link\n"; 4353 else if (wus & PFPM_WUS_MAG_M) 4354 wake_str = "Magic Packet\n"; 4355 else if (wus & PFPM_WUS_MNG_M) 4356 wake_str = "Management\n"; 4357 else if (wus & PFPM_WUS_FW_RST_WK_M) 4358 wake_str = "Firmware Reset\n"; 4359 else 4360 wake_str = "Unknown\n"; 4361 4362 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); 4363 } 4364 4365 /** 4366 * ice_register_netdev - register netdev 4367 * @vsi: pointer to the VSI struct 4368 */ 4369 static int ice_register_netdev(struct ice_vsi *vsi) 4370 { 4371 int err; 4372 4373 if (!vsi || !vsi->netdev) 4374 return -EIO; 4375 4376 err = register_netdev(vsi->netdev); 4377 if (err) 4378 return err; 4379 4380 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 4381 netif_carrier_off(vsi->netdev); 4382 netif_tx_stop_all_queues(vsi->netdev); 4383 4384 return 0; 4385 } 4386 4387 static void ice_unregister_netdev(struct ice_vsi *vsi) 4388 { 4389 if (!vsi || !vsi->netdev) 4390 return; 4391 4392 unregister_netdev(vsi->netdev); 4393 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); 4394 } 4395 4396 /** 4397 * ice_cfg_netdev - Allocate, configure and register a netdev 4398 * @vsi: the VSI associated with the new netdev 4399 * 4400 * Returns 0 on success, negative value on failure 4401 */ 4402 static int ice_cfg_netdev(struct ice_vsi *vsi) 4403 { 4404 struct ice_netdev_priv *np; 4405 struct net_device *netdev; 4406 u8 mac_addr[ETH_ALEN]; 4407 4408 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, 4409 vsi->alloc_rxq); 4410 if (!netdev) 4411 return -ENOMEM; 4412 4413 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 4414 vsi->netdev = netdev; 4415 np = netdev_priv(netdev); 4416 np->vsi = vsi; 4417 4418 ice_set_netdev_features(netdev); 4419 ice_set_ops(vsi); 4420 4421 if (vsi->type == ICE_VSI_PF) { 4422 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); 4423 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 4424 eth_hw_addr_set(netdev, mac_addr); 4425 } 4426 4427 netdev->priv_flags |= IFF_UNICAST_FLT; 4428 4429 /* Setup netdev TC information */ 4430 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); 4431 4432 netdev->max_mtu = ICE_MAX_MTU; 4433 4434 return 0; 4435 } 4436 4437 static void ice_decfg_netdev(struct ice_vsi *vsi) 4438 { 4439 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); 4440 free_netdev(vsi->netdev); 4441 vsi->netdev = NULL; 4442 } 4443 4444 static int ice_start_eth(struct ice_vsi *vsi) 4445 { 4446 int err; 4447 4448 err = ice_init_mac_fltr(vsi->back); 4449 if (err) 4450 return err; 4451 4452 err = ice_vsi_open(vsi); 4453 if (err) 4454 ice_fltr_remove_all(vsi); 4455 4456 return err; 4457 } 4458 4459 static void ice_stop_eth(struct ice_vsi *vsi) 4460 { 4461 ice_fltr_remove_all(vsi); 4462 ice_vsi_close(vsi); 4463 } 4464 4465 static int ice_init_eth(struct ice_pf *pf) 4466 { 4467 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4468 int err; 4469 4470 if (!vsi) 4471 return -EINVAL; 4472 4473 /* init channel list */ 4474 INIT_LIST_HEAD(&vsi->ch_list); 4475 4476 err = ice_cfg_netdev(vsi); 4477 if (err) 4478 return err; 4479 /* Setup DCB netlink interface */ 4480 
ice_dcbnl_setup(vsi); 4481 4482 err = ice_init_mac_fltr(pf); 4483 if (err) 4484 goto err_init_mac_fltr; 4485 4486 err = ice_devlink_create_pf_port(pf); 4487 if (err) 4488 goto err_devlink_create_pf_port; 4489 4490 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); 4491 4492 err = ice_register_netdev(vsi); 4493 if (err) 4494 goto err_register_netdev; 4495 4496 err = ice_tc_indir_block_register(vsi); 4497 if (err) 4498 goto err_tc_indir_block_register; 4499 4500 ice_napi_add(vsi); 4501 4502 return 0; 4503 4504 err_tc_indir_block_register: 4505 ice_unregister_netdev(vsi); 4506 err_register_netdev: 4507 ice_devlink_destroy_pf_port(pf); 4508 err_devlink_create_pf_port: 4509 err_init_mac_fltr: 4510 ice_decfg_netdev(vsi); 4511 return err; 4512 } 4513 4514 static void ice_deinit_eth(struct ice_pf *pf) 4515 { 4516 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4517 4518 if (!vsi) 4519 return; 4520 4521 ice_vsi_close(vsi); 4522 ice_unregister_netdev(vsi); 4523 ice_devlink_destroy_pf_port(pf); 4524 ice_tc_indir_block_unregister(vsi); 4525 ice_decfg_netdev(vsi); 4526 } 4527 4528 /** 4529 * ice_wait_for_fw - wait for full FW readiness 4530 * @hw: pointer to the hardware structure 4531 * @timeout: milliseconds that can elapse before timing out 4532 */ 4533 static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout) 4534 { 4535 int fw_loading; 4536 u32 elapsed = 0; 4537 4538 while (elapsed <= timeout) { 4539 fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M; 4540 4541 /* firmware was not yet loaded, so we have to wait more */ 4542 if (fw_loading) { 4543 elapsed += 100; 4544 msleep(100); 4545 continue; 4546 } 4547 return 0; 4548 } 4549 4550 return -ETIMEDOUT; 4551 } 4552 4553 static int ice_init_dev(struct ice_pf *pf) 4554 { 4555 struct device *dev = ice_pf_to_dev(pf); 4556 struct ice_hw *hw = &pf->hw; 4557 int err; 4558 4559 err = ice_init_hw(hw); 4560 if (err) { 4561 dev_err(dev, "ice_init_hw failed: %d\n", err); 4562 return err; 4563 } 4564 4565 /* Some cards require longer initialization times 4566 * due to the necessity of loading FW from an external source. 4567 * This can take even half a minute. 4568 */ 4569 if (ice_is_pf_c827(hw)) { 4570 err = ice_wait_for_fw(hw, 30000); 4571 if (err) { 4572 dev_err(dev, "ice_wait_for_fw timed out\n"); 4573 return err; 4574 } 4575 } 4576 4577 ice_init_feature_support(pf); 4578 4579 ice_request_fw(pf); 4580 4581 /* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be 4582 * set in pf->flags, which will cause ice_is_safe_mode to return 4583 * true 4584 */ 4585 if (ice_is_safe_mode(pf)) { 4586 /* we already got function/device capabilities but these don't 4587 * reflect what the driver needs to do in safe mode. Instead of 4588 * adding conditional logic everywhere to ignore these 4589 * device/function capabilities, override them.
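 * (ice_set_safe_mode_caps() leaves the PF with minimal capabilities,
 * e.g. a single Tx/Rx queue pair, which is all the driver can service
 * without a DDP package.)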
4590 */ 4591 ice_set_safe_mode_caps(hw); 4592 } 4593 4594 err = ice_init_pf(pf); 4595 if (err) { 4596 dev_err(dev, "ice_init_pf failed: %d\n", err); 4597 goto err_init_pf; 4598 } 4599 4600 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; 4601 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; 4602 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; 4603 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; 4604 if (pf->hw.tnl.valid_count[TNL_VXLAN]) { 4605 pf->hw.udp_tunnel_nic.tables[0].n_entries = 4606 pf->hw.tnl.valid_count[TNL_VXLAN]; 4607 pf->hw.udp_tunnel_nic.tables[0].tunnel_types = 4608 UDP_TUNNEL_TYPE_VXLAN; 4609 } 4610 if (pf->hw.tnl.valid_count[TNL_GENEVE]) { 4611 pf->hw.udp_tunnel_nic.tables[1].n_entries = 4612 pf->hw.tnl.valid_count[TNL_GENEVE]; 4613 pf->hw.udp_tunnel_nic.tables[1].tunnel_types = 4614 UDP_TUNNEL_TYPE_GENEVE; 4615 } 4616 4617 err = ice_init_interrupt_scheme(pf); 4618 if (err) { 4619 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); 4620 err = -EIO; 4621 goto err_init_interrupt_scheme; 4622 } 4623 4624 /* In case of MSIX we are going to set up the misc vector right here 4625 * to handle admin queue events etc. In case of legacy and MSI 4626 * the misc functionality and queue processing is combined in 4627 * the same vector and that gets set up at open. 4628 */ 4629 err = ice_req_irq_msix_misc(pf); 4630 if (err) { 4631 dev_err(dev, "setup of misc vector failed: %d\n", err); 4632 goto err_req_irq_msix_misc; 4633 } 4634 4635 return 0; 4636 4637 err_req_irq_msix_misc: 4638 ice_clear_interrupt_scheme(pf); 4639 err_init_interrupt_scheme: 4640 ice_deinit_pf(pf); 4641 err_init_pf: 4642 ice_deinit_hw(hw); 4643 return err; 4644 } 4645 4646 static void ice_deinit_dev(struct ice_pf *pf) 4647 { 4648 ice_free_irq_msix_misc(pf); 4649 ice_deinit_pf(pf); 4650 ice_deinit_hw(&pf->hw); 4651 4652 /* Service task is already stopped, so call reset directly.
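 * Going through ice_schedule_reset() would not work here because it
 * relies on the service task to perform the reset.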
*/ 4653 ice_reset(&pf->hw, ICE_RESET_PFR); 4654 pci_wait_for_pending_transaction(pf->pdev); 4655 ice_clear_interrupt_scheme(pf); 4656 } 4657 4658 static void ice_init_features(struct ice_pf *pf) 4659 { 4660 struct device *dev = ice_pf_to_dev(pf); 4661 4662 if (ice_is_safe_mode(pf)) 4663 return; 4664 4665 /* initialize DDP driven features */ 4666 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 4667 ice_ptp_init(pf); 4668 4669 if (ice_is_feature_supported(pf, ICE_F_GNSS)) 4670 ice_gnss_init(pf); 4671 4672 if (ice_is_feature_supported(pf, ICE_F_CGU) || 4673 ice_is_feature_supported(pf, ICE_F_PHY_RCLK)) 4674 ice_dpll_init(pf); 4675 4676 /* Note: Flow director init failure is non-fatal to load */ 4677 if (ice_init_fdir(pf)) 4678 dev_err(dev, "could not initialize flow director\n"); 4679 4680 /* Note: DCB init failure is non-fatal to load */ 4681 if (ice_init_pf_dcb(pf, false)) { 4682 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 4683 clear_bit(ICE_FLAG_DCB_ENA, pf->flags); 4684 } else { 4685 ice_cfg_lldp_mib_change(&pf->hw, true); 4686 } 4687 4688 if (ice_init_lag(pf)) 4689 dev_warn(dev, "Failed to init link aggregation support\n"); 4690 } 4691 4692 static void ice_deinit_features(struct ice_pf *pf) 4693 { 4694 if (ice_is_safe_mode(pf)) 4695 return; 4696 4697 ice_deinit_lag(pf); 4698 if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) 4699 ice_cfg_lldp_mib_change(&pf->hw, false); 4700 ice_deinit_fdir(pf); 4701 if (ice_is_feature_supported(pf, ICE_F_GNSS)) 4702 ice_gnss_exit(pf); 4703 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 4704 ice_ptp_release(pf); 4705 if (test_bit(ICE_FLAG_DPLL, pf->flags)) 4706 ice_dpll_deinit(pf); 4707 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) 4708 xa_destroy(&pf->eswitch.reprs); 4709 } 4710 4711 static void ice_init_wakeup(struct ice_pf *pf) 4712 { 4713 /* Save wakeup reason register for later use */ 4714 pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS); 4715 4716 /* check for a power management event */ 4717 ice_print_wake_reason(pf); 4718 4719 /* clear wake status, all bits */ 4720 wr32(&pf->hw, PFPM_WUS, U32_MAX); 4721 4722 /* Disable WoL at init, wait for user to enable */ 4723 device_set_wakeup_enable(ice_pf_to_dev(pf), false); 4724 } 4725 4726 static int ice_init_link(struct ice_pf *pf) 4727 { 4728 struct device *dev = ice_pf_to_dev(pf); 4729 int err; 4730 4731 err = ice_init_link_events(pf->hw.port_info); 4732 if (err) { 4733 dev_err(dev, "ice_init_link_events failed: %d\n", err); 4734 return err; 4735 } 4736 4737 /* not a fatal error if this fails */ 4738 err = ice_init_nvm_phy_type(pf->hw.port_info); 4739 if (err) 4740 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); 4741 4742 /* not a fatal error if this fails */ 4743 err = ice_update_link_info(pf->hw.port_info); 4744 if (err) 4745 dev_err(dev, "ice_update_link_info failed: %d\n", err); 4746 4747 ice_init_link_dflt_override(pf->hw.port_info); 4748 4749 ice_check_link_cfg_err(pf, 4750 pf->hw.port_info->phy.link_info.link_cfg_err); 4751 4752 /* if media available, initialize PHY settings */ 4753 if (pf->hw.port_info->phy.link_info.link_info & 4754 ICE_AQ_MEDIA_AVAILABLE) { 4755 /* not a fatal error if this fails */ 4756 err = ice_init_phy_user_cfg(pf->hw.port_info); 4757 if (err) 4758 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); 4759 4760 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { 4761 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4762 4763 if (vsi) 4764 ice_configure_phy(vsi); 4765 } 4766 } else { 4767 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); 4768 } 4769 4770 return 
err; 4771 } 4772 4773 static int ice_init_pf_sw(struct ice_pf *pf) 4774 { 4775 bool dvm = ice_is_dvm_ena(&pf->hw); 4776 struct ice_vsi *vsi; 4777 int err; 4778 4779 /* create switch struct for the switch element created by FW on boot */ 4780 pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL); 4781 if (!pf->first_sw) 4782 return -ENOMEM; 4783 4784 if (pf->hw.evb_veb) 4785 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; 4786 else 4787 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; 4788 4789 pf->first_sw->pf = pf; 4790 4791 /* record the sw_id available for later use */ 4792 pf->first_sw->sw_id = pf->hw.port_info->sw_id; 4793 4794 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); 4795 if (err) 4796 goto err_aq_set_port_params; 4797 4798 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); 4799 if (!vsi) { 4800 err = -ENOMEM; 4801 goto err_pf_vsi_setup; 4802 } 4803 4804 return 0; 4805 4806 err_pf_vsi_setup: 4807 err_aq_set_port_params: 4808 kfree(pf->first_sw); 4809 return err; 4810 } 4811 4812 static void ice_deinit_pf_sw(struct ice_pf *pf) 4813 { 4814 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4815 4816 if (!vsi) 4817 return; 4818 4819 ice_vsi_release(vsi); 4820 kfree(pf->first_sw); 4821 } 4822 4823 static int ice_alloc_vsis(struct ice_pf *pf) 4824 { 4825 struct device *dev = ice_pf_to_dev(pf); 4826 4827 pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi; 4828 if (!pf->num_alloc_vsi) 4829 return -EIO; 4830 4831 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { 4832 dev_warn(dev, 4833 "limiting the VSI count due to UDP tunnel limitation %d > %d\n", 4834 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); 4835 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; 4836 } 4837 4838 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), 4839 GFP_KERNEL); 4840 if (!pf->vsi) 4841 return -ENOMEM; 4842 4843 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi, 4844 sizeof(*pf->vsi_stats), GFP_KERNEL); 4845 if (!pf->vsi_stats) { 4846 devm_kfree(dev, pf->vsi); 4847 return -ENOMEM; 4848 } 4849 4850 return 0; 4851 } 4852 4853 static void ice_dealloc_vsis(struct ice_pf *pf) 4854 { 4855 devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats); 4856 pf->vsi_stats = NULL; 4857 4858 pf->num_alloc_vsi = 0; 4859 devm_kfree(ice_pf_to_dev(pf), pf->vsi); 4860 pf->vsi = NULL; 4861 } 4862 4863 static int ice_init_devlink(struct ice_pf *pf) 4864 { 4865 int err; 4866 4867 err = ice_devlink_register_params(pf); 4868 if (err) 4869 return err; 4870 4871 ice_devlink_init_regions(pf); 4872 ice_devlink_register(pf); 4873 4874 return 0; 4875 } 4876 4877 static void ice_deinit_devlink(struct ice_pf *pf) 4878 { 4879 ice_devlink_unregister(pf); 4880 ice_devlink_destroy_regions(pf); 4881 ice_devlink_unregister_params(pf); 4882 } 4883 4884 static int ice_init(struct ice_pf *pf) 4885 { 4886 int err; 4887 4888 err = ice_init_dev(pf); 4889 if (err) 4890 return err; 4891 4892 err = ice_alloc_vsis(pf); 4893 if (err) 4894 goto err_alloc_vsis; 4895 4896 err = ice_init_pf_sw(pf); 4897 if (err) 4898 goto err_init_pf_sw; 4899 4900 ice_init_wakeup(pf); 4901 4902 err = ice_init_link(pf); 4903 if (err) 4904 goto err_init_link; 4905 4906 err = ice_send_version(pf); 4907 if (err) 4908 goto err_init_link; 4909 4910 ice_verify_cacheline_size(pf); 4911 4912 if (ice_is_safe_mode(pf)) 4913 ice_set_safe_mode_vlan_cfg(pf); 4914 else 4915 /* print PCI link speed and width */ 4916 pcie_print_link_status(pf->pdev); 4917 4918 /* ready to go, so clear down state bit */ 4919 clear_bit(ICE_DOWN, pf->state); 4920 clear_bit(ICE_SERVICE_DIS, 
pf->state); 4921 4922 /* since everything is good, start the service timer */ 4923 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 4924 4925 return 0; 4926 4927 err_init_link: 4928 ice_deinit_pf_sw(pf); 4929 err_init_pf_sw: 4930 ice_dealloc_vsis(pf); 4931 err_alloc_vsis: 4932 ice_deinit_dev(pf); 4933 return err; 4934 } 4935 4936 static void ice_deinit(struct ice_pf *pf) 4937 { 4938 set_bit(ICE_SERVICE_DIS, pf->state); 4939 set_bit(ICE_DOWN, pf->state); 4940 4941 ice_deinit_pf_sw(pf); 4942 ice_dealloc_vsis(pf); 4943 ice_deinit_dev(pf); 4944 } 4945 4946 /** 4947 * ice_load - load the PF by initializing HW and starting the main VSI 4948 * @pf: pointer to the pf instance 4949 */ 4950 int ice_load(struct ice_pf *pf) 4951 { 4952 struct ice_vsi_cfg_params params = {}; 4953 struct ice_vsi *vsi; 4954 int err; 4955 4956 err = ice_init_dev(pf); 4957 if (err) 4958 return err; 4959 4960 vsi = ice_get_main_vsi(pf); 4961 4962 params = ice_vsi_to_params(vsi); 4963 params.flags = ICE_VSI_FLAG_INIT; 4964 4965 rtnl_lock(); 4966 err = ice_vsi_cfg(vsi, &params); 4967 if (err) 4968 goto err_vsi_cfg; 4969 4970 err = ice_start_eth(ice_get_main_vsi(pf)); 4971 if (err) 4972 goto err_start_eth; 4973 rtnl_unlock(); 4974 4975 err = ice_init_rdma(pf); 4976 if (err) 4977 goto err_init_rdma; 4978 4979 ice_init_features(pf); 4980 ice_service_task_restart(pf); 4981 4982 clear_bit(ICE_DOWN, pf->state); 4983 4984 return 0; 4985 4986 err_init_rdma: 4987 ice_vsi_close(ice_get_main_vsi(pf)); 4988 rtnl_lock(); 4989 err_start_eth: 4990 ice_vsi_decfg(ice_get_main_vsi(pf)); 4991 err_vsi_cfg: 4992 rtnl_unlock(); 4993 ice_deinit_dev(pf); 4994 return err; 4995 } 4996 4997 /** 4998 * ice_unload - unload the PF by stopping the main VSI and deinitializing HW 4999 * @pf: pointer to the pf instance 5000 */ 5001 void ice_unload(struct ice_pf *pf) 5002 { 5003 ice_deinit_features(pf); 5004 ice_deinit_rdma(pf); 5005 rtnl_lock(); 5006 ice_stop_eth(ice_get_main_vsi(pf)); 5007 ice_vsi_decfg(ice_get_main_vsi(pf)); 5008 rtnl_unlock(); 5009 ice_deinit_dev(pf); 5010 } 5011 5012 /** 5013 * ice_probe - Device initialization routine 5014 * @pdev: PCI device information struct 5015 * @ent: entry in ice_pci_tbl 5016 * 5017 * Returns 0 on success, negative on failure 5018 */ 5019 static int 5020 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) 5021 { 5022 struct device *dev = &pdev->dev; 5023 struct ice_pf *pf; 5024 struct ice_hw *hw; 5025 int err; 5026 5027 if (pdev->is_virtfn) { 5028 dev_err(dev, "can't probe a virtual function\n"); 5029 return -EINVAL; 5030 } 5031 5032 /* when under a kdump kernel initiate a reset before enabling the 5033 * device in order to clear out any pending DMA transactions. These 5034 * transactions can cause some systems to machine check when doing 5035 * the pcim_enable_device() below.
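 * The pcie_flr() below performs that reset; config space is saved
 * beforehand and restored afterwards so the device configuration
 * survives the function-level reset.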
5036 */ 5037 if (is_kdump_kernel()) { 5038 pci_save_state(pdev); 5039 pci_clear_master(pdev); 5040 err = pcie_flr(pdev); 5041 if (err) 5042 return err; 5043 pci_restore_state(pdev); 5044 } 5045 5046 /* this driver uses devres, see 5047 * Documentation/driver-api/driver-model/devres.rst 5048 */ 5049 err = pcim_enable_device(pdev); 5050 if (err) 5051 return err; 5052 5053 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); 5054 if (err) { 5055 dev_err(dev, "BAR0 I/O map error %d\n", err); 5056 return err; 5057 } 5058 5059 pf = ice_allocate_pf(dev); 5060 if (!pf) 5061 return -ENOMEM; 5062 5063 /* initialize Auxiliary index to invalid value */ 5064 pf->aux_idx = -1; 5065 5066 /* set up for high or low DMA */ 5067 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 5068 if (err) { 5069 dev_err(dev, "DMA configuration failed: 0x%x\n", err); 5070 return err; 5071 } 5072 5073 pci_set_master(pdev); 5074 5075 pf->pdev = pdev; 5076 pci_set_drvdata(pdev, pf); 5077 set_bit(ICE_DOWN, pf->state); 5078 /* Disable service task until DOWN bit is cleared */ 5079 set_bit(ICE_SERVICE_DIS, pf->state); 5080 5081 hw = &pf->hw; 5082 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; 5083 pci_save_state(pdev); 5084 5085 hw->back = pf; 5086 hw->port_info = NULL; 5087 hw->vendor_id = pdev->vendor; 5088 hw->device_id = pdev->device; 5089 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 5090 hw->subsystem_vendor_id = pdev->subsystem_vendor; 5091 hw->subsystem_device_id = pdev->subsystem_device; 5092 hw->bus.device = PCI_SLOT(pdev->devfn); 5093 hw->bus.func = PCI_FUNC(pdev->devfn); 5094 ice_set_ctrlq_len(hw); 5095 5096 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); 5097 5098 #ifndef CONFIG_DYNAMIC_DEBUG 5099 if (debug < -1) 5100 hw->debug_mask = debug; 5101 #endif 5102 5103 err = ice_init(pf); 5104 if (err) 5105 goto err_init; 5106 5107 err = ice_init_eth(pf); 5108 if (err) 5109 goto err_init_eth; 5110 5111 err = ice_init_rdma(pf); 5112 if (err) 5113 goto err_init_rdma; 5114 5115 err = ice_init_devlink(pf); 5116 if (err) 5117 goto err_init_devlink; 5118 5119 ice_init_features(pf); 5120 5121 return 0; 5122 5123 err_init_devlink: 5124 ice_deinit_rdma(pf); 5125 err_init_rdma: 5126 ice_deinit_eth(pf); 5127 err_init_eth: 5128 ice_deinit(pf); 5129 err_init: 5130 pci_disable_device(pdev); 5131 return err; 5132 } 5133 5134 /** 5135 * ice_set_wake - enable or disable Wake on LAN 5136 * @pf: pointer to the PF struct 5137 * 5138 * Simple helper for WoL control 5139 */ 5140 static void ice_set_wake(struct ice_pf *pf) 5141 { 5142 struct ice_hw *hw = &pf->hw; 5143 bool wol = pf->wol_ena; 5144 5145 /* clear wake state, otherwise new wake events won't fire */ 5146 wr32(hw, PFPM_WUS, U32_MAX); 5147 5148 /* enable / disable APM wake up, no RMW needed */ 5149 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); 5150 5151 /* set magic packet filter enabled */ 5152 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0); 5153 } 5154 5155 /** 5156 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet 5157 * @pf: pointer to the PF struct 5158 * 5159 * Issue firmware command to enable multicast magic wake, making 5160 * sure that any locally administered address (LAA) is used for 5161 * wake, and that PF reset doesn't undo the LAA. 
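 *
 * The flags used below request, in order: multicast magic-packet wake
 * (MC_MAG_EN), wake-up matching on the LAA (UPDATE_LAA_WOL), and
 * keeping that LAA-based wake address across a PF reset
 * (WOL_LAA_PFR_KEEP).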
5162 */ 5163 static void ice_setup_mc_magic_wake(struct ice_pf *pf) 5164 { 5165 struct device *dev = ice_pf_to_dev(pf); 5166 struct ice_hw *hw = &pf->hw; 5167 u8 mac_addr[ETH_ALEN]; 5168 struct ice_vsi *vsi; 5169 int status; 5170 u8 flags; 5171 5172 if (!pf->wol_ena) 5173 return; 5174 5175 vsi = ice_get_main_vsi(pf); 5176 if (!vsi) 5177 return; 5178 5179 /* Get current MAC address in case it's an LAA */ 5180 if (vsi->netdev) 5181 ether_addr_copy(mac_addr, vsi->netdev->dev_addr); 5182 else 5183 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); 5184 5185 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN | 5186 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL | 5187 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP; 5188 5189 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); 5190 if (status) 5191 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n", 5192 status, ice_aq_str(hw->adminq.sq_last_status)); 5193 } 5194 5195 /** 5196 * ice_remove - Device removal routine 5197 * @pdev: PCI device information struct 5198 */ 5199 static void ice_remove(struct pci_dev *pdev) 5200 { 5201 struct ice_pf *pf = pci_get_drvdata(pdev); 5202 int i; 5203 5204 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { 5205 if (!ice_is_reset_in_progress(pf->state)) 5206 break; 5207 msleep(100); 5208 } 5209 5210 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { 5211 set_bit(ICE_VF_RESETS_DISABLED, pf->state); 5212 ice_free_vfs(pf); 5213 } 5214 5215 ice_service_task_stop(pf); 5216 ice_aq_cancel_waiting_tasks(pf); 5217 set_bit(ICE_DOWN, pf->state); 5218 5219 if (!ice_is_safe_mode(pf)) 5220 ice_remove_arfs(pf); 5221 ice_deinit_features(pf); 5222 ice_deinit_devlink(pf); 5223 ice_deinit_rdma(pf); 5224 ice_deinit_eth(pf); 5225 ice_deinit(pf); 5226 5227 ice_vsi_release_all(pf); 5228 5229 ice_setup_mc_magic_wake(pf); 5230 ice_set_wake(pf); 5231 5232 pci_disable_device(pdev); 5233 } 5234 5235 /** 5236 * ice_shutdown - PCI callback for shutting down device 5237 * @pdev: PCI device information struct 5238 */ 5239 static void ice_shutdown(struct pci_dev *pdev) 5240 { 5241 struct ice_pf *pf = pci_get_drvdata(pdev); 5242 5243 ice_remove(pdev); 5244 5245 if (system_state == SYSTEM_POWER_OFF) { 5246 pci_wake_from_d3(pdev, pf->wol_ena); 5247 pci_set_power_state(pdev, PCI_D3hot); 5248 } 5249 } 5250 5251 #ifdef CONFIG_PM 5252 /** 5253 * ice_prepare_for_shutdown - prep for PCI shutdown 5254 * @pf: board private structure 5255 * 5256 * Inform or close all dependent features in prep for PCI device shutdown 5257 */ 5258 static void ice_prepare_for_shutdown(struct ice_pf *pf) 5259 { 5260 struct ice_hw *hw = &pf->hw; 5261 u32 v; 5262 5263 /* Notify VFs of impending reset */ 5264 if (ice_check_sq_alive(hw, &hw->mailboxq)) 5265 ice_vc_notify_reset(pf); 5266 5267 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); 5268 5269 /* disable the VSIs and their queues that are not already DOWN */ 5270 ice_pf_dis_all_vsi(pf, false); 5271 5272 ice_for_each_vsi(pf, v) 5273 if (pf->vsi[v]) 5274 pf->vsi[v]->vsi_num = 0; 5275 5276 ice_shutdown_all_ctrlq(hw); 5277 } 5278 5279 /** 5280 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme 5281 * @pf: board private structure to reinitialize 5282 * 5283 * This routine reinitializes the interrupt scheme that was cleared during 5284 * the power management suspend callback. 5285 * 5286 * This should be called during the resume routine to re-allocate the q_vectors 5287 * and reacquire interrupts.
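 *
 * The suspend/resume pairing is:
 *	suspend: ice_free_irq_msix_misc(), ice_vsi_free_q_vectors(),
 *		 ice_clear_interrupt_scheme()
 *	resume:  ice_init_interrupt_scheme(), ice_vsi_alloc_q_vectors(),
 *		 ice_vsi_map_rings_to_vectors(), ice_req_irq_msix_misc()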
5288 */ 5289 static int ice_reinit_interrupt_scheme(struct ice_pf *pf) 5290 { 5291 struct device *dev = ice_pf_to_dev(pf); 5292 int ret, v; 5293 5294 /* Since we clear MSIX flag during suspend, we need to 5295 * set it back during resume... 5296 */ 5297 5298 ret = ice_init_interrupt_scheme(pf); 5299 if (ret) { 5300 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret); 5301 return ret; 5302 } 5303 5304 /* Remap vectors and rings, after successful re-init interrupts */ 5305 ice_for_each_vsi(pf, v) { 5306 if (!pf->vsi[v]) 5307 continue; 5308 5309 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); 5310 if (ret) 5311 goto err_reinit; 5312 ice_vsi_map_rings_to_vectors(pf->vsi[v]); 5313 } 5314 5315 ret = ice_req_irq_msix_misc(pf); 5316 if (ret) { 5317 dev_err(dev, "Setting up misc vector failed after device suspend %d\n", 5318 ret); 5319 goto err_reinit; 5320 } 5321 5322 return 0; 5323 5324 err_reinit: 5325 while (v--) 5326 if (pf->vsi[v]) 5327 ice_vsi_free_q_vectors(pf->vsi[v]); 5328 5329 return ret; 5330 } 5331 5332 /** 5333 * ice_suspend 5334 * @dev: generic device information structure 5335 * 5336 * Power Management callback to quiesce the device and prepare 5337 * for D3 transition. 5338 */ 5339 static int __maybe_unused ice_suspend(struct device *dev) 5340 { 5341 struct pci_dev *pdev = to_pci_dev(dev); 5342 struct ice_pf *pf; 5343 int disabled, v; 5344 5345 pf = pci_get_drvdata(pdev); 5346 5347 if (!ice_pf_state_is_nominal(pf)) { 5348 dev_err(dev, "Device is not ready, no need to suspend it\n"); 5349 return -EBUSY; 5350 } 5351 5352 /* Stop watchdog tasks until resume completion. 5353 * Even though it is most likely that the service task is 5354 * disabled if the device is suspended or down, the service task's 5355 * state is controlled by a different state bit, and we should 5356 * store and honor whatever state that bit is in at this point. 5357 */ 5358 disabled = ice_service_task_stop(pf); 5359 5360 ice_unplug_aux_dev(pf); 5361 5362 /* Already suspended? Then there is nothing to do */ 5363 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { 5364 if (!disabled) 5365 ice_service_task_restart(pf); 5366 return 0; 5367 } 5368 5369 if (test_bit(ICE_DOWN, pf->state) || 5370 ice_is_reset_in_progress(pf->state)) { 5371 dev_err(dev, "can't suspend device in reset or already down\n"); 5372 if (!disabled) 5373 ice_service_task_restart(pf); 5374 return 0; 5375 } 5376 5377 ice_setup_mc_magic_wake(pf); 5378 5379 ice_prepare_for_shutdown(pf); 5380 5381 ice_set_wake(pf); 5382 5383 /* Free vectors, clear the interrupt scheme and release IRQs 5384 * for proper hibernation, especially with a large number of CPUs. 5385 * Otherwise hibernation might fail when mapping all the vectors back 5386 * to CPU0.
5387 */ 5388 ice_free_irq_msix_misc(pf); 5389 ice_for_each_vsi(pf, v) { 5390 if (!pf->vsi[v]) 5391 continue; 5392 ice_vsi_free_q_vectors(pf->vsi[v]); 5393 } 5394 ice_clear_interrupt_scheme(pf); 5395 5396 pci_save_state(pdev); 5397 pci_wake_from_d3(pdev, pf->wol_ena); 5398 pci_set_power_state(pdev, PCI_D3hot); 5399 return 0; 5400 } 5401 5402 /** 5403 * ice_resume - PM callback for waking up from D3 5404 * @dev: generic device information structure 5405 */ 5406 static int __maybe_unused ice_resume(struct device *dev) 5407 { 5408 struct pci_dev *pdev = to_pci_dev(dev); 5409 enum ice_reset_req reset_type; 5410 struct ice_pf *pf; 5411 struct ice_hw *hw; 5412 int ret; 5413 5414 pci_set_power_state(pdev, PCI_D0); 5415 pci_restore_state(pdev); 5416 pci_save_state(pdev); 5417 5418 if (!pci_device_is_present(pdev)) 5419 return -ENODEV; 5420 5421 ret = pci_enable_device_mem(pdev); 5422 if (ret) { 5423 dev_err(dev, "Cannot enable device after suspend\n"); 5424 return ret; 5425 } 5426 5427 pf = pci_get_drvdata(pdev); 5428 hw = &pf->hw; 5429 5430 pf->wakeup_reason = rd32(hw, PFPM_WUS); 5431 ice_print_wake_reason(pf); 5432 5433 /* We cleared the interrupt scheme when we suspended, so we need to 5434 * restore it now to resume device functionality. 5435 */ 5436 ret = ice_reinit_interrupt_scheme(pf); 5437 if (ret) 5438 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); 5439 5440 clear_bit(ICE_DOWN, pf->state); 5441 /* Now perform PF reset and rebuild */ 5442 reset_type = ICE_RESET_PFR; 5443 /* re-enable service task for reset, but allow reset to schedule it */ 5444 clear_bit(ICE_SERVICE_DIS, pf->state); 5445 5446 if (ice_schedule_reset(pf, reset_type)) 5447 dev_err(dev, "Reset during resume failed.\n"); 5448 5449 clear_bit(ICE_SUSPENDED, pf->state); 5450 ice_service_task_restart(pf); 5451 5452 /* Restart the service task */ 5453 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 5454 5455 return 0; 5456 } 5457 #endif /* CONFIG_PM */ 5458 5459 /** 5460 * ice_pci_err_detected - warning that PCI error has been detected 5461 * @pdev: PCI device information struct 5462 * @err: the type of PCI error 5463 * 5464 * Called to warn that something happened on the PCI bus and the error handling 5465 * is in progress. Allows the driver to gracefully prepare/handle PCI errors. 5466 */ 5467 static pci_ers_result_t 5468 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) 5469 { 5470 struct ice_pf *pf = pci_get_drvdata(pdev); 5471 5472 if (!pf) { 5473 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", 5474 __func__, err); 5475 return PCI_ERS_RESULT_DISCONNECT; 5476 } 5477 5478 if (!test_bit(ICE_SUSPENDED, pf->state)) { 5479 ice_service_task_stop(pf); 5480 5481 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { 5482 set_bit(ICE_PFR_REQ, pf->state); 5483 ice_prepare_for_reset(pf, ICE_RESET_PFR); 5484 } 5485 } 5486 5487 return PCI_ERS_RESULT_NEED_RESET; 5488 } 5489 5490 /** 5491 * ice_pci_err_slot_reset - a PCI slot reset has just happened 5492 * @pdev: PCI device information struct 5493 * 5494 * Called to determine if the driver can recover from the PCI slot reset by 5495 * using a register read to determine if the device is recoverable. 
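 * A readback of zero from GLGEN_RTRIG is treated as proof of life; any
 * non-zero value (including the all-ones pattern returned by a dead
 * device) reports the slot as unrecoverable.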
5496 */ 5497 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) 5498 { 5499 struct ice_pf *pf = pci_get_drvdata(pdev); 5500 pci_ers_result_t result; 5501 int err; 5502 u32 reg; 5503 5504 err = pci_enable_device_mem(pdev); 5505 if (err) { 5506 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", 5507 err); 5508 result = PCI_ERS_RESULT_DISCONNECT; 5509 } else { 5510 pci_set_master(pdev); 5511 pci_restore_state(pdev); 5512 pci_save_state(pdev); 5513 pci_wake_from_d3(pdev, false); 5514 5515 /* Check for life */ 5516 reg = rd32(&pf->hw, GLGEN_RTRIG); 5517 if (!reg) 5518 result = PCI_ERS_RESULT_RECOVERED; 5519 else 5520 result = PCI_ERS_RESULT_DISCONNECT; 5521 } 5522 5523 return result; 5524 } 5525 5526 /** 5527 * ice_pci_err_resume - restart operations after PCI error recovery 5528 * @pdev: PCI device information struct 5529 * 5530 * Called to allow the driver to bring things back up after PCI error and/or 5531 * reset recovery have finished 5532 */ 5533 static void ice_pci_err_resume(struct pci_dev *pdev) 5534 { 5535 struct ice_pf *pf = pci_get_drvdata(pdev); 5536 5537 if (!pf) { 5538 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", 5539 __func__); 5540 return; 5541 } 5542 5543 if (test_bit(ICE_SUSPENDED, pf->state)) { 5544 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", 5545 __func__); 5546 return; 5547 } 5548 5549 ice_restore_all_vfs_msi_state(pf); 5550 5551 ice_do_reset(pf, ICE_RESET_PFR); 5552 ice_service_task_restart(pf); 5553 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 5554 } 5555 5556 /** 5557 * ice_pci_err_reset_prepare - prepare device driver for PCI reset 5558 * @pdev: PCI device information struct 5559 */ 5560 static void ice_pci_err_reset_prepare(struct pci_dev *pdev) 5561 { 5562 struct ice_pf *pf = pci_get_drvdata(pdev); 5563 5564 if (!test_bit(ICE_SUSPENDED, pf->state)) { 5565 ice_service_task_stop(pf); 5566 5567 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { 5568 set_bit(ICE_PFR_REQ, pf->state); 5569 ice_prepare_for_reset(pf, ICE_RESET_PFR); 5570 } 5571 } 5572 } 5573 5574 /** 5575 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin 5576 * @pdev: PCI device information struct 5577 */ 5578 static void ice_pci_err_reset_done(struct pci_dev *pdev) 5579 { 5580 ice_pci_err_resume(pdev); 5581 } 5582 5583 /* ice_pci_tbl - PCI Device ID Table 5584 * 5585 * Wildcard entries (PCI_ANY_ID) should come last 5586 * Last entry must be all 0s 5587 * 5588 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 5589 * Class, Class Mask, private data (not used) } 5590 */ 5591 static const struct pci_device_id ice_pci_tbl[] = { 5592 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) }, 5593 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) }, 5594 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) }, 5595 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) }, 5596 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) }, 5597 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) }, 5598 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) }, 5599 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) }, 5600 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) }, 5601 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) }, 5602 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) }, 5603 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) }, 5604 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) }, 5605 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) }, 5606 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) }, 5607 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) }, 
5608 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) }, 5609 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) }, 5610 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) }, 5611 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) }, 5612 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) }, 5613 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) }, 5614 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) }, 5615 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) }, 5616 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) }, 5617 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) }, 5618 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) }, 5619 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) }, 5620 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) }, 5621 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP_DD) }, 5622 /* required last entry */ 5623 {} 5624 }; 5625 MODULE_DEVICE_TABLE(pci, ice_pci_tbl); 5626 5627 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); 5628 5629 static const struct pci_error_handlers ice_pci_err_handler = { 5630 .error_detected = ice_pci_err_detected, 5631 .slot_reset = ice_pci_err_slot_reset, 5632 .reset_prepare = ice_pci_err_reset_prepare, 5633 .reset_done = ice_pci_err_reset_done, 5634 .resume = ice_pci_err_resume 5635 }; 5636 5637 static struct pci_driver ice_driver = { 5638 .name = KBUILD_MODNAME, 5639 .id_table = ice_pci_tbl, 5640 .probe = ice_probe, 5641 .remove = ice_remove, 5642 #ifdef CONFIG_PM 5643 .driver.pm = &ice_pm_ops, 5644 #endif /* CONFIG_PM */ 5645 .shutdown = ice_shutdown, 5646 .sriov_configure = ice_sriov_configure, 5647 .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix, 5648 .sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count, 5649 .err_handler = &ice_pci_err_handler 5650 }; 5651 5652 /** 5653 * ice_module_init - Driver registration routine 5654 * 5655 * ice_module_init is the first routine called when the driver is 5656 * loaded. All it does is register with the PCI subsystem. 5657 */ 5658 static int __init ice_module_init(void) 5659 { 5660 int status = -ENOMEM; 5661 5662 pr_info("%s\n", ice_driver_string); 5663 pr_info("%s\n", ice_copyright); 5664 5665 ice_adv_lnk_speed_maps_init(); 5666 5667 ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME); 5668 if (!ice_wq) { 5669 pr_err("Failed to create workqueue\n"); 5670 return status; 5671 } 5672 5673 ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0); 5674 if (!ice_lag_wq) { 5675 pr_err("Failed to create LAG workqueue\n"); 5676 goto err_dest_wq; 5677 } 5678 5679 status = pci_register_driver(&ice_driver); 5680 if (status) { 5681 pr_err("failed to register PCI driver, err %d\n", status); 5682 goto err_dest_lag_wq; 5683 } 5684 5685 return 0; 5686 5687 err_dest_lag_wq: 5688 destroy_workqueue(ice_lag_wq); 5689 err_dest_wq: 5690 destroy_workqueue(ice_wq); 5691 return status; 5692 } 5693 module_init(ice_module_init); 5694 5695 /** 5696 * ice_module_exit - Driver exit cleanup routine 5697 * 5698 * ice_module_exit is called just before the driver is removed 5699 * from memory. 
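 * Unregistering the PCI driver first guarantees that ice_remove() has
 * finished for every bound device before the workqueues are destroyed.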
5700 */ 5701 static void __exit ice_module_exit(void) 5702 { 5703 pci_unregister_driver(&ice_driver); 5704 destroy_workqueue(ice_wq); 5705 destroy_workqueue(ice_lag_wq); 5706 pr_info("module unloaded\n"); 5707 } 5708 module_exit(ice_module_exit); 5709 5710 /** 5711 * ice_set_mac_address - NDO callback to set MAC address 5712 * @netdev: network interface device structure 5713 * @pi: pointer to an address structure 5714 * 5715 * Returns 0 on success, negative on failure 5716 */ 5717 static int ice_set_mac_address(struct net_device *netdev, void *pi) 5718 { 5719 struct ice_netdev_priv *np = netdev_priv(netdev); 5720 struct ice_vsi *vsi = np->vsi; 5721 struct ice_pf *pf = vsi->back; 5722 struct ice_hw *hw = &pf->hw; 5723 struct sockaddr *addr = pi; 5724 u8 old_mac[ETH_ALEN]; 5725 u8 flags = 0; 5726 u8 *mac; 5727 int err; 5728 5729 mac = (u8 *)addr->sa_data; 5730 5731 if (!is_valid_ether_addr(mac)) 5732 return -EADDRNOTAVAIL; 5733 5734 if (test_bit(ICE_DOWN, pf->state) || 5735 ice_is_reset_in_progress(pf->state)) { 5736 netdev_err(netdev, "can't set mac %pM. device not ready\n", 5737 mac); 5738 return -EBUSY; 5739 } 5740 5741 if (ice_chnl_dmac_fltr_cnt(pf)) { 5742 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n", 5743 mac); 5744 return -EAGAIN; 5745 } 5746 5747 netif_addr_lock_bh(netdev); 5748 ether_addr_copy(old_mac, netdev->dev_addr); 5749 /* change the netdev's MAC address */ 5750 eth_hw_addr_set(netdev, mac); 5751 netif_addr_unlock_bh(netdev); 5752 5753 /* Clean up old MAC filter. Not an error if old filter doesn't exist */ 5754 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); 5755 if (err && err != -ENOENT) { 5756 err = -EADDRNOTAVAIL; 5757 goto err_update_filters; 5758 } 5759 5760 /* Add filter for new MAC. If filter exists, return success */ 5761 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); 5762 if (err == -EEXIST) { 5763 /* Although this MAC filter is already present in hardware it's 5764 * possible in some cases (e.g. bonding) that dev_addr was 5765 * modified outside of the driver and needs to be restored back 5766 * to this value. 5767 */ 5768 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); 5769 5770 return 0; 5771 } else if (err) { 5772 /* error if the new filter addition failed */ 5773 err = -EADDRNOTAVAIL; 5774 } 5775 5776 err_update_filters: 5777 if (err) { 5778 netdev_err(netdev, "can't set MAC %pM. filter update failed\n", 5779 mac); 5780 netif_addr_lock_bh(netdev); 5781 eth_hw_addr_set(netdev, old_mac); 5782 netif_addr_unlock_bh(netdev); 5783 return err; 5784 } 5785 5786 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", 5787 netdev->dev_addr); 5788 5789 /* write new MAC address to the firmware */ 5790 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 5791 err = ice_aq_manage_mac_write(hw, mac, flags, NULL); 5792 if (err) { 5793 netdev_err(netdev, "can't set MAC %pM. 
write to firmware failed error %d\n", 5794 mac, err); 5795 } 5796 return 0; 5797 } 5798 5799 /** 5800 * ice_set_rx_mode - NDO callback to set the netdev filters 5801 * @netdev: network interface device structure 5802 */ 5803 static void ice_set_rx_mode(struct net_device *netdev) 5804 { 5805 struct ice_netdev_priv *np = netdev_priv(netdev); 5806 struct ice_vsi *vsi = np->vsi; 5807 5808 if (!vsi || ice_is_switchdev_running(vsi->back)) 5809 return; 5810 5811 /* Set the flags to synchronize filters 5812 * ndo_set_rx_mode may be triggered even without a change in netdev 5813 * flags 5814 */ 5815 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); 5816 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); 5817 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); 5818 5819 /* schedule our worker thread which will take care of 5820 * applying the new filter changes 5821 */ 5822 ice_service_task_schedule(vsi->back); 5823 } 5824 5825 /** 5826 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate 5827 * @netdev: network interface device structure 5828 * @queue_index: Queue ID 5829 * @maxrate: maximum bandwidth in Mbps 5830 */ 5831 static int 5832 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) 5833 { 5834 struct ice_netdev_priv *np = netdev_priv(netdev); 5835 struct ice_vsi *vsi = np->vsi; 5836 u16 q_handle; 5837 int status; 5838 u8 tc; 5839 5840 /* Validate maxrate requested is within permitted range */ 5841 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) { 5842 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n", 5843 maxrate, queue_index); 5844 return -EINVAL; 5845 } 5846 5847 q_handle = vsi->tx_rings[queue_index]->q_handle; 5848 tc = ice_dcb_get_tc(vsi, queue_index); 5849 5850 vsi = ice_locate_vsi_using_queue(vsi, queue_index); 5851 if (!vsi) { 5852 netdev_err(netdev, "Invalid VSI for given queue %d\n", 5853 queue_index); 5854 return -EINVAL; 5855 } 5856 5857 /* Set BW back to default, when user set maxrate to 0 */ 5858 if (!maxrate) 5859 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, 5860 q_handle, ICE_MAX_BW); 5861 else 5862 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, 5863 q_handle, ICE_MAX_BW, maxrate * 1000); 5864 if (status) 5865 netdev_err(netdev, "Unable to set Tx max rate, error %d\n", 5866 status); 5867 5868 return status; 5869 } 5870 5871 /** 5872 * ice_fdb_add - add an entry to the hardware database 5873 * @ndm: the input from the stack 5874 * @tb: pointer to array of nladdr (unused) 5875 * @dev: the net device pointer 5876 * @addr: the MAC address entry being added 5877 * @vid: VLAN ID 5878 * @flags: instructions from stack about fdb operation 5879 * @extack: netlink extended ack 5880 */ 5881 static int 5882 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], 5883 struct net_device *dev, const unsigned char *addr, u16 vid, 5884 u16 flags, struct netlink_ext_ack __always_unused *extack) 5885 { 5886 int err; 5887 5888 if (vid) { 5889 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); 5890 return -EINVAL; 5891 } 5892 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 5893 netdev_err(dev, "FDB only supports static addresses\n"); 5894 return -EINVAL; 5895 } 5896 5897 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 5898 err = dev_uc_add_excl(dev, addr); 5899 else if (is_multicast_ether_addr(addr)) 5900 err = dev_mc_add_excl(dev, addr); 5901 else 5902 err = -EINVAL; 5903 5904 /* Only return duplicate errors if NLM_F_EXCL is set */ 5905 if (err == 
-EEXIST && !(flags & NLM_F_EXCL)) 5906 err = 0; 5907 5908 return err; 5909 } 5910 5911 /** 5912 * ice_fdb_del - delete an entry from the hardware database 5913 * @ndm: the input from the stack 5914 * @tb: pointer to array of nladdr (unused) 5915 * @dev: the net device pointer 5916 * @addr: the MAC address entry being removed 5917 * @vid: VLAN ID 5918 * @extack: netlink extended ack 5919 */ 5920 static int 5921 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], 5922 struct net_device *dev, const unsigned char *addr, 5923 __always_unused u16 vid, struct netlink_ext_ack *extack) 5924 { 5925 int err; 5926 5927 if (ndm->ndm_state & NUD_PERMANENT) { 5928 netdev_err(dev, "FDB only supports static addresses\n"); 5929 return -EINVAL; 5930 } 5931 5932 if (is_unicast_ether_addr(addr)) 5933 err = dev_uc_del(dev, addr); 5934 else if (is_multicast_ether_addr(addr)) 5935 err = dev_mc_del(dev, addr); 5936 else 5937 err = -EINVAL; 5938 5939 return err; 5940 } 5941 5942 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ 5943 NETIF_F_HW_VLAN_CTAG_TX | \ 5944 NETIF_F_HW_VLAN_STAG_RX | \ 5945 NETIF_F_HW_VLAN_STAG_TX) 5946 5947 #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ 5948 NETIF_F_HW_VLAN_STAG_RX) 5949 5950 #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ 5951 NETIF_F_HW_VLAN_STAG_FILTER) 5952 5953 /** 5954 * ice_fix_features - fix the netdev features flags based on device limitations 5955 * @netdev: ptr to the netdev that flags are being fixed on 5956 * @features: features that need to be checked and possibly fixed 5957 * 5958 * Make sure any fixups are made to features in this callback. This enables the 5959 * driver to not have to check unsupported configurations throughout the driver 5960 * because that's the responsibility of this callback. 5961 * 5962 * Single VLAN Mode (SVM) Supported Features: 5963 * NETIF_F_HW_VLAN_CTAG_FILTER 5964 * NETIF_F_HW_VLAN_CTAG_RX 5965 * NETIF_F_HW_VLAN_CTAG_TX 5966 * 5967 * Double VLAN Mode (DVM) Supported Features: 5968 * NETIF_F_HW_VLAN_CTAG_FILTER 5969 * NETIF_F_HW_VLAN_CTAG_RX 5970 * NETIF_F_HW_VLAN_CTAG_TX 5971 * 5972 * NETIF_F_HW_VLAN_STAG_FILTER 5973 * NETIF_F_HW_VLAN_STAG_RX 5974 * NETIF_F_HW_VLAN_STAG_TX 5975 * 5976 * Features that need fixing: 5977 * Cannot simultaneously enable CTAG and STAG stripping and/or insertion. 5978 * These are mutually exclusive as the VSI context cannot support multiple 5979 * VLAN ethertypes simultaneously for stripping and/or insertion. If this 5980 * is not done, then default to clearing the requested STAG offload 5981 * settings. 5982 * 5983 * All supported filtering has to be enabled or disabled together. For 5984 * example, in DVM, CTAG and STAG filtering have to be enabled and disabled 5985 * together. If this is not done, then default to VLAN filtering disabled. 5986 * These are mutually exclusive as there is currently no way to 5987 * enable/disable VLAN filtering based on VLAN ethertype when using VLAN 5988 * prune rules.
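 *
 * Illustrative example of the filtering rule (the interface name is a
 * placeholder): with both filter types currently on in DVM, a request
 * such as
 *   ethtool -K eth0 rx-vlan-filter off
 * that leaves rx-vlan-stag-filter on is not applied as-is; this callback
 * clears both filter bits and logs the "must be either both on or both
 * off" warning instead of leaving a mixed configuration.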
5989 */ 5990 static netdev_features_t 5991 ice_fix_features(struct net_device *netdev, netdev_features_t features) 5992 { 5993 struct ice_netdev_priv *np = netdev_priv(netdev); 5994 netdev_features_t req_vlan_fltr, cur_vlan_fltr; 5995 bool cur_ctag, cur_stag, req_ctag, req_stag; 5996 5997 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES; 5998 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; 5999 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; 6000 6001 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES; 6002 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; 6003 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; 6004 6005 if (req_vlan_fltr != cur_vlan_fltr) { 6006 if (ice_is_dvm_ena(&np->vsi->back->hw)) { 6007 if (req_ctag && req_stag) { 6008 features |= NETIF_VLAN_FILTERING_FEATURES; 6009 } else if (!req_ctag && !req_stag) { 6010 features &= ~NETIF_VLAN_FILTERING_FEATURES; 6011 } else if ((!cur_ctag && req_ctag && !cur_stag) || 6012 (!cur_stag && req_stag && !cur_ctag)) { 6013 features |= NETIF_VLAN_FILTERING_FEATURES; 6014 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n"); 6015 } else if ((cur_ctag && !req_ctag && cur_stag) || 6016 (cur_stag && !req_stag && cur_ctag)) { 6017 features &= ~NETIF_VLAN_FILTERING_FEATURES; 6018 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n"); 6019 } 6020 } else { 6021 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER) 6022 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n"); 6023 6024 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER) 6025 features |= NETIF_F_HW_VLAN_CTAG_FILTER; 6026 } 6027 } 6028 6029 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) && 6030 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) { 6031 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n"); 6032 features &= ~(NETIF_F_HW_VLAN_STAG_RX | 6033 NETIF_F_HW_VLAN_STAG_TX); 6034 } 6035 6036 if (!(netdev->features & NETIF_F_RXFCS) && 6037 (features & NETIF_F_RXFCS) && 6038 (features & NETIF_VLAN_STRIPPING_FEATURES) && 6039 !ice_vsi_has_non_zero_vlans(np->vsi)) { 6040 netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n"); 6041 features &= ~NETIF_VLAN_STRIPPING_FEATURES; 6042 } 6043 6044 return features; 6045 } 6046 6047 /** 6048 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI 6049 * @vsi: PF's VSI 6050 * @features: features used to determine VLAN offload settings 6051 * 6052 * First, determine the vlan_ethertype based on the VLAN offload bits in 6053 * features. Then determine if stripping and insertion should be enabled or 6054 * disabled. Finally enable or disable VLAN stripping and insertion. 
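 *
 * For example, per the checks below: if features contains
 * NETIF_F_HW_VLAN_STAG_RX or NETIF_F_HW_VLAN_STAG_TX, the VSI is
 * programmed with vlan_ethertype = ETH_P_8021AD (0x88A8); otherwise, if
 * a CTAG offload bit is set, ETH_P_8021Q (0x8100) is used. Only one
 * ethertype is ever programmed because ice_fix_features() has already
 * cleared the STAG bits whenever both tag types were requested.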
6055 */ 6056 static int 6057 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features) 6058 { 6059 bool enable_stripping = true, enable_insertion = true; 6060 struct ice_vsi_vlan_ops *vlan_ops; 6061 int strip_err = 0, insert_err = 0; 6062 u16 vlan_ethertype = 0; 6063 6064 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 6065 6066 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) 6067 vlan_ethertype = ETH_P_8021AD; 6068 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) 6069 vlan_ethertype = ETH_P_8021Q; 6070 6071 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX))) 6072 enable_stripping = false; 6073 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX))) 6074 enable_insertion = false; 6075 6076 if (enable_stripping) 6077 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype); 6078 else 6079 strip_err = vlan_ops->dis_stripping(vsi); 6080 6081 if (enable_insertion) 6082 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype); 6083 else 6084 insert_err = vlan_ops->dis_insertion(vsi); 6085 6086 if (strip_err || insert_err) 6087 return -EIO; 6088 6089 return 0; 6090 } 6091 6092 /** 6093 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI 6094 * @vsi: PF's VSI 6095 * @features: features used to determine VLAN filtering settings 6096 * 6097 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the 6098 * features. 6099 */ 6100 static int 6101 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features) 6102 { 6103 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 6104 int err = 0; 6105 6106 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking 6107 * if either bit is set 6108 */ 6109 if (features & 6110 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) 6111 err = vlan_ops->ena_rx_filtering(vsi); 6112 else 6113 err = vlan_ops->dis_rx_filtering(vsi); 6114 6115 return err; 6116 } 6117 6118 /** 6119 * ice_set_vlan_features - set VLAN settings based on suggested feature set 6120 * @netdev: ptr to the netdev being adjusted 6121 * @features: the feature set that the stack is suggesting 6122 * 6123 * Only update VLAN settings if the requested_vlan_features are different than 6124 * the current_vlan_features. 
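 *
 * The offload (stripping/insertion) bits and the filtering bits are
 * compared and programmed as two independent groups below; a change in
 * one group does not cause the other group to be rewritten.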
6125 */ 6126 static int 6127 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features) 6128 { 6129 netdev_features_t current_vlan_features, requested_vlan_features; 6130 struct ice_netdev_priv *np = netdev_priv(netdev); 6131 struct ice_vsi *vsi = np->vsi; 6132 int err; 6133 6134 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES; 6135 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES; 6136 if (current_vlan_features ^ requested_vlan_features) { 6137 if ((features & NETIF_F_RXFCS) && 6138 (features & NETIF_VLAN_STRIPPING_FEATURES)) { 6139 dev_err(ice_pf_to_dev(vsi->back), 6140 "To enable VLAN stripping, you must first enable FCS/CRC stripping\n"); 6141 return -EIO; 6142 } 6143 6144 err = ice_set_vlan_offload_features(vsi, features); 6145 if (err) 6146 return err; 6147 } 6148 6149 current_vlan_features = netdev->features & 6150 NETIF_VLAN_FILTERING_FEATURES; 6151 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES; 6152 if (current_vlan_features ^ requested_vlan_features) { 6153 err = ice_set_vlan_filtering_features(vsi, features); 6154 if (err) 6155 return err; 6156 } 6157 6158 return 0; 6159 } 6160 6161 /** 6162 * ice_set_loopback - turn on/off loopback mode on underlying PF 6163 * @vsi: ptr to VSI 6164 * @ena: flag to indicate the on/off setting 6165 */ 6166 static int ice_set_loopback(struct ice_vsi *vsi, bool ena) 6167 { 6168 bool if_running = netif_running(vsi->netdev); 6169 int ret; 6170 6171 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { 6172 ret = ice_down(vsi); 6173 if (ret) { 6174 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n"); 6175 return ret; 6176 } 6177 } 6178 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); 6179 if (ret) 6180 netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); 6181 if (if_running) 6182 ret = ice_up(vsi); 6183 6184 return ret; 6185 } 6186 6187 /** 6188 * ice_set_features - set the netdev feature flags 6189 * @netdev: ptr to the netdev being adjusted 6190 * @features: the feature set that the stack is suggesting 6191 */ 6192 static int 6193 ice_set_features(struct net_device *netdev, netdev_features_t features) 6194 { 6195 netdev_features_t changed = netdev->features ^ features; 6196 struct ice_netdev_priv *np = netdev_priv(netdev); 6197 struct ice_vsi *vsi = np->vsi; 6198 struct ice_pf *pf = vsi->back; 6199 int ret = 0; 6200 6201 /* Don't set any netdev advanced features with device in Safe Mode */ 6202 if (ice_is_safe_mode(pf)) { 6203 dev_err(ice_pf_to_dev(pf), 6204 "Device is in Safe Mode - not enabling advanced netdev features\n"); 6205 return ret; 6206 } 6207 6208 /* Do not change setting during reset */ 6209 if (ice_is_reset_in_progress(pf->state)) { 6210 dev_err(ice_pf_to_dev(pf), 6211 "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); 6212 return -EBUSY; 6213 } 6214 6215 /* Multiple features can be changed in one call so keep features in 6216 * separate if/else statements to guarantee each feature is checked 6217 */ 6218 if (changed & NETIF_F_RXHASH) 6219 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH)); 6220 6221 ret = ice_set_vlan_features(netdev, features); 6222 if (ret) 6223 return ret; 6224 6225 /* Turn on receive of FCS aka CRC, and after setting this 6226 * flag the packet data will have the 4 byte CRC appended 6227 */ 6228 if (changed & NETIF_F_RXFCS) { 6229 if ((features & NETIF_F_RXFCS) && 6230 (features & NETIF_VLAN_STRIPPING_FEATURES)) { 6231 
dev_err(ice_pf_to_dev(vsi->back), 6232 "To disable FCS/CRC stripping, you must first disable VLAN stripping\n"); 6233 return -EIO; 6234 } 6235 6236 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS)); 6237 ret = ice_down_up(vsi); 6238 if (ret) 6239 return ret; 6240 } 6241 6242 if (changed & NETIF_F_NTUPLE) { 6243 bool ena = !!(features & NETIF_F_NTUPLE); 6244 6245 ice_vsi_manage_fdir(vsi, ena); 6246 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi); 6247 } 6248 6249 /* don't turn off hw_tc_offload when ADQ is already enabled */ 6250 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) { 6251 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n"); 6252 return -EACCES; 6253 } 6254 6255 if (changed & NETIF_F_HW_TC) { 6256 bool ena = !!(features & NETIF_F_HW_TC); 6257 6258 ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) : 6259 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags); 6260 } 6261 6262 if (changed & NETIF_F_LOOPBACK) 6263 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK)); 6264 6265 return ret; 6266 } 6267 6268 /** 6269 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI 6270 * @vsi: VSI to setup VLAN properties for 6271 */ 6272 static int ice_vsi_vlan_setup(struct ice_vsi *vsi) 6273 { 6274 int err; 6275 6276 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features); 6277 if (err) 6278 return err; 6279 6280 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features); 6281 if (err) 6282 return err; 6283 6284 return ice_vsi_add_vlan_zero(vsi); 6285 } 6286 6287 /** 6288 * ice_vsi_cfg_lan - Setup the VSI lan related config 6289 * @vsi: the VSI being configured 6290 * 6291 * Return 0 on success and negative value on error 6292 */ 6293 int ice_vsi_cfg_lan(struct ice_vsi *vsi) 6294 { 6295 int err; 6296 6297 if (vsi->netdev && vsi->type == ICE_VSI_PF) { 6298 ice_set_rx_mode(vsi->netdev); 6299 6300 err = ice_vsi_vlan_setup(vsi); 6301 if (err) 6302 return err; 6303 } 6304 ice_vsi_cfg_dcb_rings(vsi); 6305 6306 err = ice_vsi_cfg_lan_txqs(vsi); 6307 if (!err && ice_is_xdp_ena_vsi(vsi)) 6308 err = ice_vsi_cfg_xdp_txqs(vsi); 6309 if (!err) 6310 err = ice_vsi_cfg_rxqs(vsi); 6311 6312 return err; 6313 } 6314 6315 /* THEORY OF MODERATION: 6316 * The ice driver hardware works differently than the hardware that DIMLIB was 6317 * originally made for. ice hardware doesn't have packet count limits that 6318 * can trigger an interrupt, but it *does* have interrupt rate limit support, 6319 * which is hard-coded to a limit of 250,000 ints/second. 6320 * If not using dynamic moderation, the INTRL value can be modified 6321 * by ethtool rx-usecs-high. 6322 */ 6323 struct ice_dim { 6324 /* the throttle rate for interrupts, basically worst case delay before 6325 * an initial interrupt fires, value is stored in microseconds. 6326 */ 6327 u16 itr; 6328 }; 6329 6330 /* Make a different profile for Rx that doesn't allow quite so aggressive 6331 * moderation at the high end (it maxes out at 126us, or about 8k interrupts 6332 * per second).
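 *
 * For reference, the rates in the tables below follow directly from the
 * ITR value: an ITR of N microseconds allows at most one interrupt every
 * N us, i.e. roughly 1,000,000 / N ints/s, so {126} yields
 * 1,000,000 / 126 ~= 7,936 ints/s (before the hard 250K INTRL cap).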
6333 */ 6334 static const struct ice_dim rx_profile[] = { 6335 {2}, /* 500,000 ints/s, capped at 250K by INTRL */ 6336 {8}, /* 125,000 ints/s */ 6337 {16}, /* 62,500 ints/s */ 6338 {62}, /* 16,129 ints/s */ 6339 {126} /* 7,936 ints/s */ 6340 }; 6341 6342 /* The transmit profile, which has the same sorts of values 6343 * as the previous struct 6344 */ 6345 static const struct ice_dim tx_profile[] = { 6346 {2}, /* 500,000 ints/s, capped at 250K by INTRL */ 6347 {8}, /* 125,000 ints/s */ 6348 {40}, /* 25,000 ints/s */ 6349 {128}, /* 7,812 ints/s */ 6350 {256} /* 3,906 ints/s */ 6351 }; 6352 6353 static void ice_tx_dim_work(struct work_struct *work) 6354 { 6355 struct ice_ring_container *rc; 6356 struct dim *dim; 6357 u16 itr; 6358 6359 dim = container_of(work, struct dim, work); 6360 rc = dim->priv; 6361 6362 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile)); 6363 6364 /* look up the values in our local table */ 6365 itr = tx_profile[dim->profile_ix].itr; 6366 6367 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim); 6368 ice_write_itr(rc, itr); 6369 6370 dim->state = DIM_START_MEASURE; 6371 } 6372 6373 static void ice_rx_dim_work(struct work_struct *work) 6374 { 6375 struct ice_ring_container *rc; 6376 struct dim *dim; 6377 u16 itr; 6378 6379 dim = container_of(work, struct dim, work); 6380 rc = dim->priv; 6381 6382 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile)); 6383 6384 /* look up the values in our local table */ 6385 itr = rx_profile[dim->profile_ix].itr; 6386 6387 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim); 6388 ice_write_itr(rc, itr); 6389 6390 dim->state = DIM_START_MEASURE; 6391 } 6392 6393 #define ICE_DIM_DEFAULT_PROFILE_IX 1 6394 6395 /** 6396 * ice_init_moderation - set up interrupt moderation 6397 * @q_vector: the vector containing rings to be configured 6398 * 6399 * Set up interrupt moderation registers, with the intent to do the right thing 6400 * whether called from reset or from probe, and whether or not dynamic moderation 6401 * is enabled. Take special care to write all the registers in both the dynamic 6402 * and non-dynamic moderation modes in order to make sure hardware is in a known 6403 * state. 6404 */ 6405 static void ice_init_moderation(struct ice_q_vector *q_vector) 6406 { 6407 struct ice_ring_container *rc; 6408 bool tx_dynamic, rx_dynamic; 6409 6410 rc = &q_vector->tx; 6411 INIT_WORK(&rc->dim.work, ice_tx_dim_work); 6412 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 6413 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; 6414 rc->dim.priv = rc; 6415 tx_dynamic = ITR_IS_DYNAMIC(rc); 6416 6417 /* set the initial TX ITR to match the above */ 6418 ice_write_itr(rc, tx_dynamic ? 6419 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting); 6420 6421 rc = &q_vector->rx; 6422 INIT_WORK(&rc->dim.work, ice_rx_dim_work); 6423 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 6424 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; 6425 rc->dim.priv = rc; 6426 rx_dynamic = ITR_IS_DYNAMIC(rc); 6427 6428 /* set the initial RX ITR to match the above */ 6429 ice_write_itr(rc, rx_dynamic ?
rx_profile[rc->dim.profile_ix].itr : 6430 rc->itr_setting); 6431 6432 ice_set_q_vector_intrl(q_vector); 6433 } 6434 6435 /** 6436 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI 6437 * @vsi: the VSI being configured 6438 */ 6439 static void ice_napi_enable_all(struct ice_vsi *vsi) 6440 { 6441 int q_idx; 6442 6443 if (!vsi->netdev) 6444 return; 6445 6446 ice_for_each_q_vector(vsi, q_idx) { 6447 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 6448 6449 ice_init_moderation(q_vector); 6450 6451 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) 6452 napi_enable(&q_vector->napi); 6453 } 6454 } 6455 6456 /** 6457 * ice_up_complete - Finish the last steps of bringing up a connection 6458 * @vsi: The VSI being configured 6459 * 6460 * Return 0 on success and negative value on error 6461 */ 6462 static int ice_up_complete(struct ice_vsi *vsi) 6463 { 6464 struct ice_pf *pf = vsi->back; 6465 int err; 6466 6467 ice_vsi_cfg_msix(vsi); 6468 6469 /* Enable only Rx rings, Tx rings were enabled by the FW when the 6470 * Tx queue group list was configured and the context bits were 6471 * programmed using ice_vsi_cfg_txqs 6472 */ 6473 err = ice_vsi_start_all_rx_rings(vsi); 6474 if (err) 6475 return err; 6476 6477 clear_bit(ICE_VSI_DOWN, vsi->state); 6478 ice_napi_enable_all(vsi); 6479 ice_vsi_ena_irq(vsi); 6480 6481 if (vsi->port_info && 6482 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && 6483 vsi->netdev && vsi->type == ICE_VSI_PF) { 6484 ice_print_link_msg(vsi, true); 6485 netif_tx_start_all_queues(vsi->netdev); 6486 netif_carrier_on(vsi->netdev); 6487 ice_ptp_link_change(pf, pf->hw.pf_id, true); 6488 } 6489 6490 /* Perform an initial read of the statistics registers now to 6491 * set the baseline so counters are ready when interface is up 6492 */ 6493 ice_update_eth_stats(vsi); 6494 6495 if (vsi->type == ICE_VSI_PF) 6496 ice_service_task_schedule(pf); 6497 6498 return 0; 6499 } 6500 6501 /** 6502 * ice_up - Bring the connection back up after being down 6503 * @vsi: VSI being configured 6504 */ 6505 int ice_up(struct ice_vsi *vsi) 6506 { 6507 int err; 6508 6509 err = ice_vsi_cfg_lan(vsi); 6510 if (!err) 6511 err = ice_up_complete(vsi); 6512 6513 return err; 6514 } 6515 6516 /** 6517 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring 6518 * @syncp: pointer to u64_stats_sync 6519 * @stats: stats that pkts and bytes count will be taken from 6520 * @pkts: packets stats counter 6521 * @bytes: bytes stats counter 6522 * 6523 * This function fetches stats from the ring, taking into account the atomic 6524 * operations needed to read u64 values on 32-bit machines.
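 *
 * On 64-bit kernels the seqcount inside u64_stats_sync compiles away and
 * the loop below executes exactly once; the retry path only matters on
 * machines that cannot read a 64-bit counter atomically.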
6525 */ 6526 void 6527 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, 6528 struct ice_q_stats stats, u64 *pkts, u64 *bytes) 6529 { 6530 unsigned int start; 6531 6532 do { 6533 start = u64_stats_fetch_begin(syncp); 6534 *pkts = stats.pkts; 6535 *bytes = stats.bytes; 6536 } while (u64_stats_fetch_retry(syncp, start)); 6537 } 6538 6539 /** 6540 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters 6541 * @vsi: the VSI to be updated 6542 * @vsi_stats: the stats struct to be updated 6543 * @rings: rings to work on 6544 * @count: number of rings 6545 */ 6546 static void 6547 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, 6548 struct rtnl_link_stats64 *vsi_stats, 6549 struct ice_tx_ring **rings, u16 count) 6550 { 6551 u16 i; 6552 6553 for (i = 0; i < count; i++) { 6554 struct ice_tx_ring *ring; 6555 u64 pkts = 0, bytes = 0; 6556 6557 ring = READ_ONCE(rings[i]); 6558 if (!ring || !ring->ring_stats) 6559 continue; 6560 ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp, 6561 ring->ring_stats->stats, &pkts, 6562 &bytes); 6563 vsi_stats->tx_packets += pkts; 6564 vsi_stats->tx_bytes += bytes; 6565 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q; 6566 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy; 6567 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize; 6568 } 6569 } 6570 6571 /** 6572 * ice_update_vsi_ring_stats - Update VSI stats counters 6573 * @vsi: the VSI to be updated 6574 */ 6575 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) 6576 { 6577 struct rtnl_link_stats64 *net_stats, *stats_prev; 6578 struct rtnl_link_stats64 *vsi_stats; 6579 u64 pkts, bytes; 6580 int i; 6581 6582 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC); 6583 if (!vsi_stats) 6584 return; 6585 6586 /* reset non-netdev (extended) stats */ 6587 vsi->tx_restart = 0; 6588 vsi->tx_busy = 0; 6589 vsi->tx_linearize = 0; 6590 vsi->rx_buf_failed = 0; 6591 vsi->rx_page_failed = 0; 6592 6593 rcu_read_lock(); 6594 6595 /* update Tx rings counters */ 6596 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, 6597 vsi->num_txq); 6598 6599 /* update Rx rings counters */ 6600 ice_for_each_rxq(vsi, i) { 6601 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]); 6602 struct ice_ring_stats *ring_stats; 6603 6604 ring_stats = ring->ring_stats; 6605 ice_fetch_u64_stats_per_ring(&ring_stats->syncp, 6606 ring_stats->stats, &pkts, 6607 &bytes); 6608 vsi_stats->rx_packets += pkts; 6609 vsi_stats->rx_bytes += bytes; 6610 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed; 6611 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed; 6612 } 6613 6614 /* update XDP Tx rings counters */ 6615 if (ice_is_xdp_ena_vsi(vsi)) 6616 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, 6617 vsi->num_xdp_txq); 6618 6619 rcu_read_unlock(); 6620 6621 net_stats = &vsi->net_stats; 6622 stats_prev = &vsi->net_stats_prev; 6623 6624 /* clear prev counters after reset */ 6625 if (vsi_stats->tx_packets < stats_prev->tx_packets || 6626 vsi_stats->rx_packets < stats_prev->rx_packets) { 6627 stats_prev->tx_packets = 0; 6628 stats_prev->tx_bytes = 0; 6629 stats_prev->rx_packets = 0; 6630 stats_prev->rx_bytes = 0; 6631 } 6632 6633 /* update netdev counters */ 6634 net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets; 6635 net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes; 6636 net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets; 6637 net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes; 6638 6639 
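/* snapshot the ring totals so the next update computes deltas from here */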
stats_prev->tx_packets = vsi_stats->tx_packets; 6640 stats_prev->tx_bytes = vsi_stats->tx_bytes; 6641 stats_prev->rx_packets = vsi_stats->rx_packets; 6642 stats_prev->rx_bytes = vsi_stats->rx_bytes; 6643 6644 kfree(vsi_stats); 6645 } 6646 6647 /** 6648 * ice_update_vsi_stats - Update VSI stats counters 6649 * @vsi: the VSI to be updated 6650 */ 6651 void ice_update_vsi_stats(struct ice_vsi *vsi) 6652 { 6653 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; 6654 struct ice_eth_stats *cur_es = &vsi->eth_stats; 6655 struct ice_pf *pf = vsi->back; 6656 6657 if (test_bit(ICE_VSI_DOWN, vsi->state) || 6658 test_bit(ICE_CFG_BUSY, pf->state)) 6659 return; 6660 6661 /* get stats as recorded by Tx/Rx rings */ 6662 ice_update_vsi_ring_stats(vsi); 6663 6664 /* get VSI stats as recorded by the hardware */ 6665 ice_update_eth_stats(vsi); 6666 6667 cur_ns->tx_errors = cur_es->tx_errors; 6668 cur_ns->rx_dropped = cur_es->rx_discards; 6669 cur_ns->tx_dropped = cur_es->tx_discards; 6670 cur_ns->multicast = cur_es->rx_multicast; 6671 6672 /* update some more netdev stats if this is the main VSI */ 6673 if (vsi->type == ICE_VSI_PF) { 6674 cur_ns->rx_crc_errors = pf->stats.crc_errors; 6675 cur_ns->rx_errors = pf->stats.crc_errors + 6676 pf->stats.illegal_bytes + 6677 pf->stats.rx_len_errors + 6678 pf->stats.rx_undersize + 6679 pf->hw_csum_rx_error + 6680 pf->stats.rx_jabber + 6681 pf->stats.rx_fragments + 6682 pf->stats.rx_oversize; 6683 cur_ns->rx_length_errors = pf->stats.rx_len_errors; 6684 /* record drops from the port level */ 6685 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; 6686 } 6687 } 6688 6689 /** 6690 * ice_update_pf_stats - Update PF port stats counters 6691 * @pf: PF whose stats need to be updated 6692 */ 6693 void ice_update_pf_stats(struct ice_pf *pf) 6694 { 6695 struct ice_hw_port_stats *prev_ps, *cur_ps; 6696 struct ice_hw *hw = &pf->hw; 6697 u16 fd_ctr_base; 6698 u8 port; 6699 6700 port = hw->port_info->lport; 6701 prev_ps = &pf->stats_prev; 6702 cur_ps = &pf->stats; 6703 6704 if (ice_is_reset_in_progress(pf->state)) 6705 pf->stat_prev_loaded = false; 6706 6707 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, 6708 &prev_ps->eth.rx_bytes, 6709 &cur_ps->eth.rx_bytes); 6710 6711 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, 6712 &prev_ps->eth.rx_unicast, 6713 &cur_ps->eth.rx_unicast); 6714 6715 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, 6716 &prev_ps->eth.rx_multicast, 6717 &cur_ps->eth.rx_multicast); 6718 6719 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, 6720 &prev_ps->eth.rx_broadcast, 6721 &cur_ps->eth.rx_broadcast); 6722 6723 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, 6724 &prev_ps->eth.rx_discards, 6725 &cur_ps->eth.rx_discards); 6726 6727 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, 6728 &prev_ps->eth.tx_bytes, 6729 &cur_ps->eth.tx_bytes); 6730 6731 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, 6732 &prev_ps->eth.tx_unicast, 6733 &cur_ps->eth.tx_unicast); 6734 6735 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, 6736 &prev_ps->eth.tx_multicast, 6737 &cur_ps->eth.tx_multicast); 6738 6739 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, 6740 &prev_ps->eth.tx_broadcast, 6741 &cur_ps->eth.tx_broadcast); 6742 6743 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, 6744 &prev_ps->tx_dropped_link_down, 6745 &cur_ps->tx_dropped_link_down); 6746 6747 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, 6748
&prev_ps->rx_size_64, &cur_ps->rx_size_64); 6749 6750 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, 6751 &prev_ps->rx_size_127, &cur_ps->rx_size_127); 6752 6753 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, 6754 &prev_ps->rx_size_255, &cur_ps->rx_size_255); 6755 6756 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, 6757 &prev_ps->rx_size_511, &cur_ps->rx_size_511); 6758 6759 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, 6760 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); 6761 6762 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, 6763 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); 6764 6765 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, 6766 &prev_ps->rx_size_big, &cur_ps->rx_size_big); 6767 6768 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, 6769 &prev_ps->tx_size_64, &cur_ps->tx_size_64); 6770 6771 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, 6772 &prev_ps->tx_size_127, &cur_ps->tx_size_127); 6773 6774 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, 6775 &prev_ps->tx_size_255, &cur_ps->tx_size_255); 6776 6777 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, 6778 &prev_ps->tx_size_511, &cur_ps->tx_size_511); 6779 6780 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, 6781 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); 6782 6783 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, 6784 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); 6785 6786 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, 6787 &prev_ps->tx_size_big, &cur_ps->tx_size_big); 6788 6789 fd_ctr_base = hw->fd_ctr_base; 6790 6791 ice_stat_update40(hw, 6792 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), 6793 pf->stat_prev_loaded, &prev_ps->fd_sb_match, 6794 &cur_ps->fd_sb_match); 6795 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, 6796 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); 6797 6798 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, 6799 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); 6800 6801 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, 6802 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); 6803 6804 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, 6805 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); 6806 6807 ice_update_dcb_stats(pf); 6808 6809 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, 6810 &prev_ps->crc_errors, &cur_ps->crc_errors); 6811 6812 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, 6813 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); 6814 6815 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, 6816 &prev_ps->mac_local_faults, 6817 &cur_ps->mac_local_faults); 6818 6819 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, 6820 &prev_ps->mac_remote_faults, 6821 &cur_ps->mac_remote_faults); 6822 6823 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, 6824 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); 6825 6826 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, 6827 &prev_ps->rx_undersize, &cur_ps->rx_undersize); 6828 6829 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, 6830 &prev_ps->rx_fragments, &cur_ps->rx_fragments); 6831 6832 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, 6833 &prev_ps->rx_oversize, &cur_ps->rx_oversize); 6834 6835 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, 6836 
&prev_ps->rx_jabber, &cur_ps->rx_jabber); 6837 6838 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; 6839 6840 pf->stat_prev_loaded = true; 6841 } 6842 6843 /** 6844 * ice_get_stats64 - get statistics for network device structure 6845 * @netdev: network interface device structure 6846 * @stats: main device statistics structure 6847 */ 6848 static 6849 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) 6850 { 6851 struct ice_netdev_priv *np = netdev_priv(netdev); 6852 struct rtnl_link_stats64 *vsi_stats; 6853 struct ice_vsi *vsi = np->vsi; 6854 6855 vsi_stats = &vsi->net_stats; 6856 6857 if (!vsi->num_txq || !vsi->num_rxq) 6858 return; 6859 6860 /* netdev packet/byte stats come from ring counters. These are obtained 6861 * by summing up the ring counters (done by ice_update_vsi_ring_stats), 6862 * but only call the update routine and read the registers if the VSI is 6863 * not down. 6864 */ 6865 if (!test_bit(ICE_VSI_DOWN, vsi->state)) 6866 ice_update_vsi_ring_stats(vsi); 6867 stats->tx_packets = vsi_stats->tx_packets; 6868 stats->tx_bytes = vsi_stats->tx_bytes; 6869 stats->rx_packets = vsi_stats->rx_packets; 6870 stats->rx_bytes = vsi_stats->rx_bytes; 6871 6872 /* The rest of the stats can be read from the hardware but instead we 6873 * just return values that the watchdog task has already obtained from 6874 * the hardware. 6875 */ 6876 stats->multicast = vsi_stats->multicast; 6877 stats->tx_errors = vsi_stats->tx_errors; 6878 stats->tx_dropped = vsi_stats->tx_dropped; 6879 stats->rx_errors = vsi_stats->rx_errors; 6880 stats->rx_dropped = vsi_stats->rx_dropped; 6881 stats->rx_crc_errors = vsi_stats->rx_crc_errors; 6882 stats->rx_length_errors = vsi_stats->rx_length_errors; 6883 } 6884 6885 /** 6886 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI 6887 * @vsi: VSI having NAPI disabled 6888 */ 6889 static void ice_napi_disable_all(struct ice_vsi *vsi) 6890 { 6891 int q_idx; 6892 6893 if (!vsi->netdev) 6894 return; 6895 6896 ice_for_each_q_vector(vsi, q_idx) { 6897 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 6898 6899 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) 6900 napi_disable(&q_vector->napi); 6901 6902 cancel_work_sync(&q_vector->tx.dim.work); 6903 cancel_work_sync(&q_vector->rx.dim.work); 6904 } 6905 } 6906 6907 /** 6908 * ice_down - Shutdown the connection 6909 * @vsi: The VSI being stopped 6910 * 6911 * Caller of this function is expected to set the vsi->state ICE_VSI_DOWN bit 6912 */ 6913 int ice_down(struct ice_vsi *vsi) 6914 { 6915 int i, tx_err, rx_err, vlan_err = 0; 6916 6917 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); 6918 6919 if (vsi->netdev && vsi->type == ICE_VSI_PF) { 6920 vlan_err = ice_vsi_del_vlan_zero(vsi); 6921 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false); 6922 netif_carrier_off(vsi->netdev); 6923 netif_tx_disable(vsi->netdev); 6924 } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { 6925 ice_eswitch_stop_all_tx_queues(vsi->back); 6926 } 6927 6928 ice_vsi_dis_irq(vsi); 6929 6930 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); 6931 if (tx_err) 6932 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", 6933 vsi->vsi_num, tx_err); 6934 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) { 6935 tx_err = ice_vsi_stop_xdp_tx_rings(vsi); 6936 if (tx_err) 6937 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", 6938 vsi->vsi_num, tx_err); 6939 } 6940 6941 rx_err = ice_vsi_stop_all_rx_rings(vsi); 6942 if (rx_err) 6943 netdev_err(vsi->netdev, "Failed stop Rx rings,
VSI %d error %d\n", 6944 vsi->vsi_num, rx_err); 6945 6946 ice_napi_disable_all(vsi); 6947 6948 ice_for_each_txq(vsi, i) 6949 ice_clean_tx_ring(vsi->tx_rings[i]); 6950 6951 if (ice_is_xdp_ena_vsi(vsi)) 6952 ice_for_each_xdp_txq(vsi, i) 6953 ice_clean_tx_ring(vsi->xdp_rings[i]); 6954 6955 ice_for_each_rxq(vsi, i) 6956 ice_clean_rx_ring(vsi->rx_rings[i]); 6957 6958 if (tx_err || rx_err || vlan_err) { 6959 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", 6960 vsi->vsi_num, vsi->vsw->sw_id); 6961 return -EIO; 6962 } 6963 6964 return 0; 6965 } 6966 6967 /** 6968 * ice_down_up - shutdown the VSI connection and bring it up 6969 * @vsi: the VSI to be reconnected 6970 */ 6971 int ice_down_up(struct ice_vsi *vsi) 6972 { 6973 int ret; 6974 6975 /* if DOWN already set, nothing to do */ 6976 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state)) 6977 return 0; 6978 6979 ret = ice_down(vsi); 6980 if (ret) 6981 return ret; 6982 6983 ret = ice_up(vsi); 6984 if (ret) { 6985 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n"); 6986 return ret; 6987 } 6988 6989 return 0; 6990 } 6991 6992 /** 6993 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources 6994 * @vsi: VSI having resources allocated 6995 * 6996 * Return 0 on success, negative on failure 6997 */ 6998 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) 6999 { 7000 int i, err = 0; 7001 7002 if (!vsi->num_txq) { 7003 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", 7004 vsi->vsi_num); 7005 return -EINVAL; 7006 } 7007 7008 ice_for_each_txq(vsi, i) { 7009 struct ice_tx_ring *ring = vsi->tx_rings[i]; 7010 7011 if (!ring) 7012 return -EINVAL; 7013 7014 if (vsi->netdev) 7015 ring->netdev = vsi->netdev; 7016 err = ice_setup_tx_ring(ring); 7017 if (err) 7018 break; 7019 } 7020 7021 return err; 7022 } 7023 7024 /** 7025 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources 7026 * @vsi: VSI having resources allocated 7027 * 7028 * Return 0 on success, negative on failure 7029 */ 7030 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) 7031 { 7032 int i, err = 0; 7033 7034 if (!vsi->num_rxq) { 7035 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", 7036 vsi->vsi_num); 7037 return -EINVAL; 7038 } 7039 7040 ice_for_each_rxq(vsi, i) { 7041 struct ice_rx_ring *ring = vsi->rx_rings[i]; 7042 7043 if (!ring) 7044 return -EINVAL; 7045 7046 if (vsi->netdev) 7047 ring->netdev = vsi->netdev; 7048 err = ice_setup_rx_ring(ring); 7049 if (err) 7050 break; 7051 } 7052 7053 return err; 7054 } 7055 7056 /** 7057 * ice_vsi_open_ctrl - open control VSI for use 7058 * @vsi: the VSI to open 7059 * 7060 * Initialization of the Control VSI 7061 * 7062 * Returns 0 on success, negative value on error 7063 */ 7064 int ice_vsi_open_ctrl(struct ice_vsi *vsi) 7065 { 7066 char int_name[ICE_INT_NAME_STR_LEN]; 7067 struct ice_pf *pf = vsi->back; 7068 struct device *dev; 7069 int err; 7070 7071 dev = ice_pf_to_dev(pf); 7072 /* allocate descriptors */ 7073 err = ice_vsi_setup_tx_rings(vsi); 7074 if (err) 7075 goto err_setup_tx; 7076 7077 err = ice_vsi_setup_rx_rings(vsi); 7078 if (err) 7079 goto err_setup_rx; 7080 7081 err = ice_vsi_cfg_lan(vsi); 7082 if (err) 7083 goto err_setup_rx; 7084 7085 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", 7086 dev_driver_string(dev), dev_name(dev)); 7087 err = ice_vsi_req_irq_msix(vsi, int_name); 7088 if (err) 7089 goto err_setup_rx; 7090 7091 ice_vsi_cfg_msix(vsi); 7092 7093 err = ice_vsi_start_all_rx_rings(vsi); 7094 if (err) 7095 goto 
err_up_complete; 7096 7097 clear_bit(ICE_VSI_DOWN, vsi->state); 7098 ice_vsi_ena_irq(vsi); 7099 7100 return 0; 7101 7102 err_up_complete: 7103 ice_down(vsi); 7104 err_setup_rx: 7105 ice_vsi_free_rx_rings(vsi); 7106 err_setup_tx: 7107 ice_vsi_free_tx_rings(vsi); 7108 7109 return err; 7110 } 7111 7112 /** 7113 * ice_vsi_open - Called when a network interface is made active 7114 * @vsi: the VSI to open 7115 * 7116 * Initialization of the VSI 7117 * 7118 * Returns 0 on success, negative value on error 7119 */ 7120 int ice_vsi_open(struct ice_vsi *vsi) 7121 { 7122 char int_name[ICE_INT_NAME_STR_LEN]; 7123 struct ice_pf *pf = vsi->back; 7124 int err; 7125 7126 /* allocate descriptors */ 7127 err = ice_vsi_setup_tx_rings(vsi); 7128 if (err) 7129 goto err_setup_tx; 7130 7131 err = ice_vsi_setup_rx_rings(vsi); 7132 if (err) 7133 goto err_setup_rx; 7134 7135 err = ice_vsi_cfg_lan(vsi); 7136 if (err) 7137 goto err_setup_rx; 7138 7139 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 7140 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); 7141 err = ice_vsi_req_irq_msix(vsi, int_name); 7142 if (err) 7143 goto err_setup_rx; 7144 7145 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); 7146 7147 if (vsi->type == ICE_VSI_PF) { 7148 /* Notify the stack of the actual queue counts. */ 7149 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); 7150 if (err) 7151 goto err_set_qs; 7152 7153 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); 7154 if (err) 7155 goto err_set_qs; 7156 } 7157 7158 err = ice_up_complete(vsi); 7159 if (err) 7160 goto err_up_complete; 7161 7162 return 0; 7163 7164 err_up_complete: 7165 ice_down(vsi); 7166 err_set_qs: 7167 ice_vsi_free_irq(vsi); 7168 err_setup_rx: 7169 ice_vsi_free_rx_rings(vsi); 7170 err_setup_tx: 7171 ice_vsi_free_tx_rings(vsi); 7172 7173 return err; 7174 } 7175 7176 /** 7177 * ice_vsi_release_all - Delete all VSIs 7178 * @pf: PF from which all VSIs are being removed 7179 */ 7180 static void ice_vsi_release_all(struct ice_pf *pf) 7181 { 7182 int err, i; 7183 7184 if (!pf->vsi) 7185 return; 7186 7187 ice_for_each_vsi(pf, i) { 7188 if (!pf->vsi[i]) 7189 continue; 7190 7191 if (pf->vsi[i]->type == ICE_VSI_CHNL) 7192 continue; 7193 7194 err = ice_vsi_release(pf->vsi[i]); 7195 if (err) 7196 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", 7197 i, err, pf->vsi[i]->vsi_num); 7198 } 7199 } 7200 7201 /** 7202 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type 7203 * @pf: pointer to the PF instance 7204 * @type: VSI type to rebuild 7205 * 7206 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type 7207 */ 7208 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) 7209 { 7210 struct device *dev = ice_pf_to_dev(pf); 7211 int i, err; 7212 7213 ice_for_each_vsi(pf, i) { 7214 struct ice_vsi *vsi = pf->vsi[i]; 7215 7216 if (!vsi || vsi->type != type) 7217 continue; 7218 7219 /* rebuild the VSI */ 7220 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT); 7221 if (err) { 7222 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", 7223 err, vsi->idx, ice_vsi_type_str(type)); 7224 return err; 7225 } 7226 7227 /* replay filters for the VSI */ 7228 err = ice_replay_vsi(&pf->hw, vsi->idx); 7229 if (err) { 7230 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n", 7231 err, vsi->idx, ice_vsi_type_str(type)); 7232 return err; 7233 } 7234 7235 /* Re-map HW VSI number, using VSI handle that has been 7236 * previously validated in ice_replay_vsi() call above 7237 
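 * (the absolute HW VSI number assigned by firmware can change across a
 * reset, while the driver-side handle vsi->idx stays stable)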
*/ 7238 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); 7239 7240 /* enable the VSI */ 7241 err = ice_ena_vsi(vsi, false); 7242 if (err) { 7243 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n", 7244 err, vsi->idx, ice_vsi_type_str(type)); 7245 return err; 7246 } 7247 7248 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx, 7249 ice_vsi_type_str(type)); 7250 } 7251 7252 return 0; 7253 } 7254 7255 /** 7256 * ice_update_pf_netdev_link - Update PF netdev link status 7257 * @pf: pointer to the PF instance 7258 */ 7259 static void ice_update_pf_netdev_link(struct ice_pf *pf) 7260 { 7261 bool link_up; 7262 int i; 7263 7264 ice_for_each_vsi(pf, i) { 7265 struct ice_vsi *vsi = pf->vsi[i]; 7266 7267 if (!vsi || vsi->type != ICE_VSI_PF) 7268 return; 7269 7270 ice_get_link_status(pf->vsi[i]->port_info, &link_up); 7271 if (link_up) { 7272 netif_carrier_on(pf->vsi[i]->netdev); 7273 netif_tx_wake_all_queues(pf->vsi[i]->netdev); 7274 } else { 7275 netif_carrier_off(pf->vsi[i]->netdev); 7276 netif_tx_stop_all_queues(pf->vsi[i]->netdev); 7277 } 7278 } 7279 } 7280 7281 /** 7282 * ice_rebuild - rebuild after reset 7283 * @pf: PF to rebuild 7284 * @reset_type: type of reset 7285 * 7286 * Do not rebuild VF VSI in this flow because that is already handled via 7287 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a 7288 * PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we don't want 7289 * to reset/rebuild all the VF VSIs twice. 7290 */ 7291 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) 7292 { 7293 struct device *dev = ice_pf_to_dev(pf); 7294 struct ice_hw *hw = &pf->hw; 7295 bool dvm; 7296 int err; 7297 7298 if (test_bit(ICE_DOWN, pf->state)) 7299 goto clear_recovery; 7300 7301 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); 7302 7303 #define ICE_EMP_RESET_SLEEP_MS 5000 7304 if (reset_type == ICE_RESET_EMPR) { 7305 /* If an EMP reset has occurred, any previously pending flash 7306 * update will have completed. We no longer know whether or 7307 * not the NVM update EMP reset is restricted.
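 *
 * The ICE_EMP_RESET_SLEEP_MS wait below is a fixed, conservative delay
 * (no completion indication is polled) before the control queues are
 * re-initialized.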
7308 */ 7309 pf->fw_emp_reset_disabled = false; 7310 7311 msleep(ICE_EMP_RESET_SLEEP_MS); 7312 } 7313 7314 err = ice_init_all_ctrlq(hw); 7315 if (err) { 7316 dev_err(dev, "control queues init failed %d\n", err); 7317 goto err_init_ctrlq; 7318 } 7319 7320 /* if DDP was previously loaded successfully */ 7321 if (!ice_is_safe_mode(pf)) { 7322 /* reload the SW DB of filter tables */ 7323 if (reset_type == ICE_RESET_PFR) 7324 ice_fill_blk_tbls(hw); 7325 else 7326 /* Reload DDP Package after CORER/GLOBR reset */ 7327 ice_load_pkg(NULL, pf); 7328 } 7329 7330 err = ice_clear_pf_cfg(hw); 7331 if (err) { 7332 dev_err(dev, "clear PF configuration failed %d\n", err); 7333 goto err_init_ctrlq; 7334 } 7335 7336 ice_clear_pxe_mode(hw); 7337 7338 err = ice_init_nvm(hw); 7339 if (err) { 7340 dev_err(dev, "ice_init_nvm failed %d\n", err); 7341 goto err_init_ctrlq; 7342 } 7343 7344 err = ice_get_caps(hw); 7345 if (err) { 7346 dev_err(dev, "ice_get_caps failed %d\n", err); 7347 goto err_init_ctrlq; 7348 } 7349 7350 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); 7351 if (err) { 7352 dev_err(dev, "set_mac_cfg failed %d\n", err); 7353 goto err_init_ctrlq; 7354 } 7355 7356 dvm = ice_is_dvm_ena(hw); 7357 7358 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); 7359 if (err) 7360 goto err_init_ctrlq; 7361 7362 err = ice_sched_init_port(hw->port_info); 7363 if (err) 7364 goto err_sched_init_port; 7365 7366 /* start misc vector */ 7367 err = ice_req_irq_msix_misc(pf); 7368 if (err) { 7369 dev_err(dev, "misc vector setup failed: %d\n", err); 7370 goto err_sched_init_port; 7371 } 7372 7373 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 7374 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); 7375 if (!rd32(hw, PFQF_FD_SIZE)) { 7376 u16 unused, guar, b_effort; 7377 7378 guar = hw->func_caps.fd_fltr_guar; 7379 b_effort = hw->func_caps.fd_fltr_best_effort; 7380 7381 /* force guaranteed filter pool for PF */ 7382 ice_alloc_fd_guar_item(hw, &unused, guar); 7383 /* force shared filter pool for PF */ 7384 ice_alloc_fd_shrd_item(hw, &unused, b_effort); 7385 } 7386 } 7387 7388 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) 7389 ice_dcb_rebuild(pf); 7390 7391 /* If the PF previously had enabled PTP, PTP init needs to happen before 7392 * the VSI rebuild; otherwise the PTP link status events will 7393 * fail.
7394 */ 7395 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 7396 ice_ptp_reset(pf); 7397 7398 if (ice_is_feature_supported(pf, ICE_F_GNSS)) 7399 ice_gnss_init(pf); 7400 7401 /* rebuild PF VSI */ 7402 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); 7403 if (err) { 7404 dev_err(dev, "PF VSI rebuild failed: %d\n", err); 7405 goto err_vsi_rebuild; 7406 } 7407 7408 err = ice_eswitch_rebuild(pf); 7409 if (err) { 7410 dev_err(dev, "Switchdev rebuild failed: %d\n", err); 7411 goto err_vsi_rebuild; 7412 } 7413 7414 if (reset_type == ICE_RESET_PFR) { 7415 err = ice_rebuild_channels(pf); 7416 if (err) { 7417 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n", 7418 err); 7419 goto err_vsi_rebuild; 7420 } 7421 } 7422 7423 /* If Flow Director is active */ 7424 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 7425 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); 7426 if (err) { 7427 dev_err(dev, "control VSI rebuild failed: %d\n", err); 7428 goto err_vsi_rebuild; 7429 } 7430 7431 /* replay HW Flow Director recipes */ 7432 if (hw->fdir_prof) 7433 ice_fdir_replay_flows(hw); 7434 7435 /* replay Flow Director filters */ 7436 ice_fdir_replay_fltrs(pf); 7437 7438 ice_rebuild_arfs(pf); 7439 } 7440 7441 ice_update_pf_netdev_link(pf); 7442 7443 /* tell the firmware we are up */ 7444 err = ice_send_version(pf); 7445 if (err) { 7446 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n", 7447 err); 7448 goto err_vsi_rebuild; 7449 } 7450 7451 ice_replay_post(hw); 7452 7453 /* if we get here, reset flow is successful */ 7454 clear_bit(ICE_RESET_FAILED, pf->state); 7455 7456 ice_plug_aux_dev(pf); 7457 if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) 7458 ice_lag_rebuild(pf); 7459 7460 /* Restore timestamp mode settings after VSI rebuild */ 7461 ice_ptp_restore_timestamp_mode(pf); 7462 return; 7463 7464 err_vsi_rebuild: 7465 err_sched_init_port: 7466 ice_sched_cleanup_all(hw); 7467 err_init_ctrlq: 7468 ice_shutdown_all_ctrlq(hw); 7469 set_bit(ICE_RESET_FAILED, pf->state); 7470 clear_recovery: 7471 /* set this bit in PF state to control service task scheduling */ 7472 set_bit(ICE_NEEDS_RESTART, pf->state); 7473 dev_err(dev, "Rebuild failed, unload and reload driver\n"); 7474 } 7475 7476 /** 7477 * ice_change_mtu - NDO callback to change the MTU 7478 * @netdev: network interface device structure 7479 * @new_mtu: new value for maximum frame size 7480 * 7481 * Returns 0 on success, negative on failure 7482 */ 7483 static int ice_change_mtu(struct net_device *netdev, int new_mtu) 7484 { 7485 struct ice_netdev_priv *np = netdev_priv(netdev); 7486 struct ice_vsi *vsi = np->vsi; 7487 struct ice_pf *pf = vsi->back; 7488 struct bpf_prog *prog; 7489 u8 count = 0; 7490 int err = 0; 7491 7492 if (new_mtu == (int)netdev->mtu) { 7493 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); 7494 return 0; 7495 } 7496 7497 prog = vsi->xdp_prog; 7498 if (prog && !prog->aux->xdp_has_frags) { 7499 int frame_size = ice_max_xdp_frame_size(vsi); 7500 7501 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { 7502 netdev_err(netdev, "max MTU for XDP usage is %d\n", 7503 frame_size - ICE_ETH_PKT_HDR_PAD); 7504 return -EINVAL; 7505 } 7506 } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) { 7507 if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) { 7508 netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n", 7509 ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD); 7510 return -EINVAL; 7511 } 7512 } 7513 7514 /* if a reset is in progress, wait for some time for it to complete */ 7515 do { 7516 if 
(ice_is_reset_in_progress(pf->state)) { 7517 count++; 7518 usleep_range(1000, 2000); 7519 } else { 7520 break; 7521 } 7522 7523 } while (count < 100); 7524 7525 if (count == 100) { 7526 netdev_err(netdev, "can't change MTU. Device is busy\n"); 7527 return -EBUSY; 7528 } 7529 7530 netdev->mtu = (unsigned int)new_mtu; 7531 err = ice_down_up(vsi); 7532 if (err) 7533 return err; 7534 7535 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); 7536 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); 7537 7538 return err; 7539 } 7540 7541 /** 7542 * ice_eth_ioctl - Access the hwtstamp interface 7543 * @netdev: network interface device structure 7544 * @ifr: interface request data 7545 * @cmd: ioctl command 7546 */ 7547 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 7548 { 7549 struct ice_netdev_priv *np = netdev_priv(netdev); 7550 struct ice_pf *pf = np->vsi->back; 7551 7552 switch (cmd) { 7553 case SIOCGHWTSTAMP: 7554 return ice_ptp_get_ts_config(pf, ifr); 7555 case SIOCSHWTSTAMP: 7556 return ice_ptp_set_ts_config(pf, ifr); 7557 default: 7558 return -EOPNOTSUPP; 7559 } 7560 } 7561 7562 /** 7563 * ice_aq_str - convert AQ err code to a string 7564 * @aq_err: the AQ error code to convert 7565 */ 7566 const char *ice_aq_str(enum ice_aq_err aq_err) 7567 { 7568 switch (aq_err) { 7569 case ICE_AQ_RC_OK: 7570 return "OK"; 7571 case ICE_AQ_RC_EPERM: 7572 return "ICE_AQ_RC_EPERM"; 7573 case ICE_AQ_RC_ENOENT: 7574 return "ICE_AQ_RC_ENOENT"; 7575 case ICE_AQ_RC_ENOMEM: 7576 return "ICE_AQ_RC_ENOMEM"; 7577 case ICE_AQ_RC_EBUSY: 7578 return "ICE_AQ_RC_EBUSY"; 7579 case ICE_AQ_RC_EEXIST: 7580 return "ICE_AQ_RC_EEXIST"; 7581 case ICE_AQ_RC_EINVAL: 7582 return "ICE_AQ_RC_EINVAL"; 7583 case ICE_AQ_RC_ENOSPC: 7584 return "ICE_AQ_RC_ENOSPC"; 7585 case ICE_AQ_RC_ENOSYS: 7586 return "ICE_AQ_RC_ENOSYS"; 7587 case ICE_AQ_RC_EMODE: 7588 return "ICE_AQ_RC_EMODE"; 7589 case ICE_AQ_RC_ENOSEC: 7590 return "ICE_AQ_RC_ENOSEC"; 7591 case ICE_AQ_RC_EBADSIG: 7592 return "ICE_AQ_RC_EBADSIG"; 7593 case ICE_AQ_RC_ESVN: 7594 return "ICE_AQ_RC_ESVN"; 7595 case ICE_AQ_RC_EBADMAN: 7596 return "ICE_AQ_RC_EBADMAN"; 7597 case ICE_AQ_RC_EBADBUF: 7598 return "ICE_AQ_RC_EBADBUF"; 7599 } 7600 7601 return "ICE_AQ_RC_UNKNOWN"; 7602 } 7603 7604 /** 7605 * ice_set_rss_lut - Set RSS LUT 7606 * @vsi: Pointer to VSI structure 7607 * @lut: Lookup table 7608 * @lut_size: Lookup table size 7609 * 7610 * Returns 0 on success, negative on failure 7611 */ 7612 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) 7613 { 7614 struct ice_aq_get_set_rss_lut_params params = {}; 7615 struct ice_hw *hw = &vsi->back->hw; 7616 int status; 7617 7618 if (!lut) 7619 return -EINVAL; 7620 7621 params.vsi_handle = vsi->idx; 7622 params.lut_size = lut_size; 7623 params.lut_type = vsi->rss_lut_type; 7624 params.lut = lut; 7625 7626 status = ice_aq_set_rss_lut(hw, &params); 7627 if (status) 7628 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n", 7629 status, ice_aq_str(hw->adminq.sq_last_status)); 7630 7631 return status; 7632 } 7633 7634 /** 7635 * ice_set_rss_key - Set RSS key 7636 * @vsi: Pointer to the VSI structure 7637 * @seed: RSS hash seed 7638 * 7639 * Returns 0 on success, negative on failure 7640 */ 7641 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) 7642 { 7643 struct ice_hw *hw = &vsi->back->hw; 7644 int status; 7645 7646 if (!seed) 7647 return -EINVAL; 7648 7649 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); 7650 if (status) 7651 dev_err(ice_pf_to_dev(vsi->back),
"Cannot set RSS key, err %d aq_err %s\n", 7652 status, ice_aq_str(hw->adminq.sq_last_status)); 7653 7654 return status; 7655 } 7656 7657 /** 7658 * ice_get_rss_lut - Get RSS LUT 7659 * @vsi: Pointer to VSI structure 7660 * @lut: Buffer to store the lookup table entries 7661 * @lut_size: Size of buffer to store the lookup table entries 7662 * 7663 * Returns 0 on success, negative on failure 7664 */ 7665 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) 7666 { 7667 struct ice_aq_get_set_rss_lut_params params = {}; 7668 struct ice_hw *hw = &vsi->back->hw; 7669 int status; 7670 7671 if (!lut) 7672 return -EINVAL; 7673 7674 params.vsi_handle = vsi->idx; 7675 params.lut_size = lut_size; 7676 params.lut_type = vsi->rss_lut_type; 7677 params.lut = lut; 7678 7679 status = ice_aq_get_rss_lut(hw, ¶ms); 7680 if (status) 7681 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n", 7682 status, ice_aq_str(hw->adminq.sq_last_status)); 7683 7684 return status; 7685 } 7686 7687 /** 7688 * ice_get_rss_key - Get RSS key 7689 * @vsi: Pointer to VSI structure 7690 * @seed: Buffer to store the key in 7691 * 7692 * Returns 0 on success, negative on failure 7693 */ 7694 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) 7695 { 7696 struct ice_hw *hw = &vsi->back->hw; 7697 int status; 7698 7699 if (!seed) 7700 return -EINVAL; 7701 7702 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); 7703 if (status) 7704 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n", 7705 status, ice_aq_str(hw->adminq.sq_last_status)); 7706 7707 return status; 7708 } 7709 7710 /** 7711 * ice_bridge_getlink - Get the hardware bridge mode 7712 * @skb: skb buff 7713 * @pid: process ID 7714 * @seq: RTNL message seq 7715 * @dev: the netdev being configured 7716 * @filter_mask: filter mask passed in 7717 * @nlflags: netlink flags passed in 7718 * 7719 * Return the bridge mode (VEB/VEPA) 7720 */ 7721 static int 7722 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 7723 struct net_device *dev, u32 filter_mask, int nlflags) 7724 { 7725 struct ice_netdev_priv *np = netdev_priv(dev); 7726 struct ice_vsi *vsi = np->vsi; 7727 struct ice_pf *pf = vsi->back; 7728 u16 bmode; 7729 7730 bmode = pf->first_sw->bridge_mode; 7731 7732 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, 7733 filter_mask, NULL); 7734 } 7735 7736 /** 7737 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) 7738 * @vsi: Pointer to VSI structure 7739 * @bmode: Hardware bridge mode (VEB/VEPA) 7740 * 7741 * Returns 0 on success, negative on failure 7742 */ 7743 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) 7744 { 7745 struct ice_aqc_vsi_props *vsi_props; 7746 struct ice_hw *hw = &vsi->back->hw; 7747 struct ice_vsi_ctx *ctxt; 7748 int ret; 7749 7750 vsi_props = &vsi->info; 7751 7752 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 7753 if (!ctxt) 7754 return -ENOMEM; 7755 7756 ctxt->info = vsi->info; 7757 7758 if (bmode == BRIDGE_MODE_VEB) 7759 /* change from VEPA to VEB mode */ 7760 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 7761 else 7762 /* change from VEB to VEPA mode */ 7763 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 7764 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); 7765 7766 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 7767 if (ret) { 7768 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n", 7769 bmode, ret, 
/**
 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
 * @vsi: Pointer to VSI structure
 * @bmode: Hardware bridge mode (VEB/VEPA)
 *
 * Returns 0 on success, negative on failure
 */
static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
{
	struct ice_aqc_vsi_props *vsi_props;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	int ret;

	vsi_props = &vsi->info;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;

	if (bmode == BRIDGE_MODE_VEB)
		/* change from VEPA to VEB mode */
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	else
		/* change from VEB to VEPA mode */
		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);

	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (ret) {
		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
			bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
		goto out;
	}
	/* Update sw flags for bookkeeping */
	vsi_props->sw_flags = ctxt->info.sw_flags;

out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge setlink flags
 * @extack: netlink extended ack
 *
 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
 * not already set) for all VSIs connected to this switch, and also updates
 * the unicast switch filter rules for the corresponding switch of the netdev.
 */
static int
ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
		   u16 __always_unused flags,
		   struct netlink_ext_ack __always_unused *extack)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_pf *pf = np->vsi->back;
	struct nlattr *attr, *br_spec;
	struct ice_hw *hw = &pf->hw;
	struct ice_sw *pf_sw;
	int rem, v, err = 0;

	pf_sw = pf->first_sw;
	/* find the attribute in the netlink message */
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;
		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;
		/* Continue if bridge mode is not being flipped */
		if (mode == pf_sw->bridge_mode)
			continue;
		/* Iterate through the PF VSI list and update the loopback
		 * mode of each VSI
		 */
		ice_for_each_vsi(pf, v) {
			if (!pf->vsi[v])
				continue;
			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
			if (err)
				return err;
		}

		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
		/* Update the unicast switch filter rules for the corresponding
		 * switch of the netdev
		 */
		err = ice_update_sw_rule_bridge_mode(hw);
		if (err) {
			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
				   mode, err,
				   ice_aq_str(hw->adminq.sq_last_status));
			/* revert hw->evb_veb */
			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
			return err;
		}

		pf_sw->bridge_mode = mode;
	}

	return 0;
}
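/* Note: ice_tx_timeout() below escalates recovery on repeated hangs:
 * level 1 requests a PF reset, level 2 a core reset and level 3 a global
 * reset; anything beyond that leaves the device marked down.
 */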
/**
 * ice_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: Tx queue
 */
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_tx_ring *tx_ring = NULL;
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u32 i;

	pf->tx_timeout_count++;

	/* Check if PFC is enabled for the TC to which the queue belongs. If
	 * so, the Tx timeout is not caused by a hung queue and there is no
	 * need to reset and rebuild.
	 */
	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
			 txqueue);
		return;
	}

	/* now that we have an index, find the tx_ring struct */
	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			if (txqueue == vsi->tx_rings[i]->q_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}

	/* Reset recovery level if enough time has elapsed after last timeout.
	 * Also ensure no new reset action happens before next timeout period.
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
		pf->tx_timeout_recovery_level = 1;
	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
				       netdev->watchdog_timeo)))
		return;

	if (tx_ring) {
		struct ice_hw *hw = &pf->hw;
		u32 head, val = 0;

		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
		/* Read interrupt register */
		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));

		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use, val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
		    pf->tx_timeout_recovery_level, txqueue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(ICE_PFR_REQ, pf->state);
		break;
	case 2:
		set_bit(ICE_CORER_REQ, pf->state);
		break;
	case 3:
		set_bit(ICE_GLOBR_REQ, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
		set_bit(ICE_DOWN, pf->state);
		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
		set_bit(ICE_SERVICE_DIS, pf->state);
		break;
	}

	ice_service_task_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * ice_setup_tc_cls_flower - flower classifier offloads
 * @np: net device to configure
 * @filter_dev: device on which filter is added
 * @cls_flower: offload data
 */
static int
ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
			struct net_device *filter_dev,
			struct flow_cls_offload *cls_flower)
{
	struct ice_vsi *vsi = np->vsi;

	if (cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(vsi, cls_flower);
	default:
		return -EINVAL;
	}
}

/**
 * ice_setup_tc_block_cb - callback handler registered for TC block
 * @type: TC SETUP type
 * @type_data: TC flower offload data that contains user input
 * @cb_priv: netdev private data
 */
static int
ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct ice_netdev_priv *np = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}
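/* Usage sketch: the mqprio/ADQ configuration validated below is typically
 * created with something like
 *
 *   tc qdisc add dev <ifname> root mqprio num_tc 2 \
 *      map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel \
 *      shaper bw_rlimit max_rate 4Gbit 8Gbit
 *
 * (<ifname> is a placeholder; the queue counts and rates are examples
 * only).
 */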
/**
 * ice_validate_mqprio_qopt - Validate TCF input parameters
 * @vsi: Pointer to VSI
 * @mqprio_qopt: input parameters for mqprio queue configuration
 *
 * This function validates MQPRIO params, such as qcount (power of 2 wherever
 * needed), and makes sure the user doesn't specify qcount or BW rate limits
 * for more TCs than "num_tc"
 */
static int
ice_validate_mqprio_qopt(struct ice_vsi *vsi,
			 struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	int non_power_of_2_qcount = 0;
	struct ice_pf *pf = vsi->back;
	int max_rss_q_cnt = 0;
	u64 sum_min_rate = 0;
	struct device *dev;
	int i, speed;
	u8 num_tc;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (mqprio_qopt->qopt.offset[0] != 0 ||
	    mqprio_qopt->qopt.num_tc < 1 ||
	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
		return -EINVAL;

	dev = ice_pf_to_dev(pf);
	vsi->ch_rss_size = 0;
	num_tc = mqprio_qopt->qopt.num_tc;
	speed = ice_get_link_speed_kbps(vsi);

	for (i = 0; num_tc; i++) {
		int qcount = mqprio_qopt->qopt.count[i];
		u64 max_rate, min_rate, rem;

		if (!qcount)
			return -EINVAL;

		if (is_power_of_2(qcount)) {
			if (non_power_of_2_qcount &&
			    qcount > non_power_of_2_qcount) {
				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
					qcount, non_power_of_2_qcount);
				return -EINVAL;
			}
			if (qcount > max_rss_q_cnt)
				max_rss_q_cnt = qcount;
		} else {
			if (non_power_of_2_qcount &&
			    qcount != non_power_of_2_qcount) {
				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
					qcount, non_power_of_2_qcount);
				return -EINVAL;
			}
			if (qcount < max_rss_q_cnt) {
				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
					qcount, max_rss_q_cnt);
				return -EINVAL;
			}
			max_rss_q_cnt = qcount;
			non_power_of_2_qcount = qcount;
		}

		/* The tc command takes input in Kbps/Mbps/Gbps etc. but
		 * converts the bandwidth rate limit into bytes/s before
		 * passing it down to the driver, so convert the input
		 * bandwidth from bytes/s back to Kbps.
		 */
		max_rate = mqprio_qopt->max_rate[i];
		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);

		/* min_rate is minimum guaranteed rate and it can't be zero */
		min_rate = mqprio_qopt->min_rate[i];
		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
		sum_min_rate += min_rate;

		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
				min_rate, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		if (max_rate && max_rate > speed) {
			dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
				i, max_rate, speed);
			return -EINVAL;
		}

		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
		if (rem) {
			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps\n",
				i, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
		if (rem) {
			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps\n",
				i, ICE_MIN_BW_LIMIT);
			return -EINVAL;
		}

		/* min_rate can't be more than max_rate, except when max_rate
		 * is zero (which implies the max line rate is sought). In such
		 * a case min_rate can be more than max.
		 */
		if (max_rate && min_rate > max_rate) {
			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
				min_rate, max_rate);
			return -EINVAL;
		}

		if (i >= mqprio_qopt->qopt.num_tc - 1)
			break;
		if (mqprio_qopt->qopt.offset[i + 1] !=
		    (mqprio_qopt->qopt.offset[i] + qcount))
			return -EINVAL;
	}
	if (vsi->num_rxq <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
		return -EINVAL;
	if (vsi->num_txq <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
		return -EINVAL;

	if (sum_min_rate && sum_min_rate > (u64)speed) {
		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
			sum_min_rate, speed);
		return -EINVAL;
	}

	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
	vsi->ch_rss_size = max_rss_q_cnt;

	return 0;
}
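/* Worked example for the qcount rules above: "queues 4@0 4@4 6@8" is
 * accepted (a single non-power-of-2 count, 6, and it is the largest),
 * while "queues 8@0 6@8" is rejected because the non-power-of-2 count is
 * smaller than another TC's count.
 */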
/**
 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
 * @pf: ptr to PF device
 * @vsi: ptr to VSI
 */
static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(pf);
	bool added = false;
	struct ice_hw *hw;
	int flow;

	if (!(vsi->num_gfltr || vsi->num_bfltr))
		return -EINVAL;

	hw = &pf->hw;
	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		struct ice_fd_hw_prof *prof;
		int tun, status;
		u64 entry_h;

		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
		      hw->fdir_prof[flow]->cnt))
			continue;

		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			enum ice_flow_priority prio;
			u64 prof_id;

			/* add this VSI to FDir profile for this flow */
			prio = ICE_FLOW_PRIO_NORMAL;
			prof = hw->fdir_prof[flow];
			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
			status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
						    prof->vsi_h[0], vsi->idx,
						    prio, prof->fdir_seg[tun],
						    &entry_h);
			if (status) {
				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
					vsi->idx, flow);
				continue;
			}

			prof->entry_h[prof->cnt][tun] = entry_h;
		}

		/* store VSI for filter replay and delete */
		prof->vsi_h[prof->cnt] = vsi->idx;
		prof->cnt++;

		added = true;
		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
			flow);
	}

	if (!added)
		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);

	return 0;
}
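/* Note on ice_add_vsi_to_fdir() above: profile IDs are laid out as
 * flow + tun * ICE_FLTR_PTYPE_MAX, i.e. each flow type appears to get one
 * profile per segment (non-tunneled and tunneled), matching the
 * ICE_FD_HW_SEG_MAX inner loop.
 */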
/**
 * ice_add_channel - add a channel by adding VSI
 * @pf: ptr to PF device
 * @sw_id: underlying HW switching element ID
 * @ch: ptr to channel structure
 *
 * Add a channel (VSI) using add_vsi and queue_map
 */
static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *vsi;

	if (ch->type != ICE_VSI_CHNL) {
		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
		return -EINVAL;
	}

	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
	if (!vsi || vsi->type != ICE_VSI_CHNL) {
		dev_err(dev, "create chnl VSI failure\n");
		return -EINVAL;
	}

	ice_add_vsi_to_fdir(pf, vsi);

	ch->sw_id = sw_id;
	ch->vsi_num = vsi->vsi_num;
	ch->info.mapping_flags = vsi->info.mapping_flags;
	ch->ch_vsi = vsi;
	/* set the back pointer of channel for newly created VSI */
	vsi->ch = ch;

	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
	       sizeof(vsi->info.q_mapping));
	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));

	return 0;
}

/**
 * ice_chnl_cfg_res - configure rings and vectors for a channel
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Configure channel specific resources such as rings and vectors.
 */
static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
{
	int i;

	for (i = 0; i < ch->num_txq; i++) {
		struct ice_q_vector *tx_q_vector, *rx_q_vector;
		struct ice_ring_container *rc;
		struct ice_tx_ring *tx_ring;
		struct ice_rx_ring *rx_ring;

		tx_ring = vsi->tx_rings[ch->base_q + i];
		rx_ring = vsi->rx_rings[ch->base_q + i];
		if (!tx_ring || !rx_ring)
			continue;

		/* setup ring being channel enabled */
		tx_ring->ch = ch;
		rx_ring->ch = ch;

		/* following code block sets up vector specific attributes */
		tx_q_vector = tx_ring->q_vector;
		rx_q_vector = rx_ring->q_vector;
		if (!tx_q_vector && !rx_q_vector)
			continue;

		if (tx_q_vector) {
			tx_q_vector->ch = ch;
			/* setup Tx and Rx ITR setting if DIM is off */
			rc = &tx_q_vector->tx;
			if (!ITR_IS_DYNAMIC(rc))
				ice_write_itr(rc, rc->itr_setting);
		}
		if (rx_q_vector) {
			rx_q_vector->ch = ch;
			/* setup Tx and Rx ITR setting if DIM is off */
			rc = &rx_q_vector->rx;
			if (!ITR_IS_DYNAMIC(rc))
				ice_write_itr(rc, rc->itr_setting);
		}
	}

	/* it is safe to assume that, if the channel has non-zero num_t[r]xq,
	 * then the GLINT_ITR register would have been written to perform an
	 * in-context update, hence perform flush
	 */
	if (ch->num_txq || ch->num_rxq)
		ice_flush(&vsi->back->hw);
}

/**
 * ice_cfg_chnl_all_res - configure channel resources
 * @vsi: ptr to main VSI
 * @ch: ptr to channel structure
 *
 * This function configures channel specific resources such as flow-director
 * counter index, and other resources such as queues, vectors, ITR settings
 */
static void
ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
{
	/* configure channel (aka ADQ) resources such as queues, vectors,
	 * ITR settings for channel specific vectors and anything else
	 */
	ice_chnl_cfg_res(vsi, ch);
}
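/* Note: ice_chnl_cfg_res() above only writes a static ITR value when the
 * ring container is not in dynamic (DIM) mode; under DIM the ITR value is
 * owned by the interrupt moderation algorithm and is deliberately left
 * alone.
 */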
/**
 * ice_setup_hw_channel - setup new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @sw_id: underlying HW switching element ID
 * @type: type of channel to be created (VMDq2/VF)
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and configures Tx rings accordingly
 */
static int
ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_channel *ch, u16 sw_id, u8 type)
{
	struct device *dev = ice_pf_to_dev(pf);
	int ret;

	ch->base_q = vsi->next_base_q;
	ch->type = type;

	ret = ice_add_channel(pf, sw_id, ch);
	if (ret) {
		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
		return ret;
	}

	/* configure/setup ADQ specific resources */
	ice_cfg_chnl_all_res(vsi, ch);

	/* make sure to update the next_base_q so that subsequent channel's
	 * (aka ADQ) VSI queue map is correct
	 */
	vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
		ch->num_rxq);

	return 0;
}

/**
 * ice_setup_channel - setup new channel using uplink element
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and uplink switching element
 */
static bool
ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
		  struct ice_channel *ch)
{
	struct device *dev = ice_pf_to_dev(pf);
	u16 sw_id;
	int ret;

	if (vsi->type != ICE_VSI_PF) {
		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
		return false;
	}

	sw_id = pf->first_sw->sw_id;

	/* create channel (VSI) */
	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
	if (ret) {
		dev_err(dev, "failed to setup hw_channel\n");
		return false;
	}
	dev_dbg(dev, "successfully created channel\n");

	return ch->ch_vsi ? true : false;
}

/**
 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
 * @vsi: VSI to be configured
 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
 */
static int
ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
{
	int err;

	err = ice_set_min_bw_limit(vsi, min_tx_rate);
	if (err)
		return err;

	return ice_set_max_bw_limit(vsi, max_tx_rate);
}
/**
 * ice_create_q_channel - function to create channel
 * @vsi: VSI to be configured
 * @ch: ptr to channel (it contains channel specific params)
 *
 * This function creates a channel (VSI) using num_queues specified by the
 * user and reconfigures RSS if needed.
 */
static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	if (!ch)
		return -EINVAL;

	dev = ice_pf_to_dev(pf);
	if (!ch->num_txq || !ch->num_rxq) {
		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
		return -EINVAL;
	}

	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
			vsi->cnt_q_avail, ch->num_txq);
		return -EINVAL;
	}

	if (!ice_setup_channel(pf, vsi, ch)) {
		dev_info(dev, "Failed to setup channel\n");
		return -EINVAL;
	}
	/* configure BW rate limit */
	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
		int ret;

		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
				       ch->min_tx_rate);
		if (ret)
			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->ch_vsi->vsi_num);
		else
			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->ch_vsi->vsi_num);
	}

	vsi->cnt_q_avail -= ch->num_txq;

	return 0;
}

/**
 * ice_rem_all_chnl_fltrs - removes all channel filters
 * @pf: ptr to PF; TC-flower based filters are tracked at PF level
 *
 * Remove all advanced switch filters only if they are channel-specific
 * tc-flower based filters
 */
static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
{
	struct ice_tc_flower_fltr *fltr;
	struct hlist_node *node;

	/* to remove all channel filters, iterate an ordered list of filters */
	hlist_for_each_entry_safe(fltr, node,
				  &pf->tc_flower_fltr_list,
				  tc_flower_node) {
		struct ice_rule_query_data rule;
		int status;

		/* for now process only channel specific filters */
		if (!ice_is_chnl_fltr(fltr))
			continue;

		rule.rid = fltr->rid;
		rule.rule_id = fltr->rule_id;
		rule.vsi_handle = fltr->dest_vsi_handle;
		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
		if (status) {
			if (status == -ENOENT)
				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
					rule.rule_id);
			else
				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
					status);
		} else if (fltr->dest_vsi) {
			/* update advanced switch filter count */
			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
				u32 flags = fltr->flags;

				fltr->dest_vsi->num_chnl_fltr--;
				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
					pf->num_dmac_chnl_fltrs--;
			}
		}

		hlist_del(&fltr->tc_flower_node);
		kfree(fltr);
	}
}
/**
 * ice_remove_q_channels - Remove queue channels for the TCs
 * @vsi: VSI to be configured
 * @rem_fltr: delete advanced switch filter or not
 *
 * Remove queue channels for the TCs
 */
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
{
	struct ice_channel *ch, *ch_tmp;
	struct ice_pf *pf = vsi->back;
	int i;

	/* remove all tc-flower based filters if they are channel filters only */
	if (rem_fltr)
		ice_rem_all_chnl_fltrs(pf);

	/* remove ntuple filters since queue configuration is being changed */
	if (vsi->netdev->features & NETIF_F_NTUPLE) {
		struct ice_hw *hw = &pf->hw;

		mutex_lock(&hw->fdir_fltr_lock);
		ice_fdir_del_all_fltrs(vsi);
		mutex_unlock(&hw->fdir_fltr_lock);
	}

	/* perform cleanup for channels if they exist */
	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		struct ice_vsi *ch_vsi;

		list_del(&ch->list);
		ch_vsi = ch->ch_vsi;
		if (!ch_vsi) {
			kfree(ch);
			continue;
		}

		/* Reset queue contexts */
		for (i = 0; i < ch->num_rxq; i++) {
			struct ice_tx_ring *tx_ring;
			struct ice_rx_ring *rx_ring;

			tx_ring = vsi->tx_rings[ch->base_q + i];
			rx_ring = vsi->rx_rings[ch->base_q + i];
			if (tx_ring) {
				tx_ring->ch = NULL;
				if (tx_ring->q_vector)
					tx_ring->q_vector->ch = NULL;
			}
			if (rx_ring) {
				rx_ring->ch = NULL;
				if (rx_ring->q_vector)
					rx_ring->q_vector->ch = NULL;
			}
		}

		/* Release FD resources for the channel VSI */
		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);

		/* clear the VSI from scheduler tree */
		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);

		/* Delete VSI from FW, PF and HW VSI arrays */
		ice_vsi_delete(ch->ch_vsi);

		/* free the channel */
		kfree(ch);
	}

	/* clear the channel VSI map which is stored in main VSI */
	ice_for_each_chnl_tc(i)
		vsi->tc_map_vsi[i] = NULL;

	/* reset main VSI's all TC information */
	vsi->all_enatc = 0;
	vsi->all_numtc = 0;
}
/**
 * ice_rebuild_channels - rebuild channels
 * @pf: ptr to PF
 *
 * Recreate channel VSIs and replay filters
 */
static int ice_rebuild_channels(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *main_vsi;
	bool rem_adv_fltr = true;
	struct ice_channel *ch;
	struct ice_vsi *vsi;
	int tc_idx = 1;
	int i, err;

	main_vsi = ice_get_main_vsi(pf);
	if (!main_vsi)
		return 0;

	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
	    main_vsi->old_numtc == 1)
		return 0; /* nothing to be done */

	/* reconfigure main VSI based on old value of TC and cached values
	 * for MQPRIO opts
	 */
	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
	if (err) {
		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
			main_vsi->old_ena_tc, main_vsi->vsi_num);
		return err;
	}

	/* rebuild ADQ VSIs */
	ice_for_each_vsi(pf, i) {
		enum ice_vsi_type type;

		vsi = pf->vsi[i];
		if (!vsi || vsi->type != ICE_VSI_CHNL)
			continue;

		type = vsi->type;

		/* rebuild ADQ VSI */
		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
		if (err) {
			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
				ice_vsi_type_str(type), vsi->idx, err);
			goto cleanup;
		}

		/* Re-map HW VSI number using the previously validated VSI
		 * handle
		 */
		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);

		/* replay filters for the VSI */
		err = ice_replay_vsi(&pf->hw, vsi->idx);
		if (err) {
			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
				ice_vsi_type_str(type), err, vsi->idx);
			rem_adv_fltr = false;
			goto cleanup;
		}
		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
			 ice_vsi_type_str(type), vsi->idx);

		/* store ADQ VSI at correct TC index in main VSI's
		 * map of TC to VSI
		 */
		main_vsi->tc_map_vsi[tc_idx++] = vsi;
	}

	/* ADQ VSIs have been rebuilt successfully, so set up the channels
	 * for the main VSI's Tx and Rx rings
	 */
	list_for_each_entry(ch, &main_vsi->ch_list, list) {
		struct ice_vsi *ch_vsi;

		ch_vsi = ch->ch_vsi;
		if (!ch_vsi)
			continue;

		/* reconfig channel resources */
		ice_cfg_chnl_all_res(main_vsi, ch);

		/* replay BW rate limit if it is non-zero */
		if (!ch->max_tx_rate && !ch->min_tx_rate)
			continue;

		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
				       ch->min_tx_rate);
		if (err)
			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
				err, ch->max_tx_rate, ch->min_tx_rate,
				ch_vsi->vsi_num);
		else
			dev_dbg(dev, "successfully rebuilt BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
				ch->max_tx_rate, ch->min_tx_rate,
				ch_vsi->vsi_num);
	}

	/* reconfig RSS for main VSI */
	if (main_vsi->ch_rss_size)
		ice_vsi_cfg_rss_lut_key(main_vsi);

	return 0;

cleanup:
	ice_remove_q_channels(main_vsi, rem_adv_fltr);
	return err;
}

/**
 * ice_create_q_channels - Add queue channels for the given TCs
 * @vsi: VSI to be configured
 *
 * Configures queue channel mapping to the given TCs
 */
static int ice_create_q_channels(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_channel *ch;
	int ret = 0, i;

	ice_for_each_chnl_tc(i) {
		if (!(vsi->all_enatc & BIT(i)))
			continue;

		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
		if (!ch) {
			ret = -ENOMEM;
			goto err_free;
		}
		INIT_LIST_HEAD(&ch->list);
		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];

		/* convert to Kbits/s */
		if (ch->max_tx_rate)
			ch->max_tx_rate = div_u64(ch->max_tx_rate,
						  ICE_BW_KBPS_DIVISOR);
		if (ch->min_tx_rate)
			ch->min_tx_rate = div_u64(ch->min_tx_rate,
						  ICE_BW_KBPS_DIVISOR);

		ret = ice_create_q_channel(vsi, ch);
		if (ret) {
			dev_err(ice_pf_to_dev(pf),
				"failed creating channel TC:%d\n", i);
			kfree(ch);
			goto err_free;
		}
		list_add_tail(&ch->list, &vsi->ch_list);
		vsi->tc_map_vsi[i] = ch->ch_vsi;
		dev_dbg(ice_pf_to_dev(pf),
			"successfully created channel: VSI %pK\n", ch->ch_vsi);
	}
	return 0;

err_free:
	ice_remove_q_channels(vsi, false);

	return ret;
}
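/* Worked example for the rate conversion above: the tc layer hands rates
 * to the driver in bytes/s, so a 4 Gbit/s cap arrives as 500,000,000 B/s
 * and div_u64(500000000, ICE_BW_KBPS_DIVISOR) yields 4,000,000 Kbit/s,
 * assuming ICE_BW_KBPS_DIVISOR is 125 (8 bits per byte divided by 1000
 * bits per Kbit).
 */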
/**
 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: TC offload data
 */
static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 mode, ena_tc_qdisc = 0;
	int cur_txq, cur_rxq;
	u8 hw = 0, num_tcf;
	struct device *dev;
	int ret, i;

	dev = ice_pf_to_dev(pf);
	num_tcf = mqprio_qopt->qopt.num_tc;
	hw = mqprio_qopt->qopt.hw;
	mode = mqprio_qopt->mode;
	if (!hw) {
		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
		vsi->ch_rss_size = 0;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		goto config_tcf;
	}

	/* Generate queue region map for number of TCF requested */
	for (i = 0; i < num_tcf; i++)
		ena_tc_qdisc |= BIT(i);

	switch (mode) {
	case TC_MQPRIO_MODE_CHANNEL:

		if (pf->hw.port_info->is_custom_tx_enabled) {
			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
			return -EBUSY;
		}
		ice_tear_down_devlink_rate_tree(pf);

		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
		if (ret) {
			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
				   ret);
			return ret;
		}
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
		/* don't assume state of hw_tc_offload during driver load
		 * and set the flag for TC flower filter if hw_tc_offload
		 * is already ON
		 */
		if (vsi->netdev->features & NETIF_F_HW_TC)
			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
		break;
	default:
		return -EINVAL;
	}

config_tcf:

	/* Requesting same TCF configuration as already enabled */
	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
	    mode != TC_MQPRIO_MODE_CHANNEL)
		return 0;

	/* Pause VSI queues */
	ice_dis_vsi(vsi, true);

	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		ice_remove_q_channels(vsi, true);

	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
				     num_online_cpus());
		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
				     num_online_cpus());
	} else {
		/* logic to rebuild VSI, same like ethtool -L */
		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;

		for (i = 0; i < num_tcf; i++) {
			if (!(ena_tc_qdisc & BIT(i)))
				continue;

			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
		}
		vsi->req_txq = offset + qcount_tx;
		vsi->req_rxq = offset + qcount_rx;

		/* store away the original rss_size info, so that it gets
		 * reused from ice_vsi_rebuild during the tc-qdisc delete
		 * stage to determine what the rss_size for the main VSI
		 * should be
		 */
		vsi->orig_rss_size = vsi->rss_size;
	}

	/* save current values of Tx and Rx queues before calling VSI rebuild
	 * for fallback option
	 */
	cur_txq = vsi->num_txq;
	cur_rxq = vsi->num_rxq;

	/* proceed with rebuilding the main VSI using the correct number of
	 * queues
	 */
	ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
	if (ret) {
		/* fallback to current number of queues */
		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
		vsi->req_txq = cur_txq;
		vsi->req_rxq = cur_rxq;
		clear_bit(ICE_RESET_FAILED, pf->state);
		if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
			dev_err(dev, "Rebuild of main VSI failed again\n");
			return ret;
		}
	}
	vsi->all_numtc = num_tcf;
	vsi->all_enatc = ena_tc_qdisc;
	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
	if (ret) {
		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
			   vsi->vsi_num);
		goto exit;
	}

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];

		/* set TC0 rate limit if specified */
		if (max_tx_rate || min_tx_rate) {
			/* convert to Kbits/s */
			if (max_tx_rate)
				max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
			if (min_tx_rate)
				min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);

			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
			if (!ret) {
				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
					max_tx_rate, min_tx_rate, vsi->vsi_num);
			} else {
				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
					max_tx_rate, min_tx_rate, vsi->vsi_num);
				goto exit;
			}
		}
		ret = ice_create_q_channels(vsi);
		if (ret) {
			netdev_err(netdev, "failed configuring queue channels\n");
			goto exit;
		} else {
			netdev_dbg(netdev, "successfully configured channels\n");
		}
	}

	if (vsi->ch_rss_size)
		ice_vsi_cfg_rss_lut_key(vsi);

exit:
	/* if error, reset the all_numtc and all_enatc */
	if (ret) {
		vsi->all_numtc = 0;
		vsi->all_enatc = 0;
	}
	/* resume VSI */
	ice_ena_vsi(vsi, true);

	return ret;
}

static LIST_HEAD(ice_block_cb_list);

static int
ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
	     void *type_data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	bool locked = false;
	int err;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &ice_block_cb_list,
						  ice_setup_tc_block_cb,
						  np, np, true);
	case TC_SETUP_QDISC_MQPRIO:
		if (ice_is_eswitch_mode_switchdev(pf)) {
			netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
			return -EOPNOTSUPP;
		}

		if (pf->adev) {
			mutex_lock(&pf->adev_mutex);
			device_lock(&pf->adev->dev);
			locked = true;
			if (pf->adev->dev.driver) {
				netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
				err = -EBUSY;
				goto adev_unlock;
			}
		}

		/* setup traffic classifier for receive side */
		mutex_lock(&pf->tc_mutex);
		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
		mutex_unlock(&pf->tc_mutex);

adev_unlock:
		if (locked) {
			device_unlock(&pf->adev->dev);
			mutex_unlock(&pf->adev_mutex);
		}
		return err;
	default:
		return -EOPNOTSUPP;
	}
	return -EOPNOTSUPP;
}

static struct ice_indr_block_priv *
ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
			   struct net_device *netdev)
{
	struct ice_indr_block_priv *cb_priv;

	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
		if (!cb_priv->netdev)
			return NULL;
		if (cb_priv->netdev == netdev)
			return cb_priv;
	}
	return NULL;
}

static int
ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
			void *indr_priv)
{
	struct ice_indr_block_priv *priv = indr_priv;
	struct ice_netdev_priv *np = priv->np;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_setup_tc_cls_flower(np, priv->netdev,
					       (struct flow_cls_offload *)
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}
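/* The indirect TC block plumbing below lets flower rules installed on
 * other netdevs (tunnel devices, or VLANs stacked on this PF) be offloaded
 * to this PF's hardware instead of being handled purely in software.
 */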
static int
ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
			struct ice_netdev_priv *np,
			struct flow_block_offload *f, void *data,
			void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct ice_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (!ice_is_tunnel_supported(netdev) &&
	    !(is_vlan_dev(netdev) &&
	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->np = np;
		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);

		block_cb =
			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
						 indr_priv, indr_priv,
						 ice_rep_indr_tc_block_unbind,
						 f, netdev, sch, data, np,
						 cleanup);

		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
		break;
	case FLOW_BLOCK_UNBIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						ice_indr_setup_block_cb,
						indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);

		list_del(&block_cb->driver_list);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb))
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
					       data, cleanup);

	default:
		return -EOPNOTSUPP;
	}
}

/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't open net device while reset is in progress\n");
		return -EBUSY;
	}

	return ice_open_internal(netdev);
}
/**
 * ice_open_internal - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * Internal ice_open implementation. Should not be used directly except by
 * ice_open and the reset handling routine.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open_internal(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_port_info *pi;
	int err;

	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	netif_carrier_off(netdev);

	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err) {
		netdev_err(netdev, "Failed to get link info, error %d\n", err);
		return err;
	}

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Set PHY if there is media, otherwise, turn off PHY */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
			err = ice_init_phy_user_cfg(pi);
			if (err) {
				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
					   err);
				return err;
			}
		}

		err = ice_configure_phy(vsi);
		if (err) {
			netdev_err(netdev, "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	/* Update existing tunnels information */
	udp_tunnel_get_rx_info(netdev);

	return err;
}

/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns success only - not allowed to fail
 */
int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't stop net device while reset is in progress\n");
		return -EBUSY;
	}

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
		int link_err = ice_force_phys_link_state(vsi, false);

		if (link_err) {
			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
				   vsi->vsi_num, link_err);
			return -EIO;
		}
	}

	ice_vsi_close(vsi);

	return 0;
}

/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	bool gso = skb_is_gso(skb);
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO is
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
		features &= ~NETIF_F_GSO_MASK;

	len = skb_network_offset(skb);
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		goto out_rm_features;

	len = skb_network_header_len(skb);
	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
		goto out_rm_features;

	if (skb->encapsulation) {
		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
		 * the case of IPIP frames, the transport header pointer is
		 * after the inner header! So check to make sure that this
		 * is a GRE or UDP_TUNNEL frame before doing that math.
		 */
		if (gso && (skb_shinfo(skb)->gso_type &
			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
			len = skb_inner_network_header(skb) -
			      skb_transport_header(skb);
			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
				goto out_rm_features;
		}

		len = skb_inner_network_header_len(skb);
		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
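/* ice_netdev_safe_mode_ops below is the reduced callback set used when the
 * driver runs in safe mode (e.g. the DDP package could not be loaded): only
 * the basic open/stop/xmit/MTU/stats paths are exposed, with none of the
 * offload, VF or TC hooks found in the full ice_netdev_ops table.
 */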
static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};

static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_select_queue = ice_select_queue,
	.ndo_features_check = ice_features_check,
	.ndo_fix_features = ice_fix_features,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_eth_ioctl = ice_eth_ioctl,
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_set_vf_rate = ice_set_vf_bw,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_setup_tc = ice_setup_tc,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,
};