// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
#include "devlink/port.h"
#include "ice_sf_eth.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
#include <net/xdp_sock_drv.h>

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS("LIBETH");
MODULE_IMPORT_NS("LIBETH_XDP");
MODULE_IMPORT_NS("LIBIE");
MODULE_IMPORT_NS("LIBIE_ADMINQ");
MODULE_IMPORT_NS("LIBIE_FWLOG");
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);

/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}

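/* Usage sketch (illustrative, not a call site in this file): a unit that only
 * holds a struct ice_hw pointer can log against the underlying PCI device
 * without including the struct ice_pf definition, e.g.
 *
 *	dev_dbg(ice_hw_to_dev(hw), "AQ response received\n");
 */
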
static struct workqueue_struct *ice_wq;
struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(const struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops ||
		       dev->netdev_ops == &ice_netdev_safe_mode_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}

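/* Worked example for the wrap case above (illustrative numbers): with
 * ring->count = 512, next_to_clean = 500 and next_to_use = 10, the pending
 * count is 10 + 512 - 500 = 22 descriptors not yet processed.
 */
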
/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in-kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in-kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

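/* Pairing sketch (illustrative): ice_vsi_sync_fltr() below hands both of the
 * callbacks above to the core sync helpers, e.g.
 *
 *	__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 *
 * The core then invokes the first callback for addresses newly added to the
 * netdev list and the second for addresses that have been removed.
 */
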
/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;

	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return 0;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}

	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return status;
}

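/* Usage sketch (illustrative): the sync path below enables multicast
 * promiscuous on the PF VSI with
 *
 *	err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
 *
 * and, because -EEXIST is filtered out above, re-applying an already
 * programmed rule is treated as success.
 */
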
/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If filter is added successfully or already exists, do not go into
	 * 'if' condition and report it as error. Instead continue processing
	 * rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);

				/* promiscuous mode implies allmulticast so
				 * that VSIs that are in promiscuous mode are
				 * subscribed to multicast packets coming to
				 * the port
				 */
				err = ice_set_promisc(vsi,
						      ICE_MCAST_PROMISC_BITS);
				if (err)
					goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}

			/* disable allmulti here, but only if allmulti is not
			 * still enabled for the netdev
			 */
			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
				err = ice_clear_promisc(vsi,
							ICE_MCAST_PROMISC_BITS);
				if (err) {
					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
						   err, vsi->vsi_num);
				}
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	synchronize_irq(pf->oicr_irq.virq);

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		rtnl_lock();
		ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge);
		rtnl_unlock();
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}

	if (vsi->netdev)
		netif_device_detach(vsi->netdev);
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf, reset_type);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw, false);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
		reset_type = ICE_RESET_CORER;
	}

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}

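/* Request sketch (illustrative): elsewhere in the driver a reset is normally
 * requested by setting the matching request bit and kicking the service task,
 * e.g.
 *
 *	set_bit(ICE_PFR_REQ, pf->state);
 *	ice_service_task_schedule(pf);
 *
 * ice_reset_subtask() below then turns the request into an ice_do_reset()
 * call once the PF is neither down nor busy.
 */
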
/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state)) {
		reset_type = ICE_RESET_PFR;
		if (pf->lag && pf->lag->bonded) {
			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
			reset_type = ICE_RESET_CORER;
		}
	}
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_200GB:
		speed = "200 G";
		break;
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

"On" : "Off"; 891 892 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ || 893 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) 894 fec_req = "RS-FEC"; 895 else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ || 896 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ) 897 fec_req = "FC-FEC/BASE-R"; 898 else 899 fec_req = "NONE"; 900 901 kfree(caps); 902 903 done: 904 netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n", 905 speed, fec_req, fec, an_advertised, an, fc); 906 ice_print_topo_conflict(vsi); 907 } 908 909 /** 910 * ice_vsi_link_event - update the VSI's netdev 911 * @vsi: the VSI on which the link event occurred 912 * @link_up: whether or not the VSI needs to be set up or down 913 */ 914 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) 915 { 916 if (!vsi) 917 return; 918 919 if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev) 920 return; 921 922 if (vsi->type == ICE_VSI_PF) { 923 if (link_up == netif_carrier_ok(vsi->netdev)) 924 return; 925 926 if (link_up) { 927 netif_carrier_on(vsi->netdev); 928 netif_tx_wake_all_queues(vsi->netdev); 929 } else { 930 netif_carrier_off(vsi->netdev); 931 netif_tx_stop_all_queues(vsi->netdev); 932 } 933 } 934 } 935 936 /** 937 * ice_set_dflt_mib - send a default config MIB to the FW 938 * @pf: private PF struct 939 * 940 * This function sends a default configuration MIB to the FW. 941 * 942 * If this function errors out at any point, the driver is still able to 943 * function. The main impact is that LFC may not operate as expected. 944 * Therefore an error state in this function should be treated with a DBG 945 * message and continue on with driver rebuild/reenable. 946 */ 947 static void ice_set_dflt_mib(struct ice_pf *pf) 948 { 949 struct device *dev = ice_pf_to_dev(pf); 950 u8 mib_type, *buf, *lldpmib = NULL; 951 u16 len, typelen, offset = 0; 952 struct ice_lldp_org_tlv *tlv; 953 struct ice_hw *hw = &pf->hw; 954 u32 ouisubtype; 955 956 mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; 957 lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL); 958 if (!lldpmib) { 959 dev_dbg(dev, "%s Failed to allocate MIB memory\n", 960 __func__); 961 return; 962 } 963 964 /* Add ETS CFG TLV */ 965 tlv = (struct ice_lldp_org_tlv *)lldpmib; 966 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | 967 ICE_IEEE_ETS_TLV_LEN); 968 tlv->typelen = htons(typelen); 969 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | 970 ICE_IEEE_SUBTYPE_ETS_CFG); 971 tlv->ouisubtype = htonl(ouisubtype); 972 973 buf = tlv->tlvinfo; 974 buf[0] = 0; 975 976 /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0. 977 * Octets 5 - 12 are BW values, set octet 5 to 100% BW. 978 * Octets 13 - 20 are TSA values - leave as zeros 979 */ 980 buf[5] = 0x64; 981 len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen); 982 offset += len + 2; 983 tlv = (struct ice_lldp_org_tlv *) 984 ((char *)tlv + sizeof(tlv->typelen) + len); 985 986 /* Add ETS REC TLV */ 987 buf = tlv->tlvinfo; 988 tlv->typelen = htons(typelen); 989 990 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | 991 ICE_IEEE_SUBTYPE_ETS_REC); 992 tlv->ouisubtype = htonl(ouisubtype); 993 994 /* First octet of buf is reserved 995 * Octets 1 - 4 map UP to TC - all UPs map to zero 996 * Octets 5 - 12 are BW values - set TC 0 to 100%. 
/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message and continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}

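/* Encoding refresher for the TLVs built above (illustrative, values from the
 * LLDP/802.1Qaz specs): typelen packs the 7-bit LLDP TLV type with a 9-bit
 * length, so the organizationally specific ETS TLV (type 127, 25-octet
 * payload) is (127 << 9) | 25, and ouisubtype packs the 24-bit IEEE 802.1
 * OUI 0x0080C2 with an 8-bit subtype.
 */
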
/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power - check module power level
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other bookkeeping needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			libie_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	if (!link_up && old_link)
		pf->link_down_events++;

	ice_ptp_link_change(pf, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

/**
 * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: intermediate helper storage and identifier for waiting
 * @opcode: the opcode to wait for
 *
 * Prepares to wait for a specific AdminQ completion event on the ARQ for
 * a given PF. Actual wait would be done by a call to ice_aq_wait_for_event().
 *
 * Calls are separated to allow caller registering for event before sending
 * the command, which mitigates a race between registering and FW responding.
 *
 * To obtain only the descriptor contents, pass a task->event with null
 * msg_buf. If the complete data buffer is desired, allocate the
 * task->event.msg_buf with enough space ahead of time.
 */
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode)
{
	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);
}

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: ptr prepared by ice_aq_prep_for_event()
 * @timeout: how long to wait, in jiffies
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout)
{
	enum ice_aq_task_state *state = &task->state;
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long start = jiffies;
	long ret;
	int err;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
					       *state != ICE_AQ_TASK_WAITING,
					       timeout);
	switch (*state) {
	case ICE_AQ_TASK_NOT_PREPARED:
		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
		err = -EINVAL;
		break;
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", *state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		task->opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);

	return err;
}

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_rq_event_info *task_ev;
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state != ICE_AQ_TASK_WAITING)
			continue;
		if (task->opcode != opcode)
			continue;

		task_ev = &task->event;
		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
		task_ev->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
			memcpy(task_ev->msg_buf, event->msg_buf,
			       event->buf_len);
			task_ev->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

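/* Teardown note (illustrative): this cancel path is used when AdminQ
 * processing is being stopped (e.g. on driver unload or after a failed
 * reset), so sleepers in ice_aq_wait_for_event() return -ECANCELED promptly
 * instead of waiting out their full timeout.
 */
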
#define ICE_MBX_OVERFLOW_WATERMARK 64

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		struct ice_mbx_data data = {};
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) {
				ice_vc_process_vf_msg(pf, &event, NULL);
				ice_mbx_vf_dec_trig_e830(hw, &event);
			} else {
				u16 val = hw->mailboxq.num_rq_entries;

				data.max_num_msgs_mbx = val;
				val = ICE_MBX_OVERFLOW_WATERMARK;
				data.async_watermark_val = val;
				data.num_msg_proc = i;
				data.num_pending_arq = pending;

				ice_vc_process_vf_msg(pf, &event, &data);
			}
			break;
		case ice_aqc_opc_fw_logs_event:
			libie_get_fwlog_data(&hw->fwlog, event.msg_buf,
					     le16_to_cpu(event.desc.datalen));
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		case ice_aqc_opc_get_health_status:
			ice_process_health_status_event(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there
 * aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* if mac_type is not generic, sideband is not supported
	 * and there's nothing to do here
	 */
	if (!ice_is_generic_mac(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		timer_delete_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume work (e.g. WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = timer_container_of(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}

/**
 * ice_mdd_maybe_reset_vf - reset VF after MDD event
 * @pf: pointer to the PF structure
 * @vf: pointer to the VF structure
 * @reset_vf_tx: whether Tx MDD has occurred
 * @reset_vf_rx: whether Rx MDD has occurred
 *
 * Since the queue can get stuck on VF MDD events, the PF can be configured to
 * automatically reset the VF by enabling the private ethtool flag
 * mdd-auto-reset-vf.
 */
static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf,
				   bool reset_vf_tx, bool reset_vf_rx)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
		return;

	/* VF MDD event counters will be cleared by reset, so print the event
	 * prior to reset.
	 */
	if (reset_vf_tx)
		ice_print_vf_tx_mdd_event(vf);

	if (reset_vf_rx)
		ice_print_vf_rx_mdd_event(vf);

	dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n",
		 pf->hw.pf_id, vf->vf_id);
	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
}

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
		u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
		u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
		u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num,
				     event, queue);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg);
		u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg);
		u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg);
		u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num,
				     event, queue);
		wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg);
		u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg);
		u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg);
		u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event,
				     queue);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}

vf->mdd_tx_events.count++; 1884 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); 1885 if (netif_msg_tx_err(pf)) 1886 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", 1887 vf->vf_id); 1888 1889 reset_vf_tx = true; 1890 } 1891 1892 reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id)); 1893 if (reg & VP_MDET_TX_TDPU_VALID_M) { 1894 wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF); 1895 vf->mdd_tx_events.count++; 1896 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); 1897 if (netif_msg_tx_err(pf)) 1898 dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", 1899 vf->vf_id); 1900 1901 reset_vf_tx = true; 1902 } 1903 1904 reg = rd32(hw, VP_MDET_RX(vf->vf_id)); 1905 if (reg & VP_MDET_RX_VALID_M) { 1906 wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF); 1907 vf->mdd_rx_events.count++; 1908 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); 1909 if (netif_msg_rx_err(pf)) 1910 dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", 1911 vf->vf_id); 1912 1913 reset_vf_rx = true; 1914 } 1915 1916 if (reset_vf_tx || reset_vf_rx) 1917 ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx, 1918 reset_vf_rx); 1919 } 1920 mutex_unlock(&pf->vfs.table_lock); 1921 1922 ice_print_vfs_mdd_events(pf); 1923 } 1924 1925 /** 1926 * ice_force_phys_link_state - Force the physical link state 1927 * @vsi: VSI to force the physical link state to up/down 1928 * @link_up: true/false indicates to set the physical link to up/down 1929 * 1930 * Force the physical link state by getting the current PHY capabilities from 1931 * hardware and setting the PHY config based on the determined capabilities. If 1932 * link changes a link event will be triggered because both the Enable Automatic 1933 * Link Update and LESM Enable bits are set when setting the PHY capabilities. 1934 * 1935 * Returns 0 on success, negative on failure 1936 */ 1937 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) 1938 { 1939 struct ice_aqc_get_phy_caps_data *pcaps; 1940 struct ice_aqc_set_phy_cfg_data *cfg; 1941 struct ice_port_info *pi; 1942 struct device *dev; 1943 int retcode; 1944 1945 if (!vsi || !vsi->port_info || !vsi->back) 1946 return -EINVAL; 1947 if (vsi->type != ICE_VSI_PF) 1948 return 0; 1949 1950 dev = ice_pf_to_dev(vsi->back); 1951 1952 pi = vsi->port_info; 1953 1954 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 1955 if (!pcaps) 1956 return -ENOMEM; 1957 1958 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, 1959 NULL); 1960 if (retcode) { 1961 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n", 1962 vsi->vsi_num, retcode); 1963 retcode = -EIO; 1964 goto out; 1965 } 1966 1967 /* No change in link */ 1968 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && 1969 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) 1970 goto out; 1971 1972 /* Use the current user PHY configuration. The current user PHY 1973 * configuration is initialized during probe from PHY capabilities 1974 * software mode, and updated on set PHY configuration. 
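 * Note the config is duplicated (kmemdup() below) rather than modified
 * in place, so only the ICE_AQ_PHY_ENA_AUTO_LINK_UPDT and
 * ICE_AQ_PHY_ENA_LINK caps of the copy are adjusted.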
1975 */ 1976 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL); 1977 if (!cfg) { 1978 retcode = -ENOMEM; 1979 goto out; 1980 } 1981 1982 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 1983 if (link_up) 1984 cfg->caps |= ICE_AQ_PHY_ENA_LINK; 1985 else 1986 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; 1987 1988 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); 1989 if (retcode) { 1990 dev_err(dev, "Failed to set phy config, VSI %d error %d\n", 1991 vsi->vsi_num, retcode); 1992 retcode = -EIO; 1993 } 1994 1995 kfree(cfg); 1996 out: 1997 kfree(pcaps); 1998 return retcode; 1999 } 2000 2001 /** 2002 * ice_init_nvm_phy_type - Initialize the NVM PHY type 2003 * @pi: port info structure 2004 * 2005 * Initialize nvm_phy_type_[low|high] for link lenient mode support 2006 */ 2007 static int ice_init_nvm_phy_type(struct ice_port_info *pi) 2008 { 2009 struct ice_aqc_get_phy_caps_data *pcaps; 2010 struct ice_pf *pf = pi->hw->back; 2011 int err; 2012 2013 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 2014 if (!pcaps) 2015 return -ENOMEM; 2016 2017 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, 2018 pcaps, NULL); 2019 2020 if (err) { 2021 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); 2022 goto out; 2023 } 2024 2025 pf->nvm_phy_type_hi = pcaps->phy_type_high; 2026 pf->nvm_phy_type_lo = pcaps->phy_type_low; 2027 2028 out: 2029 kfree(pcaps); 2030 return err; 2031 } 2032 2033 /** 2034 * ice_init_link_dflt_override - Initialize link default override 2035 * @pi: port info structure 2036 * 2037 * Initialize link default override and PHY total port shutdown during probe 2038 */ 2039 static void ice_init_link_dflt_override(struct ice_port_info *pi) 2040 { 2041 struct ice_link_default_override_tlv *ldo; 2042 struct ice_pf *pf = pi->hw->back; 2043 2044 ldo = &pf->link_dflt_override; 2045 if (ice_get_link_default_override(ldo, pi)) 2046 return; 2047 2048 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) 2049 return; 2050 2051 /* Enable Total Port Shutdown (override/replace link-down-on-close 2052 * ethtool private flag) for ports with Port Disable bit set. 2053 */ 2054 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); 2055 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); 2056 } 2057 2058 /** 2059 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings 2060 * @pi: port info structure 2061 * 2062 * If default override is enabled, initialize the user PHY cfg speed and FEC 2063 * settings using the default override mask from the NVM. 2064 * 2065 * The PHY should only be configured with the default override settings the 2066 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state 2067 * is used to indicate that the user PHY cfg default override is initialized 2068 * and the PHY has not been configured with the default override settings. The 2069 * state is set here, and cleared in ice_configure_phy the first time the PHY is 2070 * configured. 2071 * 2072 * This function should be called only if the FW doesn't support default 2073 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg. 
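 *
 * A minimal sketch of the expected call site, mirroring what
 * ice_init_phy_user_cfg() does below:
 *
 *	if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
 *	    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN))
 *		ice_init_phy_cfg_dflt_override(pi);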
2074 */ 2075 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi) 2076 { 2077 struct ice_link_default_override_tlv *ldo; 2078 struct ice_aqc_set_phy_cfg_data *cfg; 2079 struct ice_phy_info *phy = &pi->phy; 2080 struct ice_pf *pf = pi->hw->back; 2081 2082 ldo = &pf->link_dflt_override; 2083 2084 /* If link default override is enabled, use it to mask NVM PHY capabilities 2085 * for speed and FEC default configuration. 2086 */ 2087 cfg = &phy->curr_user_phy_cfg; 2088 2089 if (ldo->phy_type_low || ldo->phy_type_high) { 2090 cfg->phy_type_low = pf->nvm_phy_type_lo & 2091 cpu_to_le64(ldo->phy_type_low); 2092 cfg->phy_type_high = pf->nvm_phy_type_hi & 2093 cpu_to_le64(ldo->phy_type_high); 2094 } 2095 cfg->link_fec_opt = ldo->fec_options; 2096 phy->curr_user_fec_req = ICE_FEC_AUTO; 2097 2098 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); 2099 } 2100 2101 /** 2102 * ice_init_phy_user_cfg - Initialize the PHY user configuration 2103 * @pi: port info structure 2104 * 2105 * Initialize the current user PHY configuration, speed, FEC, and FC requested 2106 * mode to default. The PHY defaults are from get PHY capabilities topology 2107 * with media, so call this when media is first available. An error is returned 2108 * if called when media is not available. The PHY initialization completed state 2109 * is set here. 2110 * 2111 * These configurations are used when setting PHY 2112 * configuration. The user PHY configuration is updated on set PHY 2113 * configuration. Returns 0 on success, negative on failure. 2114 */ 2115 static int ice_init_phy_user_cfg(struct ice_port_info *pi) 2116 { 2117 struct ice_aqc_get_phy_caps_data *pcaps; 2118 struct ice_phy_info *phy = &pi->phy; 2119 struct ice_pf *pf = pi->hw->back; 2120 int err; 2121 2122 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) 2123 return -EIO; 2124 2125 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 2126 if (!pcaps) 2127 return -ENOMEM; 2128 2129 if (ice_fw_supports_report_dflt_cfg(pi->hw)) 2130 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, 2131 pcaps, NULL); 2132 else 2133 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 2134 pcaps, NULL); 2135 if (err) { 2136 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); 2137 goto err_out; 2138 } 2139 2140 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg); 2141 2142 /* check if lenient mode is supported and enabled */ 2143 if (ice_fw_supports_link_override(pi->hw) && 2144 !(pcaps->module_compliance_enforcement & 2145 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) { 2146 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); 2147 2148 /* if the FW supports default PHY configuration mode, then the driver 2149 * does not have to apply link override settings.
If not, 2150 * initialize user PHY configuration with link override values 2151 */ 2152 if (!ice_fw_supports_report_dflt_cfg(pi->hw) && 2153 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) { 2154 ice_init_phy_cfg_dflt_override(pi); 2155 goto out; 2156 } 2157 } 2158 2159 /* if link default override is not enabled, set user flow control and 2160 * FEC settings based on what get_phy_caps returned 2161 */ 2162 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps, 2163 pcaps->link_fec_options); 2164 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps); 2165 2166 out: 2167 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; 2168 set_bit(ICE_PHY_INIT_COMPLETE, pf->state); 2169 err_out: 2170 kfree(pcaps); 2171 return err; 2172 } 2173 2174 /** 2175 * ice_configure_phy - configure PHY 2176 * @vsi: VSI of PHY 2177 * 2178 * Set the PHY configuration. If the current PHY configuration is the same as 2179 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise 2180 * configure the based get PHY capabilities for topology with media. 2181 */ 2182 static int ice_configure_phy(struct ice_vsi *vsi) 2183 { 2184 struct device *dev = ice_pf_to_dev(vsi->back); 2185 struct ice_port_info *pi = vsi->port_info; 2186 struct ice_aqc_get_phy_caps_data *pcaps; 2187 struct ice_aqc_set_phy_cfg_data *cfg; 2188 struct ice_phy_info *phy = &pi->phy; 2189 struct ice_pf *pf = vsi->back; 2190 int err; 2191 2192 /* Ensure we have media as we cannot configure a medialess port */ 2193 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) 2194 return -ENOMEDIUM; 2195 2196 ice_print_topo_conflict(vsi); 2197 2198 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) && 2199 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) 2200 return -EPERM; 2201 2202 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) 2203 return ice_force_phys_link_state(vsi, true); 2204 2205 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 2206 if (!pcaps) 2207 return -ENOMEM; 2208 2209 /* Get current PHY config */ 2210 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, 2211 NULL); 2212 if (err) { 2213 dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n", 2214 vsi->vsi_num, err); 2215 goto done; 2216 } 2217 2218 /* If PHY enable link is configured and configuration has not changed, 2219 * there's nothing to do 2220 */ 2221 if (pcaps->caps & ICE_AQC_PHY_EN_LINK && 2222 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg)) 2223 goto done; 2224 2225 /* Use PHY topology as baseline for configuration */ 2226 memset(pcaps, 0, sizeof(*pcaps)); 2227 if (ice_fw_supports_report_dflt_cfg(pi->hw)) 2228 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, 2229 pcaps, NULL); 2230 else 2231 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 2232 pcaps, NULL); 2233 if (err) { 2234 dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n", 2235 vsi->vsi_num, err); 2236 goto done; 2237 } 2238 2239 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); 2240 if (!cfg) { 2241 err = -ENOMEM; 2242 goto done; 2243 } 2244 2245 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg); 2246 2247 /* Speed - If default override pending, use curr_user_phy_cfg set in 2248 * ice_init_phy_user_cfg_ldo. 
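 * (i.e. the values written by ice_init_phy_cfg_dflt_override(), which
 * is what raises ICE_LINK_DEFAULT_OVERRIDE_PENDING in the first place)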
2249 */ 2250 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, 2251 vsi->back->state)) { 2252 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low; 2253 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high; 2254 } else { 2255 u64 phy_low = 0, phy_high = 0; 2256 2257 ice_update_phy_type(&phy_low, &phy_high, 2258 pi->phy.curr_user_speed_req); 2259 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); 2260 cfg->phy_type_high = pcaps->phy_type_high & 2261 cpu_to_le64(phy_high); 2262 } 2263 2264 /* Can't provide what was requested; use PHY capabilities */ 2265 if (!cfg->phy_type_low && !cfg->phy_type_high) { 2266 cfg->phy_type_low = pcaps->phy_type_low; 2267 cfg->phy_type_high = pcaps->phy_type_high; 2268 } 2269 2270 /* FEC */ 2271 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req); 2272 2273 /* Can't provide what was requested; use PHY capabilities */ 2274 if (cfg->link_fec_opt != 2275 (cfg->link_fec_opt & pcaps->link_fec_options)) { 2276 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 2277 cfg->link_fec_opt = pcaps->link_fec_options; 2278 } 2279 2280 /* Flow Control - always supported; no need to check against 2281 * capabilities 2282 */ 2283 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req); 2284 2285 /* Enable link and link update */ 2286 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; 2287 2288 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); 2289 if (err) 2290 dev_err(dev, "Failed to set phy config, VSI %d error %d\n", 2291 vsi->vsi_num, err); 2292 2293 kfree(cfg); 2294 done: 2295 kfree(pcaps); 2296 return err; 2297 } 2298 2299 /** 2300 * ice_check_media_subtask - Check for media 2301 * @pf: pointer to PF struct 2302 * 2303 * If media is available, initialize the PHY user configuration if it has 2304 * not been done yet, and configure the PHY if the interface is up. 2305 */ 2306 static void ice_check_media_subtask(struct ice_pf *pf) 2307 { 2308 struct ice_port_info *pi; 2309 struct ice_vsi *vsi; 2310 int err; 2311 2312 /* No need to check for media if it's already present */ 2313 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) 2314 return; 2315 2316 vsi = ice_get_main_vsi(pf); 2317 if (!vsi) 2318 return; 2319 2320 /* Refresh link info and check if media is present */ 2321 pi = vsi->port_info; 2322 err = ice_update_link_info(pi); 2323 if (err) 2324 return; 2325 2326 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); 2327 2328 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 2329 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) 2330 ice_init_phy_user_cfg(pi); 2331 2332 /* PHY settings are reset on media insertion, reconfigure 2333 * PHY to preserve settings.
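 * The exception is an interface that is down while link-down-on-close
 * is enabled: the early return below skips the reconfiguration so the
 * link stays down, and ICE_FLAG_NO_MEDIA remains set for a later pass.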
2334 */ 2335 if (test_bit(ICE_VSI_DOWN, vsi->state) && 2336 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) 2337 return; 2338 2339 err = ice_configure_phy(vsi); 2340 if (!err) 2341 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); 2342 2343 /* A Link Status Event will be generated; the event handler 2344 * will complete bringing the interface up 2345 */ 2346 } 2347 } 2348 2349 static void ice_service_task_recovery_mode(struct work_struct *work) 2350 { 2351 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); 2352 2353 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); 2354 ice_clean_adminq_subtask(pf); 2355 2356 ice_service_task_complete(pf); 2357 2358 mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100)); 2359 } 2360 2361 /** 2362 * ice_service_task - manage and run subtasks 2363 * @work: pointer to work_struct contained by the PF struct 2364 */ 2365 static void ice_service_task(struct work_struct *work) 2366 { 2367 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); 2368 unsigned long start_time = jiffies; 2369 2370 if (pf->health_reporters.tx_hang_buf.tx_ring) { 2371 ice_report_tx_hang(pf); 2372 pf->health_reporters.tx_hang_buf.tx_ring = NULL; 2373 } 2374 2375 ice_reset_subtask(pf); 2376 2377 /* bail if a reset/recovery cycle is pending or rebuild failed */ 2378 if (ice_is_reset_in_progress(pf->state) || 2379 test_bit(ICE_SUSPENDED, pf->state) || 2380 test_bit(ICE_NEEDS_RESTART, pf->state)) { 2381 ice_service_task_complete(pf); 2382 return; 2383 } 2384 2385 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) { 2386 struct iidc_rdma_event *event; 2387 2388 event = kzalloc(sizeof(*event), GFP_KERNEL); 2389 if (event) { 2390 set_bit(IIDC_RDMA_EVENT_CRIT_ERR, event->type); 2391 /* report the entire OICR value to AUX driver */ 2392 swap(event->reg, pf->oicr_err_reg); 2393 ice_send_event_to_aux(pf, event); 2394 kfree(event); 2395 } 2396 } 2397 2398 /* unplug aux dev per request, if an unplug request came in 2399 * while processing a plug request, this will handle it 2400 */ 2401 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags)) 2402 ice_unplug_aux_dev(pf); 2403 2404 /* Plug aux device per request */ 2405 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) 2406 ice_plug_aux_dev(pf); 2407 2408 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { 2409 struct iidc_rdma_event *event; 2410 2411 event = kzalloc(sizeof(*event), GFP_KERNEL); 2412 if (event) { 2413 set_bit(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE, event->type); 2414 ice_send_event_to_aux(pf, event); 2415 kfree(event); 2416 } 2417 } 2418 2419 ice_clean_adminq_subtask(pf); 2420 ice_check_media_subtask(pf); 2421 ice_check_for_hang_subtask(pf); 2422 ice_sync_fltr_subtask(pf); 2423 ice_handle_mdd_event(pf); 2424 ice_watchdog_subtask(pf); 2425 2426 if (ice_is_safe_mode(pf)) { 2427 ice_service_task_complete(pf); 2428 return; 2429 } 2430 2431 ice_process_vflr_event(pf); 2432 ice_clean_mailboxq_subtask(pf); 2433 ice_clean_sbq_subtask(pf); 2434 ice_sync_arfs_fltrs(pf); 2435 ice_flush_fdir_ctx(pf); 2436 2437 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */ 2438 ice_service_task_complete(pf); 2439 2440 /* If the tasks have taken longer than one service timer period 2441 * or there is more work to be done, reset the service timer to 2442 * schedule the service task now. 
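 *
 * For example (hypothetical numbers): with serv_tmr_period equivalent
 * to 100 ms and a pass that spent 150 ms in the subtasks above,
 * time_after(jiffies, start_time + pf->serv_tmr_period) is true and
 * mod_timer() fires the service task again immediately instead of
 * waiting out a full period.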
2443 */ 2444 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || 2445 test_bit(ICE_MDD_EVENT_PENDING, pf->state) || 2446 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || 2447 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || 2448 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || 2449 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || 2450 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) 2451 mod_timer(&pf->serv_tmr, jiffies); 2452 } 2453 2454 /** 2455 * ice_set_ctrlq_len - helper function to set controlq length 2456 * @hw: pointer to the HW instance 2457 */ 2458 static void ice_set_ctrlq_len(struct ice_hw *hw) 2459 { 2460 hw->adminq.num_rq_entries = ICE_AQ_LEN; 2461 hw->adminq.num_sq_entries = ICE_AQ_LEN; 2462 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; 2463 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; 2464 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; 2465 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; 2466 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2467 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2468 hw->sbq.num_rq_entries = ICE_SBQ_LEN; 2469 hw->sbq.num_sq_entries = ICE_SBQ_LEN; 2470 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2471 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2472 } 2473 2474 /** 2475 * ice_schedule_reset - schedule a reset 2476 * @pf: board private structure 2477 * @reset: reset being requested 2478 */ 2479 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) 2480 { 2481 struct device *dev = ice_pf_to_dev(pf); 2482 2483 /* bail out if earlier reset has failed */ 2484 if (test_bit(ICE_RESET_FAILED, pf->state)) { 2485 dev_dbg(dev, "earlier reset has failed\n"); 2486 return -EIO; 2487 } 2488 /* bail if reset/recovery already in progress */ 2489 if (ice_is_reset_in_progress(pf->state)) { 2490 dev_dbg(dev, "Reset already in progress\n"); 2491 return -EBUSY; 2492 } 2493 2494 switch (reset) { 2495 case ICE_RESET_PFR: 2496 set_bit(ICE_PFR_REQ, pf->state); 2497 break; 2498 case ICE_RESET_CORER: 2499 set_bit(ICE_CORER_REQ, pf->state); 2500 break; 2501 case ICE_RESET_GLOBR: 2502 set_bit(ICE_GLOBR_REQ, pf->state); 2503 break; 2504 default: 2505 return -EINVAL; 2506 } 2507 2508 ice_service_task_schedule(pf); 2509 return 0; 2510 } 2511 2512 /** 2513 * ice_vsi_ena_irq - Enable IRQ for the given VSI 2514 * @vsi: the VSI being configured 2515 */ 2516 static int ice_vsi_ena_irq(struct ice_vsi *vsi) 2517 { 2518 struct ice_hw *hw = &vsi->back->hw; 2519 int i; 2520 2521 ice_for_each_q_vector(vsi, i) 2522 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); 2523 2524 ice_flush(hw); 2525 return 0; 2526 } 2527 2528 /** 2529 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI 2530 * @vsi: the VSI being configured 2531 * @basename: name for the vector 2532 */ 2533 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) 2534 { 2535 int q_vectors = vsi->num_q_vectors; 2536 struct ice_pf *pf = vsi->back; 2537 struct device *dev; 2538 int rx_int_idx = 0; 2539 int tx_int_idx = 0; 2540 int vector, err; 2541 int irq_num; 2542 2543 dev = ice_pf_to_dev(pf); 2544 for (vector = 0; vector < q_vectors; vector++) { 2545 struct ice_q_vector *q_vector = vsi->q_vectors[vector]; 2546 2547 irq_num = q_vector->irq.virq; 2548 2549 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) { 2550 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2551 "%s-%s-%d", basename, "TxRx", rx_int_idx++); 2552 tx_int_idx++; 2553 } else if (q_vector->rx.rx_ring) { 2554 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2555 "%s-%s-%d", basename, "rx", rx_int_idx++); 2556 } 
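		/* e.g. with a hypothetical basename "eth0", a combined
		 * vector is named "eth0-TxRx-3", an Rx-only vector
		 * "eth0-rx-0" and a Tx-only vector "eth0-tx-0"
		 */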
else if (q_vector->tx.tx_ring) { 2557 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 2558 "%s-%s-%d", basename, "tx", tx_int_idx++); 2559 } else { 2560 /* skip this unused q_vector */ 2561 continue; 2562 } 2563 if (vsi->type == ICE_VSI_CTRL && vsi->vf) 2564 err = devm_request_irq(dev, irq_num, vsi->irq_handler, 2565 IRQF_SHARED, q_vector->name, 2566 q_vector); 2567 else 2568 err = devm_request_irq(dev, irq_num, vsi->irq_handler, 2569 0, q_vector->name, q_vector); 2570 if (err) { 2571 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", 2572 err); 2573 goto free_q_irqs; 2574 } 2575 } 2576 2577 err = ice_set_cpu_rx_rmap(vsi); 2578 if (err) { 2579 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n", 2580 vsi->vsi_num, ERR_PTR(err)); 2581 goto free_q_irqs; 2582 } 2583 2584 vsi->irqs_ready = true; 2585 return 0; 2586 2587 free_q_irqs: 2588 while (vector--) { 2589 irq_num = vsi->q_vectors[vector]->irq.virq; 2590 devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); 2591 } 2592 return err; 2593 } 2594 2595 /** 2596 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP 2597 * @vsi: VSI to setup Tx rings used by XDP 2598 * 2599 * Return 0 on success and negative value on error 2600 */ 2601 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) 2602 { 2603 struct device *dev = ice_pf_to_dev(vsi->back); 2604 struct ice_tx_desc *tx_desc; 2605 int i, j; 2606 2607 ice_for_each_xdp_txq(vsi, i) { 2608 u16 xdp_q_idx = vsi->alloc_txq + i; 2609 struct ice_ring_stats *ring_stats; 2610 struct ice_tx_ring *xdp_ring; 2611 2612 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); 2613 if (!xdp_ring) 2614 goto free_xdp_rings; 2615 2616 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); 2617 if (!ring_stats) { 2618 ice_free_tx_ring(xdp_ring); 2619 goto free_xdp_rings; 2620 } 2621 2622 xdp_ring->ring_stats = ring_stats; 2623 xdp_ring->q_index = xdp_q_idx; 2624 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; 2625 xdp_ring->vsi = vsi; 2626 xdp_ring->netdev = NULL; 2627 xdp_ring->dev = dev; 2628 xdp_ring->count = vsi->num_tx_desc; 2629 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); 2630 if (ice_setup_tx_ring(xdp_ring)) 2631 goto free_xdp_rings; 2632 ice_set_ring_xdp(xdp_ring); 2633 spin_lock_init(&xdp_ring->tx_lock); 2634 for (j = 0; j < xdp_ring->count; j++) { 2635 tx_desc = ICE_TX_DESC(xdp_ring, j); 2636 tx_desc->cmd_type_offset_bsz = 0; 2637 } 2638 } 2639 2640 return 0; 2641 2642 free_xdp_rings: 2643 for (; i >= 0; i--) { 2644 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) { 2645 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); 2646 vsi->xdp_rings[i]->ring_stats = NULL; 2647 ice_free_tx_ring(vsi->xdp_rings[i]); 2648 } 2649 } 2650 return -ENOMEM; 2651 } 2652 2653 /** 2654 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI 2655 * @vsi: VSI to set the bpf prog on 2656 * @prog: the bpf prog pointer 2657 */ 2658 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) 2659 { 2660 struct bpf_prog *old_prog; 2661 int i; 2662 2663 old_prog = xchg(&vsi->xdp_prog, prog); 2664 ice_for_each_rxq(vsi, i) 2665 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); 2666 2667 if (old_prog) 2668 bpf_prog_put(old_prog); 2669 } 2670 2671 static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid) 2672 { 2673 struct ice_q_vector *q_vector; 2674 struct ice_tx_ring *ring; 2675 2676 if (static_key_enabled(&ice_xdp_locking_key)) 2677 return vsi->xdp_rings[qid % vsi->num_xdp_txq]; 2678 2679 q_vector = vsi->rx_rings[qid]->q_vector; 2680 
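	/* in the per-queue (non-locking) case, the Rx ring shares its
	 * vector with the XDP Tx ring paired to it, so scanning the
	 * vector's Tx list below is sufficient to find that ring
	 */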
ice_for_each_tx_ring(ring, q_vector->tx) 2681 if (ice_ring_is_xdp(ring)) 2682 return ring; 2683 2684 return NULL; 2685 } 2686 2687 /** 2688 * ice_map_xdp_rings - Map XDP rings to interrupt vectors 2689 * @vsi: the VSI with XDP rings being configured 2690 * 2691 * Map XDP rings to interrupt vectors and perform the configuration steps 2692 * dependent on the mapping. 2693 */ 2694 void ice_map_xdp_rings(struct ice_vsi *vsi) 2695 { 2696 int xdp_rings_rem = vsi->num_xdp_txq; 2697 int v_idx, q_idx; 2698 2699 /* follow the logic from ice_vsi_map_rings_to_vectors */ 2700 ice_for_each_q_vector(vsi, v_idx) { 2701 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2702 int xdp_rings_per_v, q_id, q_base; 2703 2704 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, 2705 vsi->num_q_vectors - v_idx); 2706 q_base = vsi->num_xdp_txq - xdp_rings_rem; 2707 2708 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { 2709 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; 2710 2711 xdp_ring->q_vector = q_vector; 2712 xdp_ring->next = q_vector->tx.tx_ring; 2713 q_vector->tx.tx_ring = xdp_ring; 2714 } 2715 xdp_rings_rem -= xdp_rings_per_v; 2716 } 2717 2718 ice_for_each_rxq(vsi, q_idx) { 2719 vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi, 2720 q_idx); 2721 ice_tx_xsk_pool(vsi, q_idx); 2722 } 2723 } 2724 2725 /** 2726 * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors 2727 * @vsi: the VSI with XDP rings being unmapped 2728 */ 2729 static void ice_unmap_xdp_rings(struct ice_vsi *vsi) 2730 { 2731 int v_idx; 2732 2733 ice_for_each_q_vector(vsi, v_idx) { 2734 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; 2735 struct ice_tx_ring *ring; 2736 2737 ice_for_each_tx_ring(ring, q_vector->tx) 2738 if (!ring->tx_buf || !ice_ring_is_xdp(ring)) 2739 break; 2740 2741 /* restore the value of last node prior to XDP setup */ 2742 q_vector->tx.tx_ring = ring; 2743 } 2744 } 2745 2746 /** 2747 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP 2748 * @vsi: VSI to bring up Tx rings used by XDP 2749 * @prog: bpf program that will be assigned to VSI 2750 * @cfg_type: create from scratch or restore the existing configuration 2751 * 2752 * Return 0 on success and negative value on error 2753 */ 2754 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, 2755 enum ice_xdp_cfg cfg_type) 2756 { 2757 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2758 struct ice_pf *pf = vsi->back; 2759 struct ice_qs_cfg xdp_qs_cfg = { 2760 .qs_mutex = &pf->avail_q_mutex, 2761 .pf_map = pf->avail_txqs, 2762 .pf_map_size = pf->max_pf_txqs, 2763 .q_count = vsi->num_xdp_txq, 2764 .scatter_count = ICE_MAX_SCATTER_TXQS, 2765 .vsi_map = vsi->txq_map, 2766 .vsi_map_offset = vsi->alloc_txq, 2767 .mapping_mode = ICE_VSI_MAP_CONTIG 2768 }; 2769 struct device *dev; 2770 int status, i; 2771 2772 dev = ice_pf_to_dev(pf); 2773 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, 2774 sizeof(*vsi->xdp_rings), GFP_KERNEL); 2775 if (!vsi->xdp_rings) 2776 return -ENOMEM; 2777 2778 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; 2779 if (__ice_vsi_get_qs(&xdp_qs_cfg)) 2780 goto err_map_xdp; 2781 2782 if (static_key_enabled(&ice_xdp_locking_key)) 2783 netdev_warn(vsi->netdev, 2784 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n"); 2785 2786 if (ice_xdp_alloc_setup_rings(vsi)) 2787 goto clear_xdp_rings; 2788 2789 /* omit the scheduler update if in reset path; XDP queues will be 2790 * taken into account at the end of ice_vsi_rebuild, where 2791 * 
ice_cfg_vsi_lan is being called 2792 */ 2793 if (cfg_type == ICE_XDP_CFG_PART) 2794 return 0; 2795 2796 ice_map_xdp_rings(vsi); 2797 2798 /* tell the Tx scheduler that right now we have 2799 * additional queues 2800 */ 2801 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2802 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; 2803 2804 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2805 max_txqs); 2806 if (status) { 2807 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n", 2808 status); 2809 goto unmap_xdp_rings; 2810 } 2811 2812 /* assign the prog only when it's not already present on VSI; 2813 * this flow is a subject of both ethtool -L and ndo_bpf flows; 2814 * VSI rebuild that happens under ethtool -L can expose us to 2815 * the bpf_prog refcount issues as we would be swapping same 2816 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put 2817 * on it as it would be treated as an 'old_prog'; for ndo_bpf 2818 * this is not harmful as dev_xdp_install bumps the refcount 2819 * before calling the op exposed by the driver; 2820 */ 2821 if (!ice_is_xdp_ena_vsi(vsi)) 2822 ice_vsi_assign_bpf_prog(vsi, prog); 2823 2824 return 0; 2825 unmap_xdp_rings: 2826 ice_unmap_xdp_rings(vsi); 2827 clear_xdp_rings: 2828 ice_for_each_xdp_txq(vsi, i) 2829 if (vsi->xdp_rings[i]) { 2830 kfree_rcu(vsi->xdp_rings[i], rcu); 2831 vsi->xdp_rings[i] = NULL; 2832 } 2833 2834 err_map_xdp: 2835 mutex_lock(&pf->avail_q_mutex); 2836 ice_for_each_xdp_txq(vsi, i) { 2837 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); 2838 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; 2839 } 2840 mutex_unlock(&pf->avail_q_mutex); 2841 2842 devm_kfree(dev, vsi->xdp_rings); 2843 vsi->xdp_rings = NULL; 2844 2845 return -ENOMEM; 2846 } 2847 2848 /** 2849 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings 2850 * @vsi: VSI to remove XDP rings 2851 * @cfg_type: disable XDP permanently or allow it to be restored later 2852 * 2853 * Detach XDP rings from irq vectors, clean up the PF bitmap and free 2854 * resources 2855 */ 2856 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type) 2857 { 2858 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2859 struct ice_pf *pf = vsi->back; 2860 int i; 2861 2862 /* q_vectors are freed in reset path so there's no point in detaching 2863 * rings 2864 */ 2865 if (cfg_type == ICE_XDP_CFG_PART) 2866 goto free_qmap; 2867 2868 ice_unmap_xdp_rings(vsi); 2869 2870 free_qmap: 2871 mutex_lock(&pf->avail_q_mutex); 2872 ice_for_each_xdp_txq(vsi, i) { 2873 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); 2874 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; 2875 } 2876 mutex_unlock(&pf->avail_q_mutex); 2877 2878 ice_for_each_xdp_txq(vsi, i) 2879 if (vsi->xdp_rings[i]) { 2880 if (vsi->xdp_rings[i]->desc) { 2881 synchronize_rcu(); 2882 ice_free_tx_ring(vsi->xdp_rings[i]); 2883 } 2884 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); 2885 vsi->xdp_rings[i]->ring_stats = NULL; 2886 kfree_rcu(vsi->xdp_rings[i], rcu); 2887 vsi->xdp_rings[i] = NULL; 2888 } 2889 2890 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); 2891 vsi->xdp_rings = NULL; 2892 2893 if (static_key_enabled(&ice_xdp_locking_key)) 2894 static_branch_dec(&ice_xdp_locking_key); 2895 2896 if (cfg_type == ICE_XDP_CFG_PART) 2897 return 0; 2898 2899 ice_vsi_assign_bpf_prog(vsi, NULL); 2900 2901 /* notify Tx scheduler that we destroyed XDP queues and bring 2902 * back the old number of child nodes 2903 */ 2904 for (i = 0; i < vsi->tc_cfg.numtc; i++) 2905 max_txqs[i] 
= vsi->num_txq; 2906 2907 /* change number of XDP Tx queues to 0 */ 2908 vsi->num_xdp_txq = 0; 2909 2910 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2911 max_txqs); 2912 } 2913 2914 /** 2915 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI 2916 * @vsi: VSI to schedule napi on 2917 */ 2918 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi) 2919 { 2920 int i; 2921 2922 ice_for_each_rxq(vsi, i) { 2923 struct ice_rx_ring *rx_ring = vsi->rx_rings[i]; 2924 2925 if (READ_ONCE(rx_ring->xsk_pool)) 2926 napi_schedule(&rx_ring->q_vector->napi); 2927 } 2928 } 2929 2930 /** 2931 * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have 2932 * @vsi: VSI to determine the count of XDP Tx qs 2933 * 2934 * returns 0 if Tx qs count is higher than at least half of CPU count, 2935 * -ENOMEM otherwise 2936 */ 2937 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi) 2938 { 2939 u16 avail = ice_get_avail_txq_count(vsi->back); 2940 u16 cpus = num_possible_cpus(); 2941 2942 if (avail < cpus / 2) 2943 return -ENOMEM; 2944 2945 if (vsi->type == ICE_VSI_SF) 2946 avail = vsi->alloc_txq; 2947 2948 vsi->num_xdp_txq = min_t(u16, avail, cpus); 2949 2950 if (vsi->num_xdp_txq < cpus) 2951 static_branch_inc(&ice_xdp_locking_key); 2952 2953 return 0; 2954 } 2955 2956 /** 2957 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP 2958 * @vsi: Pointer to VSI structure 2959 */ 2960 static int ice_max_xdp_frame_size(struct ice_vsi *vsi) 2961 { 2962 return ICE_RXBUF_3072; 2963 } 2964 2965 /** 2966 * ice_xdp_setup_prog - Add or remove XDP eBPF program 2967 * @vsi: VSI to setup XDP for 2968 * @prog: XDP program 2969 * @extack: netlink extended ack 2970 */ 2971 static int 2972 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, 2973 struct netlink_ext_ack *extack) 2974 { 2975 unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; 2976 int ret = 0, xdp_ring_err = 0; 2977 bool if_running; 2978 2979 if (prog && !prog->aux->xdp_has_frags) { 2980 if (frame_size > ice_max_xdp_frame_size(vsi)) { 2981 NL_SET_ERR_MSG_MOD(extack, 2982 "MTU is too large for linear frames and XDP prog does not support frags"); 2983 return -EOPNOTSUPP; 2984 } 2985 } 2986 2987 /* hot swap progs and avoid toggling link */ 2988 if (ice_is_xdp_ena_vsi(vsi) == !!prog || 2989 test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) { 2990 ice_vsi_assign_bpf_prog(vsi, prog); 2991 return 0; 2992 } 2993 2994 if_running = netif_running(vsi->netdev) && 2995 !test_and_set_bit(ICE_VSI_DOWN, vsi->state); 2996 2997 /* need to stop netdev while setting up the program for Rx rings */ 2998 if (if_running) { 2999 ret = ice_down(vsi); 3000 if (ret) { 3001 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); 3002 return ret; 3003 } 3004 } 3005 3006 if (!ice_is_xdp_ena_vsi(vsi) && prog) { 3007 xdp_ring_err = ice_vsi_determine_xdp_res(vsi); 3008 if (xdp_ring_err) { 3009 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP"); 3010 goto resume_if; 3011 } else { 3012 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog, 3013 ICE_XDP_CFG_FULL); 3014 if (xdp_ring_err) { 3015 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); 3016 goto resume_if; 3017 } 3018 } 3019 xdp_features_set_redirect_target(vsi->netdev, true); 3020 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { 3021 xdp_features_clear_redirect_target(vsi->netdev); 3022 xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL); 3023 if (xdp_ring_err) 3024 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources 
failed"); 3025 } 3026 3027 resume_if: 3028 if (if_running) 3029 ret = ice_up(vsi); 3030 3031 if (!ret && prog) 3032 ice_vsi_rx_napi_schedule(vsi); 3033 3034 return (ret || xdp_ring_err) ? -ENOMEM : 0; 3035 } 3036 3037 /** 3038 * ice_xdp_safe_mode - XDP handler for safe mode 3039 * @dev: netdevice 3040 * @xdp: XDP command 3041 */ 3042 static int ice_xdp_safe_mode(struct net_device __always_unused *dev, 3043 struct netdev_bpf *xdp) 3044 { 3045 NL_SET_ERR_MSG_MOD(xdp->extack, 3046 "Please provide working DDP firmware package in order to use XDP\n" 3047 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst"); 3048 return -EOPNOTSUPP; 3049 } 3050 3051 /** 3052 * ice_xdp - implements XDP handler 3053 * @dev: netdevice 3054 * @xdp: XDP command 3055 */ 3056 int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) 3057 { 3058 struct ice_netdev_priv *np = netdev_priv(dev); 3059 struct ice_vsi *vsi = np->vsi; 3060 int ret; 3061 3062 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) { 3063 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI"); 3064 return -EINVAL; 3065 } 3066 3067 mutex_lock(&vsi->xdp_state_lock); 3068 3069 switch (xdp->command) { 3070 case XDP_SETUP_PROG: 3071 ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); 3072 break; 3073 case XDP_SETUP_XSK_POOL: 3074 ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id); 3075 break; 3076 default: 3077 ret = -EINVAL; 3078 } 3079 3080 mutex_unlock(&vsi->xdp_state_lock); 3081 return ret; 3082 } 3083 3084 /** 3085 * ice_ena_misc_vector - enable the non-queue interrupts 3086 * @pf: board private structure 3087 */ 3088 static void ice_ena_misc_vector(struct ice_pf *pf) 3089 { 3090 struct ice_hw *hw = &pf->hw; 3091 u32 pf_intr_start_offset; 3092 u32 val; 3093 3094 /* Disable anti-spoof detection interrupt to prevent spurious event 3095 * interrupts during a function reset. Anti-spoof functionally is 3096 * still supported. 
3097 */ 3098 val = rd32(hw, GL_MDCK_TX_TDPU); 3099 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M; 3100 wr32(hw, GL_MDCK_TX_TDPU, val); 3101 3102 /* clear things first */ 3103 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ 3104 rd32(hw, PFINT_OICR); /* read to clear */ 3105 3106 val = (PFINT_OICR_ECC_ERR_M | 3107 PFINT_OICR_MAL_DETECT_M | 3108 PFINT_OICR_GRST_M | 3109 PFINT_OICR_PCI_EXCEPTION_M | 3110 PFINT_OICR_VFLR_M | 3111 PFINT_OICR_HMC_ERR_M | 3112 PFINT_OICR_PE_PUSH_M | 3113 PFINT_OICR_PE_CRITERR_M); 3114 3115 wr32(hw, PFINT_OICR_ENA, val); 3116 3117 /* SW_ITR_IDX = 0, but don't change INTENA */ 3118 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), 3119 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); 3120 3121 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) 3122 return; 3123 pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST; 3124 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), 3125 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); 3126 } 3127 3128 /** 3129 * ice_ll_ts_intr - ll_ts interrupt handler 3130 * @irq: interrupt number 3131 * @data: pointer to a q_vector 3132 */ 3133 static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data) 3134 { 3135 struct ice_pf *pf = data; 3136 u32 pf_intr_start_offset; 3137 struct ice_ptp_tx *tx; 3138 unsigned long flags; 3139 struct ice_hw *hw; 3140 u32 val; 3141 u8 idx; 3142 3143 hw = &pf->hw; 3144 tx = &pf->ptp.port.tx; 3145 spin_lock_irqsave(&tx->lock, flags); 3146 if (tx->init) { 3147 ice_ptp_complete_tx_single_tstamp(tx); 3148 3149 idx = find_next_bit_wrap(tx->in_use, tx->len, 3150 tx->last_ll_ts_idx_read + 1); 3151 if (idx != tx->len) 3152 ice_ptp_req_tx_single_tstamp(tx, idx); 3153 } 3154 spin_unlock_irqrestore(&tx->lock, flags); 3155 3156 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 3157 (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); 3158 pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST; 3159 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), 3160 val); 3161 3162 return IRQ_HANDLED; 3163 } 3164 3165 /** 3166 * ice_misc_intr - misc interrupt handler 3167 * @irq: interrupt number 3168 * @data: pointer to a q_vector 3169 */ 3170 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) 3171 { 3172 struct ice_pf *pf = (struct ice_pf *)data; 3173 irqreturn_t ret = IRQ_HANDLED; 3174 struct ice_hw *hw = &pf->hw; 3175 struct device *dev; 3176 u32 oicr, ena_mask; 3177 3178 dev = ice_pf_to_dev(pf); 3179 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); 3180 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); 3181 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); 3182 3183 oicr = rd32(hw, PFINT_OICR); 3184 ena_mask = rd32(hw, PFINT_OICR_ENA); 3185 3186 if (oicr & PFINT_OICR_SWINT_M) { 3187 ena_mask &= ~PFINT_OICR_SWINT_M; 3188 pf->sw_int_count++; 3189 } 3190 3191 if (oicr & PFINT_OICR_MAL_DETECT_M) { 3192 ena_mask &= ~PFINT_OICR_MAL_DETECT_M; 3193 set_bit(ICE_MDD_EVENT_PENDING, pf->state); 3194 } 3195 if (oicr & PFINT_OICR_VFLR_M) { 3196 /* disable any further VFLR event notifications */ 3197 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { 3198 u32 reg = rd32(hw, PFINT_OICR_ENA); 3199 3200 reg &= ~PFINT_OICR_VFLR_M; 3201 wr32(hw, PFINT_OICR_ENA, reg); 3202 } else { 3203 ena_mask &= ~PFINT_OICR_VFLR_M; 3204 set_bit(ICE_VFLR_EVENT_PENDING, pf->state); 3205 } 3206 } 3207 3208 if (oicr & PFINT_OICR_GRST_M) { 3209 u32 reset; 3210 3211 /* we have a reset warning */ 3212 ena_mask &= ~PFINT_OICR_GRST_M; 3213 reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M, 
3214 rd32(hw, GLGEN_RSTAT)); 3215 3216 if (reset == ICE_RESET_CORER) 3217 pf->corer_count++; 3218 else if (reset == ICE_RESET_GLOBR) 3219 pf->globr_count++; 3220 else if (reset == ICE_RESET_EMPR) 3221 pf->empr_count++; 3222 else 3223 dev_dbg(dev, "Invalid reset type %d\n", reset); 3224 3225 /* If a reset cycle isn't already in progress, we set a bit in 3226 * pf->state so that the service task can start a reset/rebuild. 3227 */ 3228 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { 3229 if (reset == ICE_RESET_CORER) 3230 set_bit(ICE_CORER_RECV, pf->state); 3231 else if (reset == ICE_RESET_GLOBR) 3232 set_bit(ICE_GLOBR_RECV, pf->state); 3233 else 3234 set_bit(ICE_EMPR_RECV, pf->state); 3235 3236 /* There are couple of different bits at play here. 3237 * hw->reset_ongoing indicates whether the hardware is 3238 * in reset. This is set to true when a reset interrupt 3239 * is received and set back to false after the driver 3240 * has determined that the hardware is out of reset. 3241 * 3242 * ICE_RESET_OICR_RECV in pf->state indicates 3243 * that a post reset rebuild is required before the 3244 * driver is operational again. This is set above. 3245 * 3246 * As this is the start of the reset/rebuild cycle, set 3247 * both to indicate that. 3248 */ 3249 hw->reset_ongoing = true; 3250 } 3251 } 3252 3253 if (oicr & PFINT_OICR_TSYN_TX_M) { 3254 ena_mask &= ~PFINT_OICR_TSYN_TX_M; 3255 3256 ret = ice_ptp_ts_irq(pf); 3257 } 3258 3259 if (oicr & PFINT_OICR_TSYN_EVNT_M) { 3260 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 3261 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx)); 3262 3263 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M; 3264 3265 if (ice_pf_src_tmr_owned(pf)) { 3266 /* Save EVENTs from GLTSYN register */ 3267 pf->ptp.ext_ts_irq |= gltsyn_stat & 3268 (GLTSYN_STAT_EVENT0_M | 3269 GLTSYN_STAT_EVENT1_M | 3270 GLTSYN_STAT_EVENT2_M); 3271 3272 ice_ptp_extts_event(pf); 3273 } 3274 } 3275 3276 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M) 3277 if (oicr & ICE_AUX_CRIT_ERR) { 3278 pf->oicr_err_reg |= oicr; 3279 set_bit(ICE_AUX_ERR_PENDING, pf->state); 3280 ena_mask &= ~ICE_AUX_CRIT_ERR; 3281 } 3282 3283 /* Report any remaining unexpected interrupts */ 3284 oicr &= ena_mask; 3285 if (oicr) { 3286 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr); 3287 /* If a critical error is pending there is no choice but to 3288 * reset the device. 3289 */ 3290 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M | 3291 PFINT_OICR_ECC_ERR_M)) { 3292 set_bit(ICE_PFR_REQ, pf->state); 3293 } 3294 } 3295 ice_service_task_schedule(pf); 3296 if (ret == IRQ_HANDLED) 3297 ice_irq_dynamic_ena(hw, NULL, NULL); 3298 3299 return ret; 3300 } 3301 3302 /** 3303 * ice_misc_intr_thread_fn - misc interrupt thread function 3304 * @irq: interrupt number 3305 * @data: pointer to a q_vector 3306 */ 3307 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) 3308 { 3309 struct ice_pf *pf = data; 3310 struct ice_hw *hw; 3311 3312 hw = &pf->hw; 3313 3314 if (ice_is_reset_in_progress(pf->state)) 3315 goto skip_irq; 3316 3317 if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { 3318 /* Process outstanding Tx timestamps. If there is more work, 3319 * re-arm the interrupt to trigger again. 
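 * "Re-arm" here simply means writing the TSYN_TX cause back into
 * PFINT_OICR and flushing, so the MSI-X vector fires once more and
 * re-enters this thread for another pass.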
3320 */ 3321 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { 3322 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); 3323 ice_flush(hw); 3324 } 3325 } 3326 3327 skip_irq: 3328 ice_irq_dynamic_ena(hw, NULL, NULL); 3329 3330 return IRQ_HANDLED; 3331 } 3332 3333 /** 3334 * ice_dis_ctrlq_interrupts - disable control queue interrupts 3335 * @hw: pointer to HW structure 3336 */ 3337 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) 3338 { 3339 /* disable Admin queue Interrupt causes */ 3340 wr32(hw, PFINT_FW_CTL, 3341 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M); 3342 3343 /* disable Mailbox queue Interrupt causes */ 3344 wr32(hw, PFINT_MBX_CTL, 3345 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M); 3346 3347 wr32(hw, PFINT_SB_CTL, 3348 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M); 3349 3350 /* disable Control queue Interrupt causes */ 3351 wr32(hw, PFINT_OICR_CTL, 3352 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M); 3353 3354 ice_flush(hw); 3355 } 3356 3357 /** 3358 * ice_free_irq_msix_ll_ts- Unroll ll_ts vector setup 3359 * @pf: board private structure 3360 */ 3361 static void ice_free_irq_msix_ll_ts(struct ice_pf *pf) 3362 { 3363 int irq_num = pf->ll_ts_irq.virq; 3364 3365 synchronize_irq(irq_num); 3366 devm_free_irq(ice_pf_to_dev(pf), irq_num, pf); 3367 3368 ice_free_irq(pf, pf->ll_ts_irq); 3369 } 3370 3371 /** 3372 * ice_free_irq_msix_misc - Unroll misc vector setup 3373 * @pf: board private structure 3374 */ 3375 static void ice_free_irq_msix_misc(struct ice_pf *pf) 3376 { 3377 int misc_irq_num = pf->oicr_irq.virq; 3378 struct ice_hw *hw = &pf->hw; 3379 3380 ice_dis_ctrlq_interrupts(hw); 3381 3382 /* disable OICR interrupt */ 3383 wr32(hw, PFINT_OICR_ENA, 0); 3384 ice_flush(hw); 3385 3386 synchronize_irq(misc_irq_num); 3387 devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf); 3388 3389 ice_free_irq(pf, pf->oicr_irq); 3390 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) 3391 ice_free_irq_msix_ll_ts(pf); 3392 } 3393 3394 /** 3395 * ice_ena_ctrlq_interrupts - enable control queue interrupts 3396 * @hw: pointer to HW structure 3397 * @reg_idx: HW vector index to associate the control queue interrupts with 3398 */ 3399 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) 3400 { 3401 u32 val; 3402 3403 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) | 3404 PFINT_OICR_CTL_CAUSE_ENA_M); 3405 wr32(hw, PFINT_OICR_CTL, val); 3406 3407 /* enable Admin queue Interrupt causes */ 3408 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) | 3409 PFINT_FW_CTL_CAUSE_ENA_M); 3410 wr32(hw, PFINT_FW_CTL, val); 3411 3412 /* enable Mailbox queue Interrupt causes */ 3413 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) | 3414 PFINT_MBX_CTL_CAUSE_ENA_M); 3415 wr32(hw, PFINT_MBX_CTL, val); 3416 3417 if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) { 3418 /* enable Sideband queue Interrupt causes */ 3419 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | 3420 PFINT_SB_CTL_CAUSE_ENA_M); 3421 wr32(hw, PFINT_SB_CTL, val); 3422 } 3423 3424 ice_flush(hw); 3425 } 3426 3427 /** 3428 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events 3429 * @pf: board private structure 3430 * 3431 * This sets up the handler for MSIX 0, which is used to manage the 3432 * non-queue interrupts, e.g. AdminQ and errors. This is not used 3433 * when in MSI or Legacy interrupt mode. 
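 *
 * When the device additionally reports ts_ll_int_read, a second
 * vector (ll_ts) is reserved and requested here as well, wired up to
 * ice_ll_ts_intr().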
3434 */ 3435 static int ice_req_irq_msix_misc(struct ice_pf *pf) 3436 { 3437 struct device *dev = ice_pf_to_dev(pf); 3438 struct ice_hw *hw = &pf->hw; 3439 u32 pf_intr_start_offset; 3440 struct msi_map irq; 3441 int err = 0; 3442 3443 if (!pf->int_name[0]) 3444 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", 3445 dev_driver_string(dev), dev_name(dev)); 3446 3447 if (!pf->int_name_ll_ts[0]) 3448 snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1, 3449 "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev)); 3450 /* Do not request IRQ but do enable OICR interrupt since settings are 3451 * lost during reset. Note that this function is called only during 3452 * rebuild path and not while reset is in progress. 3453 */ 3454 if (ice_is_reset_in_progress(pf->state)) 3455 goto skip_req_irq; 3456 3457 /* reserve one vector in irq_tracker for misc interrupts */ 3458 irq = ice_alloc_irq(pf, false); 3459 if (irq.index < 0) 3460 return irq.index; 3461 3462 pf->oicr_irq = irq; 3463 err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr, 3464 ice_misc_intr_thread_fn, 0, 3465 pf->int_name, pf); 3466 if (err) { 3467 dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n", 3468 pf->int_name, err); 3469 ice_free_irq(pf, pf->oicr_irq); 3470 return err; 3471 } 3472 3473 /* reserve one vector in irq_tracker for ll_ts interrupt */ 3474 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) 3475 goto skip_req_irq; 3476 3477 irq = ice_alloc_irq(pf, false); 3478 if (irq.index < 0) 3479 return irq.index; 3480 3481 pf->ll_ts_irq = irq; 3482 err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0, 3483 pf->int_name_ll_ts, pf); 3484 if (err) { 3485 dev_err(dev, "devm_request_irq for %s failed: %d\n", 3486 pf->int_name_ll_ts, err); 3487 ice_free_irq(pf, pf->ll_ts_irq); 3488 return err; 3489 } 3490 3491 skip_req_irq: 3492 ice_ena_misc_vector(pf); 3493 3494 ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index); 3495 /* This enables LL TS interrupt */ 3496 pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST; 3497 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) 3498 wr32(hw, PFINT_SB_CTL, 3499 ((pf->ll_ts_irq.index + pf_intr_start_offset) & 3500 PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M); 3501 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index), 3502 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); 3503 3504 ice_flush(hw); 3505 ice_irq_dynamic_ena(hw, NULL, NULL); 3506 3507 return 0; 3508 } 3509 3510 /** 3511 * ice_set_ops - set netdev and ethtools ops for the given netdev 3512 * @vsi: the VSI associated with the new netdev 3513 */ 3514 static void ice_set_ops(struct ice_vsi *vsi) 3515 { 3516 struct net_device *netdev = vsi->netdev; 3517 struct ice_pf *pf = ice_netdev_to_pf(netdev); 3518 3519 if (ice_is_safe_mode(pf)) { 3520 netdev->netdev_ops = &ice_netdev_safe_mode_ops; 3521 ice_set_ethtool_safe_mode_ops(netdev); 3522 return; 3523 } 3524 3525 netdev->netdev_ops = &ice_netdev_ops; 3526 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; 3527 netdev->xdp_metadata_ops = &ice_xdp_md_ops; 3528 ice_set_ethtool_ops(netdev); 3529 3530 if (vsi->type != ICE_VSI_PF) 3531 return; 3532 3533 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 3534 NETDEV_XDP_ACT_XSK_ZEROCOPY | 3535 NETDEV_XDP_ACT_RX_SG; 3536 netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD; 3537 } 3538 3539 /** 3540 * ice_set_netdev_features - set features for the given netdev 3541 * @netdev: netdev instance 3542 */ 3543 void ice_set_netdev_features(struct net_device *netdev) 3544 { 3545 struct 
ice_pf *pf = ice_netdev_to_pf(netdev); 3546 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw); 3547 netdev_features_t csumo_features; 3548 netdev_features_t vlano_features; 3549 netdev_features_t dflt_features; 3550 netdev_features_t tso_features; 3551 3552 if (ice_is_safe_mode(pf)) { 3553 /* safe mode */ 3554 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; 3555 netdev->hw_features = netdev->features; 3556 return; 3557 } 3558 3559 dflt_features = NETIF_F_SG | 3560 NETIF_F_HIGHDMA | 3561 NETIF_F_NTUPLE | 3562 NETIF_F_RXHASH; 3563 3564 csumo_features = NETIF_F_RXCSUM | 3565 NETIF_F_IP_CSUM | 3566 NETIF_F_SCTP_CRC | 3567 NETIF_F_IPV6_CSUM; 3568 3569 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | 3570 NETIF_F_HW_VLAN_CTAG_TX | 3571 NETIF_F_HW_VLAN_CTAG_RX; 3572 3573 /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */ 3574 if (is_dvm_ena) 3575 vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER; 3576 3577 tso_features = NETIF_F_TSO | 3578 NETIF_F_TSO_ECN | 3579 NETIF_F_TSO6 | 3580 NETIF_F_GSO_GRE | 3581 NETIF_F_GSO_UDP_TUNNEL | 3582 NETIF_F_GSO_GRE_CSUM | 3583 NETIF_F_GSO_UDP_TUNNEL_CSUM | 3584 NETIF_F_GSO_PARTIAL | 3585 NETIF_F_GSO_IPXIP4 | 3586 NETIF_F_GSO_IPXIP6 | 3587 NETIF_F_GSO_UDP_L4; 3588 3589 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | 3590 NETIF_F_GSO_GRE_CSUM; 3591 /* set features that user can change */ 3592 netdev->hw_features = dflt_features | csumo_features | 3593 vlano_features | tso_features; 3594 3595 /* add support for HW_CSUM on packets with MPLS header */ 3596 netdev->mpls_features = NETIF_F_HW_CSUM | 3597 NETIF_F_TSO | 3598 NETIF_F_TSO6; 3599 3600 /* enable features */ 3601 netdev->features |= netdev->hw_features; 3602 3603 netdev->hw_features |= NETIF_F_HW_TC; 3604 netdev->hw_features |= NETIF_F_LOOPBACK; 3605 3606 /* encap and VLAN devices inherit default, csumo and tso features */ 3607 netdev->hw_enc_features |= dflt_features | csumo_features | 3608 tso_features; 3609 netdev->vlan_features |= dflt_features | csumo_features | 3610 tso_features; 3611 3612 /* advertise support but don't enable by default since only one type of 3613 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one 3614 * type turns on the other has to be turned off. This is enforced by the 3615 * ice_fix_features() ndo callback. 3616 */ 3617 if (is_dvm_ena) 3618 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | 3619 NETIF_F_HW_VLAN_STAG_TX; 3620 3621 /* Leave CRC / FCS stripping enabled by default, but allow the value to 3622 * be changed at runtime 3623 */ 3624 netdev->hw_features |= NETIF_F_RXFCS; 3625 3626 /* Allow core to manage IRQs affinity */ 3627 netif_set_affinity_auto(netdev); 3628 3629 /* Mutual exclusivity for TSO and GCS is enforced by the set features 3630 * ndo callback. 
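 * Accordingly, NETIF_F_HW_CSUM is only advertised when the ICE_F_GCS
 * feature is supported; see the check below.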
3631 */ 3632 if (ice_is_feature_supported(pf, ICE_F_GCS)) 3633 netdev->hw_features |= NETIF_F_HW_CSUM; 3634 3635 netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE); 3636 } 3637 3638 /** 3639 * ice_fill_rss_lut - Fill the RSS lookup table with default values 3640 * @lut: Lookup table 3641 * @rss_table_size: Lookup table size 3642 * @rss_size: Range of queue number for hashing 3643 */ 3644 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) 3645 { 3646 u16 i; 3647 3648 for (i = 0; i < rss_table_size; i++) 3649 lut[i] = i % rss_size; 3650 } 3651 3652 /** 3653 * ice_pf_vsi_setup - Set up a PF VSI 3654 * @pf: board private structure 3655 * @pi: pointer to the port_info instance 3656 * 3657 * Returns pointer to the successfully allocated VSI software struct 3658 * on success, otherwise returns NULL on failure. 3659 */ 3660 static struct ice_vsi * 3661 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3662 { 3663 struct ice_vsi_cfg_params params = {}; 3664 3665 params.type = ICE_VSI_PF; 3666 params.port_info = pi; 3667 params.flags = ICE_VSI_FLAG_INIT; 3668 3669 return ice_vsi_setup(pf, ¶ms); 3670 } 3671 3672 static struct ice_vsi * 3673 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, 3674 struct ice_channel *ch) 3675 { 3676 struct ice_vsi_cfg_params params = {}; 3677 3678 params.type = ICE_VSI_CHNL; 3679 params.port_info = pi; 3680 params.ch = ch; 3681 params.flags = ICE_VSI_FLAG_INIT; 3682 3683 return ice_vsi_setup(pf, ¶ms); 3684 } 3685 3686 /** 3687 * ice_ctrl_vsi_setup - Set up a control VSI 3688 * @pf: board private structure 3689 * @pi: pointer to the port_info instance 3690 * 3691 * Returns pointer to the successfully allocated VSI software struct 3692 * on success, otherwise returns NULL on failure. 3693 */ 3694 static struct ice_vsi * 3695 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3696 { 3697 struct ice_vsi_cfg_params params = {}; 3698 3699 params.type = ICE_VSI_CTRL; 3700 params.port_info = pi; 3701 params.flags = ICE_VSI_FLAG_INIT; 3702 3703 return ice_vsi_setup(pf, ¶ms); 3704 } 3705 3706 /** 3707 * ice_lb_vsi_setup - Set up a loopback VSI 3708 * @pf: board private structure 3709 * @pi: pointer to the port_info instance 3710 * 3711 * Returns pointer to the successfully allocated VSI software struct 3712 * on success, otherwise returns NULL on failure. 3713 */ 3714 struct ice_vsi * 3715 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) 3716 { 3717 struct ice_vsi_cfg_params params = {}; 3718 3719 params.type = ICE_VSI_LB; 3720 params.port_info = pi; 3721 params.flags = ICE_VSI_FLAG_INIT; 3722 3723 return ice_vsi_setup(pf, ¶ms); 3724 } 3725 3726 /** 3727 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload 3728 * @netdev: network interface to be adjusted 3729 * @proto: VLAN TPID 3730 * @vid: VLAN ID to be added 3731 * 3732 * net_device_ops implementation for adding VLAN IDs 3733 */ 3734 int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) 3735 { 3736 struct ice_netdev_priv *np = netdev_priv(netdev); 3737 struct ice_vsi_vlan_ops *vlan_ops; 3738 struct ice_vsi *vsi = np->vsi; 3739 struct ice_vlan vlan; 3740 int ret; 3741 3742 /* VLAN 0 is added by default during load/reset */ 3743 if (!vid) 3744 return 0; 3745 3746 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) 3747 usleep_range(1000, 2000); 3748 3749 /* Add multicast promisc rule for the VLAN ID to be added if 3750 * all-multicast is currently enabled. 
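 * ICE_MCAST_VLAN_PROMISC_BITS scopes that rule to this VLAN ID; the
 * VLAN-agnostic ICE_MCAST_PROMISC_BITS rule installed for VLAN 0 is
 * swapped for a VLAN-aware one further down once a non-zero VLAN
 * filter exists.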
3751 */
3752 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3753 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3754 ICE_MCAST_VLAN_PROMISC_BITS,
3755 vid);
3756 if (ret)
3757 goto finish;
3758 }
3759
3760 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3761
3762 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3763 * packets aren't pruned by the device's internal switch on Rx
3764 */
3765 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3766 ret = vlan_ops->add_vlan(vsi, &vlan);
3767 if (ret)
3768 goto finish;
3769
3770 /* If all-multicast is currently enabled and this VLAN ID is the only
3771 * one besides VLAN-0, we have to update the look-up type of the
3772 * multicast promisc rule for VLAN-0 from ICE_SW_LKUP_PROMISC to
3773 * ICE_SW_LKUP_PROMISC_VLAN.
3774 */
3775 if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3776 ice_vsi_num_non_zero_vlans(vsi) == 1) {
3777 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3778 ICE_MCAST_PROMISC_BITS, 0);
3779 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3780 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3781 }
3782
3783 finish:
3784 clear_bit(ICE_CFG_BUSY, vsi->state);
3785
3786 return ret;
3787 }
3788
3789 /**
3790 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3791 * @netdev: network interface to be adjusted
3792 * @proto: VLAN TPID
3793 * @vid: VLAN ID to be removed
3794 *
3795 * net_device_ops implementation for removing VLAN IDs
3796 */
3797 int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3798 {
3799 struct ice_netdev_priv *np = netdev_priv(netdev);
3800 struct ice_vsi_vlan_ops *vlan_ops;
3801 struct ice_vsi *vsi = np->vsi;
3802 struct ice_vlan vlan;
3803 int ret;
3804
3805 /* don't allow removal of VLAN 0 */
3806 if (!vid)
3807 return 0;
3808
3809 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3810 usleep_range(1000, 2000);
3811
3812 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3813 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3814 if (ret) {
3815 netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3816 vsi->vsi_num);
3817 vsi->current_netdev_flags |= IFF_ALLMULTI;
3818 }
3819
3820 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3821
3822 /* Make sure VLAN delete is successful before updating VLAN
3823 * information
3824 */
3825 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3826 ret = vlan_ops->del_vlan(vsi, &vlan);
3827 if (ret)
3828 goto finish;
3829
3830 /* Remove multicast promisc rule for the removed VLAN ID if
3831 * all-multicast is enabled.
3832 */
3833 if (vsi->current_netdev_flags & IFF_ALLMULTI)
3834 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3835 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3836
3837 if (!ice_vsi_has_non_zero_vlans(vsi)) {
3838 /* Update look-up type of multicast promisc rule for VLAN 0
3839 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3840 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
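* This is the inverse of the promotion done in ice_vlan_rx_add_vid() when
* the first VLAN besides VLAN-0 was added.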
3840 */
3841 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3842 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3843 ICE_MCAST_VLAN_PROMISC_BITS,
3844 0);
3845 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3846 ICE_MCAST_PROMISC_BITS, 0);
3847 }
3848 }
3849
3850 finish:
3851 clear_bit(ICE_CFG_BUSY, vsi->state);
3852
3853 return ret;
3854 }
3855
3856 /**
3857 * ice_rep_indr_tc_block_unbind - release an indirect block's private data
3858 * @cb_priv: indirection block private data
3859 */
3860 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3861 {
3862 struct ice_indr_block_priv *indr_priv = cb_priv;
3863
3864 list_del(&indr_priv->list);
3865 kfree(indr_priv);
3866 }
3867
3868 /**
3869 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3870 * @vsi: VSI struct which has the netdev
3871 */
3872 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3873 {
3874 struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3875
3876 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3877 ice_rep_indr_tc_block_unbind);
3878 }
3879
3880 /**
3881 * ice_tc_indir_block_register - Register TC indirect block notifications
3882 * @vsi: VSI struct which has the netdev
3883 *
3884 * Returns 0 on success, negative value on failure
3885 */
3886 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3887 {
3888 struct ice_netdev_priv *np;
3889
3890 if (!vsi || !vsi->netdev)
3891 return -EINVAL;
3892
3893 np = netdev_priv(vsi->netdev);
3894
3895 INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3896 return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3897 }
3898
3899 /**
3900 * ice_get_avail_q_count - Get count of available queues
3901 * @pf_qmap: bitmap to get the available queue count from
3902 * @lock: pointer to a mutex that protects access to pf_qmap
3903 * @size: size of the bitmap
3904 */
3905 static u16
3906 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3907 {
3908 unsigned long bit;
3909 u16 count = 0;
3910
3911 mutex_lock(lock);
3912 for_each_clear_bit(bit, pf_qmap, size)
3913 count++;
3914 mutex_unlock(lock);
3915
3916 return count;
3917 }
3918
3919 /**
3920 * ice_get_avail_txq_count - Get count of available Tx queues
3921 * @pf: pointer to an ice_pf instance
3922 */
3923 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3924 {
3925 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3926 pf->max_pf_txqs);
3927 }
3928
3929 /**
3930 * ice_get_avail_rxq_count - Get count of available Rx queues
3931 * @pf: pointer to an ice_pf instance
3932 */
3933 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3934 {
3935 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3936 pf->max_pf_rxqs);
3937 }
3938
3939 /**
3940 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3941 * @pf: board private structure to deinitialize
3942 */
3943 void ice_deinit_pf(struct ice_pf *pf)
3944 {
3945 /* note that we also unroll on ice_init_pf() failure here */
3946
3947 mutex_destroy(&pf->lag_mutex);
3948 mutex_destroy(&pf->adev_mutex);
3949 mutex_destroy(&pf->sw_mutex);
3950 mutex_destroy(&pf->tc_mutex);
3951 mutex_destroy(&pf->avail_q_mutex);
3952 mutex_destroy(&pf->vfs.table_lock);
3953
3954 if (pf->avail_txqs) {
3955 bitmap_free(pf->avail_txqs);
3956 pf->avail_txqs = NULL;
3957 }
3958
3959 if (pf->avail_rxqs) {
3960 bitmap_free(pf->avail_rxqs);
3961 pf->avail_rxqs = NULL;
3962 }
3963
3964 if (pf->txtime_txqs) {
3965 bitmap_free(pf->txtime_txqs);
3966 pf->txtime_txqs = NULL;
3967 }
3968
3969 if (pf->ptp.clock)
3970 ptp_clock_unregister(pf->ptp.clock);
3971
3972 if
(!xa_empty(&pf->irq_tracker.entries))
3973 ice_free_irq_msix_misc(pf);
3974
3975 xa_destroy(&pf->dyn_ports);
3976 xa_destroy(&pf->sf_nums);
3977 }
3978
3979 /**
3980 * ice_set_pf_caps - set PF's capability flags
3981 * @pf: pointer to the PF instance
3982 */
3983 static void ice_set_pf_caps(struct ice_pf *pf)
3984 {
3985 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3986
3987 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3988 if (func_caps->common_cap.rdma)
3989 set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3990 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3991 if (func_caps->common_cap.dcb)
3992 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3993 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3994 if (func_caps->common_cap.sr_iov_1_1) {
3995 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3996 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3997 ICE_MAX_SRIOV_VFS);
3998 }
3999 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
4000 if (func_caps->common_cap.rss_table_size)
4001 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
4002
4003 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
4004 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
4005 u16 unused;
4006
4007 /* ctrl_vsi_idx will be set to a valid value when flow director
4008 * is set up by ice_init_fdir
4009 */
4010 pf->ctrl_vsi_idx = ICE_NO_VSI;
4011 set_bit(ICE_FLAG_FD_ENA, pf->flags);
4012 /* force guaranteed filter pool for PF */
4013 ice_alloc_fd_guar_item(&pf->hw, &unused,
4014 func_caps->fd_fltr_guar);
4015 /* force shared filter pool for PF */
4016 ice_alloc_fd_shrd_item(&pf->hw, &unused,
4017 func_caps->fd_fltr_best_effort);
4018 }
4019
4020 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
4021 if (func_caps->common_cap.ieee_1588)
4022 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
4023
4024 pf->max_pf_txqs = func_caps->common_cap.num_txq;
4025 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
4026 }
4027
4028 void ice_start_service_task(struct ice_pf *pf)
4029 {
4030 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
4031 pf->serv_tmr_period = HZ;
4032 INIT_WORK(&pf->serv_task, ice_service_task);
4033 clear_bit(ICE_SERVICE_SCHED, pf->state);
4034 }
4035
4036 /**
4037 * ice_init_pf - Initialize general software structures (struct ice_pf)
4038 * @pf: board private structure to initialize
4039 * Return: 0 on success, negative errno otherwise.
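*
* On failure this unwinds internally via ice_deinit_pf(), which copes with
* a half-initialized pf, so callers only need to propagate the error.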
4040 */ 4041 int ice_init_pf(struct ice_pf *pf) 4042 { 4043 struct udp_tunnel_nic_info *udp_tunnel_nic = &pf->hw.udp_tunnel_nic; 4044 struct device *dev = ice_pf_to_dev(pf); 4045 struct ice_hw *hw = &pf->hw; 4046 int err = -ENOMEM; 4047 4048 mutex_init(&pf->sw_mutex); 4049 mutex_init(&pf->tc_mutex); 4050 mutex_init(&pf->adev_mutex); 4051 mutex_init(&pf->lag_mutex); 4052 4053 INIT_HLIST_HEAD(&pf->aq_wait_list); 4054 spin_lock_init(&pf->aq_wait_lock); 4055 init_waitqueue_head(&pf->aq_wait_queue); 4056 4057 init_waitqueue_head(&pf->reset_wait_queue); 4058 4059 mutex_init(&pf->avail_q_mutex); 4060 4061 mutex_init(&pf->vfs.table_lock); 4062 hash_init(pf->vfs.table); 4063 if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) 4064 wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH, 4065 ICE_MBX_OVERFLOW_WATERMARK); 4066 else 4067 ice_mbx_init_snapshot(&pf->hw); 4068 4069 xa_init(&pf->dyn_ports); 4070 xa_init(&pf->sf_nums); 4071 4072 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); 4073 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); 4074 pf->txtime_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); 4075 if (!pf->avail_txqs || !pf->avail_rxqs || !pf->txtime_txqs) 4076 goto undo_init; 4077 4078 udp_tunnel_nic->set_port = ice_udp_tunnel_set_port; 4079 udp_tunnel_nic->unset_port = ice_udp_tunnel_unset_port; 4080 udp_tunnel_nic->shared = &hw->udp_tunnel_shared; 4081 udp_tunnel_nic->tables[0].n_entries = hw->tnl.valid_count[TNL_VXLAN]; 4082 udp_tunnel_nic->tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN; 4083 udp_tunnel_nic->tables[1].n_entries = hw->tnl.valid_count[TNL_GENEVE]; 4084 udp_tunnel_nic->tables[1].tunnel_types = UDP_TUNNEL_TYPE_GENEVE; 4085 4086 /* In case of MSIX we are going to setup the misc vector right here 4087 * to handle admin queue events etc. In case of legacy and MSI 4088 * the misc functionality and queue processing is combined in 4089 * the same vector and that gets setup at open. 4090 */ 4091 err = ice_req_irq_msix_misc(pf); 4092 if (err) { 4093 dev_err(dev, "setup of misc vector failed: %d\n", err); 4094 goto undo_init; 4095 } 4096 4097 return 0; 4098 undo_init: 4099 /* deinit handles half-initialized pf just fine */ 4100 ice_deinit_pf(pf); 4101 return err; 4102 } 4103 4104 /** 4105 * ice_is_wol_supported - check if WoL is supported 4106 * @hw: pointer to hardware info 4107 * 4108 * Check if WoL is supported based on the HW configuration. 4109 * Returns true if NVM supports and enables WoL for this port, false otherwise 4110 */ 4111 bool ice_is_wol_supported(struct ice_hw *hw) 4112 { 4113 u16 wol_ctrl; 4114 4115 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control 4116 * word) indicates WoL is not supported on the corresponding PF ID. 4117 */ 4118 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) 4119 return false; 4120 4121 return !(BIT(hw->port_info->lport) & wol_ctrl); 4122 } 4123 4124 /** 4125 * ice_vsi_recfg_qs - Change the number of queues on a VSI 4126 * @vsi: VSI being changed 4127 * @new_rx: new number of Rx queues 4128 * @new_tx: new number of Tx queues 4129 * @locked: is adev device_lock held 4130 * 4131 * Only change the number of queues if new_tx, or new_rx is non-0. 4132 * 4133 * Returns 0 on success. 
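*
* Sketch of a hypothetical caller (illustrative only, not actual driver
* code):
*
*	err = ice_vsi_recfg_qs(vsi, new_rx, new_tx, locked);
*	if (err == -EBUSY)
*		return err;	-- ICE_CFG_BUSY did not clear within the retry window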
4134 */ 4135 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) 4136 { 4137 struct ice_pf *pf = vsi->back; 4138 int i, err = 0, timeout = 50; 4139 4140 if (!new_rx && !new_tx) 4141 return -EINVAL; 4142 4143 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { 4144 timeout--; 4145 if (!timeout) 4146 return -EBUSY; 4147 usleep_range(1000, 2000); 4148 } 4149 4150 if (new_tx) 4151 vsi->req_txq = (u16)new_tx; 4152 if (new_rx) 4153 vsi->req_rxq = (u16)new_rx; 4154 4155 /* set for the next time the netdev is started */ 4156 if (!netif_running(vsi->netdev)) { 4157 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); 4158 if (err) 4159 goto rebuild_err; 4160 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); 4161 goto done; 4162 } 4163 4164 ice_vsi_close(vsi); 4165 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); 4166 if (err) 4167 goto rebuild_err; 4168 4169 ice_for_each_traffic_class(i) { 4170 if (vsi->tc_cfg.ena_tc & BIT(i)) 4171 netdev_set_tc_queue(vsi->netdev, 4172 vsi->tc_cfg.tc_info[i].netdev_tc, 4173 vsi->tc_cfg.tc_info[i].qcount_tx, 4174 vsi->tc_cfg.tc_info[i].qoffset); 4175 } 4176 ice_pf_dcb_recfg(pf, locked); 4177 ice_vsi_open(vsi); 4178 goto done; 4179 4180 rebuild_err: 4181 dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n", 4182 err); 4183 done: 4184 clear_bit(ICE_CFG_BUSY, pf->state); 4185 return err; 4186 } 4187 4188 /** 4189 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode 4190 * @pf: PF to configure 4191 * 4192 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF 4193 * VSI can still Tx/Rx VLAN tagged packets. 4194 */ 4195 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) 4196 { 4197 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4198 struct ice_vsi_ctx *ctxt; 4199 struct ice_hw *hw; 4200 int status; 4201 4202 if (!vsi) 4203 return; 4204 4205 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 4206 if (!ctxt) 4207 return; 4208 4209 hw = &pf->hw; 4210 ctxt->info = vsi->info; 4211 4212 ctxt->info.valid_sections = 4213 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | 4214 ICE_AQ_VSI_PROP_SECURITY_VALID | 4215 ICE_AQ_VSI_PROP_SW_VALID); 4216 4217 /* disable VLAN anti-spoof */ 4218 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << 4219 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); 4220 4221 /* disable VLAN pruning and keep all other settings */ 4222 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; 4223 4224 /* allow all VLANs on Tx and don't strip on Rx */ 4225 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL | 4226 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING; 4227 4228 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 4229 if (status) { 4230 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", 4231 status, libie_aq_str(hw->adminq.sq_last_status)); 4232 } else { 4233 vsi->info.sec_flags = ctxt->info.sec_flags; 4234 vsi->info.sw_flags2 = ctxt->info.sw_flags2; 4235 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags; 4236 } 4237 4238 kfree(ctxt); 4239 } 4240 4241 /** 4242 * ice_log_pkg_init - log result of DDP package load 4243 * @hw: pointer to hardware info 4244 * @state: state of package load 4245 */ 4246 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state) 4247 { 4248 struct ice_pf *pf = hw->back; 4249 struct device *dev; 4250 4251 dev = ice_pf_to_dev(pf); 4252 4253 switch (state) { 4254 case ICE_DDP_PKG_SUCCESS: 4255 dev_info(dev, "The DDP 
package was successfully loaded: %s version %d.%d.%d.%d\n", 4256 hw->active_pkg_name, 4257 hw->active_pkg_ver.major, 4258 hw->active_pkg_ver.minor, 4259 hw->active_pkg_ver.update, 4260 hw->active_pkg_ver.draft); 4261 break; 4262 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: 4263 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", 4264 hw->active_pkg_name, 4265 hw->active_pkg_ver.major, 4266 hw->active_pkg_ver.minor, 4267 hw->active_pkg_ver.update, 4268 hw->active_pkg_ver.draft); 4269 break; 4270 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED: 4271 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 4272 hw->active_pkg_name, 4273 hw->active_pkg_ver.major, 4274 hw->active_pkg_ver.minor, 4275 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 4276 break; 4277 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: 4278 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", 4279 hw->active_pkg_name, 4280 hw->active_pkg_ver.major, 4281 hw->active_pkg_ver.minor, 4282 hw->active_pkg_ver.update, 4283 hw->active_pkg_ver.draft, 4284 hw->pkg_name, 4285 hw->pkg_ver.major, 4286 hw->pkg_ver.minor, 4287 hw->pkg_ver.update, 4288 hw->pkg_ver.draft); 4289 break; 4290 case ICE_DDP_PKG_FW_MISMATCH: 4291 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); 4292 break; 4293 case ICE_DDP_PKG_INVALID_FILE: 4294 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); 4295 break; 4296 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH: 4297 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); 4298 break; 4299 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW: 4300 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", 4301 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 4302 break; 4303 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID: 4304 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); 4305 break; 4306 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW: 4307 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); 4308 break; 4309 case ICE_DDP_PKG_LOAD_ERROR: 4310 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); 4311 /* poll for reset to complete */ 4312 if (ice_check_reset(hw)) 4313 dev_err(dev, "Error resetting device. Please reload the driver\n"); 4314 break; 4315 case ICE_DDP_PKG_ERR: 4316 default: 4317 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n"); 4318 break; 4319 } 4320 } 4321 4322 /** 4323 * ice_load_pkg - load/reload the DDP Package file 4324 * @firmware: firmware structure when firmware requested or NULL for reload 4325 * @pf: pointer to the PF instance 4326 * 4327 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and 4328 * initialize HW tables. 
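*
* Reload sketch (assumes hw->pkg_copy was cached by a prior successful
* load):
*
*	ice_load_pkg(NULL, pf);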
4329 */ 4330 static void 4331 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) 4332 { 4333 enum ice_ddp_state state = ICE_DDP_PKG_ERR; 4334 struct device *dev = ice_pf_to_dev(pf); 4335 struct ice_hw *hw = &pf->hw; 4336 4337 /* Load DDP Package */ 4338 if (firmware && !hw->pkg_copy) { 4339 state = ice_copy_and_init_pkg(hw, firmware->data, 4340 firmware->size); 4341 ice_log_pkg_init(hw, state); 4342 } else if (!firmware && hw->pkg_copy) { 4343 /* Reload package during rebuild after CORER/GLOBR reset */ 4344 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); 4345 ice_log_pkg_init(hw, state); 4346 } else { 4347 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); 4348 } 4349 4350 if (!ice_is_init_pkg_successful(state)) { 4351 /* Safe Mode */ 4352 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 4353 return; 4354 } 4355 4356 /* Successful download package is the precondition for advanced 4357 * features, hence setting the ICE_FLAG_ADV_FEATURES flag 4358 */ 4359 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); 4360 } 4361 4362 /** 4363 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines 4364 * @pf: pointer to the PF structure 4365 * 4366 * There is no error returned here because the driver should be able to handle 4367 * 128 Byte cache lines, so we only print a warning in case issues are seen, 4368 * specifically with Tx. 4369 */ 4370 static void ice_verify_cacheline_size(struct ice_pf *pf) 4371 { 4372 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) 4373 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", 4374 ICE_CACHE_LINE_BYTES); 4375 } 4376 4377 /** 4378 * ice_send_version - update firmware with driver version 4379 * @pf: PF struct 4380 * 4381 * Returns 0 on success, else error code 4382 */ 4383 static int ice_send_version(struct ice_pf *pf) 4384 { 4385 struct ice_driver_ver dv; 4386 4387 dv.major_ver = 0xff; 4388 dv.minor_ver = 0xff; 4389 dv.build_ver = 0xff; 4390 dv.subbuild_ver = 0; 4391 strscpy((char *)dv.driver_string, UTS_RELEASE, 4392 sizeof(dv.driver_string)); 4393 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); 4394 } 4395 4396 /** 4397 * ice_init_fdir - Initialize flow director VSI and configuration 4398 * @pf: pointer to the PF instance 4399 * 4400 * returns 0 on success, negative on error 4401 */ 4402 static int ice_init_fdir(struct ice_pf *pf) 4403 { 4404 struct device *dev = ice_pf_to_dev(pf); 4405 struct ice_vsi *ctrl_vsi; 4406 int err; 4407 4408 /* Side Band Flow Director needs to have a control VSI. 4409 * Allocate it and store it in the PF. 
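* If any later step fails, the control VSI is released again and
* pf->ctrl_vsi_idx is reset to ICE_NO_VSI.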
4410 */
4411 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4412 if (!ctrl_vsi) {
4413 dev_dbg(dev, "could not create control VSI\n");
4414 return -ENOMEM;
4415 }
4416
4417 err = ice_vsi_open_ctrl(ctrl_vsi);
4418 if (err) {
4419 dev_dbg(dev, "could not open control VSI\n");
4420 goto err_vsi_open;
4421 }
4422
4423 mutex_init(&pf->hw.fdir_fltr_lock);
4424
4425 err = ice_fdir_create_dflt_rules(pf);
4426 if (err)
4427 goto err_fdir_rule;
4428
4429 return 0;
4430
4431 err_fdir_rule:
4432 ice_fdir_release_flows(&pf->hw);
4433 ice_vsi_close(ctrl_vsi);
4434 err_vsi_open:
4435 ice_vsi_release(ctrl_vsi);
4436 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4437 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4438 pf->ctrl_vsi_idx = ICE_NO_VSI;
4439 }
4440 return err;
4441 }
4442
4443 static void ice_deinit_fdir(struct ice_pf *pf)
4444 {
4445 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4446
4447 if (!vsi)
4448 return;
4449
4450 ice_vsi_manage_fdir(vsi, false);
4451 ice_vsi_release(vsi);
4452 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4453 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4454 pf->ctrl_vsi_idx = ICE_NO_VSI;
4455 }
4456
4457 mutex_destroy(&pf->hw.fdir_fltr_lock);
4458 }
4459
4460 /**
4461 * ice_get_opt_fw_name - return optional firmware file name or NULL
4462 * @pf: pointer to the PF instance
4463 */
4464 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4465 {
4466 /* Optional firmware name is the same as the default with an additional
4467 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
4468 */
4469 struct pci_dev *pdev = pf->pdev;
4470 char *opt_fw_filename;
4471 u64 dsn;
4472
4473 /* Determine the name of the optional file using the DSN (two
4474 * dwords following the start of the DSN Capability).
4475 */
4476 dsn = pci_get_dsn(pdev);
4477 if (!dsn)
4478 return NULL;
4479
4480 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4481 if (!opt_fw_filename)
4482 return NULL;
4483
4484 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4485 ICE_DDP_PKG_PATH, dsn);
4486
4487 return opt_fw_filename;
4488 }
4489
4490 /**
4491 * ice_request_fw - request the DDP package file
4492 * @pf: pointer to the PF instance
4493 * @firmware: double pointer to firmware struct
4494 *
4495 * Return: zero when successful, negative values otherwise.
4496 */
4497 static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware)
4498 {
4499 char *opt_fw_filename = ice_get_opt_fw_name(pf);
4500 struct device *dev = ice_pf_to_dev(pf);
4501 int err = 0;
4502
4503 /* optional device-specific DDP (if present) overrides the default DDP
4504 * package file. The kernel logs a debug message if the file doesn't
4505 * exist and warning messages for other errors.
4506 */
4507 if (opt_fw_filename) {
4508 err = firmware_request_nowarn(firmware, opt_fw_filename, dev);
4509 kfree(opt_fw_filename);
4510 if (!err)
4511 return err;
4512 }
4513 err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev);
4514 if (err)
4515 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4516
4517 return err;
4518 }
4519
4520 /**
4521 * ice_init_tx_topology - performs Tx topology initialization
4522 * @hw: pointer to the hardware structure
4523 * @firmware: pointer to firmware structure
4524 *
4525 * Return: zero when init was successful, negative values otherwise.
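*
* Note: of the ice_cfg_tx_topo() errors only -ENODEV is propagated; -EIO
* (the DDP package lacks the feature) and -EEXIST are treated as success.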
4526 */ 4527 static int 4528 ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware) 4529 { 4530 u8 num_tx_sched_layers = hw->num_tx_sched_layers; 4531 struct ice_pf *pf = hw->back; 4532 struct device *dev; 4533 int err; 4534 4535 dev = ice_pf_to_dev(pf); 4536 err = ice_cfg_tx_topo(hw, firmware->data, firmware->size); 4537 if (!err) { 4538 if (hw->num_tx_sched_layers > num_tx_sched_layers) 4539 dev_info(dev, "Tx scheduling layers switching feature disabled\n"); 4540 else 4541 dev_info(dev, "Tx scheduling layers switching feature enabled\n"); 4542 return 0; 4543 } else if (err == -ENODEV) { 4544 /* If we failed to re-initialize the device, we can no longer 4545 * continue loading. 4546 */ 4547 dev_warn(dev, "Failed to initialize hardware after applying Tx scheduling configuration.\n"); 4548 return err; 4549 } else if (err == -EIO) { 4550 dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n"); 4551 return 0; 4552 } else if (err == -EEXIST) { 4553 return 0; 4554 } 4555 4556 /* Do not treat this as a fatal error. */ 4557 dev_info(dev, "Failed to apply Tx scheduling configuration, err %pe\n", 4558 ERR_PTR(err)); 4559 return 0; 4560 } 4561 4562 /** 4563 * ice_init_supported_rxdids - Initialize supported Rx descriptor IDs 4564 * @hw: pointer to the hardware structure 4565 * @pf: pointer to pf structure 4566 * 4567 * The pf->supported_rxdids bitmap is used to indicate to VFs which descriptor 4568 * formats the PF hardware supports. The exact list of supported RXDIDs 4569 * depends on the loaded DDP package. The IDs can be determined by reading the 4570 * GLFLXP_RXDID_FLAGS register after the DDP package is loaded. 4571 * 4572 * Note that the legacy 32-byte RXDID 0 is always supported but is not listed 4573 * in the DDP package. The 16-byte legacy descriptor is never supported by 4574 * VFs. 4575 */ 4576 static void ice_init_supported_rxdids(struct ice_hw *hw, struct ice_pf *pf) 4577 { 4578 pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1); 4579 4580 for (int i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) { 4581 u32 regval; 4582 4583 regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0)); 4584 if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) 4585 & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) 4586 pf->supported_rxdids |= BIT(i); 4587 } 4588 } 4589 4590 /** 4591 * ice_init_ddp_config - DDP related configuration 4592 * @hw: pointer to the hardware structure 4593 * @pf: pointer to pf structure 4594 * 4595 * This function loads DDP file from the disk, then initializes Tx 4596 * topology. At the end DDP package is loaded on the card. 4597 * 4598 * Return: zero when init was successful, negative values otherwise. 
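*
* Order of operations: ice_request_fw(), ice_init_tx_topology(),
* ice_load_pkg(), then ice_init_supported_rxdids() once the package is on
* the card.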
4599 */
4600 static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
4601 {
4602 struct device *dev = ice_pf_to_dev(pf);
4603 const struct firmware *firmware = NULL;
4604 int err;
4605
4606 err = ice_request_fw(pf, &firmware);
4607 if (err) {
4608 dev_err(dev, "Failed to request FW: %d\n", err);
4609 return err;
4610 }
4611
4612 err = ice_init_tx_topology(hw, firmware);
4613 if (err) {
4614 dev_err(dev, "Failed to initialize Tx topology: %d\n",
4615 err);
4616 release_firmware(firmware);
4617 return err;
4618 }
4619
4620 /* Download firmware to device */
4621 ice_load_pkg(firmware, pf);
4622 release_firmware(firmware);
4623
4624 /* Initialize the supported Rx descriptor IDs after loading DDP */
4625 ice_init_supported_rxdids(hw, pf);
4626
4627 return 0;
4628 }
4629
4630 /**
4631 * ice_print_wake_reason - show the wake up cause in the log
4632 * @pf: pointer to the PF struct
4633 */
4634 static void ice_print_wake_reason(struct ice_pf *pf)
4635 {
4636 u32 wus = pf->wakeup_reason;
4637 const char *wake_str;
4638
4639 /* if no wake event, nothing to print */
4640 if (!wus)
4641 return;
4642
4643 if (wus & PFPM_WUS_LNKC_M)
4644 wake_str = "Link\n";
4645 else if (wus & PFPM_WUS_MAG_M)
4646 wake_str = "Magic Packet\n";
4647 else if (wus & PFPM_WUS_MNG_M)
4648 wake_str = "Management\n";
4649 else if (wus & PFPM_WUS_FW_RST_WK_M)
4650 wake_str = "Firmware Reset\n";
4651 else
4652 wake_str = "Unknown\n";
4653
4654 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4655 }
4656
4657 /**
4658 * ice_register_netdev - register netdev
4659 * @vsi: pointer to the VSI struct
4660 */
4661 static int ice_register_netdev(struct ice_vsi *vsi)
4662 {
4663 int err;
4664
4665 if (!vsi || !vsi->netdev)
4666 return -EIO;
4667
4668 err = register_netdev(vsi->netdev);
4669 if (err)
4670 return err;
4671
4672 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4673 netif_carrier_off(vsi->netdev);
4674 netif_tx_stop_all_queues(vsi->netdev);
4675
4676 return 0;
4677 }
4678
4679 static void ice_unregister_netdev(struct ice_vsi *vsi)
4680 {
4681 if (!vsi || !vsi->netdev)
4682 return;
4683
4684 unregister_netdev(vsi->netdev);
4685 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4686 }
4687
4688 /**
4689 * ice_cfg_netdev - Allocate and configure a netdev
4690 * @vsi: the VSI associated with the new netdev
4691 *
4692 * Returns 0 on success, negative value on failure
4693 */
4694 static int ice_cfg_netdev(struct ice_vsi *vsi)
4695 {
4696 struct ice_netdev_priv *np;
4697 struct net_device *netdev;
4698 u8 mac_addr[ETH_ALEN];
4699
4700 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4701 vsi->alloc_rxq);
4702 if (!netdev)
4703 return -ENOMEM;
4704
4705 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4706 vsi->netdev = netdev;
4707 np = netdev_priv(netdev);
4708 np->vsi = vsi;
4709
4710 ice_set_netdev_features(netdev);
4711 ice_set_ops(vsi);
4712
4713 if (vsi->type == ICE_VSI_PF) {
4714 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4715 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4716 eth_hw_addr_set(netdev, mac_addr);
4717 }
4718
4719 netdev->priv_flags |= IFF_UNICAST_FLT;
4720
4721 /* Setup netdev TC information */
4722 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4723
4724 netdev->max_mtu = ICE_MAX_MTU;
4725
4726 return 0;
4727 }
4728
4729 static void ice_decfg_netdev(struct ice_vsi *vsi)
4730 {
4731 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4732 free_netdev(vsi->netdev);
4733 vsi->netdev = NULL;
4734 }
4735
4736 void ice_init_dev_hw(struct ice_pf *pf)
4737 {
4738 struct ice_hw *hw = &pf->hw;
4739 int err;
4740
4741 ice_init_feature_support(pf);
4742
4743 err = ice_init_ddp_config(hw, pf);
4744
4745 /* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be
4746 * set in pf->flags, which will cause ice_is_safe_mode to return
4747 * true
4748 */
4749 if (err || ice_is_safe_mode(pf)) {
4750 /* we already got function/device capabilities but these don't
4751 * reflect what the driver needs to do in safe mode. Instead of
4752 * adding conditional logic everywhere to ignore these
4753 * device/function capabilities, override them.
4754 */
4755 ice_set_safe_mode_caps(hw);
4756 }
4757 }
4758
4759 int ice_init_dev(struct ice_pf *pf)
4760 {
4761 struct device *dev = ice_pf_to_dev(pf);
4762 int err;
4763
4764 ice_set_pf_caps(pf);
4765 err = ice_init_interrupt_scheme(pf);
4766 if (err) {
4767 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4768 return -EIO;
4769 }
4770
4771 ice_start_service_task(pf);
4772
4773 return 0;
4774 }
4775
4776 void ice_deinit_dev(struct ice_pf *pf)
4777 {
4778 ice_service_task_stop(pf);
4779
4780 /* Service task is already stopped, so call reset directly. */
4781 ice_reset(&pf->hw, ICE_RESET_PFR);
4782 pci_wait_for_pending_transaction(pf->pdev);
4783 ice_clear_interrupt_scheme(pf);
4784 }
4785
4786 static void ice_init_features(struct ice_pf *pf)
4787 {
4788 struct device *dev = ice_pf_to_dev(pf);
4789
4790 if (ice_is_safe_mode(pf))
4791 return;
4792
4793 /* initialize DDP driven features */
4794 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4795 ice_ptp_init(pf);
4796
4797 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4798 ice_gnss_init(pf);
4799
4800 if (ice_is_feature_supported(pf, ICE_F_CGU) ||
4801 ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
4802 ice_dpll_init(pf);
4803
4804 /* Note: Flow director init failure is non-fatal to load */
4805 if (ice_init_fdir(pf))
4806 dev_err(dev, "could not initialize flow director\n");
4807
4808 /* Note: DCB init failure is non-fatal to load */
4809 if (ice_init_pf_dcb(pf, false)) {
4810 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4811 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4812 } else {
4813 ice_cfg_lldp_mib_change(&pf->hw, true);
4814 }
4815
4816 if (ice_init_lag(pf))
4817 dev_warn(dev, "Failed to init link aggregation support\n");
4818
4819 ice_hwmon_init(pf);
4820 }
4821
4822 static void ice_deinit_features(struct ice_pf *pf)
4823 {
4824 if (ice_is_safe_mode(pf))
4825 return;
4826
4827 ice_deinit_lag(pf);
4828 if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4829 ice_cfg_lldp_mib_change(&pf->hw, false);
4830 ice_deinit_fdir(pf);
4831 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4832 ice_gnss_exit(pf);
4833 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4834 ice_ptp_release(pf);
4835 if (test_bit(ICE_FLAG_DPLL, pf->flags))
4836 ice_dpll_deinit(pf);
4837 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
4838 xa_destroy(&pf->eswitch.reprs);
4839 ice_hwmon_exit(pf);
4840 }
4841
4842 static void ice_init_wakeup(struct ice_pf *pf)
4843 {
4844 /* Save wakeup reason register for later use */
4845 pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4846
4847 /* check for a power management event */
4848 ice_print_wake_reason(pf);
4849
4850 /* clear wake status, all bits */
4851 wr32(&pf->hw, PFPM_WUS, U32_MAX);
4852
4853 /* Disable WoL at init, wait for user to enable */
4854 device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4855 }
4856
4857 static int ice_init_link(struct ice_pf *pf)
4858 {
4859 struct device *dev = ice_pf_to_dev(pf);
4860 int err;
4861
4862 err = ice_init_link_events(pf->hw.port_info); 4863 if (err) { 4864 dev_err(dev, "ice_init_link_events failed: %d\n", err); 4865 return err; 4866 } 4867 4868 /* not a fatal error if this fails */ 4869 err = ice_init_nvm_phy_type(pf->hw.port_info); 4870 if (err) 4871 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); 4872 4873 /* not a fatal error if this fails */ 4874 err = ice_update_link_info(pf->hw.port_info); 4875 if (err) 4876 dev_err(dev, "ice_update_link_info failed: %d\n", err); 4877 4878 ice_init_link_dflt_override(pf->hw.port_info); 4879 4880 ice_check_link_cfg_err(pf, 4881 pf->hw.port_info->phy.link_info.link_cfg_err); 4882 4883 /* if media available, initialize PHY settings */ 4884 if (pf->hw.port_info->phy.link_info.link_info & 4885 ICE_AQ_MEDIA_AVAILABLE) { 4886 /* not a fatal error if this fails */ 4887 err = ice_init_phy_user_cfg(pf->hw.port_info); 4888 if (err) 4889 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); 4890 4891 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { 4892 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4893 4894 if (vsi) 4895 ice_configure_phy(vsi); 4896 } 4897 } else { 4898 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); 4899 } 4900 4901 return err; 4902 } 4903 4904 static int ice_init_pf_sw(struct ice_pf *pf) 4905 { 4906 bool dvm = ice_is_dvm_ena(&pf->hw); 4907 struct ice_vsi *vsi; 4908 int err; 4909 4910 /* create switch struct for the switch element created by FW on boot */ 4911 pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL); 4912 if (!pf->first_sw) 4913 return -ENOMEM; 4914 4915 if (pf->hw.evb_veb) 4916 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; 4917 else 4918 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; 4919 4920 pf->first_sw->pf = pf; 4921 4922 /* record the sw_id available for later use */ 4923 pf->first_sw->sw_id = pf->hw.port_info->sw_id; 4924 4925 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); 4926 if (err) 4927 goto err_aq_set_port_params; 4928 4929 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); 4930 if (!vsi) { 4931 err = -ENOMEM; 4932 goto err_pf_vsi_setup; 4933 } 4934 4935 return 0; 4936 4937 err_pf_vsi_setup: 4938 err_aq_set_port_params: 4939 kfree(pf->first_sw); 4940 return err; 4941 } 4942 4943 static void ice_deinit_pf_sw(struct ice_pf *pf) 4944 { 4945 struct ice_vsi *vsi = ice_get_main_vsi(pf); 4946 4947 if (!vsi) 4948 return; 4949 4950 ice_vsi_release(vsi); 4951 kfree(pf->first_sw); 4952 } 4953 4954 static int ice_alloc_vsis(struct ice_pf *pf) 4955 { 4956 struct device *dev = ice_pf_to_dev(pf); 4957 4958 pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi; 4959 if (!pf->num_alloc_vsi) 4960 return -EIO; 4961 4962 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { 4963 dev_warn(dev, 4964 "limiting the VSI count due to UDP tunnel limitation %d > %d\n", 4965 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); 4966 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; 4967 } 4968 4969 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), 4970 GFP_KERNEL); 4971 if (!pf->vsi) 4972 return -ENOMEM; 4973 4974 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi, 4975 sizeof(*pf->vsi_stats), GFP_KERNEL); 4976 if (!pf->vsi_stats) { 4977 devm_kfree(dev, pf->vsi); 4978 return -ENOMEM; 4979 } 4980 4981 return 0; 4982 } 4983 4984 static void ice_dealloc_vsis(struct ice_pf *pf) 4985 { 4986 devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats); 4987 pf->vsi_stats = NULL; 4988 4989 pf->num_alloc_vsi = 0; 4990 devm_kfree(ice_pf_to_dev(pf), pf->vsi); 4991 pf->vsi = NULL; 4992 } 4993 4994 static 
int ice_init_devlink(struct ice_pf *pf) 4995 { 4996 int err; 4997 4998 err = ice_devlink_register_params(pf); 4999 if (err) 5000 return err; 5001 5002 ice_devlink_init_regions(pf); 5003 ice_devlink_register(pf); 5004 ice_health_init(pf); 5005 5006 return 0; 5007 } 5008 5009 static void ice_deinit_devlink(struct ice_pf *pf) 5010 { 5011 ice_health_deinit(pf); 5012 ice_devlink_unregister(pf); 5013 ice_devlink_destroy_regions(pf); 5014 ice_devlink_unregister_params(pf); 5015 } 5016 5017 static int ice_init(struct ice_pf *pf) 5018 { 5019 struct device *dev = ice_pf_to_dev(pf); 5020 int err; 5021 5022 err = ice_init_pf(pf); 5023 if (err) { 5024 dev_err(dev, "ice_init_pf failed: %d\n", err); 5025 return err; 5026 } 5027 5028 if (pf->hw.mac_type == ICE_MAC_E830) { 5029 err = pci_enable_ptm(pf->pdev, NULL); 5030 if (err) 5031 dev_dbg(dev, "PCIe PTM not supported by PCIe bus/controller\n"); 5032 } 5033 5034 err = ice_alloc_vsis(pf); 5035 if (err) 5036 goto unroll_pf_init; 5037 5038 err = ice_init_pf_sw(pf); 5039 if (err) 5040 goto err_init_pf_sw; 5041 5042 ice_init_wakeup(pf); 5043 5044 err = ice_init_link(pf); 5045 if (err) 5046 goto err_init_link; 5047 5048 err = ice_send_version(pf); 5049 if (err) 5050 goto err_init_link; 5051 5052 ice_verify_cacheline_size(pf); 5053 5054 if (ice_is_safe_mode(pf)) 5055 ice_set_safe_mode_vlan_cfg(pf); 5056 else 5057 /* print PCI link speed and width */ 5058 pcie_print_link_status(pf->pdev); 5059 5060 /* ready to go, so clear down state bit */ 5061 clear_bit(ICE_DOWN, pf->state); 5062 clear_bit(ICE_SERVICE_DIS, pf->state); 5063 5064 /* since everything is good, start the service timer */ 5065 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 5066 5067 return 0; 5068 5069 err_init_link: 5070 ice_deinit_pf_sw(pf); 5071 err_init_pf_sw: 5072 ice_dealloc_vsis(pf); 5073 unroll_pf_init: 5074 ice_deinit_pf(pf); 5075 return err; 5076 } 5077 5078 static void ice_deinit(struct ice_pf *pf) 5079 { 5080 set_bit(ICE_SERVICE_DIS, pf->state); 5081 set_bit(ICE_DOWN, pf->state); 5082 5083 ice_deinit_pf_sw(pf); 5084 ice_dealloc_vsis(pf); 5085 ice_deinit_pf(pf); 5086 } 5087 5088 /** 5089 * ice_load - load pf by init hw and starting VSI 5090 * @pf: pointer to the pf instance 5091 * 5092 * This function has to be called under devl_lock. 
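*
* Calling sketch, mirroring what ice_probe() does below:
*
*	devl_lock(priv_to_devlink(pf));
*	err = ice_load(pf);
*	devl_unlock(priv_to_devlink(pf));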
5093 */ 5094 int ice_load(struct ice_pf *pf) 5095 { 5096 struct ice_vsi *vsi; 5097 int err; 5098 5099 devl_assert_locked(priv_to_devlink(pf)); 5100 5101 vsi = ice_get_main_vsi(pf); 5102 5103 /* init channel list */ 5104 INIT_LIST_HEAD(&vsi->ch_list); 5105 5106 err = ice_cfg_netdev(vsi); 5107 if (err) 5108 return err; 5109 5110 /* Setup DCB netlink interface */ 5111 ice_dcbnl_setup(vsi); 5112 5113 err = ice_init_mac_fltr(pf); 5114 if (err) 5115 goto err_init_mac_fltr; 5116 5117 err = ice_devlink_create_pf_port(pf); 5118 if (err) 5119 goto err_devlink_create_pf_port; 5120 5121 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); 5122 5123 err = ice_register_netdev(vsi); 5124 if (err) 5125 goto err_register_netdev; 5126 5127 err = ice_tc_indir_block_register(vsi); 5128 if (err) 5129 goto err_tc_indir_block_register; 5130 5131 ice_napi_add(vsi); 5132 5133 ice_init_features(pf); 5134 5135 err = ice_init_rdma(pf); 5136 if (err) 5137 goto err_init_rdma; 5138 5139 ice_service_task_restart(pf); 5140 5141 clear_bit(ICE_DOWN, pf->state); 5142 5143 return 0; 5144 5145 err_init_rdma: 5146 ice_deinit_features(pf); 5147 ice_tc_indir_block_unregister(vsi); 5148 err_tc_indir_block_register: 5149 ice_unregister_netdev(vsi); 5150 err_register_netdev: 5151 ice_devlink_destroy_pf_port(pf); 5152 err_devlink_create_pf_port: 5153 err_init_mac_fltr: 5154 ice_decfg_netdev(vsi); 5155 return err; 5156 } 5157 5158 /** 5159 * ice_unload - unload pf by stopping VSI and deinit hw 5160 * @pf: pointer to the pf instance 5161 * 5162 * This function has to be called under devl_lock. 5163 */ 5164 void ice_unload(struct ice_pf *pf) 5165 { 5166 struct ice_vsi *vsi = ice_get_main_vsi(pf); 5167 5168 devl_assert_locked(priv_to_devlink(pf)); 5169 5170 ice_deinit_rdma(pf); 5171 ice_deinit_features(pf); 5172 ice_tc_indir_block_unregister(vsi); 5173 ice_unregister_netdev(vsi); 5174 ice_devlink_destroy_pf_port(pf); 5175 ice_decfg_netdev(vsi); 5176 } 5177 5178 static int ice_probe_recovery_mode(struct ice_pf *pf) 5179 { 5180 struct device *dev = ice_pf_to_dev(pf); 5181 int err; 5182 5183 dev_err(dev, "Firmware recovery mode detected. Limiting functionality. 
Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode\n"); 5184 5185 INIT_HLIST_HEAD(&pf->aq_wait_list); 5186 spin_lock_init(&pf->aq_wait_lock); 5187 init_waitqueue_head(&pf->aq_wait_queue); 5188 5189 timer_setup(&pf->serv_tmr, ice_service_timer, 0); 5190 pf->serv_tmr_period = HZ; 5191 INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode); 5192 clear_bit(ICE_SERVICE_SCHED, pf->state); 5193 err = ice_create_all_ctrlq(&pf->hw); 5194 if (err) 5195 return err; 5196 5197 scoped_guard(devl, priv_to_devlink(pf)) { 5198 err = ice_init_devlink(pf); 5199 if (err) 5200 return err; 5201 } 5202 5203 ice_service_task_restart(pf); 5204 5205 return 0; 5206 } 5207 5208 /** 5209 * ice_probe - Device initialization routine 5210 * @pdev: PCI device information struct 5211 * @ent: entry in ice_pci_tbl 5212 * 5213 * Returns 0 on success, negative on failure 5214 */ 5215 static int 5216 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) 5217 { 5218 struct device *dev = &pdev->dev; 5219 bool need_dev_deinit = false; 5220 struct ice_adapter *adapter; 5221 struct ice_pf *pf; 5222 struct ice_hw *hw; 5223 int err; 5224 5225 if (pdev->is_virtfn) { 5226 dev_err(dev, "can't probe a virtual function\n"); 5227 return -EINVAL; 5228 } 5229 5230 /* when under a kdump kernel initiate a reset before enabling the 5231 * device in order to clear out any pending DMA transactions. These 5232 * transactions can cause some systems to machine check when doing 5233 * the pcim_enable_device() below. 5234 */ 5235 if (is_kdump_kernel()) { 5236 pci_save_state(pdev); 5237 pci_clear_master(pdev); 5238 err = pcie_flr(pdev); 5239 if (err) 5240 return err; 5241 pci_restore_state(pdev); 5242 } 5243 5244 /* this driver uses devres, see 5245 * Documentation/driver-api/driver-model/devres.rst 5246 */ 5247 err = pcim_enable_device(pdev); 5248 if (err) 5249 return err; 5250 5251 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); 5252 if (err) { 5253 dev_err(dev, "BAR0 I/O map error %d\n", err); 5254 return err; 5255 } 5256 5257 pf = ice_allocate_pf(dev); 5258 if (!pf) 5259 return -ENOMEM; 5260 5261 /* initialize Auxiliary index to invalid value */ 5262 pf->aux_idx = -1; 5263 5264 /* set up for high or low DMA */ 5265 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 5266 if (err) { 5267 dev_err(dev, "DMA configuration failed: 0x%x\n", err); 5268 return err; 5269 } 5270 5271 pci_set_master(pdev); 5272 pf->pdev = pdev; 5273 pci_set_drvdata(pdev, pf); 5274 set_bit(ICE_DOWN, pf->state); 5275 /* Disable service task until DOWN bit is cleared */ 5276 set_bit(ICE_SERVICE_DIS, pf->state); 5277 5278 hw = &pf->hw; 5279 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; 5280 pci_save_state(pdev); 5281 5282 hw->back = pf; 5283 hw->port_info = NULL; 5284 hw->vendor_id = pdev->vendor; 5285 hw->device_id = pdev->device; 5286 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 5287 hw->subsystem_vendor_id = pdev->subsystem_vendor; 5288 hw->subsystem_device_id = pdev->subsystem_device; 5289 hw->bus.device = PCI_SLOT(pdev->devfn); 5290 hw->bus.func = PCI_FUNC(pdev->devfn); 5291 ice_set_ctrlq_len(hw); 5292 5293 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); 5294 5295 #ifndef CONFIG_DYNAMIC_DEBUG 5296 if (debug < -1) 5297 hw->debug_mask = debug; 5298 #endif 5299 5300 if (ice_is_recovery_mode(hw)) 5301 return ice_probe_recovery_mode(pf); 5302 5303 err = ice_init_hw(hw); 5304 if (err) { 5305 dev_err(dev, "ice_init_hw failed: %d\n", err); 5306 
return err; 5307 } 5308 5309 adapter = ice_adapter_get(pdev); 5310 if (IS_ERR(adapter)) { 5311 err = PTR_ERR(adapter); 5312 goto unroll_hw_init; 5313 } 5314 pf->adapter = adapter; 5315 5316 err = ice_init_dev(pf); 5317 if (err) 5318 goto unroll_adapter; 5319 5320 err = ice_init(pf); 5321 if (err) 5322 goto unroll_dev_init; 5323 5324 devl_lock(priv_to_devlink(pf)); 5325 err = ice_load(pf); 5326 if (err) 5327 goto unroll_init; 5328 5329 err = ice_init_devlink(pf); 5330 if (err) 5331 goto unroll_load; 5332 devl_unlock(priv_to_devlink(pf)); 5333 5334 return 0; 5335 5336 unroll_load: 5337 ice_unload(pf); 5338 unroll_init: 5339 devl_unlock(priv_to_devlink(pf)); 5340 ice_deinit(pf); 5341 unroll_dev_init: 5342 need_dev_deinit = true; 5343 unroll_adapter: 5344 ice_adapter_put(pdev); 5345 unroll_hw_init: 5346 ice_deinit_hw(hw); 5347 if (need_dev_deinit) 5348 ice_deinit_dev(pf); 5349 return err; 5350 } 5351 5352 /** 5353 * ice_set_wake - enable or disable Wake on LAN 5354 * @pf: pointer to the PF struct 5355 * 5356 * Simple helper for WoL control 5357 */ 5358 static void ice_set_wake(struct ice_pf *pf) 5359 { 5360 struct ice_hw *hw = &pf->hw; 5361 bool wol = pf->wol_ena; 5362 5363 /* clear wake state, otherwise new wake events won't fire */ 5364 wr32(hw, PFPM_WUS, U32_MAX); 5365 5366 /* enable / disable APM wake up, no RMW needed */ 5367 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); 5368 5369 /* set magic packet filter enabled */ 5370 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0); 5371 } 5372 5373 /** 5374 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet 5375 * @pf: pointer to the PF struct 5376 * 5377 * Issue firmware command to enable multicast magic wake, making 5378 * sure that any locally administered address (LAA) is used for 5379 * wake, and that PF reset doesn't undo the LAA. 
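*
* This is a no-op unless WoL is enabled (pf->wol_ena) and a main VSI
* exists.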
5380 */
5381 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5382 {
5383 struct device *dev = ice_pf_to_dev(pf);
5384 struct ice_hw *hw = &pf->hw;
5385 u8 mac_addr[ETH_ALEN];
5386 struct ice_vsi *vsi;
5387 int status;
5388 u8 flags;
5389
5390 if (!pf->wol_ena)
5391 return;
5392
5393 vsi = ice_get_main_vsi(pf);
5394 if (!vsi)
5395 return;
5396
5397 /* Get current MAC address in case it's an LAA */
5398 if (vsi->netdev)
5399 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5400 else
5401 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5402
5403 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5404 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5405 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5406
5407 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5408 if (status)
5409 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5410 status, libie_aq_str(hw->adminq.sq_last_status));
5411 }
5412
5413 /**
5414 * ice_remove - Device removal routine
5415 * @pdev: PCI device information struct
5416 */
5417 static void ice_remove(struct pci_dev *pdev)
5418 {
5419 struct ice_pf *pf = pci_get_drvdata(pdev);
5420 int i;
5421
5422 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5423 if (!ice_is_reset_in_progress(pf->state))
5424 break;
5425 msleep(100);
5426 }
5427
5428 if (ice_is_recovery_mode(&pf->hw)) {
5429 ice_service_task_stop(pf);
5430 scoped_guard(devl, priv_to_devlink(pf)) {
5431 ice_deinit_devlink(pf);
5432 }
5433 return;
5434 }
5435
5436 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5437 set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5438 ice_free_vfs(pf);
5439 }
5440
5441 if (!ice_is_safe_mode(pf))
5442 ice_remove_arfs(pf);
5443
5444 devl_lock(priv_to_devlink(pf));
5445 ice_dealloc_all_dynamic_ports(pf);
5446 ice_deinit_devlink(pf);
5447
5448 ice_unload(pf);
5449 devl_unlock(priv_to_devlink(pf));
5450
5451 ice_deinit(pf);
5452 ice_vsi_release_all(pf);
5453
5454 ice_setup_mc_magic_wake(pf);
5455 ice_set_wake(pf);
5456
5457 ice_adapter_put(pdev);
5458 ice_deinit_hw(&pf->hw);
5459
5460 ice_deinit_dev(pf);
5461 ice_aq_cancel_waiting_tasks(pf);
5462 set_bit(ICE_DOWN, pf->state);
5463 }
5464
5465 /**
5466 * ice_shutdown - PCI callback for shutting down device
5467 * @pdev: PCI device information struct
5468 */
5469 static void ice_shutdown(struct pci_dev *pdev)
5470 {
5471 struct ice_pf *pf = pci_get_drvdata(pdev);
5472
5473 ice_remove(pdev);
5474
5475 if (system_state == SYSTEM_POWER_OFF) {
5476 pci_wake_from_d3(pdev, pf->wol_ena);
5477 pci_set_power_state(pdev, PCI_D3hot);
5478 }
5479 }
5480
5481 /**
5482 * ice_prepare_for_shutdown - prep for PCI shutdown
5483 * @pf: board private structure
5484 *
5485 * Inform or close all dependent features in prep for PCI device shutdown
5486 */
5487 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5488 {
5489 struct ice_hw *hw = &pf->hw;
5490 u32 v;
5491
5492 /* Notify VFs of impending reset */
5493 if (ice_check_sq_alive(hw, &hw->mailboxq))
5494 ice_vc_notify_reset(pf);
5495
5496 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5497
5498 /* disable the VSIs and their queues that are not already DOWN */
5499 ice_pf_dis_all_vsi(pf, false);
5500
5501 ice_for_each_vsi(pf, v)
5502 if (pf->vsi[v])
5503 pf->vsi[v]->vsi_num = 0;
5504
5505 ice_shutdown_all_ctrlq(hw, true);
5506 }
5507
5508 /**
5509 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5510 * @pf: board private structure to reinitialize
5511 *
5512 * This routine reinitializes the interrupt scheme that was cleared during the
5513 * power management
suspend callback.
5514 *
5515 * This should be called during resume routine to re-allocate the q_vectors
5516 * and reacquire interrupts.
5517 */
5518 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5519 {
5520 struct device *dev = ice_pf_to_dev(pf);
5521 int ret, v;
5522
5523 /* Since we clear the MSIX flag during suspend, we need to
5524 * set it back during resume...
5525 */
5526
5527 ret = ice_init_interrupt_scheme(pf);
5528 if (ret) {
5529 dev_err(dev, "Failed to re-initialize interrupt scheme: %d\n", ret);
5530 return ret;
5531 }
5532
5533 /* Remap vectors and rings, after successful re-init interrupts */
5534 ice_for_each_vsi(pf, v) {
5535 if (!pf->vsi[v])
5536 continue;
5537
5538 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5539 if (ret)
5540 goto err_reinit;
5541 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5542 rtnl_lock();
5543 ice_vsi_set_napi_queues(pf->vsi[v]);
5544 rtnl_unlock();
5545 }
5546
5547 ret = ice_req_irq_msix_misc(pf);
5548 if (ret) {
5549 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5550 ret);
5551 goto err_reinit;
5552 }
5553
5554 return 0;
5555
5556 err_reinit:
5557 while (v--)
5558 if (pf->vsi[v]) {
5559 rtnl_lock();
5560 ice_vsi_clear_napi_queues(pf->vsi[v]);
5561 rtnl_unlock();
5562 ice_vsi_free_q_vectors(pf->vsi[v]);
5563 }
5564
5565 return ret;
5566 }
5567
5568 /**
5569 * ice_suspend - PM suspend callback
5570 * @dev: generic device information structure
5571 *
5572 * Power Management callback to quiesce the device and prepare
5573 * for D3 transition.
5574 */
5575 static int ice_suspend(struct device *dev)
5576 {
5577 struct pci_dev *pdev = to_pci_dev(dev);
5578 struct ice_pf *pf;
5579 int disabled, v;
5580
5581 pf = pci_get_drvdata(pdev);
5582
5583 if (!ice_pf_state_is_nominal(pf)) {
5584 dev_err(dev, "Device is not ready, no need to suspend it\n");
5585 return -EBUSY;
5586 }
5587
5588 /* Stop watchdog tasks until resume completion.
5589 * Even though it is most likely that the service task is
5590 * disabled if the device is suspended or down, the service task's
5591 * state is controlled by a different state bit, and we should
5592 * store and honor whatever state that bit is in at this point.
5593 */
5594 disabled = ice_service_task_stop(pf);
5595
5596 ice_deinit_rdma(pf);
5597
5598 /* Already suspended? Then there is nothing to do */
5599 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5600 if (!disabled)
5601 ice_service_task_restart(pf);
5602 return 0;
5603 }
5604
5605 if (test_bit(ICE_DOWN, pf->state) ||
5606 ice_is_reset_in_progress(pf->state)) {
5607 dev_err(dev, "can't suspend device in reset or already down\n");
5608 if (!disabled)
5609 ice_service_task_restart(pf);
5610 return 0;
5611 }
5612
5613 ice_setup_mc_magic_wake(pf);
5614
5615 ice_prepare_for_shutdown(pf);
5616
5617 ice_set_wake(pf);
5618
5619 /* Free vectors, clear the interrupt scheme and release IRQs
5620 * for proper hibernation, especially with large number of CPUs.
5621 * Otherwise hibernation might fail when mapping all the vectors back
5622 * to CPU0.
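*
* The teardown below is undone by ice_reinit_interrupt_scheme(), called
* from ice_resume().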
5623 */ 5624 ice_free_irq_msix_misc(pf); 5625 ice_for_each_vsi(pf, v) { 5626 if (!pf->vsi[v]) 5627 continue; 5628 rtnl_lock(); 5629 ice_vsi_clear_napi_queues(pf->vsi[v]); 5630 rtnl_unlock(); 5631 ice_vsi_free_q_vectors(pf->vsi[v]); 5632 } 5633 ice_clear_interrupt_scheme(pf); 5634 5635 pci_save_state(pdev); 5636 pci_wake_from_d3(pdev, pf->wol_ena); 5637 pci_set_power_state(pdev, PCI_D3hot); 5638 return 0; 5639 } 5640 5641 /** 5642 * ice_resume - PM callback for waking up from D3 5643 * @dev: generic device information structure 5644 */ 5645 static int ice_resume(struct device *dev) 5646 { 5647 struct pci_dev *pdev = to_pci_dev(dev); 5648 enum ice_reset_req reset_type; 5649 struct ice_pf *pf; 5650 struct ice_hw *hw; 5651 int ret; 5652 5653 pci_set_power_state(pdev, PCI_D0); 5654 pci_restore_state(pdev); 5655 5656 if (!pci_device_is_present(pdev)) 5657 return -ENODEV; 5658 5659 ret = pci_enable_device_mem(pdev); 5660 if (ret) { 5661 dev_err(dev, "Cannot enable device after suspend\n"); 5662 return ret; 5663 } 5664 5665 pf = pci_get_drvdata(pdev); 5666 hw = &pf->hw; 5667 5668 pf->wakeup_reason = rd32(hw, PFPM_WUS); 5669 ice_print_wake_reason(pf); 5670 5671 /* We cleared the interrupt scheme when we suspended, so we need to 5672 * restore it now to resume device functionality. 5673 */ 5674 ret = ice_reinit_interrupt_scheme(pf); 5675 if (ret) 5676 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); 5677 5678 ret = ice_init_rdma(pf); 5679 if (ret) 5680 dev_err(dev, "Reinitialize RDMA during resume failed: %d\n", 5681 ret); 5682 5683 clear_bit(ICE_DOWN, pf->state); 5684 /* Now perform PF reset and rebuild */ 5685 reset_type = ICE_RESET_PFR; 5686 /* re-enable service task for reset, but allow reset to schedule it */ 5687 clear_bit(ICE_SERVICE_DIS, pf->state); 5688 5689 if (ice_schedule_reset(pf, reset_type)) 5690 dev_err(dev, "Reset during resume failed.\n"); 5691 5692 clear_bit(ICE_SUSPENDED, pf->state); 5693 ice_service_task_restart(pf); 5694 5695 /* Restart the service task */ 5696 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 5697 5698 return 0; 5699 } 5700 5701 /** 5702 * ice_pci_err_detected - warning that PCI error has been detected 5703 * @pdev: PCI device information struct 5704 * @err: the type of PCI error 5705 * 5706 * Called to warn that something happened on the PCI bus and the error handling 5707 * is in progress. Allows the driver to gracefully prepare/handle PCI errors. 5708 */ 5709 static pci_ers_result_t 5710 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) 5711 { 5712 struct ice_pf *pf = pci_get_drvdata(pdev); 5713 5714 if (!pf) { 5715 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", 5716 __func__, err); 5717 return PCI_ERS_RESULT_DISCONNECT; 5718 } 5719 5720 if (!test_bit(ICE_SUSPENDED, pf->state)) { 5721 ice_service_task_stop(pf); 5722 5723 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { 5724 set_bit(ICE_PFR_REQ, pf->state); 5725 ice_prepare_for_reset(pf, ICE_RESET_PFR); 5726 } 5727 } 5728 5729 return PCI_ERS_RESULT_NEED_RESET; 5730 } 5731 5732 /** 5733 * ice_pci_err_slot_reset - a PCI slot reset has just happened 5734 * @pdev: PCI device information struct 5735 * 5736 * Called to determine if the driver can recover from the PCI slot reset by 5737 * using a register read to determine if the device is recoverable. 
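*
* The GLGEN_RTRIG read below doubles as that liveness check: a value of
* zero (no global reset pending) maps to PCI_ERS_RESULT_RECOVERED.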
5738 */ 5739 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) 5740 { 5741 struct ice_pf *pf = pci_get_drvdata(pdev); 5742 pci_ers_result_t result; 5743 int err; 5744 u32 reg; 5745 5746 err = pci_enable_device_mem(pdev); 5747 if (err) { 5748 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", 5749 err); 5750 result = PCI_ERS_RESULT_DISCONNECT; 5751 } else { 5752 pci_set_master(pdev); 5753 pci_restore_state(pdev); 5754 pci_wake_from_d3(pdev, false); 5755 5756 /* Check for life */ 5757 reg = rd32(&pf->hw, GLGEN_RTRIG); 5758 if (!reg) 5759 result = PCI_ERS_RESULT_RECOVERED; 5760 else 5761 result = PCI_ERS_RESULT_DISCONNECT; 5762 } 5763 5764 return result; 5765 } 5766 5767 /** 5768 * ice_pci_err_resume - restart operations after PCI error recovery 5769 * @pdev: PCI device information struct 5770 * 5771 * Called to allow the driver to bring things back up after PCI error and/or 5772 * reset recovery have finished 5773 */ 5774 static void ice_pci_err_resume(struct pci_dev *pdev) 5775 { 5776 struct ice_pf *pf = pci_get_drvdata(pdev); 5777 5778 if (!pf) { 5779 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", 5780 __func__); 5781 return; 5782 } 5783 5784 if (test_bit(ICE_SUSPENDED, pf->state)) { 5785 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", 5786 __func__); 5787 return; 5788 } 5789 5790 ice_restore_all_vfs_msi_state(pf); 5791 5792 ice_do_reset(pf, ICE_RESET_PFR); 5793 ice_service_task_restart(pf); 5794 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); 5795 } 5796 5797 /** 5798 * ice_pci_err_reset_prepare - prepare device driver for PCI reset 5799 * @pdev: PCI device information struct 5800 */ 5801 static void ice_pci_err_reset_prepare(struct pci_dev *pdev) 5802 { 5803 struct ice_pf *pf = pci_get_drvdata(pdev); 5804 5805 if (!test_bit(ICE_SUSPENDED, pf->state)) { 5806 ice_service_task_stop(pf); 5807 5808 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { 5809 set_bit(ICE_PFR_REQ, pf->state); 5810 ice_prepare_for_reset(pf, ICE_RESET_PFR); 5811 } 5812 } 5813 } 5814 5815 /** 5816 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin 5817 * @pdev: PCI device information struct 5818 */ 5819 static void ice_pci_err_reset_done(struct pci_dev *pdev) 5820 { 5821 ice_pci_err_resume(pdev); 5822 } 5823 5824 /* ice_pci_tbl - PCI Device ID Table 5825 * 5826 * Wildcard entries (PCI_ANY_ID) should come last 5827 * Last entry must be all 0s 5828 * 5829 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 5830 * Class, Class Mask, private data (not used) } 5831 */ 5832 static const struct pci_device_id ice_pci_tbl[] = { 5833 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) }, 5834 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) }, 5835 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) }, 5836 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) }, 5837 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) }, 5838 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) }, 5839 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) }, 5840 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) }, 5841 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) }, 5842 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) }, 5843 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) }, 5844 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) }, 5845 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) }, 5846 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) }, 5847 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) }, 5848 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) }, 5849 { PCI_VDEVICE(INTEL, 
ICE_DEV_ID_E822L_BACKPLANE) }, 5850 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) }, 5851 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) }, 5852 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) }, 5853 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) }, 5854 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) }, 5855 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) }, 5856 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) }, 5857 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) }, 5858 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) }, 5859 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), }, 5860 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), }, 5861 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), }, 5862 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), }, 5863 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) }, 5864 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) }, 5865 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) }, 5866 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) }, 5867 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), }, 5868 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), }, 5869 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), }, 5870 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), }, 5871 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), }, 5872 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), }, 5873 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_BACKPLANE), }, 5874 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_QSFP56), }, 5875 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_SFP), }, 5876 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_BACKPLANE), }, 5877 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_QSFP), }, 5878 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_SFP), }, 5879 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_BACKPLANE), }, 5880 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_QSFP), }, 5881 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_SFP), }, 5882 /* required last entry */ 5883 {} 5884 }; 5885 MODULE_DEVICE_TABLE(pci, ice_pci_tbl); 5886 5887 static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); 5888 5889 static const struct pci_error_handlers ice_pci_err_handler = { 5890 .error_detected = ice_pci_err_detected, 5891 .slot_reset = ice_pci_err_slot_reset, 5892 .reset_prepare = ice_pci_err_reset_prepare, 5893 .reset_done = ice_pci_err_reset_done, 5894 .resume = ice_pci_err_resume 5895 }; 5896 5897 static struct pci_driver ice_driver = { 5898 .name = KBUILD_MODNAME, 5899 .id_table = ice_pci_tbl, 5900 .probe = ice_probe, 5901 .remove = ice_remove, 5902 .driver.pm = pm_sleep_ptr(&ice_pm_ops), 5903 .shutdown = ice_shutdown, 5904 .sriov_configure = ice_sriov_configure, 5905 .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix, 5906 .sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count, 5907 .err_handler = &ice_pci_err_handler 5908 }; 5909 5910 /** 5911 * ice_module_init - Driver registration routine 5912 * 5913 * ice_module_init is the first routine called when the driver is 5914 * loaded. All it does is register with the PCI subsystem. 
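 *
 * Resources are acquired in dependency order (workqueues, debugfs, the PCI
 * driver, then the subfunction driver), and the error labels below unwind
 * them largely in reverse.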
5915 */ 5916 static int __init ice_module_init(void) 5917 { 5918 int status = -ENOMEM; 5919 5920 pr_info("%s\n", ice_driver_string); 5921 pr_info("%s\n", ice_copyright); 5922 5923 ice_adv_lnk_speed_maps_init(); 5924 5925 ice_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, KBUILD_MODNAME); 5926 if (!ice_wq) { 5927 pr_err("Failed to create workqueue\n"); 5928 return status; 5929 } 5930 5931 ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0); 5932 if (!ice_lag_wq) { 5933 pr_err("Failed to create LAG workqueue\n"); 5934 goto err_dest_wq; 5935 } 5936 5937 ice_debugfs_init(); 5938 5939 status = pci_register_driver(&ice_driver); 5940 if (status) { 5941 pr_err("failed to register PCI driver, err %d\n", status); 5942 goto err_dest_lag_wq; 5943 } 5944 5945 status = ice_sf_driver_register(); 5946 if (status) { 5947 pr_err("Failed to register SF driver, err %d\n", status); 5948 goto err_sf_driver; 5949 } 5950 5951 return 0; 5952 5953 err_sf_driver: 5954 pci_unregister_driver(&ice_driver); 5955 err_dest_lag_wq: 5956 destroy_workqueue(ice_lag_wq); 5957 ice_debugfs_exit(); 5958 err_dest_wq: 5959 destroy_workqueue(ice_wq); 5960 return status; 5961 } 5962 module_init(ice_module_init); 5963 5964 /** 5965 * ice_module_exit - Driver exit cleanup routine 5966 * 5967 * ice_module_exit is called just before the driver is removed 5968 * from memory. 5969 */ 5970 static void __exit ice_module_exit(void) 5971 { 5972 ice_sf_driver_unregister(); 5973 pci_unregister_driver(&ice_driver); 5974 ice_debugfs_exit(); 5975 destroy_workqueue(ice_wq); 5976 destroy_workqueue(ice_lag_wq); 5977 pr_info("module unloaded\n"); 5978 } 5979 module_exit(ice_module_exit); 5980 5981 /** 5982 * ice_set_mac_address - NDO callback to set MAC address 5983 * @netdev: network interface device structure 5984 * @pi: pointer to an address structure 5985 * 5986 * Returns 0 on success, negative on failure 5987 */ 5988 static int ice_set_mac_address(struct net_device *netdev, void *pi) 5989 { 5990 struct ice_netdev_priv *np = netdev_priv(netdev); 5991 struct ice_vsi *vsi = np->vsi; 5992 struct ice_pf *pf = vsi->back; 5993 struct ice_hw *hw = &pf->hw; 5994 struct sockaddr *addr = pi; 5995 u8 old_mac[ETH_ALEN]; 5996 u8 flags = 0; 5997 u8 *mac; 5998 int err; 5999 6000 mac = (u8 *)addr->sa_data; 6001 6002 if (!is_valid_ether_addr(mac)) 6003 return -EADDRNOTAVAIL; 6004 6005 if (test_bit(ICE_DOWN, pf->state) || 6006 ice_is_reset_in_progress(pf->state)) { 6007 netdev_err(netdev, "can't set mac %pM. device not ready\n", 6008 mac); 6009 return -EBUSY; 6010 } 6011 6012 if (ice_chnl_dmac_fltr_cnt(pf)) { 6013 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n", 6014 mac); 6015 return -EAGAIN; 6016 } 6017 6018 netif_addr_lock_bh(netdev); 6019 ether_addr_copy(old_mac, netdev->dev_addr); 6020 /* change the netdev's MAC address */ 6021 eth_hw_addr_set(netdev, mac); 6022 netif_addr_unlock_bh(netdev); 6023 6024 /* Clean up old MAC filter. Not an error if old filter doesn't exist */ 6025 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); 6026 if (err && err != -ENOENT) { 6027 err = -EADDRNOTAVAIL; 6028 goto err_update_filters; 6029 } 6030 6031 /* Add filter for new MAC. If filter exists, return success */ 6032 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); 6033 if (err == -EEXIST) { 6034 /* Although this MAC filter is already present in hardware it's 6035 * possible in some cases (e.g. bonding) that dev_addr was 6036 * modified outside of the driver and needs to be restored back 6037 * to this value. 
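 * Returning 0 keeps the dev_addr that was just written above, which is the
 * desired end state; only the redundant hardware filter add is skipped.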
6038 */ 6039 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); 6040 6041 return 0; 6042 } else if (err) { 6043 /* error if the new filter addition failed */ 6044 err = -EADDRNOTAVAIL; 6045 } 6046 6047 err_update_filters: 6048 if (err) { 6049 netdev_err(netdev, "can't set MAC %pM. filter update failed\n", 6050 mac); 6051 netif_addr_lock_bh(netdev); 6052 eth_hw_addr_set(netdev, old_mac); 6053 netif_addr_unlock_bh(netdev); 6054 return err; 6055 } 6056 6057 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", 6058 netdev->dev_addr); 6059 6060 /* write new MAC address to the firmware */ 6061 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; 6062 err = ice_aq_manage_mac_write(hw, mac, flags, NULL); 6063 if (err) { 6064 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n", 6065 mac, err); 6066 } 6067 return 0; 6068 } 6069 6070 /** 6071 * ice_set_rx_mode - NDO callback to set the netdev filters 6072 * @netdev: network interface device structure 6073 */ 6074 static void ice_set_rx_mode(struct net_device *netdev) 6075 { 6076 struct ice_netdev_priv *np = netdev_priv(netdev); 6077 struct ice_vsi *vsi = np->vsi; 6078 6079 if (!vsi || ice_is_switchdev_running(vsi->back)) 6080 return; 6081 6082 /* Set the flags to synchronize filters 6083 * ndo_set_rx_mode may be triggered even without a change in netdev 6084 * flags 6085 */ 6086 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); 6087 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); 6088 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); 6089 6090 /* schedule our worker thread which will take care of 6091 * applying the new filter changes 6092 */ 6093 ice_service_task_schedule(vsi->back); 6094 } 6095 6096 /** 6097 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate 6098 * @netdev: network interface device structure 6099 * @queue_index: Queue ID 6100 * @maxrate: maximum bandwidth in Mbps 6101 */ 6102 static int 6103 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) 6104 { 6105 struct ice_netdev_priv *np = netdev_priv(netdev); 6106 struct ice_vsi *vsi = np->vsi; 6107 u16 q_handle; 6108 int status; 6109 u8 tc; 6110 6111 /* Validate maxrate requested is within permitted range */ 6112 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) { 6113 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n", 6114 maxrate, queue_index); 6115 return -EINVAL; 6116 } 6117 6118 q_handle = vsi->tx_rings[queue_index]->q_handle; 6119 tc = ice_dcb_get_tc(vsi, queue_index); 6120 6121 vsi = ice_locate_vsi_using_queue(vsi, queue_index); 6122 if (!vsi) { 6123 netdev_err(netdev, "Invalid VSI for given queue %d\n", 6124 queue_index); 6125 return -EINVAL; 6126 } 6127 6128 /* Set BW back to default, when user set maxrate to 0 */ 6129 if (!maxrate) 6130 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, 6131 q_handle, ICE_MAX_BW); 6132 else 6133 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, 6134 q_handle, ICE_MAX_BW, maxrate * 1000); 6135 if (status) 6136 netdev_err(netdev, "Unable to set Tx max rate, error %d\n", 6137 status); 6138 6139 return status; 6140 } 6141 6142 /** 6143 * ice_fdb_add - add an entry to the hardware database 6144 * @ndm: the input from the stack 6145 * @tb: pointer to array of nladdr (unused) 6146 * @dev: the net device pointer 6147 * @addr: the MAC address entry being added 6148 * @vid: VLAN ID 6149 * @flags: instructions from stack about fdb operation 6150 * @notified: whether notification was emitted 6151 * @extack: netlink extended ack 6152 */ 6153 static 
int 6154 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], 6155 struct net_device *dev, const unsigned char *addr, u16 vid, 6156 u16 flags, bool *notified, 6157 struct netlink_ext_ack __always_unused *extack) 6158 { 6159 int err; 6160 6161 if (vid) { 6162 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); 6163 return -EINVAL; 6164 } 6165 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { 6166 netdev_err(dev, "FDB only supports static addresses\n"); 6167 return -EINVAL; 6168 } 6169 6170 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) 6171 err = dev_uc_add_excl(dev, addr); 6172 else if (is_multicast_ether_addr(addr)) 6173 err = dev_mc_add_excl(dev, addr); 6174 else 6175 err = -EINVAL; 6176 6177 /* Only return duplicate errors if NLM_F_EXCL is set */ 6178 if (err == -EEXIST && !(flags & NLM_F_EXCL)) 6179 err = 0; 6180 6181 return err; 6182 } 6183 6184 /** 6185 * ice_fdb_del - delete an entry from the hardware database 6186 * @ndm: the input from the stack 6187 * @tb: pointer to array of nladdr (unused) 6188 * @dev: the net device pointer 6189 * @addr: the MAC address entry being deleted 6190 * @vid: VLAN ID 6191 * @notified: whether notification was emitted 6192 * @extack: netlink extended ack 6193 */ 6194 static int 6195 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], 6196 struct net_device *dev, const unsigned char *addr, 6197 __always_unused u16 vid, bool *notified, 6198 struct netlink_ext_ack *extack) 6199 { 6200 int err; 6201 6202 if (ndm->ndm_state & NUD_PERMANENT) { 6203 netdev_err(dev, "FDB only supports static addresses\n"); 6204 return -EINVAL; 6205 } 6206 6207 if (is_unicast_ether_addr(addr)) 6208 err = dev_uc_del(dev, addr); 6209 else if (is_multicast_ether_addr(addr)) 6210 err = dev_mc_del(dev, addr); 6211 else 6212 err = -EINVAL; 6213 6214 return err; 6215 } 6216 6217 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ 6218 NETIF_F_HW_VLAN_CTAG_TX | \ 6219 NETIF_F_HW_VLAN_STAG_RX | \ 6220 NETIF_F_HW_VLAN_STAG_TX) 6221 6222 #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ 6223 NETIF_F_HW_VLAN_STAG_RX) 6224 6225 #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ 6226 NETIF_F_HW_VLAN_STAG_FILTER) 6227 6228 /** 6229 * ice_fix_features - fix the netdev features flags based on device limitations 6230 * @netdev: ptr to the netdev that flags are being fixed on 6231 * @features: features that need to be checked and possibly fixed 6232 * 6233 * Make sure any fixups are made to features in this callback. This enables the 6234 * driver to not have to check unsupported configurations throughout the driver 6235 * because that's the responsibility of this callback. 6236 * 6237 * Single VLAN Mode (SVM) Supported Features: 6238 * NETIF_F_HW_VLAN_CTAG_FILTER 6239 * NETIF_F_HW_VLAN_CTAG_RX 6240 * NETIF_F_HW_VLAN_CTAG_TX 6241 * 6242 * Double VLAN Mode (DVM) Supported Features: 6243 * NETIF_F_HW_VLAN_CTAG_FILTER 6244 * NETIF_F_HW_VLAN_CTAG_RX 6245 * NETIF_F_HW_VLAN_CTAG_TX 6246 * 6247 * NETIF_F_HW_VLAN_STAG_FILTER 6248 * NETIF_F_HW_VLAN_STAG_RX 6249 * NETIF_F_HW_VLAN_STAG_TX 6250 * 6251 * Features that need fixing: 6252 * Cannot simultaneously enable CTAG and STAG stripping and/or insertion. 6253 * These are mutually exclusive as the VSI context cannot support multiple 6254 * VLAN ethertypes simultaneously for stripping and/or insertion. If both 6255 * are requested, then default to clearing the requested STAG offload 6256 * settings.
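 * For example, a request that leaves both NETIF_F_HW_VLAN_CTAG_RX and
 * NETIF_F_HW_VLAN_STAG_RX set is fixed up below by clearing the STAG bits
 * and logging a warning.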
6257 * 6258 * All supported filtering has to be enabled or disabled together. For 6259 * example, in DVM, CTAG and STAG filtering have to be enabled and disabled 6260 * together. If this is not done, then default to VLAN filtering disabled. 6261 * These are mutually exclusive as there is currently no way to 6262 * enable/disable VLAN filtering based on VLAN ethertype when using VLAN 6263 * prune rules. 6264 */ 6265 static netdev_features_t 6266 ice_fix_features(struct net_device *netdev, netdev_features_t features) 6267 { 6268 struct ice_netdev_priv *np = netdev_priv(netdev); 6269 netdev_features_t req_vlan_fltr, cur_vlan_fltr; 6270 bool cur_ctag, cur_stag, req_ctag, req_stag; 6271 6272 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES; 6273 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; 6274 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; 6275 6276 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES; 6277 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; 6278 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; 6279 6280 if (req_vlan_fltr != cur_vlan_fltr) { 6281 if (ice_is_dvm_ena(&np->vsi->back->hw)) { 6282 if (req_ctag && req_stag) { 6283 features |= NETIF_VLAN_FILTERING_FEATURES; 6284 } else if (!req_ctag && !req_stag) { 6285 features &= ~NETIF_VLAN_FILTERING_FEATURES; 6286 } else if ((!cur_ctag && req_ctag && !cur_stag) || 6287 (!cur_stag && req_stag && !cur_ctag)) { 6288 features |= NETIF_VLAN_FILTERING_FEATURES; 6289 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n"); 6290 } else if ((cur_ctag && !req_ctag && cur_stag) || 6291 (cur_stag && !req_stag && cur_ctag)) { 6292 features &= ~NETIF_VLAN_FILTERING_FEATURES; 6293 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n"); 6294 } 6295 } else { 6296 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER) 6297 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n"); 6298 6299 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER) 6300 features |= NETIF_F_HW_VLAN_CTAG_FILTER; 6301 } 6302 } 6303 6304 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) && 6305 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) { 6306 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n"); 6307 features &= ~(NETIF_F_HW_VLAN_STAG_RX | 6308 NETIF_F_HW_VLAN_STAG_TX); 6309 } 6310 6311 if (!(netdev->features & NETIF_F_RXFCS) && 6312 (features & NETIF_F_RXFCS) && 6313 (features & NETIF_VLAN_STRIPPING_FEATURES) && 6314 !ice_vsi_has_non_zero_vlans(np->vsi)) { 6315 netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n"); 6316 features &= ~NETIF_VLAN_STRIPPING_FEATURES; 6317 } 6318 6319 return features; 6320 } 6321 6322 /** 6323 * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto 6324 * @vsi: PF's VSI 6325 * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order 6326 * 6327 * Store current stripped VLAN proto in ring packet context, 6328 * so it can be accessed more efficiently by packet processing code. 
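 *
 * For instance, when CTAG stripping is enabled the callers below pass
 * htons(ETH_P_8021Q), so each Rx ring sees that value in
 * pkt_ctx.vlan_proto; when stripping is disabled they pass 0.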
6329 */ 6330 static void 6331 ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype) 6332 { 6333 u16 i; 6334 6335 ice_for_each_alloc_rxq(vsi, i) 6336 vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype; 6337 } 6338 6339 /** 6340 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI 6341 * @vsi: PF's VSI 6342 * @features: features used to determine VLAN offload settings 6343 * 6344 * First, determine the vlan_ethertype based on the VLAN offload bits in 6345 * features. Then determine if stripping and insertion should be enabled or 6346 * disabled. Finally enable or disable VLAN stripping and insertion. 6347 */ 6348 static int 6349 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features) 6350 { 6351 bool enable_stripping = true, enable_insertion = true; 6352 struct ice_vsi_vlan_ops *vlan_ops; 6353 int strip_err = 0, insert_err = 0; 6354 u16 vlan_ethertype = 0; 6355 6356 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 6357 6358 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) 6359 vlan_ethertype = ETH_P_8021AD; 6360 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) 6361 vlan_ethertype = ETH_P_8021Q; 6362 6363 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX))) 6364 enable_stripping = false; 6365 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX))) 6366 enable_insertion = false; 6367 6368 if (enable_stripping) 6369 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype); 6370 else 6371 strip_err = vlan_ops->dis_stripping(vsi); 6372 6373 if (enable_insertion) 6374 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype); 6375 else 6376 insert_err = vlan_ops->dis_insertion(vsi); 6377 6378 if (strip_err || insert_err) 6379 return -EIO; 6380 6381 ice_set_rx_rings_vlan_proto(vsi, enable_stripping ? 6382 htons(vlan_ethertype) : 0); 6383 6384 return 0; 6385 } 6386 6387 /** 6388 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI 6389 * @vsi: PF's VSI 6390 * @features: features used to determine VLAN filtering settings 6391 * 6392 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the 6393 * features. 6394 */ 6395 static int 6396 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features) 6397 { 6398 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 6399 int err = 0; 6400 6401 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking 6402 * if either bit is set. In switchdev mode Rx filtering should never be 6403 * enabled. 6404 */ 6405 if ((features & 6406 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) && 6407 !ice_is_eswitch_mode_switchdev(vsi->back)) 6408 err = vlan_ops->ena_rx_filtering(vsi); 6409 else 6410 err = vlan_ops->dis_rx_filtering(vsi); 6411 6412 return err; 6413 } 6414 6415 /** 6416 * ice_set_vlan_features - set VLAN settings based on suggested feature set 6417 * @netdev: ptr to the netdev being adjusted 6418 * @features: the feature set that the stack is suggesting 6419 * 6420 * Only update VLAN settings if the requested_vlan_features are different than 6421 * the current_vlan_features. 
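 *
 * A minimal sketch of the delta check used below (illustrative only):
 *
 *	cur = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
 *	req = features & NETIF_VLAN_OFFLOAD_FEATURES;
 *	if (cur ^ req)
 *		reconfigure stripping/insertion;
 *
 * The same pattern is then repeated for NETIF_VLAN_FILTERING_FEATURES.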
6422 */ 6423 static int 6424 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features) 6425 { 6426 netdev_features_t current_vlan_features, requested_vlan_features; 6427 struct ice_netdev_priv *np = netdev_priv(netdev); 6428 struct ice_vsi *vsi = np->vsi; 6429 int err; 6430 6431 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES; 6432 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES; 6433 if (current_vlan_features ^ requested_vlan_features) { 6434 if ((features & NETIF_F_RXFCS) && 6435 (features & NETIF_VLAN_STRIPPING_FEATURES)) { 6436 dev_err(ice_pf_to_dev(vsi->back), 6437 "To enable VLAN stripping, you must first enable FCS/CRC stripping\n"); 6438 return -EIO; 6439 } 6440 6441 err = ice_set_vlan_offload_features(vsi, features); 6442 if (err) 6443 return err; 6444 } 6445 6446 current_vlan_features = netdev->features & 6447 NETIF_VLAN_FILTERING_FEATURES; 6448 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES; 6449 if (current_vlan_features ^ requested_vlan_features) { 6450 err = ice_set_vlan_filtering_features(vsi, features); 6451 if (err) 6452 return err; 6453 } 6454 6455 return 0; 6456 } 6457 6458 /** 6459 * ice_set_loopback - turn on/off loopback mode on underlying PF 6460 * @vsi: ptr to VSI 6461 * @ena: flag to indicate the on/off setting 6462 */ 6463 static int ice_set_loopback(struct ice_vsi *vsi, bool ena) 6464 { 6465 bool if_running = netif_running(vsi->netdev); 6466 int ret; 6467 6468 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { 6469 ret = ice_down(vsi); 6470 if (ret) { 6471 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n"); 6472 return ret; 6473 } 6474 } 6475 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); 6476 if (ret) 6477 netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); 6478 if (if_running) 6479 ret = ice_up(vsi); 6480 6481 return ret; 6482 } 6483 6484 /** 6485 * ice_set_features - set the netdev feature flags 6486 * @netdev: ptr to the netdev being adjusted 6487 * @features: the feature set that the stack is suggesting 6488 */ 6489 static int 6490 ice_set_features(struct net_device *netdev, netdev_features_t features) 6491 { 6492 netdev_features_t changed = netdev->features ^ features; 6493 struct ice_netdev_priv *np = netdev_priv(netdev); 6494 struct ice_vsi *vsi = np->vsi; 6495 struct ice_pf *pf = vsi->back; 6496 int ret = 0; 6497 6498 /* Don't set any netdev advanced features with device in Safe Mode */ 6499 if (ice_is_safe_mode(pf)) { 6500 dev_err(ice_pf_to_dev(pf), 6501 "Device is in Safe Mode - not enabling advanced netdev features\n"); 6502 return ret; 6503 } 6504 6505 /* Do not change setting during reset */ 6506 if (ice_is_reset_in_progress(pf->state)) { 6507 dev_err(ice_pf_to_dev(pf), 6508 "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); 6509 return -EBUSY; 6510 } 6511 6512 /* Multiple features can be changed in one call so keep features in 6513 * separate if/else statements to guarantee each feature is checked 6514 */ 6515 if (changed & NETIF_F_RXHASH) 6516 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH)); 6517 6518 ret = ice_set_vlan_features(netdev, features); 6519 if (ret) 6520 return ret; 6521 6522 /* Turn on receive of FCS aka CRC, and after setting this 6523 * flag the packet data will have the 4 byte CRC appended 6524 */ 6525 if (changed & NETIF_F_RXFCS) { 6526 if ((features & NETIF_F_RXFCS) && 6527 (features & NETIF_VLAN_STRIPPING_FEATURES)) { 6528 
dev_err(ice_pf_to_dev(vsi->back), 6529 "To disable FCS/CRC stripping, you must first disable VLAN stripping\n"); 6530 return -EIO; 6531 } 6532 6533 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS)); 6534 ret = ice_down_up(vsi); 6535 if (ret) 6536 return ret; 6537 } 6538 6539 if (changed & NETIF_F_NTUPLE) { 6540 bool ena = !!(features & NETIF_F_NTUPLE); 6541 6542 ice_vsi_manage_fdir(vsi, ena); 6543 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi); 6544 } 6545 6546 /* don't turn off hw_tc_offload when ADQ is already enabled */ 6547 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) { 6548 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n"); 6549 return -EACCES; 6550 } 6551 6552 if (changed & NETIF_F_HW_TC) { 6553 bool ena = !!(features & NETIF_F_HW_TC); 6554 6555 assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena); 6556 } 6557 6558 if (changed & NETIF_F_LOOPBACK) 6559 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK)); 6560 6561 /* Due to E830 hardware limitations, TSO (NETIF_F_ALL_TSO) with GCS 6562 * (NETIF_F_HW_CSUM) is not supported. 6563 */ 6564 if (ice_is_feature_supported(pf, ICE_F_GCS) && 6565 ((features & NETIF_F_HW_CSUM) && (features & NETIF_F_ALL_TSO))) { 6566 if (netdev->features & NETIF_F_HW_CSUM) 6567 dev_err(ice_pf_to_dev(pf), "To enable TSO, you must first disable HW checksum.\n"); 6568 else 6569 dev_err(ice_pf_to_dev(pf), "To enable HW checksum, you must first disable TSO.\n"); 6570 return -EIO; 6571 } 6572 6573 return ret; 6574 } 6575 6576 /** 6577 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI 6578 * @vsi: VSI to setup VLAN properties for 6579 */ 6580 static int ice_vsi_vlan_setup(struct ice_vsi *vsi) 6581 { 6582 int err; 6583 6584 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features); 6585 if (err) 6586 return err; 6587 6588 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features); 6589 if (err) 6590 return err; 6591 6592 return ice_vsi_add_vlan_zero(vsi); 6593 } 6594 6595 /** 6596 * ice_vsi_cfg_lan - Setup the VSI lan related config 6597 * @vsi: the VSI being configured 6598 * 6599 * Return 0 on success and negative value on error 6600 */ 6601 int ice_vsi_cfg_lan(struct ice_vsi *vsi) 6602 { 6603 int err; 6604 6605 if (vsi->netdev && vsi->type == ICE_VSI_PF) { 6606 ice_set_rx_mode(vsi->netdev); 6607 6608 err = ice_vsi_vlan_setup(vsi); 6609 if (err) 6610 return err; 6611 } 6612 ice_vsi_cfg_dcb_rings(vsi); 6613 6614 err = ice_vsi_cfg_lan_txqs(vsi); 6615 if (!err && ice_is_xdp_ena_vsi(vsi)) 6616 err = ice_vsi_cfg_xdp_txqs(vsi); 6617 if (!err) 6618 err = ice_vsi_cfg_rxqs(vsi); 6619 6620 return err; 6621 } 6622 6623 /* THEORY OF MODERATION: 6624 * The ice driver hardware works differently than the hardware that DIMLIB was 6625 * originally made for. ice hardware doesn't have packet count limits that 6626 * can trigger an interrupt, but it *does* have interrupt rate limit support, 6627 * which is hard-coded to a limit of 250,000 ints/second. 6628 * If not using dynamic moderation, the INTRL value can be modified 6629 * by ethtool rx-usecs-high. 6630 */ 6631 struct ice_dim { 6632 /* the throttle rate for interrupts, basically worst case delay before 6633 * an initial interrupt fires, value is stored in microseconds. 6634 */ 6635 u16 itr; 6636 }; 6637 6638 /* Make a different profile for Rx that doesn't allow quite so aggressive 6639 * moderation at the high end (it maxes out at 126us or about 8k interrupts a 6640 * second. 
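 * As a rule of thumb each entry translates to roughly 1,000,000 / itr_us
 * interrupts per second: {2} -> ~500,000 (then capped by INTRL) and
 * {126} -> ~7,936.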
6641 */ 6642 static const struct ice_dim rx_profile[] = { 6643 {2}, /* 500,000 ints/s, capped at 250K by INTRL */ 6644 {8}, /* 125,000 ints/s */ 6645 {16}, /* 62,500 ints/s */ 6646 {62}, /* 16,129 ints/s */ 6647 {126} /* 7,936 ints/s */ 6648 }; 6649 6650 /* The transmit profile, which has the same sorts of values 6651 * as the previous struct 6652 */ 6653 static const struct ice_dim tx_profile[] = { 6654 {2}, /* 500,000 ints/s, capped at 250K by INTRL */ 6655 {8}, /* 125,000 ints/s */ 6656 {40}, /* 16,125 ints/s */ 6657 {128}, /* 7,812 ints/s */ 6658 {256} /* 3,906 ints/s */ 6659 }; 6660 6661 static void ice_tx_dim_work(struct work_struct *work) 6662 { 6663 struct ice_ring_container *rc; 6664 struct dim *dim; 6665 u16 itr; 6666 6667 dim = container_of(work, struct dim, work); 6668 rc = dim->priv; 6669 6670 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile)); 6671 6672 /* look up the values in our local table */ 6673 itr = tx_profile[dim->profile_ix].itr; 6674 6675 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim); 6676 ice_write_itr(rc, itr); 6677 6678 dim->state = DIM_START_MEASURE; 6679 } 6680 6681 static void ice_rx_dim_work(struct work_struct *work) 6682 { 6683 struct ice_ring_container *rc; 6684 struct dim *dim; 6685 u16 itr; 6686 6687 dim = container_of(work, struct dim, work); 6688 rc = dim->priv; 6689 6690 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile)); 6691 6692 /* look up the values in our local table */ 6693 itr = rx_profile[dim->profile_ix].itr; 6694 6695 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim); 6696 ice_write_itr(rc, itr); 6697 6698 dim->state = DIM_START_MEASURE; 6699 } 6700 6701 #define ICE_DIM_DEFAULT_PROFILE_IX 1 6702 6703 /** 6704 * ice_init_moderation - set up interrupt moderation 6705 * @q_vector: the vector containing rings to be configured 6706 * 6707 * Set up interrupt moderation registers, with the intent to do the right thing 6708 * when called from reset or from probe, and whether or not dynamic moderation 6709 * is enabled or not. Take special care to write all the registers in both 6710 * dynamic moderation mode or not in order to make sure hardware is in a known 6711 * state. 6712 */ 6713 static void ice_init_moderation(struct ice_q_vector *q_vector) 6714 { 6715 struct ice_ring_container *rc; 6716 bool tx_dynamic, rx_dynamic; 6717 6718 rc = &q_vector->tx; 6719 INIT_WORK(&rc->dim.work, ice_tx_dim_work); 6720 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 6721 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; 6722 rc->dim.priv = rc; 6723 tx_dynamic = ITR_IS_DYNAMIC(rc); 6724 6725 /* set the initial TX ITR to match the above */ 6726 ice_write_itr(rc, tx_dynamic ? 6727 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting); 6728 6729 rc = &q_vector->rx; 6730 INIT_WORK(&rc->dim.work, ice_rx_dim_work); 6731 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 6732 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; 6733 rc->dim.priv = rc; 6734 rx_dynamic = ITR_IS_DYNAMIC(rc); 6735 6736 /* set the initial RX ITR to match the above */ 6737 ice_write_itr(rc, rx_dynamic ? 
rx_profile[rc->dim.profile_ix].itr : 6738 rc->itr_setting); 6739 6740 ice_set_q_vector_intrl(q_vector); 6741 } 6742 6743 /** 6744 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI 6745 * @vsi: the VSI being configured 6746 */ 6747 static void ice_napi_enable_all(struct ice_vsi *vsi) 6748 { 6749 int q_idx; 6750 6751 if (!vsi->netdev) 6752 return; 6753 6754 ice_for_each_q_vector(vsi, q_idx) { 6755 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 6756 6757 ice_init_moderation(q_vector); 6758 6759 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) 6760 napi_enable(&q_vector->napi); 6761 } 6762 } 6763 6764 /** 6765 * ice_up_complete - Finish the last steps of bringing up a connection 6766 * @vsi: The VSI being configured 6767 * 6768 * Return 0 on success and negative value on error 6769 */ 6770 static int ice_up_complete(struct ice_vsi *vsi) 6771 { 6772 struct ice_pf *pf = vsi->back; 6773 int err; 6774 6775 ice_vsi_cfg_msix(vsi); 6776 6777 /* Enable only Rx rings; Tx rings were enabled by the FW when the 6778 * Tx queue group list was configured and the context bits were 6779 * programmed using ice_vsi_cfg_txqs 6780 */ 6781 err = ice_vsi_start_all_rx_rings(vsi); 6782 if (err) 6783 return err; 6784 6785 clear_bit(ICE_VSI_DOWN, vsi->state); 6786 ice_napi_enable_all(vsi); 6787 ice_vsi_ena_irq(vsi); 6788 6789 if (vsi->port_info && 6790 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && 6791 ((vsi->netdev && (vsi->type == ICE_VSI_PF || 6792 vsi->type == ICE_VSI_SF)))) { 6793 ice_print_link_msg(vsi, true); 6794 netif_tx_start_all_queues(vsi->netdev); 6795 netif_carrier_on(vsi->netdev); 6796 ice_ptp_link_change(pf, true); 6797 } 6798 6799 /* Perform an initial read of the statistics registers now to 6800 * set the baseline so counters are ready when interface is up 6801 */ 6802 ice_update_eth_stats(vsi); 6803 6804 if (vsi->type == ICE_VSI_PF) 6805 ice_service_task_schedule(pf); 6806 6807 return 0; 6808 } 6809 6810 /** 6811 * ice_up - Bring the connection back up after being down 6812 * @vsi: VSI being configured 6813 */ 6814 int ice_up(struct ice_vsi *vsi) 6815 { 6816 int err; 6817 6818 err = ice_vsi_cfg_lan(vsi); 6819 if (!err) 6820 err = ice_up_complete(vsi); 6821 6822 return err; 6823 } 6824 6825 /** 6826 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring 6827 * @syncp: pointer to u64_stats_sync 6828 * @stats: stats that pkts and bytes count will be taken from 6829 * @pkts: packets stats counter 6830 * @bytes: bytes stats counter 6831 * 6832 * This function fetches stats from the ring considering the atomic operations 6833 * that need to be performed to read u64 values on a 32-bit machine.
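 *
 * A typical caller pattern, matching the ring-stats use below:
 *
 *	u64 pkts, bytes;
 *
 *	ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
 *				     ring->ring_stats->stats, &pkts, &bytes);
 *
 * On 32-bit kernels the begin/retry pair re-reads until no writer raced the
 * read; on 64-bit kernels it is essentially free.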
6834 */ 6835 void 6836 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, 6837 struct ice_q_stats stats, u64 *pkts, u64 *bytes) 6838 { 6839 unsigned int start; 6840 6841 do { 6842 start = u64_stats_fetch_begin(syncp); 6843 *pkts = stats.pkts; 6844 *bytes = stats.bytes; 6845 } while (u64_stats_fetch_retry(syncp, start)); 6846 } 6847 6848 /** 6849 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters 6850 * @vsi: the VSI to be updated 6851 * @vsi_stats: the stats struct to be updated 6852 * @rings: rings to work on 6853 * @count: number of rings 6854 */ 6855 static void 6856 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, 6857 struct rtnl_link_stats64 *vsi_stats, 6858 struct ice_tx_ring **rings, u16 count) 6859 { 6860 u16 i; 6861 6862 for (i = 0; i < count; i++) { 6863 struct ice_tx_ring *ring; 6864 u64 pkts = 0, bytes = 0; 6865 6866 ring = READ_ONCE(rings[i]); 6867 if (!ring || !ring->ring_stats) 6868 continue; 6869 ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp, 6870 ring->ring_stats->stats, &pkts, 6871 &bytes); 6872 vsi_stats->tx_packets += pkts; 6873 vsi_stats->tx_bytes += bytes; 6874 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q; 6875 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy; 6876 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize; 6877 } 6878 } 6879 6880 /** 6881 * ice_update_vsi_ring_stats - Update VSI stats counters 6882 * @vsi: the VSI to be updated 6883 */ 6884 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) 6885 { 6886 struct rtnl_link_stats64 *net_stats, *stats_prev; 6887 struct rtnl_link_stats64 *vsi_stats; 6888 struct ice_pf *pf = vsi->back; 6889 u64 pkts, bytes; 6890 int i; 6891 6892 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC); 6893 if (!vsi_stats) 6894 return; 6895 6896 /* reset non-netdev (extended) stats */ 6897 vsi->tx_restart = 0; 6898 vsi->tx_busy = 0; 6899 vsi->tx_linearize = 0; 6900 vsi->rx_buf_failed = 0; 6901 vsi->rx_page_failed = 0; 6902 6903 rcu_read_lock(); 6904 6905 /* update Tx rings counters */ 6906 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, 6907 vsi->num_txq); 6908 6909 /* update Rx rings counters */ 6910 ice_for_each_rxq(vsi, i) { 6911 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]); 6912 struct ice_ring_stats *ring_stats; 6913 6914 ring_stats = ring->ring_stats; 6915 ice_fetch_u64_stats_per_ring(&ring_stats->syncp, 6916 ring_stats->stats, &pkts, 6917 &bytes); 6918 vsi_stats->rx_packets += pkts; 6919 vsi_stats->rx_bytes += bytes; 6920 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed; 6921 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed; 6922 } 6923 6924 /* update XDP Tx rings counters */ 6925 if (ice_is_xdp_ena_vsi(vsi)) 6926 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, 6927 vsi->num_xdp_txq); 6928 6929 rcu_read_unlock(); 6930 6931 net_stats = &vsi->net_stats; 6932 stats_prev = &vsi->net_stats_prev; 6933 6934 /* Update netdev counters, but keep in mind that values could start at a 6935 * random value after PF reset. And as we increase the reported stat by 6936 * the diff of Cur - Prev, we need to be sure that Prev is valid. If it's 6937 * not, let's skip this round.
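 * Concretely: on the first pass after a reset no delta is added, prev is
 * primed with the current totals below, and the next pass then adds a
 * valid Cur - Prev difference.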
6938 */ 6939 if (likely(pf->stat_prev_loaded)) { 6940 net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets; 6941 net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes; 6942 net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets; 6943 net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes; 6944 } 6945 6946 stats_prev->tx_packets = vsi_stats->tx_packets; 6947 stats_prev->tx_bytes = vsi_stats->tx_bytes; 6948 stats_prev->rx_packets = vsi_stats->rx_packets; 6949 stats_prev->rx_bytes = vsi_stats->rx_bytes; 6950 6951 kfree(vsi_stats); 6952 } 6953 6954 /** 6955 * ice_update_vsi_stats - Update VSI stats counters 6956 * @vsi: the VSI to be updated 6957 */ 6958 void ice_update_vsi_stats(struct ice_vsi *vsi) 6959 { 6960 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; 6961 struct ice_eth_stats *cur_es = &vsi->eth_stats; 6962 struct ice_pf *pf = vsi->back; 6963 6964 if (test_bit(ICE_VSI_DOWN, vsi->state) || 6965 test_bit(ICE_CFG_BUSY, pf->state)) 6966 return; 6967 6968 /* get stats as recorded by Tx/Rx rings */ 6969 ice_update_vsi_ring_stats(vsi); 6970 6971 /* get VSI stats as recorded by the hardware */ 6972 ice_update_eth_stats(vsi); 6973 6974 cur_ns->tx_errors = cur_es->tx_errors; 6975 cur_ns->rx_dropped = cur_es->rx_discards; 6976 cur_ns->tx_dropped = cur_es->tx_discards; 6977 cur_ns->multicast = cur_es->rx_multicast; 6978 6979 /* update some more netdev stats if this is main VSI */ 6980 if (vsi->type == ICE_VSI_PF) { 6981 cur_ns->rx_crc_errors = pf->stats.crc_errors; 6982 cur_ns->rx_errors = pf->stats.crc_errors + 6983 pf->stats.illegal_bytes + 6984 pf->stats.rx_undersize + 6985 pf->stats.rx_jabber + 6986 pf->stats.rx_fragments + 6987 pf->stats.rx_oversize; 6988 /* record drops from the port level */ 6989 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; 6990 } 6991 } 6992 6993 /** 6994 * ice_update_pf_stats - Update PF port stats counters 6995 * @pf: PF whose stats needs to be updated 6996 */ 6997 void ice_update_pf_stats(struct ice_pf *pf) 6998 { 6999 struct ice_hw_port_stats *prev_ps, *cur_ps; 7000 struct ice_hw *hw = &pf->hw; 7001 u16 fd_ctr_base; 7002 u8 port; 7003 7004 port = hw->port_info->lport; 7005 prev_ps = &pf->stats_prev; 7006 cur_ps = &pf->stats; 7007 7008 if (ice_is_reset_in_progress(pf->state)) 7009 pf->stat_prev_loaded = false; 7010 7011 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, 7012 &prev_ps->eth.rx_bytes, 7013 &cur_ps->eth.rx_bytes); 7014 7015 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, 7016 &prev_ps->eth.rx_unicast, 7017 &cur_ps->eth.rx_unicast); 7018 7019 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, 7020 &prev_ps->eth.rx_multicast, 7021 &cur_ps->eth.rx_multicast); 7022 7023 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, 7024 &prev_ps->eth.rx_broadcast, 7025 &cur_ps->eth.rx_broadcast); 7026 7027 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, 7028 &prev_ps->eth.rx_discards, 7029 &cur_ps->eth.rx_discards); 7030 7031 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, 7032 &prev_ps->eth.tx_bytes, 7033 &cur_ps->eth.tx_bytes); 7034 7035 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, 7036 &prev_ps->eth.tx_unicast, 7037 &cur_ps->eth.tx_unicast); 7038 7039 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, 7040 &prev_ps->eth.tx_multicast, 7041 &cur_ps->eth.tx_multicast); 7042 7043 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, 7044 &prev_ps->eth.tx_broadcast, 7045 
&cur_ps->eth.tx_broadcast); 7046 7047 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, 7048 &prev_ps->tx_dropped_link_down, 7049 &cur_ps->tx_dropped_link_down); 7050 7051 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, 7052 &prev_ps->rx_size_64, &cur_ps->rx_size_64); 7053 7054 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, 7055 &prev_ps->rx_size_127, &cur_ps->rx_size_127); 7056 7057 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, 7058 &prev_ps->rx_size_255, &cur_ps->rx_size_255); 7059 7060 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, 7061 &prev_ps->rx_size_511, &cur_ps->rx_size_511); 7062 7063 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, 7064 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); 7065 7066 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, 7067 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); 7068 7069 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, 7070 &prev_ps->rx_size_big, &cur_ps->rx_size_big); 7071 7072 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, 7073 &prev_ps->tx_size_64, &cur_ps->tx_size_64); 7074 7075 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, 7076 &prev_ps->tx_size_127, &cur_ps->tx_size_127); 7077 7078 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, 7079 &prev_ps->tx_size_255, &cur_ps->tx_size_255); 7080 7081 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, 7082 &prev_ps->tx_size_511, &cur_ps->tx_size_511); 7083 7084 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, 7085 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); 7086 7087 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, 7088 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); 7089 7090 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, 7091 &prev_ps->tx_size_big, &cur_ps->tx_size_big); 7092 7093 fd_ctr_base = hw->fd_ctr_base; 7094 7095 ice_stat_update40(hw, 7096 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), 7097 pf->stat_prev_loaded, &prev_ps->fd_sb_match, 7098 &cur_ps->fd_sb_match); 7099 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, 7100 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); 7101 7102 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, 7103 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); 7104 7105 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, 7106 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); 7107 7108 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, 7109 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); 7110 7111 ice_update_dcb_stats(pf); 7112 7113 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, 7114 &prev_ps->crc_errors, &cur_ps->crc_errors); 7115 7116 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, 7117 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); 7118 7119 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, 7120 &prev_ps->mac_local_faults, 7121 &cur_ps->mac_local_faults); 7122 7123 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, 7124 &prev_ps->mac_remote_faults, 7125 &cur_ps->mac_remote_faults); 7126 7127 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, 7128 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); 7129 7130 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, 7131 &prev_ps->rx_undersize, &cur_ps->rx_undersize); 7132 7133 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, 7134 
&prev_ps->rx_fragments, &cur_ps->rx_fragments); 7135 7136 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, 7137 &prev_ps->rx_oversize, &cur_ps->rx_oversize); 7138 7139 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, 7140 &prev_ps->rx_jabber, &cur_ps->rx_jabber); 7141 7142 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; 7143 7144 pf->stat_prev_loaded = true; 7145 } 7146 7147 /** 7148 * ice_get_stats64 - get statistics for network device structure 7149 * @netdev: network interface device structure 7150 * @stats: main device statistics structure 7151 */ 7152 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) 7153 { 7154 struct ice_netdev_priv *np = netdev_priv(netdev); 7155 struct rtnl_link_stats64 *vsi_stats; 7156 struct ice_vsi *vsi = np->vsi; 7157 7158 vsi_stats = &vsi->net_stats; 7159 7160 if (!vsi->num_txq || !vsi->num_rxq) 7161 return; 7162 7163 /* netdev packet/byte stats come from ring counter. These are obtained 7164 * by summing up ring counters (done by ice_update_vsi_ring_stats). 7165 * But, only call the update routine and read the registers if VSI is 7166 * not down. 7167 */ 7168 if (!test_bit(ICE_VSI_DOWN, vsi->state)) 7169 ice_update_vsi_ring_stats(vsi); 7170 stats->tx_packets = vsi_stats->tx_packets; 7171 stats->tx_bytes = vsi_stats->tx_bytes; 7172 stats->rx_packets = vsi_stats->rx_packets; 7173 stats->rx_bytes = vsi_stats->rx_bytes; 7174 7175 /* The rest of the stats can be read from the hardware but instead we 7176 * just return values that the watchdog task has already obtained from 7177 * the hardware. 7178 */ 7179 stats->multicast = vsi_stats->multicast; 7180 stats->tx_errors = vsi_stats->tx_errors; 7181 stats->tx_dropped = vsi_stats->tx_dropped; 7182 stats->rx_errors = vsi_stats->rx_errors; 7183 stats->rx_dropped = vsi_stats->rx_dropped; 7184 stats->rx_crc_errors = vsi_stats->rx_crc_errors; 7185 stats->rx_length_errors = vsi_stats->rx_length_errors; 7186 } 7187 7188 /** 7189 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI 7190 * @vsi: VSI having NAPI disabled 7191 */ 7192 static void ice_napi_disable_all(struct ice_vsi *vsi) 7193 { 7194 int q_idx; 7195 7196 if (!vsi->netdev) 7197 return; 7198 7199 ice_for_each_q_vector(vsi, q_idx) { 7200 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; 7201 7202 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) 7203 napi_disable(&q_vector->napi); 7204 7205 cancel_work_sync(&q_vector->tx.dim.work); 7206 cancel_work_sync(&q_vector->rx.dim.work); 7207 } 7208 } 7209 7210 /** 7211 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI 7212 * @vsi: the VSI being un-configured 7213 */ 7214 static void ice_vsi_dis_irq(struct ice_vsi *vsi) 7215 { 7216 struct ice_pf *pf = vsi->back; 7217 struct ice_hw *hw = &pf->hw; 7218 u32 val; 7219 int i; 7220 7221 /* disable interrupt causation from each Rx queue; Tx queues are 7222 * handled in ice_vsi_stop_tx_ring() 7223 */ 7224 if (vsi->rx_rings) { 7225 ice_for_each_rxq(vsi, i) { 7226 if (vsi->rx_rings[i]) { 7227 u16 reg; 7228 7229 reg = vsi->rx_rings[i]->reg_idx; 7230 val = rd32(hw, QINT_RQCTL(reg)); 7231 val &= ~QINT_RQCTL_CAUSE_ENA_M; 7232 wr32(hw, QINT_RQCTL(reg), val); 7233 } 7234 } 7235 } 7236 7237 /* disable each interrupt */ 7238 ice_for_each_q_vector(vsi, i) { 7239 if (!vsi->q_vectors[i]) 7240 continue; 7241 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); 7242 } 7243 7244 ice_flush(hw); 7245 7246 /* don't call synchronize_irq() for VF's from the host */ 7247 if (vsi->type == 
ICE_VSI_VF) 7248 return; 7249 7250 ice_for_each_q_vector(vsi, i) 7251 synchronize_irq(vsi->q_vectors[i]->irq.virq); 7252 } 7253 7254 /** 7255 * ice_down - Shutdown the connection 7256 * @vsi: The VSI being stopped 7257 * 7258 * Caller of this function is expected to set the vsi->state ICE_DOWN bit 7259 */ 7260 int ice_down(struct ice_vsi *vsi) 7261 { 7262 int i, tx_err, rx_err, vlan_err = 0; 7263 7264 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); 7265 7266 if (vsi->netdev) { 7267 vlan_err = ice_vsi_del_vlan_zero(vsi); 7268 ice_ptp_link_change(vsi->back, false); 7269 netif_carrier_off(vsi->netdev); 7270 netif_tx_disable(vsi->netdev); 7271 } 7272 7273 ice_vsi_dis_irq(vsi); 7274 7275 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); 7276 if (tx_err) 7277 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", 7278 vsi->vsi_num, tx_err); 7279 if (!tx_err && vsi->xdp_rings) { 7280 tx_err = ice_vsi_stop_xdp_tx_rings(vsi); 7281 if (tx_err) 7282 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", 7283 vsi->vsi_num, tx_err); 7284 } 7285 7286 rx_err = ice_vsi_stop_all_rx_rings(vsi); 7287 if (rx_err) 7288 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n", 7289 vsi->vsi_num, rx_err); 7290 7291 ice_napi_disable_all(vsi); 7292 7293 ice_for_each_txq(vsi, i) 7294 ice_clean_tx_ring(vsi->tx_rings[i]); 7295 7296 if (vsi->xdp_rings) 7297 ice_for_each_xdp_txq(vsi, i) 7298 ice_clean_tx_ring(vsi->xdp_rings[i]); 7299 7300 ice_for_each_rxq(vsi, i) 7301 ice_clean_rx_ring(vsi->rx_rings[i]); 7302 7303 if (tx_err || rx_err || vlan_err) { 7304 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", 7305 vsi->vsi_num, vsi->vsw->sw_id); 7306 return -EIO; 7307 } 7308 7309 return 0; 7310 } 7311 7312 /** 7313 * ice_down_up - shutdown the VSI connection and bring it up 7314 * @vsi: the VSI to be reconnected 7315 */ 7316 int ice_down_up(struct ice_vsi *vsi) 7317 { 7318 int ret; 7319 7320 /* if DOWN already set, nothing to do */ 7321 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state)) 7322 return 0; 7323 7324 ret = ice_down(vsi); 7325 if (ret) 7326 return ret; 7327 7328 ret = ice_up(vsi); 7329 if (ret) { 7330 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n"); 7331 return ret; 7332 } 7333 7334 return 0; 7335 } 7336 7337 /** 7338 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources 7339 * @vsi: VSI having resources allocated 7340 * 7341 * Return 0 on success, negative on failure 7342 */ 7343 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) 7344 { 7345 int i, err = 0; 7346 7347 if (!vsi->num_txq) { 7348 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", 7349 vsi->vsi_num); 7350 return -EINVAL; 7351 } 7352 7353 ice_for_each_txq(vsi, i) { 7354 struct ice_tx_ring *ring = vsi->tx_rings[i]; 7355 7356 if (!ring) 7357 return -EINVAL; 7358 7359 if (vsi->netdev) 7360 ring->netdev = vsi->netdev; 7361 err = ice_setup_tx_ring(ring); 7362 if (err) 7363 break; 7364 } 7365 7366 return err; 7367 } 7368 7369 /** 7370 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources 7371 * @vsi: VSI having resources allocated 7372 * 7373 * Return 0 on success, negative on failure 7374 */ 7375 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) 7376 { 7377 int i, err = 0; 7378 7379 if (!vsi->num_rxq) { 7380 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", 7381 vsi->vsi_num); 7382 return -EINVAL; 7383 } 7384 7385 ice_for_each_rxq(vsi, i) { 7386 struct ice_rx_ring *ring = vsi->rx_rings[i]; 7387 7388 if 
(!ring) 7389 return -EINVAL; 7390 7391 if (vsi->netdev) 7392 ring->netdev = vsi->netdev; 7393 err = ice_setup_rx_ring(ring); 7394 if (err) 7395 break; 7396 } 7397 7398 return err; 7399 } 7400 7401 /** 7402 * ice_vsi_open_ctrl - open control VSI for use 7403 * @vsi: the VSI to open 7404 * 7405 * Initialization of the Control VSI 7406 * 7407 * Returns 0 on success, negative value on error 7408 */ 7409 int ice_vsi_open_ctrl(struct ice_vsi *vsi) 7410 { 7411 char int_name[ICE_INT_NAME_STR_LEN]; 7412 struct ice_pf *pf = vsi->back; 7413 struct device *dev; 7414 int err; 7415 7416 dev = ice_pf_to_dev(pf); 7417 /* allocate descriptors */ 7418 err = ice_vsi_setup_tx_rings(vsi); 7419 if (err) 7420 goto err_setup_tx; 7421 7422 err = ice_vsi_setup_rx_rings(vsi); 7423 if (err) 7424 goto err_setup_rx; 7425 7426 err = ice_vsi_cfg_lan(vsi); 7427 if (err) 7428 goto err_setup_rx; 7429 7430 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", 7431 dev_driver_string(dev), dev_name(dev)); 7432 err = ice_vsi_req_irq_msix(vsi, int_name); 7433 if (err) 7434 goto err_setup_rx; 7435 7436 ice_vsi_cfg_msix(vsi); 7437 7438 err = ice_vsi_start_all_rx_rings(vsi); 7439 if (err) 7440 goto err_up_complete; 7441 7442 clear_bit(ICE_VSI_DOWN, vsi->state); 7443 ice_vsi_ena_irq(vsi); 7444 7445 return 0; 7446 7447 err_up_complete: 7448 ice_down(vsi); 7449 err_setup_rx: 7450 ice_vsi_free_rx_rings(vsi); 7451 err_setup_tx: 7452 ice_vsi_free_tx_rings(vsi); 7453 7454 return err; 7455 } 7456 7457 /** 7458 * ice_vsi_open - Called when a network interface is made active 7459 * @vsi: the VSI to open 7460 * 7461 * Initialization of the VSI 7462 * 7463 * Returns 0 on success, negative value on error 7464 */ 7465 int ice_vsi_open(struct ice_vsi *vsi) 7466 { 7467 char int_name[ICE_INT_NAME_STR_LEN]; 7468 struct ice_pf *pf = vsi->back; 7469 int err; 7470 7471 /* allocate descriptors */ 7472 err = ice_vsi_setup_tx_rings(vsi); 7473 if (err) 7474 goto err_setup_tx; 7475 7476 err = ice_vsi_setup_rx_rings(vsi); 7477 if (err) 7478 goto err_setup_rx; 7479 7480 err = ice_vsi_cfg_lan(vsi); 7481 if (err) 7482 goto err_setup_rx; 7483 7484 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 7485 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); 7486 err = ice_vsi_req_irq_msix(vsi, int_name); 7487 if (err) 7488 goto err_setup_rx; 7489 7490 if (bitmap_empty(pf->txtime_txqs, pf->max_pf_txqs)) 7491 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); 7492 7493 if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) { 7494 /* Notify the stack of the actual queue counts. 
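 * netif_set_real_num_tx_queues()/netif_set_real_num_rx_queues() let the
 * stack restrict queue selection and RSS to the queues actually in use.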
*/ 7495 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); 7496 if (err) 7497 goto err_set_qs; 7498 7499 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); 7500 if (err) 7501 goto err_set_qs; 7502 7503 ice_vsi_set_napi_queues(vsi); 7504 } 7505 7506 err = ice_up_complete(vsi); 7507 if (err) 7508 goto err_up_complete; 7509 7510 return 0; 7511 7512 err_up_complete: 7513 ice_down(vsi); 7514 err_set_qs: 7515 ice_vsi_free_irq(vsi); 7516 err_setup_rx: 7517 ice_vsi_free_rx_rings(vsi); 7518 err_setup_tx: 7519 ice_vsi_free_tx_rings(vsi); 7520 7521 return err; 7522 } 7523 7524 /** 7525 * ice_vsi_release_all - Delete all VSIs 7526 * @pf: PF from which all VSIs are being removed 7527 */ 7528 static void ice_vsi_release_all(struct ice_pf *pf) 7529 { 7530 int err, i; 7531 7532 if (!pf->vsi) 7533 return; 7534 7535 ice_for_each_vsi(pf, i) { 7536 if (!pf->vsi[i]) 7537 continue; 7538 7539 if (pf->vsi[i]->type == ICE_VSI_CHNL) 7540 continue; 7541 7542 err = ice_vsi_release(pf->vsi[i]); 7543 if (err) 7544 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", 7545 i, err, pf->vsi[i]->vsi_num); 7546 } 7547 } 7548 7549 /** 7550 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type 7551 * @pf: pointer to the PF instance 7552 * @type: VSI type to rebuild 7553 * 7554 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type 7555 */ 7556 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) 7557 { 7558 struct device *dev = ice_pf_to_dev(pf); 7559 int i, err; 7560 7561 ice_for_each_vsi(pf, i) { 7562 struct ice_vsi *vsi = pf->vsi[i]; 7563 7564 if (!vsi || vsi->type != type) 7565 continue; 7566 7567 /* rebuild the VSI */ 7568 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT); 7569 if (err) { 7570 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", 7571 err, vsi->idx, ice_vsi_type_str(type)); 7572 return err; 7573 } 7574 7575 /* replay filters for the VSI */ 7576 err = ice_replay_vsi(&pf->hw, vsi->idx); 7577 if (err) { 7578 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n", 7579 err, vsi->idx, ice_vsi_type_str(type)); 7580 return err; 7581 } 7582 7583 /* Re-map HW VSI number, using VSI handle that has been 7584 * previously validated in ice_replay_vsi() call above 7585 */ 7586 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); 7587 7588 /* enable the VSI */ 7589 err = ice_ena_vsi(vsi, false); 7590 if (err) { 7591 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n", 7592 err, vsi->idx, ice_vsi_type_str(type)); 7593 return err; 7594 } 7595 7596 dev_info(dev, "VSI rebuilt. 
VSI index %d, type %s\n", vsi->idx, 7597 ice_vsi_type_str(type)); 7598 } 7599 7600 return 0; 7601 } 7602 7603 /** 7604 * ice_update_pf_netdev_link - Update PF netdev link status 7605 * @pf: pointer to the PF instance 7606 */ 7607 static void ice_update_pf_netdev_link(struct ice_pf *pf) 7608 { 7609 bool link_up; 7610 int i; 7611 7612 ice_for_each_vsi(pf, i) { 7613 struct ice_vsi *vsi = pf->vsi[i]; 7614 7615 if (!vsi || vsi->type != ICE_VSI_PF) 7616 return; 7617 7618 ice_get_link_status(pf->vsi[i]->port_info, &link_up); 7619 if (link_up) { 7620 netif_carrier_on(pf->vsi[i]->netdev); 7621 netif_tx_wake_all_queues(pf->vsi[i]->netdev); 7622 } else { 7623 netif_carrier_off(pf->vsi[i]->netdev); 7624 netif_tx_stop_all_queues(pf->vsi[i]->netdev); 7625 } 7626 } 7627 } 7628 7629 /** 7630 * ice_rebuild - rebuild after reset 7631 * @pf: PF to rebuild 7632 * @reset_type: type of reset 7633 * 7634 * Do not rebuild VF VSI in this flow because that is already handled via 7635 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a 7636 * PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we don't want 7637 * to reset/rebuild all the VF VSIs twice. 7638 */ 7639 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) 7640 { 7641 struct ice_vsi *vsi = ice_get_main_vsi(pf); 7642 struct device *dev = ice_pf_to_dev(pf); 7643 struct ice_hw *hw = &pf->hw; 7644 bool dvm; 7645 int err; 7646 7647 if (test_bit(ICE_DOWN, pf->state)) 7648 goto clear_recovery; 7649 7650 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); 7651 7652 #define ICE_EMP_RESET_SLEEP_MS 5000 7653 if (reset_type == ICE_RESET_EMPR) { 7654 /* If an EMP reset has occurred, any previously pending flash 7655 * update will have completed. We no longer know whether or 7656 * not the NVM update EMP reset is restricted.
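	 *
	 * For illustration (assumed context, not in the original
	 * comment), an EMP reset typically follows activation of a
	 * firmware image staged from userspace, e.g.:
	 *
	 *	devlink dev flash pci/0000:af:00.0 file intel/ice/fw.bin
	 *
	 * (the PCI address and file name here are placeholders).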
7657 */ 7658 pf->fw_emp_reset_disabled = false; 7659 7660 msleep(ICE_EMP_RESET_SLEEP_MS); 7661 } 7662 7663 err = ice_init_all_ctrlq(hw); 7664 if (err) { 7665 dev_err(dev, "control queues init failed %d\n", err); 7666 goto err_init_ctrlq; 7667 } 7668 7669 /* if DDP was previously loaded successfully */ 7670 if (!ice_is_safe_mode(pf)) { 7671 /* reload the SW DB of filter tables */ 7672 if (reset_type == ICE_RESET_PFR) 7673 ice_fill_blk_tbls(hw); 7674 else 7675 /* Reload DDP Package after CORER/GLOBR reset */ 7676 ice_load_pkg(NULL, pf); 7677 } 7678 7679 err = ice_clear_pf_cfg(hw); 7680 if (err) { 7681 dev_err(dev, "clear PF configuration failed %d\n", err); 7682 goto err_init_ctrlq; 7683 } 7684 7685 ice_clear_pxe_mode(hw); 7686 7687 err = ice_init_nvm(hw); 7688 if (err) { 7689 dev_err(dev, "ice_init_nvm failed %d\n", err); 7690 goto err_init_ctrlq; 7691 } 7692 7693 err = ice_get_caps(hw); 7694 if (err) { 7695 dev_err(dev, "ice_get_caps failed %d\n", err); 7696 goto err_init_ctrlq; 7697 } 7698 7699 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); 7700 if (err) { 7701 dev_err(dev, "set_mac_cfg failed %d\n", err); 7702 goto err_init_ctrlq; 7703 } 7704 7705 dvm = ice_is_dvm_ena(hw); 7706 7707 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); 7708 if (err) 7709 goto err_init_ctrlq; 7710 7711 err = ice_sched_init_port(hw->port_info); 7712 if (err) 7713 goto err_sched_init_port; 7714 7715 /* start misc vector */ 7716 err = ice_req_irq_msix_misc(pf); 7717 if (err) { 7718 dev_err(dev, "misc vector setup failed: %d\n", err); 7719 goto err_sched_init_port; 7720 } 7721 7722 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 7723 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); 7724 if (!rd32(hw, PFQF_FD_SIZE)) { 7725 u16 unused, guar, b_effort; 7726 7727 guar = hw->func_caps.fd_fltr_guar; 7728 b_effort = hw->func_caps.fd_fltr_best_effort; 7729 7730 /* force guaranteed filter pool for PF */ 7731 ice_alloc_fd_guar_item(hw, &unused, guar); 7732 /* force shared filter pool for PF */ 7733 ice_alloc_fd_shrd_item(hw, &unused, b_effort); 7734 } 7735 } 7736 7737 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) 7738 ice_dcb_rebuild(pf); 7739 7740 /* If the PF previously had enabled PTP, PTP init needs to happen before 7741 * the VSI rebuild. If not, this causes the PTP link status events to 7742 * fail. 
7743 */ 7744 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 7745 ice_ptp_rebuild(pf, reset_type); 7746 7747 if (ice_is_feature_supported(pf, ICE_F_GNSS)) 7748 ice_gnss_init(pf); 7749 7750 /* rebuild PF VSI */ 7751 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); 7752 if (err) { 7753 dev_err(dev, "PF VSI rebuild failed: %d\n", err); 7754 goto err_vsi_rebuild; 7755 } 7756 7757 if (reset_type == ICE_RESET_PFR) { 7758 err = ice_rebuild_channels(pf); 7759 if (err) { 7760 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n", 7761 err); 7762 goto err_vsi_rebuild; 7763 } 7764 } 7765 7766 /* If Flow Director is active */ 7767 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { 7768 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); 7769 if (err) { 7770 dev_err(dev, "control VSI rebuild failed: %d\n", err); 7771 goto err_vsi_rebuild; 7772 } 7773 7774 /* replay HW Flow Director recipes */ 7775 if (hw->fdir_prof) 7776 ice_fdir_replay_flows(hw); 7777 7778 /* replay Flow Director filters */ 7779 ice_fdir_replay_fltrs(pf); 7780 7781 ice_rebuild_arfs(pf); 7782 } 7783 7784 if (vsi && vsi->netdev) 7785 netif_device_attach(vsi->netdev); 7786 7787 ice_update_pf_netdev_link(pf); 7788 7789 /* tell the firmware we are up */ 7790 err = ice_send_version(pf); 7791 if (err) { 7792 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n", 7793 err); 7794 goto err_vsi_rebuild; 7795 } 7796 7797 ice_replay_post(hw); 7798 7799 /* if we get here, reset flow is successful */ 7800 clear_bit(ICE_RESET_FAILED, pf->state); 7801 7802 ice_health_clear(pf); 7803 7804 ice_plug_aux_dev(pf); 7805 if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) 7806 ice_lag_rebuild(pf); 7807 7808 /* Restore timestamp mode settings after VSI rebuild */ 7809 ice_ptp_restore_timestamp_mode(pf); 7810 return; 7811 7812 err_vsi_rebuild: 7813 err_sched_init_port: 7814 ice_sched_cleanup_all(hw); 7815 err_init_ctrlq: 7816 ice_shutdown_all_ctrlq(hw, false); 7817 set_bit(ICE_RESET_FAILED, pf->state); 7818 clear_recovery: 7819 /* set this bit in PF state to control service task scheduling */ 7820 set_bit(ICE_NEEDS_RESTART, pf->state); 7821 dev_err(dev, "Rebuild failed, unload and reload driver\n"); 7822 } 7823 7824 /** 7825 * ice_change_mtu - NDO callback to change the MTU 7826 * @netdev: network interface device structure 7827 * @new_mtu: new value for maximum frame size 7828 * 7829 * Returns 0 on success, negative on failure 7830 */ 7831 int ice_change_mtu(struct net_device *netdev, int new_mtu) 7832 { 7833 struct ice_netdev_priv *np = netdev_priv(netdev); 7834 struct ice_vsi *vsi = np->vsi; 7835 struct ice_pf *pf = vsi->back; 7836 struct bpf_prog *prog; 7837 u8 count = 0; 7838 int err = 0; 7839 7840 if (new_mtu == (int)netdev->mtu) { 7841 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); 7842 return 0; 7843 } 7844 7845 prog = vsi->xdp_prog; 7846 if (prog && !prog->aux->xdp_has_frags) { 7847 int frame_size = ice_max_xdp_frame_size(vsi); 7848 7849 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { 7850 netdev_err(netdev, "max MTU for XDP usage is %d\n", 7851 frame_size - ICE_ETH_PKT_HDR_PAD); 7852 return -EINVAL; 7853 } 7854 } 7855 7856 /* if a reset is in progress, wait for some time for it to complete */ 7857 do { 7858 if (ice_is_reset_in_progress(pf->state)) { 7859 count++; 7860 usleep_range(1000, 2000); 7861 } else { 7862 break; 7863 } 7864 7865 } while (count < 100); 7866 7867 if (count == 100) { 7868 netdev_err(netdev, "can't change MTU. 
Device is busy\n"); 7869 return -EBUSY; 7870 } 7871 7872 WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu); 7873 err = ice_down_up(vsi); 7874 if (err) 7875 return err; 7876 7877 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); 7878 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); 7879 7880 return err; 7881 } 7882 7883 /** 7884 * ice_set_rss_lut - Set RSS LUT 7885 * @vsi: Pointer to VSI structure 7886 * @lut: Lookup table 7887 * @lut_size: Lookup table size 7888 * 7889 * Returns 0 on success, negative on failure 7890 */ 7891 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) 7892 { 7893 struct ice_aq_get_set_rss_lut_params params = {}; 7894 struct ice_hw *hw = &vsi->back->hw; 7895 int status; 7896 7897 if (!lut) 7898 return -EINVAL; 7899 7900 params.vsi_handle = vsi->idx; 7901 params.lut_size = lut_size; 7902 params.lut_type = vsi->rss_lut_type; 7903 params.lut = lut; 7904 7905 status = ice_aq_set_rss_lut(hw, &params); 7906 if (status) 7907 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n", 7908 status, libie_aq_str(hw->adminq.sq_last_status)); 7909 7910 return status; 7911 } 7912 7913 /** 7914 * ice_set_rss_key - Set RSS key 7915 * @vsi: Pointer to the VSI structure 7916 * @seed: RSS hash seed 7917 * 7918 * Returns 0 on success, negative on failure 7919 */ 7920 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) 7921 { 7922 struct ice_hw *hw = &vsi->back->hw; 7923 int status; 7924 7925 if (!seed) 7926 return -EINVAL; 7927 7928 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); 7929 if (status) 7930 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n", 7931 status, libie_aq_str(hw->adminq.sq_last_status)); 7932 7933 return status; 7934 } 7935 7936 /** 7937 * ice_get_rss_lut - Get RSS LUT 7938 * @vsi: Pointer to VSI structure 7939 * @lut: Buffer to store the lookup table entries 7940 * @lut_size: Size of buffer to store the lookup table entries 7941 * 7942 * Returns 0 on success, negative on failure 7943 */ 7944 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) 7945 { 7946 struct ice_aq_get_set_rss_lut_params params = {}; 7947 struct ice_hw *hw = &vsi->back->hw; 7948 int status; 7949 7950 if (!lut) 7951 return -EINVAL; 7952 7953 params.vsi_handle = vsi->idx; 7954 params.lut_size = lut_size; 7955 params.lut_type = vsi->rss_lut_type; 7956 params.lut = lut; 7957 7958 status = ice_aq_get_rss_lut(hw, &params); 7959 if (status) 7960 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n", 7961 status, libie_aq_str(hw->adminq.sq_last_status)); 7962 7963 return status; 7964 } 7965 7966 /** 7967 * ice_get_rss_key - Get RSS key 7968 * @vsi: Pointer to VSI structure 7969 * @seed: Buffer to store the key in 7970 * 7971 * Returns 0 on success, negative on failure 7972 */ 7973 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) 7974 { 7975 struct ice_hw *hw = &vsi->back->hw; 7976 int status; 7977 7978 if (!seed) 7979 return -EINVAL; 7980 7981 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); 7982 if (status) 7983 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n", 7984 status, libie_aq_str(hw->adminq.sq_last_status)); 7985 7986 return status; 7987 } 7988 7989 /** 7990 * ice_get_rss - Get RSS LUT and/or key 7991 * @vsi: Pointer to VSI structure 7992 * @seed: Buffer to store the key in 7993 * @lut: Buffer to store the lookup table entries 7994 * @lut_size: Size of buffer to store the lookup table entries 7995 * 7996 * Return: 0 on success,
negative on failure 7997 */ 7998 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) 7999 { 8000 int err; 8001 8002 if (seed) { 8003 err = ice_get_rss_key(vsi, seed); 8004 if (err) 8005 return err; 8006 } 8007 8008 if (lut) { 8009 err = ice_get_rss_lut(vsi, lut, lut_size); 8010 if (err) 8011 return err; 8012 } 8013 8014 return 0; 8015 } 8016 8017 /** 8018 * ice_set_rss_hfunc - Set RSS HASH function 8019 * @vsi: Pointer to VSI structure 8020 * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*) 8021 * 8022 * Returns 0 on success, negative on failure 8023 */ 8024 int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc) 8025 { 8026 struct ice_hw *hw = &vsi->back->hw; 8027 struct ice_vsi_ctx *ctx; 8028 bool symm; 8029 int err; 8030 8031 if (hfunc == vsi->rss_hfunc) 8032 return 0; 8033 8034 if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ && 8035 hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ) 8036 return -EOPNOTSUPP; 8037 8038 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 8039 if (!ctx) 8040 return -ENOMEM; 8041 8042 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); 8043 ctx->info.q_opt_rss = vsi->info.q_opt_rss; 8044 ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M; 8045 ctx->info.q_opt_rss |= 8046 FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc); 8047 ctx->info.q_opt_tc = vsi->info.q_opt_tc; 8048 ctx->info.q_opt_flags = vsi->info.q_opt_rss; 8049 8050 err = ice_update_vsi(hw, vsi->idx, ctx, NULL); 8051 if (err) { 8052 dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n", 8053 vsi->vsi_num, err); 8054 } else { 8055 vsi->info.q_opt_rss = ctx->info.q_opt_rss; 8056 vsi->rss_hfunc = hfunc; 8057 netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n", 8058 hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ? 
8059 "Symmetric " : ""); 8060 } 8061 kfree(ctx); 8062 if (err) 8063 return err; 8064 8065 /* Fix the symmetry setting for all existing RSS configurations */ 8066 symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ); 8067 return ice_set_rss_cfg_symm(hw, vsi, symm); 8068 } 8069 8070 /** 8071 * ice_bridge_getlink - Get the hardware bridge mode 8072 * @skb: skb buff 8073 * @pid: process ID 8074 * @seq: RTNL message seq 8075 * @dev: the netdev being configured 8076 * @filter_mask: filter mask passed in 8077 * @nlflags: netlink flags passed in 8078 * 8079 * Return the bridge mode (VEB/VEPA) 8080 */ 8081 static int 8082 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 8083 struct net_device *dev, u32 filter_mask, int nlflags) 8084 { 8085 struct ice_pf *pf = ice_netdev_to_pf(dev); 8086 u16 bmode; 8087 8088 bmode = pf->first_sw->bridge_mode; 8089 8090 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, 8091 filter_mask, NULL); 8092 } 8093 8094 /** 8095 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) 8096 * @vsi: Pointer to VSI structure 8097 * @bmode: Hardware bridge mode (VEB/VEPA) 8098 * 8099 * Returns 0 on success, negative on failure 8100 */ 8101 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) 8102 { 8103 struct ice_aqc_vsi_props *vsi_props; 8104 struct ice_hw *hw = &vsi->back->hw; 8105 struct ice_vsi_ctx *ctxt; 8106 int ret; 8107 8108 vsi_props = &vsi->info; 8109 8110 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 8111 if (!ctxt) 8112 return -ENOMEM; 8113 8114 ctxt->info = vsi->info; 8115 8116 if (bmode == BRIDGE_MODE_VEB) 8117 /* change from VEPA to VEB mode */ 8118 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 8119 else 8120 /* change from VEB to VEPA mode */ 8121 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 8122 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); 8123 8124 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 8125 if (ret) { 8126 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n", 8127 bmode, ret, libie_aq_str(hw->adminq.sq_last_status)); 8128 goto out; 8129 } 8130 /* Update sw flags for bookkeeping */ 8131 vsi_props->sw_flags = ctxt->info.sw_flags; 8132 8133 out: 8134 kfree(ctxt); 8135 return ret; 8136 } 8137 8138 /** 8139 * ice_bridge_setlink - Set the hardware bridge mode 8140 * @dev: the netdev being configured 8141 * @nlh: RTNL message 8142 * @flags: bridge setlink flags 8143 * @extack: netlink extended ack 8144 * 8145 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is 8146 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if 8147 * not already set) for all VSIs connected to this switch. It also updates the 8148 * unicast switch filter rules for the corresponding switch of the netdev.
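 *
 * Illustrative iproute2 usage (placeholder device name; a sketch, not
 * part of the original kernel-doc):
 *
 *	bridge link set dev eth0 hwmode vepa
 *	bridge link set dev eth0 hwmode veb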
8149 */ 8150 static int 8151 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 8152 u16 __always_unused flags, 8153 struct netlink_ext_ack __always_unused *extack) 8154 { 8155 struct ice_pf *pf = ice_netdev_to_pf(dev); 8156 struct nlattr *attr, *br_spec; 8157 struct ice_hw *hw = &pf->hw; 8158 struct ice_sw *pf_sw; 8159 int rem, v, err = 0; 8160 8161 pf_sw = pf->first_sw; 8162 /* find the attribute in the netlink message */ 8163 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 8164 if (!br_spec) 8165 return -EINVAL; 8166 8167 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { 8168 __u16 mode = nla_get_u16(attr); 8169 8170 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) 8171 return -EINVAL; 8172 /* Continue if bridge mode is not being flipped */ 8173 if (mode == pf_sw->bridge_mode) 8174 continue; 8175 /* Iterates through the PF VSI list and update the loopback 8176 * mode of the VSI 8177 */ 8178 ice_for_each_vsi(pf, v) { 8179 if (!pf->vsi[v]) 8180 continue; 8181 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); 8182 if (err) 8183 return err; 8184 } 8185 8186 hw->evb_veb = (mode == BRIDGE_MODE_VEB); 8187 /* Update the unicast switch filter rules for the corresponding 8188 * switch of the netdev 8189 */ 8190 err = ice_update_sw_rule_bridge_mode(hw); 8191 if (err) { 8192 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n", 8193 mode, err, 8194 libie_aq_str(hw->adminq.sq_last_status)); 8195 /* revert hw->evb_veb */ 8196 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); 8197 return err; 8198 } 8199 8200 pf_sw->bridge_mode = mode; 8201 } 8202 8203 return 0; 8204 } 8205 8206 /** 8207 * ice_tx_timeout - Respond to a Tx Hang 8208 * @netdev: network interface device structure 8209 * @txqueue: Tx queue 8210 */ 8211 void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) 8212 { 8213 struct ice_netdev_priv *np = netdev_priv(netdev); 8214 struct ice_tx_ring *tx_ring = NULL; 8215 struct ice_vsi *vsi = np->vsi; 8216 struct ice_pf *pf = vsi->back; 8217 u32 i; 8218 8219 pf->tx_timeout_count++; 8220 8221 /* Check if PFC is enabled for the TC to which the queue belongs 8222 * to. If yes then Tx timeout is not caused by a hung queue, no 8223 * need to reset and rebuild 8224 */ 8225 if (ice_is_pfc_causing_hung_q(pf, txqueue)) { 8226 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n", 8227 txqueue); 8228 return; 8229 } 8230 8231 /* now that we have an index, find the tx_ring struct */ 8232 ice_for_each_txq(vsi, i) 8233 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) 8234 if (txqueue == vsi->tx_rings[i]->q_index) { 8235 tx_ring = vsi->tx_rings[i]; 8236 break; 8237 } 8238 8239 /* Reset recovery level if enough time has elapsed after last timeout. 8240 * Also ensure no new reset action happens before next timeout period. 
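	 *
	 * Worked example (illustrative): with a 20 s window (HZ * 20),
	 * a timeout at t = 0 runs level 1 (PFR); another timeout within
	 * watchdog_timeo of the last recovery is ignored; one landing
	 * after that hold-off but inside the 20 s window escalates to
	 * level 2 (CORER) and then level 3 (GLOBR).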
8241 */ 8242 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) 8243 pf->tx_timeout_recovery_level = 1; 8244 else if (time_before(jiffies, (pf->tx_timeout_last_recovery + 8245 netdev->watchdog_timeo))) 8246 return; 8247 8248 if (tx_ring) { 8249 struct ice_hw *hw = &pf->hw; 8250 u32 head, intr = 0; 8251 8252 head = FIELD_GET(QTX_COMM_HEAD_HEAD_M, 8253 rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue]))); 8254 /* Read interrupt register */ 8255 intr = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); 8256 8257 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", 8258 vsi->vsi_num, txqueue, tx_ring->next_to_clean, 8259 head, tx_ring->next_to_use, intr); 8260 8261 ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr); 8262 } 8263 8264 pf->tx_timeout_last_recovery = jiffies; 8265 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n", 8266 pf->tx_timeout_recovery_level, txqueue); 8267 8268 switch (pf->tx_timeout_recovery_level) { 8269 case 1: 8270 set_bit(ICE_PFR_REQ, pf->state); 8271 break; 8272 case 2: 8273 set_bit(ICE_CORER_REQ, pf->state); 8274 break; 8275 case 3: 8276 set_bit(ICE_GLOBR_REQ, pf->state); 8277 break; 8278 default: 8279 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); 8280 set_bit(ICE_DOWN, pf->state); 8281 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); 8282 set_bit(ICE_SERVICE_DIS, pf->state); 8283 break; 8284 } 8285 8286 ice_service_task_schedule(pf); 8287 pf->tx_timeout_recovery_level++; 8288 } 8289 8290 /** 8291 * ice_setup_tc_cls_flower - flower classifier offloads 8292 * @np: net device to configure 8293 * @filter_dev: device on which filter is added 8294 * @cls_flower: offload data 8295 * @ingress: if the rule is added to an ingress block 8296 * 8297 * Return: 0 if the flower was successfully added or deleted, 8298 * negative error code otherwise. 8299 */ 8300 static int 8301 ice_setup_tc_cls_flower(struct ice_netdev_priv *np, 8302 struct net_device *filter_dev, 8303 struct flow_cls_offload *cls_flower, 8304 bool ingress) 8305 { 8306 struct ice_vsi *vsi = np->vsi; 8307 8308 if (cls_flower->common.chain_index) 8309 return -EOPNOTSUPP; 8310 8311 switch (cls_flower->command) { 8312 case FLOW_CLS_REPLACE: 8313 return ice_add_cls_flower(filter_dev, vsi, cls_flower, ingress); 8314 case FLOW_CLS_DESTROY: 8315 return ice_del_cls_flower(vsi, cls_flower); 8316 default: 8317 return -EINVAL; 8318 } 8319 } 8320 8321 /** 8322 * ice_setup_tc_block_cb_ingress - callback handler for ingress TC block 8323 * @type: TC SETUP type 8324 * @type_data: TC flower offload data that contains user input 8325 * @cb_priv: netdev private data 8326 * 8327 * Return: 0 if the setup was successful, negative error code otherwise. 8328 */ 8329 static int 8330 ice_setup_tc_block_cb_ingress(enum tc_setup_type type, void *type_data, 8331 void *cb_priv) 8332 { 8333 struct ice_netdev_priv *np = cb_priv; 8334 8335 switch (type) { 8336 case TC_SETUP_CLSFLOWER: 8337 return ice_setup_tc_cls_flower(np, np->vsi->netdev, 8338 type_data, true); 8339 default: 8340 return -EOPNOTSUPP; 8341 } 8342 } 8343 8344 /** 8345 * ice_setup_tc_block_cb_egress - callback handler for egress TC block 8346 * @type: TC SETUP type 8347 * @type_data: TC flower offload data that contains user input 8348 * @cb_priv: netdev private data 8349 * 8350 * Return: 0 if the setup was successful, negative error code otherwise. 
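 *
 * Illustrative userspace trigger (assumed example with placeholder
 * names, not from the original kernel-doc): an egress rule offloaded
 * through this callback could be installed with
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 egress protocol ip flower \
 *		dst_ip 192.168.0.2 skip_sw action drop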
8351 */ 8352 static int 8353 ice_setup_tc_block_cb_egress(enum tc_setup_type type, void *type_data, 8354 void *cb_priv) 8355 { 8356 struct ice_netdev_priv *np = cb_priv; 8357 8358 switch (type) { 8359 case TC_SETUP_CLSFLOWER: 8360 return ice_setup_tc_cls_flower(np, np->vsi->netdev, 8361 type_data, false); 8362 default: 8363 return -EOPNOTSUPP; 8364 } 8365 } 8366 8367 /** 8368 * ice_validate_mqprio_qopt - Validate TCF input parameters 8369 * @vsi: Pointer to VSI 8370 * @mqprio_qopt: input parameters for mqprio queue configuration 8371 * 8372 * This function validates MQPRIO params, such as qcount (power of 2 wherever 8373 * needed), and makes sure the user doesn't specify qcount and BW rate limit 8374 * for more TCs than "num_tc" 8375 */ 8376 static int 8377 ice_validate_mqprio_qopt(struct ice_vsi *vsi, 8378 struct tc_mqprio_qopt_offload *mqprio_qopt) 8379 { 8380 int non_power_of_2_qcount = 0; 8381 struct ice_pf *pf = vsi->back; 8382 int max_rss_q_cnt = 0; 8383 u64 sum_min_rate = 0; 8384 struct device *dev; 8385 int i, speed; 8386 u8 num_tc; 8387 8388 if (vsi->type != ICE_VSI_PF) 8389 return -EINVAL; 8390 8391 if (mqprio_qopt->qopt.offset[0] != 0 || 8392 mqprio_qopt->qopt.num_tc < 1 || 8393 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC) 8394 return -EINVAL; 8395 8396 dev = ice_pf_to_dev(pf); 8397 vsi->ch_rss_size = 0; 8398 num_tc = mqprio_qopt->qopt.num_tc; 8399 speed = ice_get_link_speed_kbps(vsi); 8400 8401 for (i = 0; num_tc; i++) { 8402 int qcount = mqprio_qopt->qopt.count[i]; 8403 u64 max_rate, min_rate, rem; 8404 8405 if (!qcount) 8406 return -EINVAL; 8407 8408 if (is_power_of_2(qcount)) { 8409 if (non_power_of_2_qcount && 8410 qcount > non_power_of_2_qcount) { 8411 dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n", 8412 qcount, non_power_of_2_qcount); 8413 return -EINVAL; 8414 } 8415 if (qcount > max_rss_q_cnt) 8416 max_rss_q_cnt = qcount; 8417 } else { 8418 if (non_power_of_2_qcount && 8419 qcount != non_power_of_2_qcount) { 8420 dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n", 8421 qcount, non_power_of_2_qcount); 8422 return -EINVAL; 8423 } 8424 if (qcount < max_rss_q_cnt) { 8425 dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n", 8426 qcount, max_rss_q_cnt); 8427 return -EINVAL; 8428 } 8429 max_rss_q_cnt = qcount; 8430 non_power_of_2_qcount = qcount; 8431 } 8432 8433 /* TC command takes input in K/M/Gbps or K/M/Gbit etc but 8434 * converts the bandwidth rate limit into Bytes/s when 8435 * passing it down to the driver.
So convert input bandwidth 8436 * from Bytes/s to Kbps 8437 */ 8438 max_rate = mqprio_qopt->max_rate[i]; 8439 max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR); 8440 8441 /* min_rate is minimum guaranteed rate and it can't be zero */ 8442 min_rate = mqprio_qopt->min_rate[i]; 8443 min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR); 8444 sum_min_rate += min_rate; 8445 8446 if (min_rate && min_rate < ICE_MIN_BW_LIMIT) { 8447 dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i, 8448 min_rate, ICE_MIN_BW_LIMIT); 8449 return -EINVAL; 8450 } 8451 8452 if (max_rate && max_rate > speed) { 8453 dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n", 8454 i, max_rate, speed); 8455 return -EINVAL; 8456 } 8457 8458 iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem); 8459 if (rem) { 8460 dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps", 8461 i, ICE_MIN_BW_LIMIT); 8462 return -EINVAL; 8463 } 8464 8465 iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem); 8466 if (rem) { 8467 dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps", 8468 i, ICE_MIN_BW_LIMIT); 8469 return -EINVAL; 8470 } 8471 8472 /* min_rate can't be more than max_rate, except when max_rate 8473 * is zero (implies max_rate sought is max line rate). In such 8474 * a case min_rate can be more than max. 8475 */ 8476 if (max_rate && min_rate > max_rate) { 8477 dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n", 8478 min_rate, max_rate); 8479 return -EINVAL; 8480 } 8481 8482 if (i >= mqprio_qopt->qopt.num_tc - 1) 8483 break; 8484 if (mqprio_qopt->qopt.offset[i + 1] != 8485 (mqprio_qopt->qopt.offset[i] + qcount)) 8486 return -EINVAL; 8487 } 8488 if (vsi->num_rxq < 8489 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) 8490 return -EINVAL; 8491 if (vsi->num_txq < 8492 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) 8493 return -EINVAL; 8494 8495 if (sum_min_rate && sum_min_rate > (u64)speed) { 8496 dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n", 8497 sum_min_rate, speed); 8498 return -EINVAL; 8499 } 8500 8501 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */ 8502 vsi->ch_rss_size = max_rss_q_cnt; 8503 8504 return 0; 8505 } 8506 8507 /** 8508 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF 8509 * @pf: ptr to PF device 8510 * @vsi: ptr to VSI 8511 */ 8512 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) 8513 { 8514 struct device *dev = ice_pf_to_dev(pf); 8515 bool added = false; 8516 struct ice_hw *hw; 8517 int flow; 8518 8519 if (!(vsi->num_gfltr || vsi->num_bfltr)) 8520 return -EINVAL; 8521 8522 hw = &pf->hw; 8523 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) { 8524 struct ice_fd_hw_prof *prof; 8525 int tun, status; 8526 u64 entry_h; 8527 8528 if (!(hw->fdir_prof && hw->fdir_prof[flow] && 8529 hw->fdir_prof[flow]->cnt)) 8530 continue; 8531 8532 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { 8533 enum ice_flow_priority prio; 8534 8535 /* add this VSI to FDir profile for this flow */ 8536 prio = ICE_FLOW_PRIO_NORMAL; 8537 prof = hw->fdir_prof[flow]; 8538 status = ice_flow_add_entry(hw, ICE_BLK_FD, 8539 prof->prof_id[tun], 8540 prof->vsi_h[0], vsi->idx, 8541 prio, prof->fdir_seg[tun], 8542 &entry_h); 8543 if (status) { 8544 dev_err(dev, "channel VSI idx %d, not able to add to group %d\n", 8545 vsi->idx, flow); 8546 continue; 8547 } 8548 8549 prof->entry_h[prof->cnt][tun] = entry_h; 8550 } 8551 8552 /* store VSI for filter replay and delete */ 8553 prof->vsi_h[prof->cnt] = vsi->idx; 8554 
prof->cnt++; 8555 8556 added = true; 8557 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx, 8558 flow); 8559 } 8560 8561 if (!added) 8562 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx); 8563 8564 return 0; 8565 } 8566 8567 /** 8568 * ice_add_channel - add a channel by adding VSI 8569 * @pf: ptr to PF device 8570 * @sw_id: underlying HW switching element ID 8571 * @ch: ptr to channel structure 8572 * 8573 * Add a channel (VSI) using add_vsi and queue_map 8574 */ 8575 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch) 8576 { 8577 struct device *dev = ice_pf_to_dev(pf); 8578 struct ice_vsi *vsi; 8579 8580 if (ch->type != ICE_VSI_CHNL) { 8581 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type); 8582 return -EINVAL; 8583 } 8584 8585 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch); 8586 if (!vsi || vsi->type != ICE_VSI_CHNL) { 8587 dev_err(dev, "create chnl VSI failure\n"); 8588 return -EINVAL; 8589 } 8590 8591 ice_add_vsi_to_fdir(pf, vsi); 8592 8593 ch->sw_id = sw_id; 8594 ch->vsi_num = vsi->vsi_num; 8595 ch->info.mapping_flags = vsi->info.mapping_flags; 8596 ch->ch_vsi = vsi; 8597 /* set the back pointer of channel for newly created VSI */ 8598 vsi->ch = ch; 8599 8600 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping, 8601 sizeof(vsi->info.q_mapping)); 8602 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping, 8603 sizeof(vsi->info.tc_mapping)); 8604 8605 return 0; 8606 } 8607 8608 /** 8609 * ice_chnl_cfg_res - configure channel rings and vectors 8610 * @vsi: the VSI being setup 8611 * @ch: ptr to channel structure 8612 * 8613 * Configure channel specific resources such as rings and vectors. 8614 */ 8615 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch) 8616 { 8617 int i; 8618 8619 for (i = 0; i < ch->num_txq; i++) { 8620 struct ice_q_vector *tx_q_vector, *rx_q_vector; 8621 struct ice_ring_container *rc; 8622 struct ice_tx_ring *tx_ring; 8623 struct ice_rx_ring *rx_ring; 8624 8625 tx_ring = vsi->tx_rings[ch->base_q + i]; 8626 rx_ring = vsi->rx_rings[ch->base_q + i]; 8627 if (!tx_ring || !rx_ring) 8628 continue; 8629 8630 /* set up rings as channel enabled */ 8631 tx_ring->ch = ch; 8632 rx_ring->ch = ch; 8633 8634 /* following code block sets up vector specific attributes */ 8635 tx_q_vector = tx_ring->q_vector; 8636 rx_q_vector = rx_ring->q_vector; 8637 if (!tx_q_vector && !rx_q_vector) 8638 continue; 8639 8640 if (tx_q_vector) { 8641 tx_q_vector->ch = ch; 8642 /* setup Tx and Rx ITR setting if DIM is off */ 8643 rc = &tx_q_vector->tx; 8644 if (!ITR_IS_DYNAMIC(rc)) 8645 ice_write_itr(rc, rc->itr_setting); 8646 } 8647 if (rx_q_vector) { 8648 rx_q_vector->ch = ch; 8649 /* setup Tx and Rx ITR setting if DIM is off */ 8650 rc = &rx_q_vector->rx; 8651 if (!ITR_IS_DYNAMIC(rc)) 8652 ice_write_itr(rc, rc->itr_setting); 8653 } 8654 } 8655 8656 /* it is safe to assume that, if channel has non-zero num_t[r]xq, then 8657 * GLINT_ITR register would have been written to perform in-context 8658 * update, hence perform flush 8659 */ 8660 if (ch->num_txq || ch->num_rxq) 8661 ice_flush(&vsi->back->hw); 8662 } 8663 8664 /** 8665 * ice_cfg_chnl_all_res - configure channel resources 8666 * @vsi: ptr to main_vsi 8667 * @ch: ptr to channel structure 8668 * 8669 * This function configures channel specific resources such as flow-director 8670 * counter index, and other resources such as queues, vectors, ITR settings 8671 */ 8672 static void 8673 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch) 8674 { 8675 /* configure channel (aka ADQ) resources such as
queues, vectors, 8676 * ITR settings for channel specific vectors and anything else 8677 */ 8678 ice_chnl_cfg_res(vsi, ch); 8679 } 8680 8681 /** 8682 * ice_setup_hw_channel - setup new channel 8683 * @pf: ptr to PF device 8684 * @vsi: the VSI being setup 8685 * @ch: ptr to channel structure 8686 * @sw_id: underlying HW switching element ID 8687 * @type: type of channel to be created (VMDq2/VF) 8688 * 8689 * Setup new channel (VSI) based on specified type (VMDq2/VF) 8690 * and configures Tx rings accordingly 8691 */ 8692 static int 8693 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi, 8694 struct ice_channel *ch, u16 sw_id, u8 type) 8695 { 8696 struct device *dev = ice_pf_to_dev(pf); 8697 int ret; 8698 8699 ch->base_q = vsi->next_base_q; 8700 ch->type = type; 8701 8702 ret = ice_add_channel(pf, sw_id, ch); 8703 if (ret) { 8704 dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id); 8705 return ret; 8706 } 8707 8708 /* configure/setup ADQ specific resources */ 8709 ice_cfg_chnl_all_res(vsi, ch); 8710 8711 /* make sure to update the next_base_q so that subsequent channel's 8712 * (aka ADQ) VSI queue map is correct 8713 */ 8714 vsi->next_base_q = vsi->next_base_q + ch->num_rxq; 8715 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num, 8716 ch->num_rxq); 8717 8718 return 0; 8719 } 8720 8721 /** 8722 * ice_setup_channel - setup new channel using uplink element 8723 * @pf: ptr to PF device 8724 * @vsi: the VSI being setup 8725 * @ch: ptr to channel structure 8726 * 8727 * Setup new channel (VSI) based on specified type (VMDq2/VF) 8728 * and uplink switching element 8729 */ 8730 static bool 8731 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi, 8732 struct ice_channel *ch) 8733 { 8734 struct device *dev = ice_pf_to_dev(pf); 8735 u16 sw_id; 8736 int ret; 8737 8738 if (vsi->type != ICE_VSI_PF) { 8739 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type); 8740 return false; 8741 } 8742 8743 sw_id = pf->first_sw->sw_id; 8744 8745 /* create channel (VSI) */ 8746 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL); 8747 if (ret) { 8748 dev_err(dev, "failed to setup hw_channel\n"); 8749 return false; 8750 } 8751 dev_dbg(dev, "successfully created channel()\n"); 8752 8753 return ch->ch_vsi ? true : false; 8754 } 8755 8756 /** 8757 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate 8758 * @vsi: VSI to be configured 8759 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit 8760 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit 8761 */ 8762 static int 8763 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate) 8764 { 8765 int err; 8766 8767 err = ice_set_min_bw_limit(vsi, min_tx_rate); 8768 if (err) 8769 return err; 8770 8771 return ice_set_max_bw_limit(vsi, max_tx_rate); 8772 } 8773 8774 /** 8775 * ice_create_q_channel - function to create channel 8776 * @vsi: VSI to be configured 8777 * @ch: ptr to channel (it contains channel specific params) 8778 * 8779 * This function creates channel (VSI) using num_queues specified by user, 8780 * reconfigs RSS if needed. 
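 *
 * Illustrative ADQ setup that exercises this path (device name and
 * rates are placeholders; a sketch, not part of the original
 * kernel-doc):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 map 0 1 \
 *		queues 4@0 4@4 hw 1 mode channel \
 *		shaper bw_rlimit max_rate 1Gbit 2Gbit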
8781 */ 8782 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch) 8783 { 8784 struct ice_pf *pf = vsi->back; 8785 struct device *dev; 8786 8787 if (!ch) 8788 return -EINVAL; 8789 8790 dev = ice_pf_to_dev(pf); 8791 if (!ch->num_txq || !ch->num_rxq) { 8792 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq); 8793 return -EINVAL; 8794 } 8795 8796 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) { 8797 dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n", 8798 vsi->cnt_q_avail, ch->num_txq); 8799 return -EINVAL; 8800 } 8801 8802 if (!ice_setup_channel(pf, vsi, ch)) { 8803 dev_info(dev, "Failed to setup channel\n"); 8804 return -EINVAL; 8805 } 8806 /* configure BW rate limit */ 8807 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) { 8808 int ret; 8809 8810 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate, 8811 ch->min_tx_rate); 8812 if (ret) 8813 dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n", 8814 ch->max_tx_rate, ch->ch_vsi->vsi_num); 8815 else 8816 dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n", 8817 ch->max_tx_rate, ch->ch_vsi->vsi_num); 8818 } 8819 8820 vsi->cnt_q_avail -= ch->num_txq; 8821 8822 return 0; 8823 } 8824 8825 /** 8826 * ice_rem_all_chnl_fltrs - removes all channel filters 8827 * @pf: ptr to PF, TC-flower based filter are tracked at PF level 8828 * 8829 * Remove all advanced switch filters only if they are channel specific 8830 * tc-flower based filter 8831 */ 8832 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf) 8833 { 8834 struct ice_tc_flower_fltr *fltr; 8835 struct hlist_node *node; 8836 8837 /* to remove all channel filters, iterate an ordered list of filters */ 8838 hlist_for_each_entry_safe(fltr, node, 8839 &pf->tc_flower_fltr_list, 8840 tc_flower_node) { 8841 struct ice_rule_query_data rule; 8842 int status; 8843 8844 /* for now process only channel specific filters */ 8845 if (!ice_is_chnl_fltr(fltr)) 8846 continue; 8847 8848 rule.rid = fltr->rid; 8849 rule.rule_id = fltr->rule_id; 8850 rule.vsi_handle = fltr->dest_vsi_handle; 8851 status = ice_rem_adv_rule_by_id(&pf->hw, &rule); 8852 if (status) { 8853 if (status == -ENOENT) 8854 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n", 8855 rule.rule_id); 8856 else 8857 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n", 8858 status); 8859 } else if (fltr->dest_vsi) { 8860 /* update advanced switch filter count */ 8861 if (fltr->dest_vsi->type == ICE_VSI_CHNL) { 8862 u32 flags = fltr->flags; 8863 8864 fltr->dest_vsi->num_chnl_fltr--; 8865 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | 8866 ICE_TC_FLWR_FIELD_ENC_DST_MAC)) 8867 pf->num_dmac_chnl_fltrs--; 8868 } 8869 } 8870 8871 hlist_del(&fltr->tc_flower_node); 8872 kfree(fltr); 8873 } 8874 } 8875 8876 /** 8877 * ice_remove_q_channels - Remove queue channels for the TCs 8878 * @vsi: VSI to be configured 8879 * @rem_fltr: delete advanced switch filter or not 8880 * 8881 * Remove queue channels for the TCs 8882 */ 8883 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr) 8884 { 8885 struct ice_channel *ch, *ch_tmp; 8886 struct ice_pf *pf = vsi->back; 8887 int i; 8888 8889 /* remove all tc-flower based filter if they are channel filters only */ 8890 if (rem_fltr) 8891 ice_rem_all_chnl_fltrs(pf); 8892 8893 /* remove ntuple filters since queue configuration is being changed */ 8894 if (vsi->netdev->features & NETIF_F_NTUPLE) { 8895 struct ice_hw *hw = &pf->hw; 8896 8897 mutex_lock(&hw->fdir_fltr_lock); 8898 
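		/* Illustrative background (an assumption added for
		 * clarity, not an original comment): ntuple rules
		 * installed via ethtool reference absolute queue
		 * indices, e.g.:
		 *
		 *	ethtool -U eth0 flow-type tcp4 dst-port 80 action 4
		 *
		 * and those indices may be stale once the channel
		 * layout changes, so every Flow Director rule is
		 * dropped here.
		 */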
ice_fdir_del_all_fltrs(vsi); 8899 mutex_unlock(&hw->fdir_fltr_lock); 8900 } 8901 8902 /* perform cleanup for channels if they exist */ 8903 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { 8904 struct ice_vsi *ch_vsi; 8905 8906 list_del(&ch->list); 8907 ch_vsi = ch->ch_vsi; 8908 if (!ch_vsi) { 8909 kfree(ch); 8910 continue; 8911 } 8912 8913 /* Reset queue contexts */ 8914 for (i = 0; i < ch->num_rxq; i++) { 8915 struct ice_tx_ring *tx_ring; 8916 struct ice_rx_ring *rx_ring; 8917 8918 tx_ring = vsi->tx_rings[ch->base_q + i]; 8919 rx_ring = vsi->rx_rings[ch->base_q + i]; 8920 if (tx_ring) { 8921 tx_ring->ch = NULL; 8922 if (tx_ring->q_vector) 8923 tx_ring->q_vector->ch = NULL; 8924 } 8925 if (rx_ring) { 8926 rx_ring->ch = NULL; 8927 if (rx_ring->q_vector) 8928 rx_ring->q_vector->ch = NULL; 8929 } 8930 } 8931 8932 /* Release FD resources for the channel VSI */ 8933 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx); 8934 8935 /* clear the VSI from scheduler tree */ 8936 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx); 8937 8938 /* Delete VSI from FW, PF and HW VSI arrays */ 8939 ice_vsi_delete(ch->ch_vsi); 8940 8941 /* free the channel */ 8942 kfree(ch); 8943 } 8944 8945 /* clear the channel VSI map which is stored in main VSI */ 8946 ice_for_each_chnl_tc(i) 8947 vsi->tc_map_vsi[i] = NULL; 8948 8949 /* reset main VSI's all TC information */ 8950 vsi->all_enatc = 0; 8951 vsi->all_numtc = 0; 8952 } 8953 8954 /** 8955 * ice_rebuild_channels - rebuild channel 8956 * @pf: ptr to PF 8957 * 8958 * Recreate channel VSIs and replay filters 8959 */ 8960 static int ice_rebuild_channels(struct ice_pf *pf) 8961 { 8962 struct device *dev = ice_pf_to_dev(pf); 8963 struct ice_vsi *main_vsi; 8964 bool rem_adv_fltr = true; 8965 struct ice_channel *ch; 8966 struct ice_vsi *vsi; 8967 int tc_idx = 1; 8968 int i, err; 8969 8970 main_vsi = ice_get_main_vsi(pf); 8971 if (!main_vsi) 8972 return 0; 8973 8974 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) || 8975 main_vsi->old_numtc == 1) 8976 return 0; /* nothing to be done */ 8977 8978 /* reconfigure main VSI based on old value of TC and cached values 8979 * for MQPRIO opts 8980 */ 8981 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc); 8982 if (err) { 8983 dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n", 8984 main_vsi->old_ena_tc, main_vsi->vsi_num); 8985 return err; 8986 } 8987 8988 /* rebuild ADQ VSIs */ 8989 ice_for_each_vsi(pf, i) { 8990 enum ice_vsi_type type; 8991 8992 vsi = pf->vsi[i]; 8993 if (!vsi || vsi->type != ICE_VSI_CHNL) 8994 continue; 8995 8996 type = vsi->type; 8997 8998 /* rebuild ADQ VSI */ 8999 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT); 9000 if (err) { 9001 dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n", 9002 ice_vsi_type_str(type), vsi->idx, err); 9003 goto cleanup; 9004 } 9005 9006 /* Re-map HW VSI number, using VSI handle that has been 9007 * previously validated in ice_replay_vsi() call above 9008 */ 9009 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); 9010 9011 /* replay filters for the VSI */ 9012 err = ice_replay_vsi(&pf->hw, vsi->idx); 9013 if (err) { 9014 dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n", 9015 ice_vsi_type_str(type), err, vsi->idx); 9016 rem_adv_fltr = false; 9017 goto cleanup; 9018 } 9019 dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n", 9020 ice_vsi_type_str(type), vsi->idx); 9021 9022 /* store ADQ VSI at correct TC index in main VSI's 9023 * map of TC to VSI 9024 */ 9025 main_vsi->tc_map_vsi[tc_idx++] = vsi; 9026 } 
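	/* Clarifying note (assumption inferred from the loop above):
	 * channel VSIs are re-attached starting at TC1, in creation
	 * order, because TC0 always remains on the main VSI; this is
	 * why tc_idx is seeded with 1.
	 */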
9027 9028 /* ADQ VSI(s) has been rebuilt successfully, so setup 9029 * channel for main VSI's Tx and Rx rings 9030 */ 9031 list_for_each_entry(ch, &main_vsi->ch_list, list) { 9032 struct ice_vsi *ch_vsi; 9033 9034 ch_vsi = ch->ch_vsi; 9035 if (!ch_vsi) 9036 continue; 9037 9038 /* reconfig channel resources */ 9039 ice_cfg_chnl_all_res(main_vsi, ch); 9040 9041 /* replay BW rate limit if it is non-zero */ 9042 if (!ch->max_tx_rate && !ch->min_tx_rate) 9043 continue; 9044 9045 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate, 9046 ch->min_tx_rate); 9047 if (err) 9048 dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n", 9049 err, ch->max_tx_rate, ch->min_tx_rate, 9050 ch_vsi->vsi_num); 9051 else 9052 dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n", 9053 ch->max_tx_rate, ch->min_tx_rate, 9054 ch_vsi->vsi_num); 9055 } 9056 9057 /* reconfig RSS for main VSI */ 9058 if (main_vsi->ch_rss_size) 9059 ice_vsi_cfg_rss_lut_key(main_vsi); 9060 9061 return 0; 9062 9063 cleanup: 9064 ice_remove_q_channels(main_vsi, rem_adv_fltr); 9065 return err; 9066 } 9067 9068 /** 9069 * ice_create_q_channels - Add queue channel for the given TCs 9070 * @vsi: VSI to be configured 9071 * 9072 * Configures queue channel mapping to the given TCs 9073 */ 9074 static int ice_create_q_channels(struct ice_vsi *vsi) 9075 { 9076 struct ice_pf *pf = vsi->back; 9077 struct ice_channel *ch; 9078 int ret = 0, i; 9079 9080 ice_for_each_chnl_tc(i) { 9081 if (!(vsi->all_enatc & BIT(i))) 9082 continue; 9083 9084 ch = kzalloc(sizeof(*ch), GFP_KERNEL); 9085 if (!ch) { 9086 ret = -ENOMEM; 9087 goto err_free; 9088 } 9089 INIT_LIST_HEAD(&ch->list); 9090 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i]; 9091 ch->num_txq = vsi->mqprio_qopt.qopt.count[i]; 9092 ch->base_q = vsi->mqprio_qopt.qopt.offset[i]; 9093 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i]; 9094 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i]; 9095 9096 /* convert to Kbits/s */ 9097 if (ch->max_tx_rate) 9098 ch->max_tx_rate = div_u64(ch->max_tx_rate, 9099 ICE_BW_KBPS_DIVISOR); 9100 if (ch->min_tx_rate) 9101 ch->min_tx_rate = div_u64(ch->min_tx_rate, 9102 ICE_BW_KBPS_DIVISOR); 9103 9104 ret = ice_create_q_channel(vsi, ch); 9105 if (ret) { 9106 dev_err(ice_pf_to_dev(pf), 9107 "failed creating channel TC:%d\n", i); 9108 kfree(ch); 9109 goto err_free; 9110 } 9111 list_add_tail(&ch->list, &vsi->ch_list); 9112 vsi->tc_map_vsi[i] = ch->ch_vsi; 9113 dev_dbg(ice_pf_to_dev(pf), 9114 "successfully created channel: VSI %p\n", ch->ch_vsi); 9115 } 9116 return 0; 9117 9118 err_free: 9119 ice_remove_q_channels(vsi, false); 9120 9121 return ret; 9122 } 9123 9124 /** 9125 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes 9126 * @netdev: net device to configure 9127 * @type_data: TC offload data 9128 */ 9129 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data) 9130 { 9131 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 9132 struct ice_netdev_priv *np = netdev_priv(netdev); 9133 struct ice_vsi *vsi = np->vsi; 9134 struct ice_pf *pf = vsi->back; 9135 u16 mode, ena_tc_qdisc = 0; 9136 int cur_txq, cur_rxq; 9137 u8 hw = 0, num_tcf; 9138 struct device *dev; 9139 int ret, i; 9140 9141 dev = ice_pf_to_dev(pf); 9142 num_tcf = mqprio_qopt->qopt.num_tc; 9143 hw = mqprio_qopt->qopt.hw; 9144 mode = mqprio_qopt->mode; 9145 if (!hw) { 9146 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); 9147 vsi->ch_rss_size = 0; 9148 memcpy(&vsi->mqprio_qopt, 
mqprio_qopt, sizeof(*mqprio_qopt)); 9149 goto config_tcf; 9150 } 9151 9152 /* Generate queue region map for number of TCF requested */ 9153 for (i = 0; i < num_tcf; i++) 9154 ena_tc_qdisc |= BIT(i); 9155 9156 switch (mode) { 9157 case TC_MQPRIO_MODE_CHANNEL: 9158 9159 if (pf->hw.port_info->is_custom_tx_enabled) { 9160 dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n"); 9161 return -EBUSY; 9162 } 9163 ice_tear_down_devlink_rate_tree(pf); 9164 9165 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt); 9166 if (ret) { 9167 netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n", 9168 ret); 9169 return ret; 9170 } 9171 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); 9172 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags); 9173 /* don't assume state of hw_tc_offload during driver load 9174 * and set the flag for TC flower filter if hw_tc_offload 9175 * already ON 9176 */ 9177 if (vsi->netdev->features & NETIF_F_HW_TC) 9178 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags); 9179 break; 9180 default: 9181 return -EINVAL; 9182 } 9183 9184 config_tcf: 9185 9186 /* Requesting same TCF configuration as already enabled */ 9187 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc && 9188 mode != TC_MQPRIO_MODE_CHANNEL) 9189 return 0; 9190 9191 /* Pause VSI queues */ 9192 ice_dis_vsi(vsi, true); 9193 9194 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 9195 ice_remove_q_channels(vsi, true); 9196 9197 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { 9198 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf), 9199 num_online_cpus()); 9200 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf), 9201 num_online_cpus()); 9202 } else { 9203 /* logic to rebuild VSI, same as ethtool -L */ 9204 u16 offset = 0, qcount_tx = 0, qcount_rx = 0; 9205 9206 for (i = 0; i < num_tcf; i++) { 9207 if (!(ena_tc_qdisc & BIT(i))) 9208 continue; 9209 9210 offset = vsi->mqprio_qopt.qopt.offset[i]; 9211 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; 9212 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; 9213 } 9214 vsi->req_txq = offset + qcount_tx; 9215 vsi->req_rxq = offset + qcount_rx; 9216 9217 /* store away original rss_size info, so that it gets reused 9218 * from ice_vsi_rebuild during tc-qdisc delete stage - to 9219 * determine what should be the rss_size for main VSI 9220 */ 9221 vsi->orig_rss_size = vsi->rss_size; 9222 } 9223 9224 /* save current values of Tx and Rx queues before calling VSI rebuild 9225 * for fallback option 9226 */ 9227 cur_txq = vsi->num_txq; 9228 cur_rxq = vsi->num_rxq; 9229 9230 /* proceed with rebuild main VSI using correct number of queues */ 9231 ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); 9232 if (ret) { 9233 /* fallback to current number of queues */ 9234 dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n"); 9235 vsi->req_txq = cur_txq; 9236 vsi->req_rxq = cur_rxq; 9237 clear_bit(ICE_RESET_FAILED, pf->state); 9238 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) { 9239 dev_err(dev, "Rebuild of main VSI failed again\n"); 9240 return ret; 9241 } 9242 } 9243 9244 vsi->all_numtc = num_tcf; 9245 vsi->all_enatc = ena_tc_qdisc; 9246 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc); 9247 if (ret) { 9248 netdev_err(netdev, "failed configuring TC for VSI id=%d\n", 9249 vsi->vsi_num); 9250 goto exit; 9251 } 9252 9253 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { 9254 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; 9255 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0]; 9256 9257 /* set TC0 rate limit if specified */ 9258 if (max_tx_rate || min_tx_rate) { 9259 /* convert
to Kbits/s */ 9260 if (max_tx_rate) 9261 max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR); 9262 if (min_tx_rate) 9263 min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR); 9264 9265 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate); 9266 if (!ret) { 9267 dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n", 9268 max_tx_rate, min_tx_rate, vsi->vsi_num); 9269 } else { 9270 dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n", 9271 max_tx_rate, min_tx_rate, vsi->vsi_num); 9272 goto exit; 9273 } 9274 } 9275 ret = ice_create_q_channels(vsi); 9276 if (ret) { 9277 netdev_err(netdev, "failed configuring queue channels\n"); 9278 goto exit; 9279 } else { 9280 netdev_dbg(netdev, "successfully configured channels\n"); 9281 } 9282 } 9283 9284 if (vsi->ch_rss_size) 9285 ice_vsi_cfg_rss_lut_key(vsi); 9286 9287 exit: 9288 /* if error, reset the all_numtc and all_enatc */ 9289 if (ret) { 9290 vsi->all_numtc = 0; 9291 vsi->all_enatc = 0; 9292 } 9293 /* resume VSI */ 9294 ice_ena_vsi(vsi, true); 9295 9296 return ret; 9297 } 9298 9299 /** 9300 * ice_cfg_txtime - configure Tx Time for the Tx ring 9301 * @tx_ring: pointer to the Tx ring structure 9302 * 9303 * Return: 0 on success, negative value on failure. 9304 */ 9305 static int ice_cfg_txtime(struct ice_tx_ring *tx_ring) 9306 { 9307 int err, timeout = 50; 9308 struct ice_vsi *vsi; 9309 struct device *dev; 9310 struct ice_pf *pf; 9311 u32 queue; 9312 9313 if (!tx_ring) 9314 return -EINVAL; 9315 9316 vsi = tx_ring->vsi; 9317 pf = vsi->back; 9318 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { 9319 timeout--; 9320 if (!timeout) 9321 return -EBUSY; 9322 usleep_range(1000, 2000); 9323 } 9324 9325 queue = tx_ring->q_index; 9326 dev = ice_pf_to_dev(pf); 9327 9328 /* Ignore return value, and always attempt to enable queue. */ 9329 ice_qp_dis(vsi, queue); 9330 9331 err = ice_qp_ena(vsi, queue); 9332 if (err) 9333 dev_err(dev, "Failed to enable Tx queue %d for TxTime configuration\n", 9334 queue); 9335 9336 clear_bit(ICE_CFG_BUSY, pf->state); 9337 return err; 9338 } 9339 9340 /** 9341 * ice_offload_txtime - set earliest TxTime first 9342 * @netdev: network interface device structure 9343 * @qopt_off: etf queue option offload from the skb to set 9344 * 9345 * Return: 0 on success, negative value on failure. 
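 *
 * Illustrative userspace configuration that reaches this handler (the
 * device name and numbers are placeholders, a sketch rather than part
 * of the original kernel-doc):
 *
 *	tc qdisc replace dev eth0 parent root handle 100 \
 *		mqprio num_tc 1 map 0 queues 8@0 hw 0
 *	tc qdisc add dev eth0 parent 100:1 etf \
 *		clockid CLOCK_TAI delta 500000 offload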
9346 */ 9347 static int ice_offload_txtime(struct net_device *netdev, 9348 void *qopt_off) 9349 { 9350 struct ice_netdev_priv *np = netdev_priv(netdev); 9351 struct ice_pf *pf = np->vsi->back; 9352 struct tc_etf_qopt_offload *qopt; 9353 struct ice_vsi *vsi = np->vsi; 9354 struct ice_tx_ring *tx_ring; 9355 int ret = 0; 9356 9357 if (!ice_is_feature_supported(pf, ICE_F_TXTIME)) 9358 return -EOPNOTSUPP; 9359 9360 qopt = qopt_off; 9361 if (!qopt_off || qopt->queue < 0 || qopt->queue >= vsi->num_txq) 9362 return -EINVAL; 9363 9364 if (qopt->enable) 9365 set_bit(qopt->queue, pf->txtime_txqs); 9366 else 9367 clear_bit(qopt->queue, pf->txtime_txqs); 9368 9369 if (netif_running(vsi->netdev)) { 9370 tx_ring = vsi->tx_rings[qopt->queue]; 9371 ret = ice_cfg_txtime(tx_ring); 9372 if (ret) 9373 goto err; 9374 } 9375 9376 netdev_info(netdev, "%s TxTime on queue: %i\n", 9377 str_enable_disable(qopt->enable), qopt->queue); 9378 return 0; 9379 9380 err: 9381 netdev_err(netdev, "Failed to %s TxTime on queue: %i\n", 9382 str_enable_disable(qopt->enable), qopt->queue); 9383 9384 if (qopt->enable) 9385 clear_bit(qopt->queue, pf->txtime_txqs); 9386 return ret; 9387 } 9388 9389 static LIST_HEAD(ice_block_cb_list); 9390 9391 static int 9392 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type, 9393 void *type_data) 9394 { 9395 struct ice_netdev_priv *np = netdev_priv(netdev); 9396 enum flow_block_binder_type binder_type; 9397 struct iidc_rdma_core_dev_info *cdev; 9398 struct ice_pf *pf = np->vsi->back; 9399 flow_setup_cb_t *flower_handler; 9400 bool locked = false; 9401 int err; 9402 9403 switch (type) { 9404 case TC_SETUP_BLOCK: 9405 binder_type = 9406 ((struct flow_block_offload *)type_data)->binder_type; 9407 9408 switch (binder_type) { 9409 case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS: 9410 flower_handler = ice_setup_tc_block_cb_ingress; 9411 break; 9412 case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS: 9413 flower_handler = ice_setup_tc_block_cb_egress; 9414 break; 9415 default: 9416 return -EOPNOTSUPP; 9417 } 9418 9419 return flow_block_cb_setup_simple(type_data, 9420 &ice_block_cb_list, 9421 flower_handler, 9422 np, np, false); 9423 case TC_SETUP_QDISC_MQPRIO: 9424 if (ice_is_eswitch_mode_switchdev(pf)) { 9425 netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n"); 9426 return -EOPNOTSUPP; 9427 } 9428 9429 cdev = pf->cdev_info; 9430 if (cdev && cdev->adev) { 9431 mutex_lock(&pf->adev_mutex); 9432 device_lock(&cdev->adev->dev); 9433 locked = true; 9434 if (cdev->adev->dev.driver) { 9435 netdev_err(netdev, "Cannot change qdisc when RDMA is active\n"); 9436 err = -EBUSY; 9437 goto adev_unlock; 9438 } 9439 } 9440 9441 /* setup traffic classifier for receive side */ 9442 mutex_lock(&pf->tc_mutex); 9443 err = ice_setup_tc_mqprio_qdisc(netdev, type_data); 9444 mutex_unlock(&pf->tc_mutex); 9445 9446 adev_unlock: 9447 if (locked) { 9448 device_unlock(&cdev->adev->dev); 9449 mutex_unlock(&pf->adev_mutex); 9450 } 9451 return err; 9452 case TC_SETUP_QDISC_ETF: 9453 return ice_offload_txtime(netdev, type_data); 9454 default: 9455 return -EOPNOTSUPP; 9456 } 9457 return -EOPNOTSUPP; 9458 } 9459 9460 static struct ice_indr_block_priv * 9461 ice_indr_block_priv_lookup(struct ice_netdev_priv *np, 9462 struct net_device *netdev) 9463 { 9464 struct ice_indr_block_priv *cb_priv; 9465 9466 list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) { 9467 if (!cb_priv->netdev) 9468 return NULL; 9469 if (cb_priv->netdev == netdev) 9470 return cb_priv; 9471 } 9472 return NULL; 9473 } 9474 9475 static 
int 9476 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data, 9477 void *indr_priv) 9478 { 9479 struct ice_indr_block_priv *priv = indr_priv; 9480 struct ice_netdev_priv *np = priv->np; 9481 9482 switch (type) { 9483 case TC_SETUP_CLSFLOWER: 9484 return ice_setup_tc_cls_flower(np, priv->netdev, 9485 (struct flow_cls_offload *) 9486 type_data, false); 9487 default: 9488 return -EOPNOTSUPP; 9489 } 9490 } 9491 9492 static int 9493 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch, 9494 struct ice_netdev_priv *np, 9495 struct flow_block_offload *f, void *data, 9496 void (*cleanup)(struct flow_block_cb *block_cb)) 9497 { 9498 struct ice_indr_block_priv *indr_priv; 9499 struct flow_block_cb *block_cb; 9500 9501 if (!ice_is_tunnel_supported(netdev) && 9502 !(is_vlan_dev(netdev) && 9503 vlan_dev_real_dev(netdev) == np->vsi->netdev)) 9504 return -EOPNOTSUPP; 9505 9506 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 9507 return -EOPNOTSUPP; 9508 9509 switch (f->command) { 9510 case FLOW_BLOCK_BIND: 9511 indr_priv = ice_indr_block_priv_lookup(np, netdev); 9512 if (indr_priv) 9513 return -EEXIST; 9514 9515 indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL); 9516 if (!indr_priv) 9517 return -ENOMEM; 9518 9519 indr_priv->netdev = netdev; 9520 indr_priv->np = np; 9521 list_add(&indr_priv->list, &np->tc_indr_block_priv_list); 9522 9523 block_cb = 9524 flow_indr_block_cb_alloc(ice_indr_setup_block_cb, 9525 indr_priv, indr_priv, 9526 ice_rep_indr_tc_block_unbind, 9527 f, netdev, sch, data, np, 9528 cleanup); 9529 9530 if (IS_ERR(block_cb)) { 9531 list_del(&indr_priv->list); 9532 kfree(indr_priv); 9533 return PTR_ERR(block_cb); 9534 } 9535 flow_block_cb_add(block_cb, f); 9536 list_add_tail(&block_cb->driver_list, &ice_block_cb_list); 9537 break; 9538 case FLOW_BLOCK_UNBIND: 9539 indr_priv = ice_indr_block_priv_lookup(np, netdev); 9540 if (!indr_priv) 9541 return -ENOENT; 9542 9543 block_cb = flow_block_cb_lookup(f->block, 9544 ice_indr_setup_block_cb, 9545 indr_priv); 9546 if (!block_cb) 9547 return -ENOENT; 9548 9549 flow_indr_block_cb_remove(block_cb, f); 9550 9551 list_del(&block_cb->driver_list); 9552 break; 9553 default: 9554 return -EOPNOTSUPP; 9555 } 9556 return 0; 9557 } 9558 9559 static int 9560 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, 9561 void *cb_priv, enum tc_setup_type type, void *type_data, 9562 void *data, 9563 void (*cleanup)(struct flow_block_cb *block_cb)) 9564 { 9565 switch (type) { 9566 case TC_SETUP_BLOCK: 9567 return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data, 9568 data, cleanup); 9569 9570 default: 9571 return -EOPNOTSUPP; 9572 } 9573 } 9574 9575 /** 9576 * ice_open - Called when a network interface becomes active 9577 * @netdev: network interface device structure 9578 * 9579 * The open entry point is called when a network interface is made 9580 * active by the system (IFF_UP). At this point all resources needed 9581 * for transmit and receive operations are allocated, the interrupt 9582 * handler is registered with the OS, the netdev watchdog is enabled, 9583 * and the stack is notified that the interface is ready. 
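 *
 * Illustrative trigger (placeholder device name, not part of the
 * original comment):
 *
 *	ip link set dev eth0 up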
/**
 * ice_indr_block_priv_lookup - find indirect block private data for a device
 * @np: netdev private structure of the uplink port
 * @netdev: device whose indirect block registration is being looked up
 *
 * Returns the matching ice_indr_block_priv entry, or NULL if none is found
 */
static struct ice_indr_block_priv *
ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
			   struct net_device *netdev)
{
	struct ice_indr_block_priv *cb_priv;

	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
		if (!cb_priv->netdev)
			return NULL;
		if (cb_priv->netdev == netdev)
			return cb_priv;
	}
	return NULL;
}

/**
 * ice_indr_setup_block_cb - handle offload requests from an indirect block
 * @type: type of TC offload
 * @type_data: offload-type specific data
 * @indr_priv: indirect block private data registered at bind time
 *
 * Returns 0 on success, negative value on failure
 */
static int
ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
			void *indr_priv)
{
	struct ice_indr_block_priv *priv = indr_priv;
	struct ice_netdev_priv *np = priv->np;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return ice_setup_tc_cls_flower(np, priv->netdev,
					       (struct flow_cls_offload *)
					       type_data, false);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * ice_indr_setup_tc_block - bind/unbind an indirect TC block
 * @netdev: device the block belongs to (e.g. a tunnel or VLAN device)
 * @sch: Qdisc associated with the block
 * @np: netdev private structure of the uplink port
 * @f: flow block offload command
 * @data: opaque data passed through to block callback allocation
 * @cleanup: callback used to tear down the block on unbind
 *
 * Returns 0 on success, negative value on failure
 */
static int
ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
			struct ice_netdev_priv *np,
			struct flow_block_offload *f, void *data,
			void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct ice_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (!ice_is_tunnel_supported(netdev) &&
	    !(is_vlan_dev(netdev) &&
	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->np = np;
		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);

		block_cb =
			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
						 indr_priv, indr_priv,
						 ice_rep_indr_tc_block_unbind,
						 f, netdev, sch, data, np,
						 cleanup);

		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
		break;
	case FLOW_BLOCK_UNBIND:
		indr_priv = ice_indr_block_priv_lookup(np, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						ice_indr_setup_block_cb,
						indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);

		list_del(&block_cb->driver_list);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/**
 * ice_indr_setup_tc_cb - indirect TC offload entry point
 * @netdev: device the offload request is for
 * @sch: Qdisc associated with the request
 * @cb_priv: private data registered with the flow indirect device framework
 * @type: type of TC offload
 * @type_data: offload-type specific data
 * @data: opaque data passed through to block setup
 * @cleanup: callback used to tear down a block on unbind
 *
 * Returns 0 on success, negative value on failure
 */
static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb))
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
					       data, cleanup);

	default:
		return -EOPNOTSUPP;
	}
}
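/* Example (illustrative, not part of the driver logic): the indirect block
 * hooks above allow flower rules installed on a software tunnel device whose
 * traffic egresses through this port to be offloaded here, e.g. (device name
 * and key ID are hypothetical):
 *
 *	tc filter add dev vxlan0 ingress protocol ip flower \
 *		enc_key_id 42 action drop
 */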
/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't open net device while reset is in progress\n");
		return -EBUSY;
	}

	return ice_open_internal(netdev);
}

/**
 * ice_open_internal - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * Internal ice_open implementation. Should not be called directly except
 * from ice_open and the reset handling routines.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open_internal(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_port_info *pi;
	int err;

	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	netif_carrier_off(netdev);

	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err) {
		netdev_err(netdev, "Failed to get link info, error %d\n", err);
		return err;
	}

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Set PHY if there is media, otherwise, turn off PHY */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
			err = ice_init_phy_user_cfg(pi);
			if (err) {
				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
					   err);
				return err;
			}
		}

		err = ice_configure_phy(vsi);
		if (err) {
			netdev_err(netdev, "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	/* Update existing tunnels information */
	udp_tunnel_get_rx_info(netdev);

	return err;
}
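/* Example (illustrative, not part of the driver logic): ice_open() above and
 * ice_stop() below are wired up as .ndo_open/.ndo_stop in ice_netdev_ops, so
 * they run when user space toggles the administrative state of the port
 * (device name is hypothetical):
 *
 *	ip link set dev eth0 up
 *	ip link set dev eth0 down
 */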
/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns 0 on success, negative value on failure (e.g. -EBUSY while a
 * reset is in progress)
 */
int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't stop net device while reset is in progress\n");
		return -EBUSY;
	}

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
		int link_err = ice_force_phys_link_state(vsi, false);

		if (link_err) {
			if (link_err == -ENOMEDIUM)
				netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
					    vsi->vsi_num);
			else
				netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
					   vsi->vsi_num, link_err);

			ice_vsi_close(vsi);
			return -EIO;
		}
	}

	ice_vsi_close(vsi);

	return 0;
}

/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	bool gso = skb_is_gso(skb);
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
		features &= ~NETIF_F_GSO_MASK;

	len = skb_network_offset(skb);
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		goto out_rm_features;

	len = skb_network_header_len(skb);
	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
		goto out_rm_features;

	if (skb->encapsulation) {
		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
		 * the case of IPIP frames, the transport header pointer is
		 * after the inner header! So check to make sure that this
		 * is a GRE or UDP_TUNNEL frame before doing that math.
		 */
		if (gso && (skb_shinfo(skb)->gso_type &
			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
			len = skb_inner_network_header(skb) -
			      skb_transport_header(skb);
			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
				goto out_rm_features;
		}

		len = skb_inner_network_header_len(skb);
		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};

static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_select_queue = ice_select_queue,
	.ndo_features_check = ice_features_check,
	.ndo_fix_features = ice_fix_features,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_set_vf_rate = ice_set_vf_bw,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_setup_tc = ice_setup_tc,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,
	.ndo_hwtstamp_get = ice_ptp_hwtstamp_get,
	.ndo_hwtstamp_set = ice_ptp_hwtstamp_set,
};
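/* Example (illustrative, not part of the driver logic): the SR-IOV callbacks
 * in ice_netdev_ops map to iproute2 VF configuration (device name, VF index,
 * and values are hypothetical):
 *
 *	ip link set eth0 vf 0 mac 02:00:00:00:00:01	-> ndo_set_vf_mac
 *	ip link set eth0 vf 0 vlan 100			-> ndo_set_vf_vlan
 *	ip link set eth0 vf 0 trust on			-> ndo_set_vf_trust
 */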