// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
#include "devlink/port.h"
#include "ice_sf_eth.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
#include <net/xdp_sock_drv.h>

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS("LIBIE");
MODULE_IMPORT_NS("LIBIE_ADMINQ");
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);

/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}

static struct workqueue_struct *ice_wq;
struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(const struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops ||
		       dev->netdev_ops == &ice_netdev_safe_mode_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 *
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;

	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return 0;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 *
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}

	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return status;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If a filter was added successfully or already exists, do not
	 * treat it as an error; instead continue processing the rest of
	 * the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);

				/* promiscuous mode implies allmulticast so
				 * that VSIs that are in promiscuous mode are
				 * subscribed to multicast packets coming to
				 * the port
				 */
				err = ice_set_promisc(vsi,
						      ICE_MCAST_PROMISC_BITS);
				if (err)
					goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}

			/* disable allmulti here, but only if allmulti is not
			 * still enabled for the netdev
			 */
			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
				err = ice_clear_promisc(vsi,
							ICE_MCAST_PROMISC_BITS);
				if (err) {
					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
						   err, vsi->vsi_num);
				}
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	synchronize_irq(pf->oicr_irq.virq);

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		rtnl_lock();
		ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge);
		rtnl_unlock();
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}

	if (vsi->netdev)
		netif_device_detach(vsi->netdev);
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf, reset_type);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw, false);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
		reset_type = ICE_RESET_CORER;
	}

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
	 * prepare for pending reset if not already (for PF software-initiated
	 * global resets the software should already be prepared for it as
	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
	 * by firmware or software on other PFs, that bit is not set so prepare
	 * for the reset now), poll for reset done, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state)) {
		reset_type = ICE_RESET_PFR;
		if (pf->lag && pf->lag->bonded) {
			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
			reset_type = ICE_RESET_CORER;
		}
	}
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_200GB:
		speed = "200 G";
		break;
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message and continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}
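
/* For reference (per IEEE 802.1AB, on which the encoding above relies): the
 * 16-bit TLV header packs a 7-bit type in the upper bits and a 9-bit length
 * in the lower bits, which is why typelen is assembled as
 * (type << ICE_LLDP_TLV_TYPE_S) | len and the length is later recovered with
 * FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen).
 */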

/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events;
	 * don't bail out on failure, since other bookkeeping still needs
	 * to happen
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			libie_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	if (!link_up && old_link)
		pf->link_down_events++;

	ice_ptp_link_change(pf, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

/**
 * ice_get_fwlog_data - copy the FW log data from ARQ event
 * @pf: PF that the FW log event is associated with
 * @event: event structure containing FW log data
 */
static void
ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_fwlog_data *fwlog;
	struct ice_hw *hw = &pf->hw;

	fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];

	memset(fwlog->data, 0, PAGE_SIZE);
	fwlog->data_size = le16_to_cpu(event->desc.datalen);

	memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
	ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);

	if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
		/* the rings are full so bump the head to create room */
		ice_fwlog_ring_increment(&hw->fwlog_ring.head,
					 hw->fwlog_ring.size);
	}
}

/**
 * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: intermediate helper storage and identifier for waiting
 * @opcode: the opcode to wait for
 *
 * Prepares to wait for a specific AdminQ completion event on the ARQ for
 * a given PF. Actual wait would be done by a call to ice_aq_wait_for_event().
 *
 * Calls are separated to allow caller registering for event before sending
 * the command, which mitigates a race between registering and FW responding.
 *
 * To obtain only the descriptor contents, pass a task->event with a null
 * msg_buf. If the complete data buffer is desired, allocate the
 * task->event.msg_buf with enough space ahead of time.
 */
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode)
{
	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);
}

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: ptr prepared by ice_aq_prep_for_event()
 * @timeout: how long to wait, in jiffies
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout)
{
	enum ice_aq_task_state *state = &task->state;
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long start = jiffies;
	long ret;
	int err;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
					       *state != ICE_AQ_TASK_WAITING,
					       timeout);
	switch (*state) {
	case ICE_AQ_TASK_NOT_PREPARED:
		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
		err = -EINVAL;
		break;
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", *state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		task->opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);

	return err;
}
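
/* Minimal usage sketch for the two-step wait API above (illustrative only;
 * "opcode" and the timeout are placeholders that a real caller, e.g. a
 * firmware update flow, supplies itself):
 *
 *	struct ice_aq_task task = {};
 *	int err;
 *
 *	ice_aq_prep_for_event(pf, &task, opcode);
 *	(send the AdminQ command that triggers the event here)
 *	err = ice_aq_wait_for_event(pf, &task, msecs_to_jiffies(3000));
 *
 * Preparing before sending the command closes the race described in the
 * ice_aq_prep_for_event() kernel-doc.
 */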

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_rq_event_info *task_ev;
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state != ICE_AQ_TASK_WAITING)
			continue;
		if (task->opcode != opcode)
			continue;

		task_ev = &task->event;
		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
		task_ev->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
			memcpy(task_ev->msg_buf, event->msg_buf,
			       event->buf_len);
			task_ev->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

#define ICE_MBX_OVERFLOW_WATERMARK 64

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		struct ice_mbx_data data = {};
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) {
				ice_vc_process_vf_msg(pf, &event, NULL);
				ice_mbx_vf_dec_trig_e830(hw, &event);
			} else {
				u16 val = hw->mailboxq.num_rq_entries;

				data.max_num_msgs_mbx = val;
				val = ICE_MBX_OVERFLOW_WATERMARK;
				data.async_watermark_val = val;
				data.num_msg_proc = i;
				data.num_pending_arq = pending;

				ice_vc_process_vf_msg(pf, &event, &data);
			}
			break;
		case ice_aqc_opc_fw_logs_event:
			ice_get_fwlog_data(pf, &event);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		case ice_aqc_opc_get_health_status:
			ice_process_health_status_event(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}
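
/* Illustrative example (not driver code): if the head register reads 7 while
 * rq.next_to_clean is 5, descriptors 5 and 6 hold firmware messages the
 * driver has not consumed yet, so ice_ctrlq_pending() returns true and
 * callers such as ice_clean_adminq_subtask() run __ice_clean_ctrlq() again
 * after clearing the EVENT_PENDING state bit.
 */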
1603
1604 /**
1605 * ice_clean_adminq_subtask - clean the AdminQ rings
1606 * @pf: board private structure
1607 */
ice_clean_adminq_subtask(struct ice_pf * pf)1608 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1609 {
1610 struct ice_hw *hw = &pf->hw;
1611
1612 if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1613 return;
1614
1615 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1616 return;
1617
1618 clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1619
1620 /* There might be a situation where new messages arrive to a control
1621 * queue between processing the last message and clearing the
1622 * EVENT_PENDING bit. So before exiting, check queue head again (using
1623 * ice_ctrlq_pending) and process new messages if any.
1624 */
1625 if (ice_ctrlq_pending(hw, &hw->adminq))
1626 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1627
1628 ice_flush(hw);
1629 }
1630
1631 /**
1632 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1633 * @pf: board private structure
1634 */
ice_clean_mailboxq_subtask(struct ice_pf * pf)1635 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1636 {
1637 struct ice_hw *hw = &pf->hw;
1638
1639 if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1640 return;
1641
1642 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1643 return;
1644
1645 clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1646
1647 if (ice_ctrlq_pending(hw, &hw->mailboxq))
1648 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1649
1650 ice_flush(hw);
1651 }
1652
1653 /**
1654 * ice_clean_sbq_subtask - clean the Sideband Queue rings
1655 * @pf: board private structure
1656 */
ice_clean_sbq_subtask(struct ice_pf * pf)1657 static void ice_clean_sbq_subtask(struct ice_pf *pf)
1658 {
1659 struct ice_hw *hw = &pf->hw;
1660
1661 /* if mac_type is not generic, sideband is not supported
1662 * and there's nothing to do here
1663 */
1664 if (!ice_is_generic_mac(hw)) {
1665 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1666 return;
1667 }
1668
1669 if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1670 return;
1671
1672 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1673 return;
1674
1675 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1676
1677 if (ice_ctrlq_pending(hw, &hw->sbq))
1678 __ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1679
1680 ice_flush(hw);
1681 }
1682
1683 /**
1684 * ice_service_task_schedule - schedule the service task to wake up
1685 * @pf: board private structure
1686 *
1687 * If not already scheduled, this puts the task into the work queue.
1688 */
ice_service_task_schedule(struct ice_pf * pf)1689 void ice_service_task_schedule(struct ice_pf *pf)
1690 {
1691 if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1692 !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1693 !test_bit(ICE_NEEDS_RESTART, pf->state))
1694 queue_work(ice_wq, &pf->serv_task);
1695 }
1696
1697 /**
1698 * ice_service_task_complete - finish up the service task
1699 * @pf: board private structure
1700 */
ice_service_task_complete(struct ice_pf * pf)1701 static void ice_service_task_complete(struct ice_pf *pf)
1702 {
1703 WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1704
1705 /* force memory (pf->state) to sync before next service task */
1706 smp_mb__before_atomic();
1707 clear_bit(ICE_SERVICE_SCHED, pf->state);
1708 }
1709
1710 /**
1711 * ice_service_task_stop - stop service task and cancel works
1712 * @pf: board private structure
1713 *
1714 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1715 * 1 otherwise.
1716 */
ice_service_task_stop(struct ice_pf * pf)1717 static int ice_service_task_stop(struct ice_pf *pf)
1718 {
1719 int ret;
1720
1721 ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1722
1723 if (pf->serv_tmr.function)
1724 timer_delete_sync(&pf->serv_tmr);
1725 if (pf->serv_task.func)
1726 cancel_work_sync(&pf->serv_task);
1727
1728 clear_bit(ICE_SERVICE_SCHED, pf->state);
1729 return ret;
1730 }
1731
1732 /**
1733 * ice_service_task_restart - restart service task and schedule works
1734 * @pf: board private structure
1735 *
1736 * This function is needed for suspend and resume works (e.g WoL scenario)
1737 */
ice_service_task_restart(struct ice_pf * pf)1738 static void ice_service_task_restart(struct ice_pf *pf)
1739 {
1740 clear_bit(ICE_SERVICE_DIS, pf->state);
1741 ice_service_task_schedule(pf);
1742 }
1743
1744 /**
1745 * ice_service_timer - timer callback to schedule service task
1746 * @t: pointer to timer_list
1747 */
ice_service_timer(struct timer_list * t)1748 static void ice_service_timer(struct timer_list *t)
1749 {
1750 struct ice_pf *pf = timer_container_of(pf, t, serv_tmr);
1751
1752 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1753 ice_service_task_schedule(pf);
1754 }
1755
1756 /**
1757 * ice_mdd_maybe_reset_vf - reset VF after MDD event
1758 * @pf: pointer to the PF structure
1759 * @vf: pointer to the VF structure
1760 * @reset_vf_tx: whether Tx MDD has occurred
1761 * @reset_vf_rx: whether Rx MDD has occurred
1762 *
1763 * Since the queue can get stuck on VF MDD events, the PF can be configured to
1764 * automatically reset the VF by enabling the private ethtool flag
1765 * mdd-auto-reset-vf.
1766 */
1767 static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf,
1768 bool reset_vf_tx, bool reset_vf_rx)
1769 {
1770 struct device *dev = ice_pf_to_dev(pf);
1771
1772 if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
1773 return;
1774
1775 /* VF MDD event counters will be cleared by reset, so print the event
1776 * prior to reset.
1777 */
1778 if (reset_vf_tx)
1779 ice_print_vf_tx_mdd_event(vf);
1780
1781 if (reset_vf_rx)
1782 ice_print_vf_rx_mdd_event(vf);
1783
1784 dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n",
1785 pf->hw.pf_id, vf->vf_id);
1786 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
1787 }
1788
1789 /**
1790 * ice_handle_mdd_event - handle malicious driver detect event
1791 * @pf: pointer to the PF structure
1792 *
1793 * Called from the service task. The OICR interrupt handler indicates an MDD
1794 * event. VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1795 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1796 * disable the queue, the PF can be configured to reset the VF using ethtool
1797 * private flag mdd-auto-reset-vf.
1798 */
1799 static void ice_handle_mdd_event(struct ice_pf *pf)
1800 {
1801 struct device *dev = ice_pf_to_dev(pf);
1802 struct ice_hw *hw = &pf->hw;
1803 struct ice_vf *vf;
1804 unsigned int bkt;
1805 u32 reg;
1806
1807 if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1808 /* Since the VF MDD event logging is rate limited, check if
1809 * there are pending MDD events.
1810 */
1811 ice_print_vfs_mdd_events(pf);
1812 return;
1813 }
1814
1815 /* find what triggered an MDD event */
1816 reg = rd32(hw, GL_MDET_TX_PQM);
1817 if (reg & GL_MDET_TX_PQM_VALID_M) {
1818 u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
1819 u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
1820 u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
1821 u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);
1822
1823 if (netif_msg_tx_err(pf))
1824 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1825 event, queue, pf_num, vf_num);
1826 ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num,
1827 event, queue);
1828 wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1829 }
1830
1831 reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
1832 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1833 u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg);
1834 u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg);
1835 u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg);
1836 u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg);
1837
1838 if (netif_msg_tx_err(pf))
1839 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1840 event, queue, pf_num, vf_num);
1841 ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num,
1842 event, queue);
1843 wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
1844 }
1845
1846 reg = rd32(hw, GL_MDET_RX);
1847 if (reg & GL_MDET_RX_VALID_M) {
1848 u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg);
1849 u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg);
1850 u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg);
1851 u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg);
1852
1853 if (netif_msg_rx_err(pf))
1854 dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1855 event, queue, pf_num, vf_num);
1856 ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event,
1857 queue);
1858 wr32(hw, GL_MDET_RX, 0xffffffff);
1859 }
1860
1861 /* check to see if this PF caused an MDD event */
1862 reg = rd32(hw, PF_MDET_TX_PQM);
1863 if (reg & PF_MDET_TX_PQM_VALID_M) {
1864 wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1865 if (netif_msg_tx_err(pf))
1866 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1867 }
1868
1869 reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw));
1870 if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1871 wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff);
1872 if (netif_msg_tx_err(pf))
1873 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1874 }
1875
1876 reg = rd32(hw, PF_MDET_RX);
1877 if (reg & PF_MDET_RX_VALID_M) {
1878 wr32(hw, PF_MDET_RX, 0xFFFF);
1879 if (netif_msg_rx_err(pf))
1880 dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1881 }
1882
1883 /* Check to see if one of the VFs caused an MDD event, and then
1884 * increment counters and set print pending
1885 */
1886 mutex_lock(&pf->vfs.table_lock);
1887 ice_for_each_vf(pf, bkt, vf) {
1888 bool reset_vf_tx = false, reset_vf_rx = false;
1889
1890 reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
1891 if (reg & VP_MDET_TX_PQM_VALID_M) {
1892 wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
1893 vf->mdd_tx_events.count++;
1894 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1895 if (netif_msg_tx_err(pf))
1896 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1897 vf->vf_id);
1898
1899 reset_vf_tx = true;
1900 }
1901
1902 reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
1903 if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1904 wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
1905 vf->mdd_tx_events.count++;
1906 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1907 if (netif_msg_tx_err(pf))
1908 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1909 vf->vf_id);
1910
1911 reset_vf_tx = true;
1912 }
1913
1914 reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
1915 if (reg & VP_MDET_TX_TDPU_VALID_M) {
1916 wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
1917 vf->mdd_tx_events.count++;
1918 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1919 if (netif_msg_tx_err(pf))
1920 dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1921 vf->vf_id);
1922
1923 reset_vf_tx = true;
1924 }
1925
1926 reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1927 if (reg & VP_MDET_RX_VALID_M) {
1928 wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1929 vf->mdd_rx_events.count++;
1930 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1931 if (netif_msg_rx_err(pf))
1932 dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1933 vf->vf_id);
1934
1935 reset_vf_rx = true;
1936 }
1937
1938 if (reset_vf_tx || reset_vf_rx)
1939 ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx,
1940 reset_vf_rx);
1941 }
1942 mutex_unlock(&pf->vfs.table_lock);
1943
1944 ice_print_vfs_mdd_events(pf);
1945 }
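/* Illustrative sketch (not driver code): each GL_MDET_* read above decodes a
 * "who did what" register with FIELD_GET() from <linux/bitfield.h>. With an
 * assumed (made-up) layout of a valid bit plus a queue-number field:
 */
#define EXAMPLE_MDET_QNUM_M	GENMASK(14, 0)	/* assumed field layout */
#define EXAMPLE_MDET_VALID_M	BIT(31)

static bool example_mdet_decode(u32 reg, u16 *queue)
{
	if (!(reg & EXAMPLE_MDET_VALID_M))
		return false;

	*queue = FIELD_GET(EXAMPLE_MDET_QNUM_M, reg);
	return true;
}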
1946
1947 /**
1948 * ice_force_phys_link_state - Force the physical link state
1949 * @vsi: VSI to force the physical link state to up/down
1950 * @link_up: true/false indicates to set the physical link to up/down
1951 *
1952 * Force the physical link state by getting the current PHY capabilities from
1953 * hardware and setting the PHY config based on the determined capabilities. If
1954 * the link changes, a link event will be triggered because both the Enable
1955 * Automatic Link Update and LESM Enable bits are set when setting the PHY capabilities.
1956 *
1957 * Returns 0 on success, negative on failure
1958 */
1959 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1960 {
1961 struct ice_aqc_get_phy_caps_data *pcaps;
1962 struct ice_aqc_set_phy_cfg_data *cfg;
1963 struct ice_port_info *pi;
1964 struct device *dev;
1965 int retcode;
1966
1967 if (!vsi || !vsi->port_info || !vsi->back)
1968 return -EINVAL;
1969 if (vsi->type != ICE_VSI_PF)
1970 return 0;
1971
1972 dev = ice_pf_to_dev(vsi->back);
1973
1974 pi = vsi->port_info;
1975
1976 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1977 if (!pcaps)
1978 return -ENOMEM;
1979
1980 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1981 NULL);
1982 if (retcode) {
1983 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1984 vsi->vsi_num, retcode);
1985 retcode = -EIO;
1986 goto out;
1987 }
1988
1989 /* No change in link */
1990 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1991 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1992 goto out;
1993
1994 /* Use the current user PHY configuration. The current user PHY
1995 * configuration is initialized during probe from PHY capabilities
1996 * software mode, and updated on set PHY configuration.
1997 */
1998 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1999 if (!cfg) {
2000 retcode = -ENOMEM;
2001 goto out;
2002 }
2003
2004 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2005 if (link_up)
2006 cfg->caps |= ICE_AQ_PHY_ENA_LINK;
2007 else
2008 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
2009
2010 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
2011 if (retcode) {
2012 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2013 vsi->vsi_num, retcode);
2014 retcode = -EIO;
2015 }
2016
2017 kfree(cfg);
2018 out:
2019 kfree(pcaps);
2020 return retcode;
2021 }
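/* Illustrative sketch (not actual driver flow): how a caller might use
 * ice_force_phys_link_state(), e.g. from a hypothetical stop path when the
 * link-down-on-close private flag is set.
 */
static void example_link_down_on_close(struct ice_vsi *vsi)
{
	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) &&
	    ice_force_phys_link_state(vsi, false))
		dev_dbg(ice_pf_to_dev(vsi->back),
			"failed to force link down on VSI %d\n", vsi->vsi_num);
}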
2022
2023 /**
2024 * ice_init_nvm_phy_type - Initialize the NVM PHY type
2025 * @pi: port info structure
2026 *
2027 * Initialize nvm_phy_type_[low|high] for link lenient mode support
2028 */
2029 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
2030 {
2031 struct ice_aqc_get_phy_caps_data *pcaps;
2032 struct ice_pf *pf = pi->hw->back;
2033 int err;
2034
2035 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2036 if (!pcaps)
2037 return -ENOMEM;
2038
2039 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
2040 pcaps, NULL);
2041
2042 if (err) {
2043 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2044 goto out;
2045 }
2046
2047 pf->nvm_phy_type_hi = pcaps->phy_type_high;
2048 pf->nvm_phy_type_lo = pcaps->phy_type_low;
2049
2050 out:
2051 kfree(pcaps);
2052 return err;
2053 }
2054
2055 /**
2056 * ice_init_link_dflt_override - Initialize link default override
2057 * @pi: port info structure
2058 *
2059 * Initialize link default override and PHY total port shutdown during probe
2060 */
2061 static void ice_init_link_dflt_override(struct ice_port_info *pi)
2062 {
2063 struct ice_link_default_override_tlv *ldo;
2064 struct ice_pf *pf = pi->hw->back;
2065
2066 ldo = &pf->link_dflt_override;
2067 if (ice_get_link_default_override(ldo, pi))
2068 return;
2069
2070 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
2071 return;
2072
2073 /* Enable Total Port Shutdown (override/replace link-down-on-close
2074 * ethtool private flag) for ports with Port Disable bit set.
2075 */
2076 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
2077 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
2078 }
2079
2080 /**
2081 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2082 * @pi: port info structure
2083 *
2084 * If default override is enabled, initialize the user PHY cfg speed and FEC
2085 * settings using the default override mask from the NVM.
2086 *
2087 * The PHY should only be configured with the default override settings the
2088 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2089 * is used to indicate that the user PHY cfg default override is initialized
2090 * and the PHY has not been configured with the default override settings. The
2091 * state is set here, and cleared in ice_configure_phy the first time the PHY is
2092 * configured.
2093 *
2094 * This function should be called only if the FW doesn't support default
2095 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2096 */
2097 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2098 {
2099 struct ice_link_default_override_tlv *ldo;
2100 struct ice_aqc_set_phy_cfg_data *cfg;
2101 struct ice_phy_info *phy = &pi->phy;
2102 struct ice_pf *pf = pi->hw->back;
2103
2104 ldo = &pf->link_dflt_override;
2105
2106 /* If link default override is enabled, use it to mask the NVM PHY
2107 * capabilities for the speed and FEC default configuration.
2108 */
2109 cfg = &phy->curr_user_phy_cfg;
2110
2111 if (ldo->phy_type_low || ldo->phy_type_high) {
2112 cfg->phy_type_low = pf->nvm_phy_type_lo &
2113 cpu_to_le64(ldo->phy_type_low);
2114 cfg->phy_type_high = pf->nvm_phy_type_hi &
2115 cpu_to_le64(ldo->phy_type_high);
2116 }
2117 cfg->link_fec_opt = ldo->fec_options;
2118 phy->curr_user_fec_req = ICE_FEC_AUTO;
2119
2120 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2121 }
2122
2123 /**
2124 * ice_init_phy_user_cfg - Initialize the PHY user configuration
2125 * @pi: port info structure
2126 *
2127 * Initialize the current user PHY configuration, speed, FEC, and FC requested
2128 * mode to default. The PHY defaults come from the Get PHY Capabilities
2129 * (topology with media) response, so call this when media is first available.
2130 * An error is returned if called when media is not available. The PHY
2131 * initialization completed state is set here.
2132 *
2133 * These values are used when setting the PHY configuration, and the user PHY
2134 * configuration is updated whenever the PHY configuration is set.
2135 * Returns 0 on success, negative on failure
2136 */
2137 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2138 {
2139 struct ice_aqc_get_phy_caps_data *pcaps;
2140 struct ice_phy_info *phy = &pi->phy;
2141 struct ice_pf *pf = pi->hw->back;
2142 int err;
2143
2144 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2145 return -EIO;
2146
2147 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2148 if (!pcaps)
2149 return -ENOMEM;
2150
2151 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2152 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2153 pcaps, NULL);
2154 else
2155 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2156 pcaps, NULL);
2157 if (err) {
2158 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2159 goto err_out;
2160 }
2161
2162 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2163
2164 /* check if lenient mode is supported and enabled */
2165 if (ice_fw_supports_link_override(pi->hw) &&
2166 !(pcaps->module_compliance_enforcement &
2167 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2168 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2169
2170 /* if the FW supports default PHY configuration mode, then the driver
2171 * does not have to apply link override settings. If not,
2172 * initialize user PHY configuration with link override values
2173 */
2174 if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2175 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2176 ice_init_phy_cfg_dflt_override(pi);
2177 goto out;
2178 }
2179 }
2180
2181 /* if link default override is not enabled, set user flow control and
2182 * FEC settings based on what get_phy_caps returned
2183 */
2184 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2185 pcaps->link_fec_options);
2186 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2187
2188 out:
2189 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2190 set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2191 err_out:
2192 kfree(pcaps);
2193 return err;
2194 }
2195
2196 /**
2197 * ice_configure_phy - configure PHY
2198 * @vsi: VSI of PHY
2199 *
2200 * Set the PHY configuration. If the current PHY configuration is the same as
2201 * the curr_user_phy_cfg, then do nothing to avoid a link flap. Otherwise
2202 * configure the PHY based on the Get PHY Capabilities response for the topology with media.
2203 */
2204 static int ice_configure_phy(struct ice_vsi *vsi)
2205 {
2206 struct device *dev = ice_pf_to_dev(vsi->back);
2207 struct ice_port_info *pi = vsi->port_info;
2208 struct ice_aqc_get_phy_caps_data *pcaps;
2209 struct ice_aqc_set_phy_cfg_data *cfg;
2210 struct ice_phy_info *phy = &pi->phy;
2211 struct ice_pf *pf = vsi->back;
2212 int err;
2213
2214 /* Ensure we have media as we cannot configure a medialess port */
2215 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2216 return -ENOMEDIUM;
2217
2218 ice_print_topo_conflict(vsi);
2219
2220 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2221 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2222 return -EPERM;
2223
2224 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2225 return ice_force_phys_link_state(vsi, true);
2226
2227 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2228 if (!pcaps)
2229 return -ENOMEM;
2230
2231 /* Get current PHY config */
2232 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2233 NULL);
2234 if (err) {
2235 dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2236 vsi->vsi_num, err);
2237 goto done;
2238 }
2239
2240 /* If PHY enable link is configured and configuration has not changed,
2241 * there's nothing to do
2242 */
2243 if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2244 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2245 goto done;
2246
2247 /* Use PHY topology as baseline for configuration */
2248 memset(pcaps, 0, sizeof(*pcaps));
2249 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2250 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2251 pcaps, NULL);
2252 else
2253 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2254 pcaps, NULL);
2255 if (err) {
2256 dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2257 vsi->vsi_num, err);
2258 goto done;
2259 }
2260
2261 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2262 if (!cfg) {
2263 err = -ENOMEM;
2264 goto done;
2265 }
2266
2267 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2268
2269 /* Speed - If default override pending, use curr_user_phy_cfg set in
2270 * ice_init_phy_cfg_dflt_override().
2271 */
2272 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2273 vsi->back->state)) {
2274 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2275 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2276 } else {
2277 u64 phy_low = 0, phy_high = 0;
2278
2279 ice_update_phy_type(&phy_low, &phy_high,
2280 pi->phy.curr_user_speed_req);
2281 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2282 cfg->phy_type_high = pcaps->phy_type_high &
2283 cpu_to_le64(phy_high);
2284 }
2285
2286 /* Can't provide what was requested; use PHY capabilities */
2287 if (!cfg->phy_type_low && !cfg->phy_type_high) {
2288 cfg->phy_type_low = pcaps->phy_type_low;
2289 cfg->phy_type_high = pcaps->phy_type_high;
2290 }
2291
2292 /* FEC */
2293 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2294
2295 /* Can't provide what was requested; use PHY capabilities */
2296 if (cfg->link_fec_opt !=
2297 (cfg->link_fec_opt & pcaps->link_fec_options)) {
2298 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2299 cfg->link_fec_opt = pcaps->link_fec_options;
2300 }
2301
2302 /* Flow Control - always supported; no need to check against
2303 * capabilities
2304 */
2305 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2306
2307 /* Enable link and link update */
2308 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2309
2310 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2311 if (err)
2312 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2313 vsi->vsi_num, err);
2314
2315 kfree(cfg);
2316 done:
2317 kfree(pcaps);
2318 return err;
2319 }
2320
2321 /**
2322 * ice_check_media_subtask - Check for media
2323 * @pf: pointer to PF struct
2324 *
2325 * If media is available, initialize the PHY user configuration if it has not
2326 * been done yet, and configure the PHY if the interface is up.
2327 */
2328 static void ice_check_media_subtask(struct ice_pf *pf)
2329 {
2330 struct ice_port_info *pi;
2331 struct ice_vsi *vsi;
2332 int err;
2333
2334 /* No need to check for media if it's already present */
2335 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2336 return;
2337
2338 vsi = ice_get_main_vsi(pf);
2339 if (!vsi)
2340 return;
2341
2342 /* Refresh link info and check if media is present */
2343 pi = vsi->port_info;
2344 err = ice_update_link_info(pi);
2345 if (err)
2346 return;
2347
2348 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2349
2350 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2351 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2352 ice_init_phy_user_cfg(pi);
2353
2354 /* PHY settings are reset on media insertion, reconfigure
2355 * PHY to preserve settings.
2356 */
2357 if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2358 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2359 return;
2360
2361 err = ice_configure_phy(vsi);
2362 if (!err)
2363 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2364
2365 /* A Link Status Event will be generated; the event handler
2366 * will complete bringing the interface up
2367 */
2368 }
2369 }
2370
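/**
 * ice_service_task_recovery_mode - service task run while in recovery mode
 * @work: pointer to work_struct contained by the PF struct
 *
 * While the device is in recovery mode only the AdminQ is serviced; the
 * service timer is then re-armed so the driver keeps polling for firmware
 * events.
 */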
2371 static void ice_service_task_recovery_mode(struct work_struct *work)
2372 {
2373 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2374
2375 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
2376 ice_clean_adminq_subtask(pf);
2377
2378 ice_service_task_complete(pf);
2379
2380 mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100));
2381 }
2382
2383 /**
2384 * ice_service_task - manage and run subtasks
2385 * @work: pointer to work_struct contained by the PF struct
2386 */
2387 static void ice_service_task(struct work_struct *work)
2388 {
2389 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2390 unsigned long start_time = jiffies;
2391
2392 if (pf->health_reporters.tx_hang_buf.tx_ring) {
2393 ice_report_tx_hang(pf);
2394 pf->health_reporters.tx_hang_buf.tx_ring = NULL;
2395 }
2396
2397 ice_reset_subtask(pf);
2398
2399 /* bail if a reset/recovery cycle is pending or rebuild failed */
2400 if (ice_is_reset_in_progress(pf->state) ||
2401 test_bit(ICE_SUSPENDED, pf->state) ||
2402 test_bit(ICE_NEEDS_RESTART, pf->state)) {
2403 ice_service_task_complete(pf);
2404 return;
2405 }
2406
2407 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2408 struct iidc_rdma_event *event;
2409
2410 event = kzalloc(sizeof(*event), GFP_KERNEL);
2411 if (event) {
2412 set_bit(IIDC_RDMA_EVENT_CRIT_ERR, event->type);
2413 /* report the entire OICR value to AUX driver */
2414 swap(event->reg, pf->oicr_err_reg);
2415 ice_send_event_to_aux(pf, event);
2416 kfree(event);
2417 }
2418 }
2419
2420 /* unplug aux dev per request; if an unplug request came in
2421 * while processing a plug request, this will handle it
2422 */
2423 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2424 ice_unplug_aux_dev(pf);
2425
2426 /* Plug aux device per request */
2427 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2428 ice_plug_aux_dev(pf);
2429
2430 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2431 struct iidc_rdma_event *event;
2432
2433 event = kzalloc(sizeof(*event), GFP_KERNEL);
2434 if (event) {
2435 set_bit(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE, event->type);
2436 ice_send_event_to_aux(pf, event);
2437 kfree(event);
2438 }
2439 }
2440
2441 ice_clean_adminq_subtask(pf);
2442 ice_check_media_subtask(pf);
2443 ice_check_for_hang_subtask(pf);
2444 ice_sync_fltr_subtask(pf);
2445 ice_handle_mdd_event(pf);
2446 ice_watchdog_subtask(pf);
2447
2448 if (ice_is_safe_mode(pf)) {
2449 ice_service_task_complete(pf);
2450 return;
2451 }
2452
2453 ice_process_vflr_event(pf);
2454 ice_clean_mailboxq_subtask(pf);
2455 ice_clean_sbq_subtask(pf);
2456 ice_sync_arfs_fltrs(pf);
2457 ice_flush_fdir_ctx(pf);
2458
2459 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2460 ice_service_task_complete(pf);
2461
2462 /* If the tasks have taken longer than one service timer period
2463 * or there is more work to be done, reset the service timer to
2464 * schedule the service task now.
2465 */
2466 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2467 test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2468 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2469 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2470 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2471 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2472 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2473 mod_timer(&pf->serv_tmr, jiffies);
2474 }
2475
2476 /**
2477 * ice_set_ctrlq_len - helper function to set controlq length
2478 * @hw: pointer to the HW instance
2479 */
2480 static void ice_set_ctrlq_len(struct ice_hw *hw)
2481 {
2482 hw->adminq.num_rq_entries = ICE_AQ_LEN;
2483 hw->adminq.num_sq_entries = ICE_AQ_LEN;
2484 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2485 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
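/* Note: the mailbox Rx queue is sized to the ARQLEN register field mask,
 * i.e. the largest depth the hardware can express, presumably to give the
 * PF<->VF mailbox maximum headroom (an inference, not a documented
 * requirement).
 */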
2486 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2487 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2488 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2489 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2490 hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2491 hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2492 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2493 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2494 }
2495
2496 /**
2497 * ice_schedule_reset - schedule a reset
2498 * @pf: board private structure
2499 * @reset: reset being requested
2500 */
2501 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2502 {
2503 struct device *dev = ice_pf_to_dev(pf);
2504
2505 /* bail out if earlier reset has failed */
2506 if (test_bit(ICE_RESET_FAILED, pf->state)) {
2507 dev_dbg(dev, "earlier reset has failed\n");
2508 return -EIO;
2509 }
2510 /* bail if reset/recovery already in progress */
2511 if (ice_is_reset_in_progress(pf->state)) {
2512 dev_dbg(dev, "Reset already in progress\n");
2513 return -EBUSY;
2514 }
2515
2516 switch (reset) {
2517 case ICE_RESET_PFR:
2518 set_bit(ICE_PFR_REQ, pf->state);
2519 break;
2520 case ICE_RESET_CORER:
2521 set_bit(ICE_CORER_REQ, pf->state);
2522 break;
2523 case ICE_RESET_GLOBR:
2524 set_bit(ICE_GLOBR_REQ, pf->state);
2525 break;
2526 default:
2527 return -EINVAL;
2528 }
2529
2530 ice_service_task_schedule(pf);
2531 return 0;
2532 }
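/* Illustrative sketch (not driver code): a hypothetical caller requesting a
 * PF reset. ice_schedule_reset() only records the request in pf->state and
 * kicks the service task; the reset itself runs later from
 * ice_reset_subtask() in the service task.
 */
static void example_request_pfr(struct ice_pf *pf)
{
	int err = ice_schedule_reset(pf, ICE_RESET_PFR);

	if (err)
		dev_dbg(ice_pf_to_dev(pf), "PFR not scheduled: %d\n", err);
}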
2533
2534 /**
2535 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2536 * @vsi: the VSI being configured
2537 */
2538 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2539 {
2540 struct ice_hw *hw = &vsi->back->hw;
2541 int i;
2542
2543 ice_for_each_q_vector(vsi, i)
2544 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2545
2546 ice_flush(hw);
2547 return 0;
2548 }
2549
2550 /**
2551 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2552 * @vsi: the VSI being configured
2553 * @basename: name for the vector
2554 */
2555 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2556 {
2557 int q_vectors = vsi->num_q_vectors;
2558 struct ice_pf *pf = vsi->back;
2559 struct device *dev;
2560 int rx_int_idx = 0;
2561 int tx_int_idx = 0;
2562 int vector, err;
2563 int irq_num;
2564
2565 dev = ice_pf_to_dev(pf);
2566 for (vector = 0; vector < q_vectors; vector++) {
2567 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2568
2569 irq_num = q_vector->irq.virq;
2570
2571 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2572 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2573 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2574 tx_int_idx++;
2575 } else if (q_vector->rx.rx_ring) {
2576 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2577 "%s-%s-%d", basename, "rx", rx_int_idx++);
2578 } else if (q_vector->tx.tx_ring) {
2579 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2580 "%s-%s-%d", basename, "tx", tx_int_idx++);
2581 } else {
2582 /* skip this unused q_vector */
2583 continue;
2584 }
2585 if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2586 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2587 IRQF_SHARED, q_vector->name,
2588 q_vector);
2589 else
2590 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2591 0, q_vector->name, q_vector);
2592 if (err) {
2593 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2594 err);
2595 goto free_q_irqs;
2596 }
2597 }
2598
2599 err = ice_set_cpu_rx_rmap(vsi);
2600 if (err) {
2601 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2602 vsi->vsi_num, ERR_PTR(err));
2603 goto free_q_irqs;
2604 }
2605
2606 vsi->irqs_ready = true;
2607 return 0;
2608
2609 free_q_irqs:
2610 while (vector--) {
2611 irq_num = vsi->q_vectors[vector]->irq.virq;
2612 devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2613 }
2614 return err;
2615 }
2616
2617 /**
2618 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2619 * @vsi: VSI to setup Tx rings used by XDP
2620 *
2621 * Return 0 on success and negative value on error
2622 */
2623 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2624 {
2625 struct device *dev = ice_pf_to_dev(vsi->back);
2626 struct ice_tx_desc *tx_desc;
2627 int i, j;
2628
2629 ice_for_each_xdp_txq(vsi, i) {
2630 u16 xdp_q_idx = vsi->alloc_txq + i;
2631 struct ice_ring_stats *ring_stats;
2632 struct ice_tx_ring *xdp_ring;
2633
2634 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2635 if (!xdp_ring)
2636 goto free_xdp_rings;
2637
2638 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2639 if (!ring_stats) {
2640 ice_free_tx_ring(xdp_ring);
2641 goto free_xdp_rings;
2642 }
2643
2644 xdp_ring->ring_stats = ring_stats;
2645 xdp_ring->q_index = xdp_q_idx;
2646 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2647 xdp_ring->vsi = vsi;
2648 xdp_ring->netdev = NULL;
2649 xdp_ring->dev = dev;
2650 xdp_ring->count = vsi->num_tx_desc;
2651 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2652 if (ice_setup_tx_ring(xdp_ring))
2653 goto free_xdp_rings;
2654 ice_set_ring_xdp(xdp_ring);
2655 spin_lock_init(&xdp_ring->tx_lock);
2656 for (j = 0; j < xdp_ring->count; j++) {
2657 tx_desc = ICE_TX_DESC(xdp_ring, j);
2658 tx_desc->cmd_type_offset_bsz = 0;
2659 }
2660 }
2661
2662 return 0;
2663
2664 free_xdp_rings:
2665 for (; i >= 0; i--) {
2666 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2667 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2668 vsi->xdp_rings[i]->ring_stats = NULL;
2669 ice_free_tx_ring(vsi->xdp_rings[i]);
2670 }
2671 }
2672 return -ENOMEM;
2673 }
2674
2675 /**
2676 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2677 * @vsi: VSI to set the bpf prog on
2678 * @prog: the bpf prog pointer
2679 */
2680 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2681 {
2682 struct bpf_prog *old_prog;
2683 int i;
2684
2685 old_prog = xchg(&vsi->xdp_prog, prog);
2686 ice_for_each_rxq(vsi, i)
2687 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2688
2689 if (old_prog)
2690 bpf_prog_put(old_prog);
2691 }
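/* Note on the xchg() above: it atomically publishes the new program pointer
 * and returns the old one, so datapath readers always observe either the old
 * or the new program (never a torn value) and the old program's reference is
 * dropped exactly once.
 */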
2692
2693 static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
2694 {
2695 struct ice_q_vector *q_vector;
2696 struct ice_tx_ring *ring;
2697
2698 if (static_key_enabled(&ice_xdp_locking_key))
2699 return vsi->xdp_rings[qid % vsi->num_xdp_txq];
2700
2701 q_vector = vsi->rx_rings[qid]->q_vector;
2702 ice_for_each_tx_ring(ring, q_vector->tx)
2703 if (ice_ring_is_xdp(ring))
2704 return ring;
2705
2706 return NULL;
2707 }
2708
2709 /**
2710 * ice_map_xdp_rings - Map XDP rings to interrupt vectors
2711 * @vsi: the VSI with XDP rings being configured
2712 *
2713 * Map XDP rings to interrupt vectors and perform the configuration steps
2714 * dependent on the mapping.
2715 */
2716 void ice_map_xdp_rings(struct ice_vsi *vsi)
2717 {
2718 int xdp_rings_rem = vsi->num_xdp_txq;
2719 int v_idx, q_idx;
2720
2721 /* follow the logic from ice_vsi_map_rings_to_vectors */
2722 ice_for_each_q_vector(vsi, v_idx) {
2723 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2724 int xdp_rings_per_v, q_id, q_base;
2725
2726 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2727 vsi->num_q_vectors - v_idx);
2728 q_base = vsi->num_xdp_txq - xdp_rings_rem;
2729
2730 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2731 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2732
2733 xdp_ring->q_vector = q_vector;
2734 xdp_ring->next = q_vector->tx.tx_ring;
2735 q_vector->tx.tx_ring = xdp_ring;
2736 }
2737 xdp_rings_rem -= xdp_rings_per_v;
2738 }
2739
2740 ice_for_each_rxq(vsi, q_idx) {
2741 vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
2742 q_idx);
2743 ice_tx_xsk_pool(vsi, q_idx);
2744 }
2745 }
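/* Illustrative sketch (not driver code): the loop above hands out
 * DIV_ROUND_UP(rings_remaining, vectors_left) rings per vector, e.g. 10 XDP
 * rings over 4 vectors map as 3, 3, 2, 2. A standalone model of the split:
 */
static void example_split_rings(int rings, int vectors)
{
	int v, rem = rings;

	for (v = 0; v < vectors; v++) {
		int n = DIV_ROUND_UP(rem, vectors - v);

		pr_info("vector %d gets %d ring(s)\n", v, n);
		rem -= n;
	}
}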
2746
2747 /**
2748 * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors
2749 * @vsi: the VSI with XDP rings being unmapped
2750 */
2751 static void ice_unmap_xdp_rings(struct ice_vsi *vsi)
2752 {
2753 int v_idx;
2754
2755 ice_for_each_q_vector(vsi, v_idx) {
2756 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2757 struct ice_tx_ring *ring;
2758
2759 ice_for_each_tx_ring(ring, q_vector->tx)
2760 if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2761 break;
2762
2763 /* restore the value of last node prior to XDP setup */
2764 q_vector->tx.tx_ring = ring;
2765 }
2766 }
2767
2768 /**
2769 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2770 * @vsi: VSI to bring up Tx rings used by XDP
2771 * @prog: bpf program that will be assigned to VSI
2772 * @cfg_type: create from scratch or restore the existing configuration
2773 *
2774 * Return 0 on success and negative value on error
2775 */
2776 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
2777 enum ice_xdp_cfg cfg_type)
2778 {
2779 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2780 struct ice_pf *pf = vsi->back;
2781 struct ice_qs_cfg xdp_qs_cfg = {
2782 .qs_mutex = &pf->avail_q_mutex,
2783 .pf_map = pf->avail_txqs,
2784 .pf_map_size = pf->max_pf_txqs,
2785 .q_count = vsi->num_xdp_txq,
2786 .scatter_count = ICE_MAX_SCATTER_TXQS,
2787 .vsi_map = vsi->txq_map,
2788 .vsi_map_offset = vsi->alloc_txq,
2789 .mapping_mode = ICE_VSI_MAP_CONTIG
2790 };
2791 struct device *dev;
2792 int status, i;
2793
2794 dev = ice_pf_to_dev(pf);
2795 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2796 sizeof(*vsi->xdp_rings), GFP_KERNEL);
2797 if (!vsi->xdp_rings)
2798 return -ENOMEM;
2799
2800 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2801 if (__ice_vsi_get_qs(&xdp_qs_cfg))
2802 goto err_map_xdp;
2803
2804 if (static_key_enabled(&ice_xdp_locking_key))
2805 netdev_warn(vsi->netdev,
2806 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2807
2808 if (ice_xdp_alloc_setup_rings(vsi))
2809 goto clear_xdp_rings;
2810
2811 /* omit the scheduler update if in reset path; XDP queues will be
2812 * taken into account at the end of ice_vsi_rebuild, where
2813 * ice_cfg_vsi_lan is being called
2814 */
2815 if (cfg_type == ICE_XDP_CFG_PART)
2816 return 0;
2817
2818 ice_map_xdp_rings(vsi);
2819
2820 /* tell the Tx scheduler that right now we have
2821 * additional queues
2822 */
2823 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2824 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2825
2826 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2827 max_txqs);
2828 if (status) {
2829 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2830 status);
2831 goto unmap_xdp_rings;
2832 }
2833
2834 /* assign the prog only when it's not already present on VSI;
2835 * this flow is subject to both ethtool -L and ndo_bpf flows;
2836 * VSI rebuild that happens under ethtool -L can expose us to
2837 * the bpf_prog refcount issues as we would be swapping same
2838 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2839 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2840 * this is not harmful as dev_xdp_install bumps the refcount
2841 * before calling the op exposed by the driver;
2842 */
2843 if (!ice_is_xdp_ena_vsi(vsi))
2844 ice_vsi_assign_bpf_prog(vsi, prog);
2845
2846 return 0;
2847 unmap_xdp_rings:
2848 ice_unmap_xdp_rings(vsi);
2849 clear_xdp_rings:
2850 ice_for_each_xdp_txq(vsi, i)
2851 if (vsi->xdp_rings[i]) {
2852 kfree_rcu(vsi->xdp_rings[i], rcu);
2853 vsi->xdp_rings[i] = NULL;
2854 }
2855
2856 err_map_xdp:
2857 mutex_lock(&pf->avail_q_mutex);
2858 ice_for_each_xdp_txq(vsi, i) {
2859 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2860 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2861 }
2862 mutex_unlock(&pf->avail_q_mutex);
2863
2864 devm_kfree(dev, vsi->xdp_rings);
2865 vsi->xdp_rings = NULL;
2866
2867 return -ENOMEM;
2868 }
2869
2870 /**
2871 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2872 * @vsi: VSI to remove XDP rings
2873 * @cfg_type: disable XDP permanently or allow it to be restored later
2874 *
2875 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2876 * resources
2877 */
2878 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
2879 {
2880 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2881 struct ice_pf *pf = vsi->back;
2882 int i;
2883
2884 /* q_vectors are freed in reset path so there's no point in detaching
2885 * rings
2886 */
2887 if (cfg_type == ICE_XDP_CFG_PART)
2888 goto free_qmap;
2889
2890 ice_unmap_xdp_rings(vsi);
2891
2892 free_qmap:
2893 mutex_lock(&pf->avail_q_mutex);
2894 ice_for_each_xdp_txq(vsi, i) {
2895 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2896 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2897 }
2898 mutex_unlock(&pf->avail_q_mutex);
2899
2900 ice_for_each_xdp_txq(vsi, i)
2901 if (vsi->xdp_rings[i]) {
2902 if (vsi->xdp_rings[i]->desc) {
2903 synchronize_rcu();
2904 ice_free_tx_ring(vsi->xdp_rings[i]);
2905 }
2906 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2907 vsi->xdp_rings[i]->ring_stats = NULL;
2908 kfree_rcu(vsi->xdp_rings[i], rcu);
2909 vsi->xdp_rings[i] = NULL;
2910 }
2911
2912 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2913 vsi->xdp_rings = NULL;
2914
2915 if (static_key_enabled(&ice_xdp_locking_key))
2916 static_branch_dec(&ice_xdp_locking_key);
2917
2918 if (cfg_type == ICE_XDP_CFG_PART)
2919 return 0;
2920
2921 ice_vsi_assign_bpf_prog(vsi, NULL);
2922
2923 /* notify Tx scheduler that we destroyed XDP queues and bring
2924 * back the old number of child nodes
2925 */
2926 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2927 max_txqs[i] = vsi->num_txq;
2928
2929 /* change number of XDP Tx queues to 0 */
2930 vsi->num_xdp_txq = 0;
2931
2932 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2933 max_txqs);
2934 }
2935
2936 /**
2937 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2938 * @vsi: VSI to schedule napi on
2939 */
2940 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2941 {
2942 int i;
2943
2944 ice_for_each_rxq(vsi, i) {
2945 struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2946
2947 if (READ_ONCE(rx_ring->xsk_pool))
2948 napi_schedule(&rx_ring->q_vector->napi);
2949 }
2950 }
2951
2952 /**
2953 * ice_vsi_determine_xdp_res - figure out how many Tx queues XDP can have
2954 * @vsi: VSI to determine the count of XDP Tx qs
2955 *
2956 * Returns 0 if the available Tx queue count is at least half the CPU count,
2957 * -ENOMEM otherwise
2958 */
2959 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2960 {
2961 u16 avail = ice_get_avail_txq_count(vsi->back);
2962 u16 cpus = num_possible_cpus();
2963
2964 if (avail < cpus / 2)
2965 return -ENOMEM;
2966
2967 if (vsi->type == ICE_VSI_SF)
2968 avail = vsi->alloc_txq;
2969
2970 vsi->num_xdp_txq = min_t(u16, avail, cpus);
2971
2972 if (vsi->num_xdp_txq < cpus)
2973 static_branch_inc(&ice_xdp_locking_key);
2974
2975 return 0;
2976 }
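/* Worked example of the policy above (numbers assumed): on a 16-CPU system
 * with 12 available Tx queues, 12 >= 16 / 2 so the request succeeds and
 * num_xdp_txq = min(12, 16) = 12. Because 12 < 16, several CPUs share XDP
 * rings, so the static locking key is enabled and XDP Tx then serializes on
 * xdp_ring->tx_lock.
 */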
2977
2978 /**
2979 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2980 * @vsi: Pointer to VSI structure
2981 */
2982 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
2983 {
2984 if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
2985 return ICE_RXBUF_1664;
2986 else
2987 return ICE_RXBUF_3072;
2988 }
2989
2990 /**
2991 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2992 * @vsi: VSI to setup XDP for
2993 * @prog: XDP program
2994 * @extack: netlink extended ack
2995 */
2996 static int
2997 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2998 struct netlink_ext_ack *extack)
2999 {
3000 unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
3001 int ret = 0, xdp_ring_err = 0;
3002 bool if_running;
3003
3004 if (prog && !prog->aux->xdp_has_frags) {
3005 if (frame_size > ice_max_xdp_frame_size(vsi)) {
3006 NL_SET_ERR_MSG_MOD(extack,
3007 "MTU is too large for linear frames and XDP prog does not support frags");
3008 return -EOPNOTSUPP;
3009 }
3010 }
3011
3012 /* hot swap progs and avoid toggling link */
3013 if (ice_is_xdp_ena_vsi(vsi) == !!prog ||
3014 test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) {
3015 ice_vsi_assign_bpf_prog(vsi, prog);
3016 return 0;
3017 }
3018
3019 if_running = netif_running(vsi->netdev) &&
3020 !test_and_set_bit(ICE_VSI_DOWN, vsi->state);
3021
3022 /* need to stop netdev while setting up the program for Rx rings */
3023 if (if_running) {
3024 ret = ice_down(vsi);
3025 if (ret) {
3026 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
3027 return ret;
3028 }
3029 }
3030
3031 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
3032 xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
3033 if (xdp_ring_err) {
3034 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
3035 goto resume_if;
3036 } else {
3037 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
3038 ICE_XDP_CFG_FULL);
3039 if (xdp_ring_err) {
3040 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
3041 goto resume_if;
3042 }
3043 }
3044 xdp_features_set_redirect_target(vsi->netdev, true);
3045 /* reallocate Rx queues that are used for zero-copy */
3046 xdp_ring_err = ice_realloc_zc_buf(vsi, true);
3047 if (xdp_ring_err)
3048 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
3049 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
3050 xdp_features_clear_redirect_target(vsi->netdev);
3051 xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
3052 if (xdp_ring_err)
3053 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
3054 /* reallocate Rx queues that were used for zero-copy */
3055 xdp_ring_err = ice_realloc_zc_buf(vsi, false);
3056 if (xdp_ring_err)
3057 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
3058 }
3059
3060 resume_if:
3061 if (if_running)
3062 ret = ice_up(vsi);
3063
3064 if (!ret && prog)
3065 ice_vsi_rx_napi_schedule(vsi);
3066
3067 return (ret || xdp_ring_err) ? -ENOMEM : 0;
3068 }
3069
3070 /**
3071 * ice_xdp_safe_mode - XDP handler for safe mode
3072 * @dev: netdevice
3073 * @xdp: XDP command
3074 */
3075 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
3076 struct netdev_bpf *xdp)
3077 {
3078 NL_SET_ERR_MSG_MOD(xdp->extack,
3079 "Please provide working DDP firmware package in order to use XDP\n"
3080 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
3081 return -EOPNOTSUPP;
3082 }
3083
3084 /**
3085 * ice_xdp - implements XDP handler
3086 * @dev: netdevice
3087 * @xdp: XDP command
3088 */
3089 int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3090 {
3091 struct ice_netdev_priv *np = netdev_priv(dev);
3092 struct ice_vsi *vsi = np->vsi;
3093 int ret;
3094
3095 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) {
3096 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI");
3097 return -EINVAL;
3098 }
3099
3100 mutex_lock(&vsi->xdp_state_lock);
3101
3102 switch (xdp->command) {
3103 case XDP_SETUP_PROG:
3104 ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3105 break;
3106 case XDP_SETUP_XSK_POOL:
3107 ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);
3108 break;
3109 default:
3110 ret = -EINVAL;
3111 }
3112
3113 mutex_unlock(&vsi->xdp_state_lock);
3114 return ret;
3115 }
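/* Note: ice_xdp() is wired up as the .ndo_bpf callback in ice_netdev_ops
 * later in this file. The core calls it with the rtnl lock held, and
 * dev_xdp_install() holds a reference on xdp->prog across the call, which is
 * why the hot-swap path in ice_xdp_setup_prog() can take over that reference
 * without bumping it.
 */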
3116
3117 /**
3118 * ice_ena_misc_vector - enable the non-queue interrupts
3119 * @pf: board private structure
3120 */
3121 static void ice_ena_misc_vector(struct ice_pf *pf)
3122 {
3123 struct ice_hw *hw = &pf->hw;
3124 u32 pf_intr_start_offset;
3125 u32 val;
3126
3127 /* Disable anti-spoof detection interrupt to prevent spurious event
3128 * interrupts during a function reset. Anti-spoof functionality is
3129 * still supported.
3130 */
3131 val = rd32(hw, GL_MDCK_TX_TDPU);
3132 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3133 wr32(hw, GL_MDCK_TX_TDPU, val);
3134
3135 /* clear things first */
3136 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
3137 rd32(hw, PFINT_OICR); /* read to clear */
3138
3139 val = (PFINT_OICR_ECC_ERR_M |
3140 PFINT_OICR_MAL_DETECT_M |
3141 PFINT_OICR_GRST_M |
3142 PFINT_OICR_PCI_EXCEPTION_M |
3143 PFINT_OICR_VFLR_M |
3144 PFINT_OICR_HMC_ERR_M |
3145 PFINT_OICR_PE_PUSH_M |
3146 PFINT_OICR_PE_CRITERR_M);
3147
3148 wr32(hw, PFINT_OICR_ENA, val);
3149
3150 /* SW_ITR_IDX = 0, but don't change INTENA */
3151 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3152 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3153
3154 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3155 return;
3156 pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3157 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3158 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3159 }
3160
3161 /**
3162 * ice_ll_ts_intr - ll_ts interrupt handler
3163 * @irq: interrupt number
3164 * @data: pointer to the PF structure
3165 */
3166 static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
3167 {
3168 struct ice_pf *pf = data;
3169 u32 pf_intr_start_offset;
3170 struct ice_ptp_tx *tx;
3171 unsigned long flags;
3172 struct ice_hw *hw;
3173 u32 val;
3174 u8 idx;
3175
3176 hw = &pf->hw;
3177 tx = &pf->ptp.port.tx;
3178 spin_lock_irqsave(&tx->lock, flags);
3179 ice_ptp_complete_tx_single_tstamp(tx);
3180
3181 idx = find_next_bit_wrap(tx->in_use, tx->len,
3182 tx->last_ll_ts_idx_read + 1);
3183 if (idx != tx->len)
3184 ice_ptp_req_tx_single_tstamp(tx, idx);
3185 spin_unlock_irqrestore(&tx->lock, flags);
3186
3187 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
3188 (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
3189 pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3190 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3191 val);
3192
3193 return IRQ_HANDLED;
3194 }
3195
3196 /**
3197 * ice_misc_intr - misc interrupt handler
3198 * @irq: interrupt number
3199 * @data: pointer to the PF structure
3200 */
3201 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3202 {
3203 struct ice_pf *pf = (struct ice_pf *)data;
3204 irqreturn_t ret = IRQ_HANDLED;
3205 struct ice_hw *hw = &pf->hw;
3206 struct device *dev;
3207 u32 oicr, ena_mask;
3208
3209 dev = ice_pf_to_dev(pf);
3210 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3211 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3212 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3213
3214 oicr = rd32(hw, PFINT_OICR);
3215 ena_mask = rd32(hw, PFINT_OICR_ENA);
3216
3217 if (oicr & PFINT_OICR_SWINT_M) {
3218 ena_mask &= ~PFINT_OICR_SWINT_M;
3219 pf->sw_int_count++;
3220 }
3221
3222 if (oicr & PFINT_OICR_MAL_DETECT_M) {
3223 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3224 set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3225 }
3226 if (oicr & PFINT_OICR_VFLR_M) {
3227 /* disable any further VFLR event notifications */
3228 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3229 u32 reg = rd32(hw, PFINT_OICR_ENA);
3230
3231 reg &= ~PFINT_OICR_VFLR_M;
3232 wr32(hw, PFINT_OICR_ENA, reg);
3233 } else {
3234 ena_mask &= ~PFINT_OICR_VFLR_M;
3235 set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3236 }
3237 }
3238
3239 if (oicr & PFINT_OICR_GRST_M) {
3240 u32 reset;
3241
3242 /* we have a reset warning */
3243 ena_mask &= ~PFINT_OICR_GRST_M;
3244 reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M,
3245 rd32(hw, GLGEN_RSTAT));
3246
3247 if (reset == ICE_RESET_CORER)
3248 pf->corer_count++;
3249 else if (reset == ICE_RESET_GLOBR)
3250 pf->globr_count++;
3251 else if (reset == ICE_RESET_EMPR)
3252 pf->empr_count++;
3253 else
3254 dev_dbg(dev, "Invalid reset type %d\n", reset);
3255
3256 /* If a reset cycle isn't already in progress, we set a bit in
3257 * pf->state so that the service task can start a reset/rebuild.
3258 */
3259 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3260 if (reset == ICE_RESET_CORER)
3261 set_bit(ICE_CORER_RECV, pf->state);
3262 else if (reset == ICE_RESET_GLOBR)
3263 set_bit(ICE_GLOBR_RECV, pf->state);
3264 else
3265 set_bit(ICE_EMPR_RECV, pf->state);
3266
3267 /* There are couple of different bits at play here.
3268 * hw->reset_ongoing indicates whether the hardware is
3269 * in reset. This is set to true when a reset interrupt
3270 * is received and set back to false after the driver
3271 * has determined that the hardware is out of reset.
3272 *
3273 * ICE_RESET_OICR_RECV in pf->state indicates
3274 * that a post reset rebuild is required before the
3275 * driver is operational again. This is set above.
3276 *
3277 * As this is the start of the reset/rebuild cycle, set
3278 * both to indicate that.
3279 */
3280 hw->reset_ongoing = true;
3281 }
3282 }
3283
3284 if (oicr & PFINT_OICR_TSYN_TX_M) {
3285 ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3286
3287 ret = ice_ptp_ts_irq(pf);
3288 }
3289
3290 if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3291 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3292 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3293
3294 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3295
3296 if (ice_pf_src_tmr_owned(pf)) {
3297 /* Save EVENTs from GLTSYN register */
3298 pf->ptp.ext_ts_irq |= gltsyn_stat &
3299 (GLTSYN_STAT_EVENT0_M |
3300 GLTSYN_STAT_EVENT1_M |
3301 GLTSYN_STAT_EVENT2_M);
3302
3303 ice_ptp_extts_event(pf);
3304 }
3305 }
3306
3307 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3308 if (oicr & ICE_AUX_CRIT_ERR) {
3309 pf->oicr_err_reg |= oicr;
3310 set_bit(ICE_AUX_ERR_PENDING, pf->state);
3311 ena_mask &= ~ICE_AUX_CRIT_ERR;
3312 }
3313
3314 /* Report any remaining unexpected interrupts */
3315 oicr &= ena_mask;
3316 if (oicr) {
3317 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3318 /* If a critical error is pending there is no choice but to
3319 * reset the device.
3320 */
3321 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3322 PFINT_OICR_ECC_ERR_M)) {
3323 set_bit(ICE_PFR_REQ, pf->state);
3324 }
3325 }
3326 ice_service_task_schedule(pf);
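/* ice_ptp_ts_irq() may have returned IRQ_WAKE_THREAD; in that case
 * re-enabling the interrupt is left to ice_misc_intr_thread_fn()
 */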
3327 if (ret == IRQ_HANDLED)
3328 ice_irq_dynamic_ena(hw, NULL, NULL);
3329
3330 return ret;
3331 }
3332
3333 /**
3334 * ice_misc_intr_thread_fn - misc interrupt thread function
3335 * @irq: interrupt number
3336 * @data: pointer to the PF structure
3337 */
3338 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3339 {
3340 struct ice_pf *pf = data;
3341 struct ice_hw *hw;
3342
3343 hw = &pf->hw;
3344
3345 if (ice_is_reset_in_progress(pf->state))
3346 goto skip_irq;
3347
3348 if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
3349 /* Process outstanding Tx timestamps. If there is more work,
3350 * re-arm the interrupt to trigger again.
3351 */
3352 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
3353 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
3354 ice_flush(hw);
3355 }
3356 }
3357
3358 skip_irq:
3359 ice_irq_dynamic_ena(hw, NULL, NULL);
3360
3361 return IRQ_HANDLED;
3362 }
3363
3364 /**
3365 * ice_dis_ctrlq_interrupts - disable control queue interrupts
3366 * @hw: pointer to HW structure
3367 */
3368 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3369 {
3370 /* disable Admin queue Interrupt causes */
3371 wr32(hw, PFINT_FW_CTL,
3372 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3373
3374 /* disable Mailbox queue Interrupt causes */
3375 wr32(hw, PFINT_MBX_CTL,
3376 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3377
3378 wr32(hw, PFINT_SB_CTL,
3379 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3380
3381 /* disable Control queue Interrupt causes */
3382 wr32(hw, PFINT_OICR_CTL,
3383 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3384
3385 ice_flush(hw);
3386 }
3387
3388 /**
3389 * ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup
3390 * @pf: board private structure
3391 */
3392 static void ice_free_irq_msix_ll_ts(struct ice_pf *pf)
3393 {
3394 int irq_num = pf->ll_ts_irq.virq;
3395
3396 synchronize_irq(irq_num);
3397 devm_free_irq(ice_pf_to_dev(pf), irq_num, pf);
3398
3399 ice_free_irq(pf, pf->ll_ts_irq);
3400 }
3401
3402 /**
3403 * ice_free_irq_msix_misc - Unroll misc vector setup
3404 * @pf: board private structure
3405 */
3406 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3407 {
3408 int misc_irq_num = pf->oicr_irq.virq;
3409 struct ice_hw *hw = &pf->hw;
3410
3411 ice_dis_ctrlq_interrupts(hw);
3412
3413 /* disable OICR interrupt */
3414 wr32(hw, PFINT_OICR_ENA, 0);
3415 ice_flush(hw);
3416
3417 synchronize_irq(misc_irq_num);
3418 devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3419
3420 ice_free_irq(pf, pf->oicr_irq);
3421 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3422 ice_free_irq_msix_ll_ts(pf);
3423 }
3424
3425 /**
3426 * ice_ena_ctrlq_interrupts - enable control queue interrupts
3427 * @hw: pointer to HW structure
3428 * @reg_idx: HW vector index to associate the control queue interrupts with
3429 */
3430 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3431 {
3432 u32 val;
3433
3434 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3435 PFINT_OICR_CTL_CAUSE_ENA_M);
3436 wr32(hw, PFINT_OICR_CTL, val);
3437
3438 /* enable Admin queue Interrupt causes */
3439 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3440 PFINT_FW_CTL_CAUSE_ENA_M);
3441 wr32(hw, PFINT_FW_CTL, val);
3442
3443 /* enable Mailbox queue Interrupt causes */
3444 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3445 PFINT_MBX_CTL_CAUSE_ENA_M);
3446 wr32(hw, PFINT_MBX_CTL, val);
3447
3448 if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) {
3449 /* enable Sideband queue Interrupt causes */
3450 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3451 PFINT_SB_CTL_CAUSE_ENA_M);
3452 wr32(hw, PFINT_SB_CTL, val);
3453 }
3454
3455 ice_flush(hw);
3456 }
3457
3458 /**
3459 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3460 * @pf: board private structure
3461 *
3462 * This sets up the handler for MSIX 0, which is used to manage the
3463 * non-queue interrupts, e.g. AdminQ and errors. This is not used
3464 * when in MSI or Legacy interrupt mode.
3465 */
3466 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3467 {
3468 struct device *dev = ice_pf_to_dev(pf);
3469 struct ice_hw *hw = &pf->hw;
3470 u32 pf_intr_start_offset;
3471 struct msi_map irq;
3472 int err = 0;
3473
3474 if (!pf->int_name[0])
3475 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3476 dev_driver_string(dev), dev_name(dev));
3477
3478 if (!pf->int_name_ll_ts[0])
3479 snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1,
3480 "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev));
3481 /* Do not request IRQ but do enable OICR interrupt since settings are
3482 * lost during reset. Note that this function is called only during
3483 * rebuild path and not while reset is in progress.
3484 */
3485 if (ice_is_reset_in_progress(pf->state))
3486 goto skip_req_irq;
3487
3488 /* reserve one vector in irq_tracker for misc interrupts */
3489 irq = ice_alloc_irq(pf, false);
3490 if (irq.index < 0)
3491 return irq.index;
3492
3493 pf->oicr_irq = irq;
3494 err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3495 ice_misc_intr_thread_fn, 0,
3496 pf->int_name, pf);
3497 if (err) {
3498 dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3499 pf->int_name, err);
3500 ice_free_irq(pf, pf->oicr_irq);
3501 return err;
3502 }
3503
3504 /* reserve one vector in irq_tracker for ll_ts interrupt */
3505 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3506 goto skip_req_irq;
3507
3508 irq = ice_alloc_irq(pf, false);
3509 if (irq.index < 0)
3510 return irq.index;
3511
3512 pf->ll_ts_irq = irq;
3513 err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0,
3514 pf->int_name_ll_ts, pf);
3515 if (err) {
3516 dev_err(dev, "devm_request_irq for %s failed: %d\n",
3517 pf->int_name_ll_ts, err);
3518 ice_free_irq(pf, pf->ll_ts_irq);
3519 return err;
3520 }
3521
3522 skip_req_irq:
3523 ice_ena_misc_vector(pf);
3524
3525 ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3526 /* This enables LL TS interrupt */
3527 pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3528 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3529 wr32(hw, PFINT_SB_CTL,
3530 ((pf->ll_ts_irq.index + pf_intr_start_offset) &
3531 PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M);
3532 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3533 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3534
3535 ice_flush(hw);
3536 ice_irq_dynamic_ena(hw, NULL, NULL);
3537
3538 return 0;
3539 }
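
/* A minimal sketch of the hard/threaded IRQ split requested above via
 * devm_request_threaded_irq() (handler names here are illustrative, not
 * the driver's actual handlers): the hard handler runs in interrupt
 * context and defers sleepable follow-up work to the thread function.
 *
 *	static irqreturn_t example_hard_handler(int irq, void *data)
 *	{
 *		// read/ack the interrupt cause; no sleeping allowed here
 *		return IRQ_WAKE_THREAD;	// wake the thread function
 *	}
 *
 *	static irqreturn_t example_thread_fn(int irq, void *data)
 *	{
 *		// process-context work that may sleep runs here
 *		return IRQ_HANDLED;
 *	}
 */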
3540
3541 /**
3542 * ice_set_ops - set netdev and ethtool ops for the given netdev
3543 * @vsi: the VSI associated with the new netdev
3544 */
3545 static void ice_set_ops(struct ice_vsi *vsi)
3546 {
3547 struct net_device *netdev = vsi->netdev;
3548 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3549
3550 if (ice_is_safe_mode(pf)) {
3551 netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3552 ice_set_ethtool_safe_mode_ops(netdev);
3553 return;
3554 }
3555
3556 netdev->netdev_ops = &ice_netdev_ops;
3557 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3558 netdev->xdp_metadata_ops = &ice_xdp_md_ops;
3559 ice_set_ethtool_ops(netdev);
3560
3561 if (vsi->type != ICE_VSI_PF)
3562 return;
3563
3564 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
3565 NETDEV_XDP_ACT_XSK_ZEROCOPY |
3566 NETDEV_XDP_ACT_RX_SG;
3567 netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
3568 }
3569
3570 /**
3571 * ice_set_netdev_features - set features for the given netdev
3572 * @netdev: netdev instance
3573 */
3574 void ice_set_netdev_features(struct net_device *netdev)
3575 {
3576 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3577 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3578 netdev_features_t csumo_features;
3579 netdev_features_t vlano_features;
3580 netdev_features_t dflt_features;
3581 netdev_features_t tso_features;
3582
3583 if (ice_is_safe_mode(pf)) {
3584 /* safe mode */
3585 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3586 netdev->hw_features = netdev->features;
3587 return;
3588 }
3589
3590 dflt_features = NETIF_F_SG |
3591 NETIF_F_HIGHDMA |
3592 NETIF_F_NTUPLE |
3593 NETIF_F_RXHASH;
3594
3595 csumo_features = NETIF_F_RXCSUM |
3596 NETIF_F_IP_CSUM |
3597 NETIF_F_SCTP_CRC |
3598 NETIF_F_IPV6_CSUM;
3599
3600 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3601 NETIF_F_HW_VLAN_CTAG_TX |
3602 NETIF_F_HW_VLAN_CTAG_RX;
3603
3604 /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3605 if (is_dvm_ena)
3606 vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3607
3608 tso_features = NETIF_F_TSO |
3609 NETIF_F_TSO_ECN |
3610 NETIF_F_TSO6 |
3611 NETIF_F_GSO_GRE |
3612 NETIF_F_GSO_UDP_TUNNEL |
3613 NETIF_F_GSO_GRE_CSUM |
3614 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3615 NETIF_F_GSO_PARTIAL |
3616 NETIF_F_GSO_IPXIP4 |
3617 NETIF_F_GSO_IPXIP6 |
3618 NETIF_F_GSO_UDP_L4;
3619
3620 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3621 NETIF_F_GSO_GRE_CSUM;
3622 /* set features that user can change */
3623 netdev->hw_features = dflt_features | csumo_features |
3624 vlano_features | tso_features;
3625
3626 /* add support for HW_CSUM on packets with MPLS header */
3627 netdev->mpls_features = NETIF_F_HW_CSUM |
3628 NETIF_F_TSO |
3629 NETIF_F_TSO6;
3630
3631 /* enable features */
3632 netdev->features |= netdev->hw_features;
3633
3634 netdev->hw_features |= NETIF_F_HW_TC;
3635 netdev->hw_features |= NETIF_F_LOOPBACK;
3636
3637 /* encap and VLAN devices inherit default, csumo and tso features */
3638 netdev->hw_enc_features |= dflt_features | csumo_features |
3639 tso_features;
3640 netdev->vlan_features |= dflt_features | csumo_features |
3641 tso_features;
3642
3643 /* advertise support but don't enable by default since only one type of
3644 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3645 * type turns on the other has to be turned off. This is enforced by the
3646 * ice_fix_features() ndo callback.
3647 */
3648 if (is_dvm_ena)
3649 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3650 NETIF_F_HW_VLAN_STAG_TX;
3651
3652 /* Leave CRC / FCS stripping enabled by default, but allow the value to
3653 * be changed at runtime
3654 */
3655 netdev->hw_features |= NETIF_F_RXFCS;
3656
3657 /* Allow core to manage IRQs affinity */
3658 netif_set_affinity_auto(netdev);
3659
3660 /* Mutual exclusivity for TSO and GCS is enforced by the set features
3661 * ndo callback.
3662 */
3663 if (ice_is_feature_supported(pf, ICE_F_GCS))
3664 netdev->hw_features |= NETIF_F_HW_CSUM;
3665
3666 netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
3667 }
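
/* A minimal sketch (not the driver's actual ndo_fix_features callback,
 * which the comment above refers to) of how mutually exclusive offload
 * bits such as CTAG/STAG Rx stripping are typically reconciled before a
 * feature change is committed:
 *
 *	static netdev_features_t
 *	example_fix_features(struct net_device *netdev, netdev_features_t f)
 *	{
 *		// keep at most one VLAN Rx offload type active at a time
 *		if ((f & NETIF_F_HW_VLAN_CTAG_RX) &&
 *		    (f & NETIF_F_HW_VLAN_STAG_RX))
 *			f &= ~NETIF_F_HW_VLAN_STAG_RX;
 *		return f;
 *	}
 */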
3668
3669 /**
3670 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3671 * @lut: Lookup table
3672 * @rss_table_size: Lookup table size
3673 * @rss_size: Range of queue number for hashing
3674 */
3675 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3676 {
3677 u16 i;
3678
3679 for (i = 0; i < rss_table_size; i++)
3680 lut[i] = i % rss_size;
3681 }
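
/* Worked example (illustrative sizes): with rss_table_size = 8 and
 * rss_size = 3, the loop above produces lut = { 0, 1, 2, 0, 1, 2, 0, 1 },
 * i.e. hash buckets are spread round-robin across the three active queues.
 */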
3682
3683 /**
3684 * ice_pf_vsi_setup - Set up a PF VSI
3685 * @pf: board private structure
3686 * @pi: pointer to the port_info instance
3687 *
3688 * Returns pointer to the successfully allocated VSI software struct
3689 * on success, otherwise returns NULL on failure.
3690 */
3691 static struct ice_vsi *
3692 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3693 {
3694 struct ice_vsi_cfg_params params = {};
3695
3696 params.type = ICE_VSI_PF;
3697 params.port_info = pi;
3698 params.flags = ICE_VSI_FLAG_INIT;
3699
3700 return ice_vsi_setup(pf, &params);
3701 }
3702
3703 static struct ice_vsi *
3704 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3705 struct ice_channel *ch)
3706 {
3707 struct ice_vsi_cfg_params params = {};
3708
3709 params.type = ICE_VSI_CHNL;
3710 params.port_info = pi;
3711 params.ch = ch;
3712 params.flags = ICE_VSI_FLAG_INIT;
3713
3714 return ice_vsi_setup(pf, &params);
3715 }
3716
3717 /**
3718 * ice_ctrl_vsi_setup - Set up a control VSI
3719 * @pf: board private structure
3720 * @pi: pointer to the port_info instance
3721 *
3722 * Returns pointer to the successfully allocated VSI software struct
3723 * on success, otherwise returns NULL on failure.
3724 */
3725 static struct ice_vsi *
3726 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3727 {
3728 struct ice_vsi_cfg_params params = {};
3729
3730 params.type = ICE_VSI_CTRL;
3731 params.port_info = pi;
3732 params.flags = ICE_VSI_FLAG_INIT;
3733
3734 return ice_vsi_setup(pf, &params);
3735 }
3736
3737 /**
3738 * ice_lb_vsi_setup - Set up a loopback VSI
3739 * @pf: board private structure
3740 * @pi: pointer to the port_info instance
3741 *
3742 * Returns pointer to the successfully allocated VSI software struct
3743 * on success, otherwise returns NULL on failure.
3744 */
3745 struct ice_vsi *
3746 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3747 {
3748 struct ice_vsi_cfg_params params = {};
3749
3750 params.type = ICE_VSI_LB;
3751 params.port_info = pi;
3752 params.flags = ICE_VSI_FLAG_INIT;
3753
3754 return ice_vsi_setup(pf, &params);
3755 }
3756
3757 /**
3758 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3759 * @netdev: network interface to be adjusted
3760 * @proto: VLAN TPID
3761 * @vid: VLAN ID to be added
3762 *
3763 * net_device_ops implementation for adding VLAN IDs
3764 */
3765 int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3766 {
3767 struct ice_netdev_priv *np = netdev_priv(netdev);
3768 struct ice_vsi_vlan_ops *vlan_ops;
3769 struct ice_vsi *vsi = np->vsi;
3770 struct ice_vlan vlan;
3771 int ret;
3772
3773 /* VLAN 0 is added by default during load/reset */
3774 if (!vid)
3775 return 0;
3776
3777 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3778 usleep_range(1000, 2000);
3779
3780 /* Add multicast promisc rule for the VLAN ID to be added if
3781 * all-multicast is currently enabled.
3782 */
3783 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3784 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3785 ICE_MCAST_VLAN_PROMISC_BITS,
3786 vid);
3787 if (ret)
3788 goto finish;
3789 }
3790
3791 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3792
3793 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3794 * packets aren't pruned by the device's internal switch on Rx
3795 */
3796 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3797 ret = vlan_ops->add_vlan(vsi, &vlan);
3798 if (ret)
3799 goto finish;
3800
3801 /* If all-multicast is currently enabled and this VLAN ID is the only
3802 * one besides VLAN-0, we have to update the look-up type of the multicast
3803 * promisc rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3804 */
3805 if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3806 ice_vsi_num_non_zero_vlans(vsi) == 1) {
3807 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3808 ICE_MCAST_PROMISC_BITS, 0);
3809 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3810 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3811 }
3812
3813 finish:
3814 clear_bit(ICE_CFG_BUSY, vsi->state);
3815
3816 return ret;
3817 }
3818
3819 /**
3820 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3821 * @netdev: network interface to be adjusted
3822 * @proto: VLAN TPID
3823 * @vid: VLAN ID to be removed
3824 *
3825 * net_device_ops implementation for removing VLAN IDs
3826 */
3827 int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3828 {
3829 struct ice_netdev_priv *np = netdev_priv(netdev);
3830 struct ice_vsi_vlan_ops *vlan_ops;
3831 struct ice_vsi *vsi = np->vsi;
3832 struct ice_vlan vlan;
3833 int ret;
3834
3835 /* don't allow removal of VLAN 0 */
3836 if (!vid)
3837 return 0;
3838
3839 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3840 usleep_range(1000, 2000);
3841
3842 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3843 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3844 if (ret) {
3845 netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3846 vsi->vsi_num);
3847 vsi->current_netdev_flags |= IFF_ALLMULTI;
3848 }
3849
3850 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3851
3852 /* Make sure VLAN delete is successful before updating VLAN
3853 * information
3854 */
3855 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3856 ret = vlan_ops->del_vlan(vsi, &vlan);
3857 if (ret)
3858 goto finish;
3859
3860 /* Remove multicast promisc rule for the removed VLAN ID if
3861 * all-multicast is enabled.
3862 */
3863 if (vsi->current_netdev_flags & IFF_ALLMULTI)
3864 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3865 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3866
3867 if (!ice_vsi_has_non_zero_vlans(vsi)) {
3868 /* Update look-up type of multicast promisc rule for VLAN 0
3869 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3870 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3871 */
3872 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3873 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3874 ICE_MCAST_VLAN_PROMISC_BITS,
3875 0);
3876 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3877 ICE_MCAST_PROMISC_BITS, 0);
3878 }
3879 }
3880
3881 finish:
3882 clear_bit(ICE_CFG_BUSY, vsi->state);
3883
3884 return ret;
3885 }
3886
3887 /**
3888 * ice_rep_indr_tc_block_unbind - remove and free an indirect block's private data
3889 * @cb_priv: indirection block private data
3890 */
3891 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3892 {
3893 struct ice_indr_block_priv *indr_priv = cb_priv;
3894
3895 list_del(&indr_priv->list);
3896 kfree(indr_priv);
3897 }
3898
3899 /**
3900 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3901 * @vsi: VSI struct which has the netdev
3902 */
3903 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3904 {
3905 struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3906
3907 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3908 ice_rep_indr_tc_block_unbind);
3909 }
3910
3911 /**
3912 * ice_tc_indir_block_register - Register TC indirect block notifications
3913 * @vsi: VSI struct which has the netdev
3914 *
3915 * Returns 0 on success, negative value on failure
3916 */
3917 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3918 {
3919 struct ice_netdev_priv *np;
3920
3921 if (!vsi || !vsi->netdev)
3922 return -EINVAL;
3923
3924 np = netdev_priv(vsi->netdev);
3925
3926 INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3927 return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3928 }
3929
3930 /**
3931 * ice_get_avail_q_count - Get count of available (unused) queues
3932 * @pf_qmap: bitmap to get the available queue count from
3933 * @lock: pointer to a mutex that protects access to pf_qmap
3934 * @size: size of the bitmap
3935 */
3936 static u16
3937 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3938 {
3939 unsigned long bit;
3940 u16 count = 0;
3941
3942 mutex_lock(lock);
3943 for_each_clear_bit(bit, pf_qmap, size)
3944 count++;
3945 mutex_unlock(lock);
3946
3947 return count;
3948 }
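
/* Worked example (illustrative bitmap): for a 4-bit qmap of 0b0101, with
 * queues 0 and 2 already handed out to VSIs (set bits mark queues in use),
 * for_each_clear_bit() visits bits 1 and 3 and the function returns 2
 * available queues.
 */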
3949
3950 /**
3951 * ice_get_avail_txq_count - Get count of available Tx queues
3952 * @pf: pointer to an ice_pf instance
3953 */
3954 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3955 {
3956 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3957 pf->max_pf_txqs);
3958 }
3959
3960 /**
3961 * ice_get_avail_rxq_count - Get count of available Rx queues
3962 * @pf: pointer to an ice_pf instance
3963 */
3964 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3965 {
3966 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3967 pf->max_pf_rxqs);
3968 }
3969
3970 /**
3971 * ice_deinit_pf - Unroll initializations done by ice_init_pf
3972 * @pf: board private structure to de-initialize
3973 */
3974 static void ice_deinit_pf(struct ice_pf *pf)
3975 {
3976 ice_service_task_stop(pf);
3977 mutex_destroy(&pf->lag_mutex);
3978 mutex_destroy(&pf->adev_mutex);
3979 mutex_destroy(&pf->sw_mutex);
3980 mutex_destroy(&pf->tc_mutex);
3981 mutex_destroy(&pf->avail_q_mutex);
3982 mutex_destroy(&pf->vfs.table_lock);
3983
3984 if (pf->avail_txqs) {
3985 bitmap_free(pf->avail_txqs);
3986 pf->avail_txqs = NULL;
3987 }
3988
3989 if (pf->avail_rxqs) {
3990 bitmap_free(pf->avail_rxqs);
3991 pf->avail_rxqs = NULL;
3992 }
3993
3994 if (pf->ptp.clock)
3995 ptp_clock_unregister(pf->ptp.clock);
3996
3997 xa_destroy(&pf->dyn_ports);
3998 xa_destroy(&pf->sf_nums);
3999 }
4000
4001 /**
4002 * ice_set_pf_caps - set PF's capability flags
4003 * @pf: pointer to the PF instance
4004 */
4005 static void ice_set_pf_caps(struct ice_pf *pf)
4006 {
4007 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
4008
4009 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
4010 if (func_caps->common_cap.rdma)
4011 set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
4012 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4013 if (func_caps->common_cap.dcb)
4014 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4015 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
4016 if (func_caps->common_cap.sr_iov_1_1) {
4017 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
4018 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
4019 ICE_MAX_SRIOV_VFS);
4020 }
4021 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
4022 if (func_caps->common_cap.rss_table_size)
4023 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
4024
4025 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
4026 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
4027 u16 unused;
4028
4029 /* ctrl_vsi_idx will be set to a valid value when flow director
4030 * is setup by ice_init_fdir
4031 */
4032 pf->ctrl_vsi_idx = ICE_NO_VSI;
4033 set_bit(ICE_FLAG_FD_ENA, pf->flags);
4034 /* force guaranteed filter pool for PF */
4035 ice_alloc_fd_guar_item(&pf->hw, &unused,
4036 func_caps->fd_fltr_guar);
4037 /* force shared filter pool for PF */
4038 ice_alloc_fd_shrd_item(&pf->hw, &unused,
4039 func_caps->fd_fltr_best_effort);
4040 }
4041
4042 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
4043 if (func_caps->common_cap.ieee_1588)
4044 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
4045
4046 pf->max_pf_txqs = func_caps->common_cap.num_txq;
4047 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
4048 }
4049
4050 /**
4051 * ice_init_pf - Initialize general software structures (struct ice_pf)
4052 * @pf: board private structure to initialize
4053 */
4054 static int ice_init_pf(struct ice_pf *pf)
4055 {
4056 ice_set_pf_caps(pf);
4057
4058 mutex_init(&pf->sw_mutex);
4059 mutex_init(&pf->tc_mutex);
4060 mutex_init(&pf->adev_mutex);
4061 mutex_init(&pf->lag_mutex);
4062
4063 INIT_HLIST_HEAD(&pf->aq_wait_list);
4064 spin_lock_init(&pf->aq_wait_lock);
4065 init_waitqueue_head(&pf->aq_wait_queue);
4066
4067 init_waitqueue_head(&pf->reset_wait_queue);
4068
4069 /* setup service timer and periodic service task */
4070 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
4071 pf->serv_tmr_period = HZ;
4072 INIT_WORK(&pf->serv_task, ice_service_task);
4073 clear_bit(ICE_SERVICE_SCHED, pf->state);
4074
4075 mutex_init(&pf->avail_q_mutex);
4076 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
4077 if (!pf->avail_txqs)
4078 return -ENOMEM;
4079
4080 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
4081 if (!pf->avail_rxqs) {
4082 bitmap_free(pf->avail_txqs);
4083 pf->avail_txqs = NULL;
4084 return -ENOMEM;
4085 }
4086
4087 mutex_init(&pf->vfs.table_lock);
4088 hash_init(pf->vfs.table);
4089 if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
4090 wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH,
4091 ICE_MBX_OVERFLOW_WATERMARK);
4092 else
4093 ice_mbx_init_snapshot(&pf->hw);
4094
4095 xa_init(&pf->dyn_ports);
4096 xa_init(&pf->sf_nums);
4097
4098 return 0;
4099 }
4100
4101 /**
4102 * ice_is_wol_supported - check if WoL is supported
4103 * @hw: pointer to hardware info
4104 *
4105 * Check if WoL is supported based on the HW configuration.
4106 * Returns true if NVM supports and enables WoL for this port, false otherwise
4107 */
4108 bool ice_is_wol_supported(struct ice_hw *hw)
4109 {
4110 u16 wol_ctrl;
4111
4112 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
4113 * word) indicates WoL is not supported on the corresponding PF ID.
4114 */
4115 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
4116 return false;
4117
4118 return !(BIT(hw->port_info->lport) & wol_ctrl);
4119 }
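
/* Worked example (illustrative values): with hw->port_info->lport = 2 and
 * wol_ctrl = 0x0004 read from the NVM, BIT(2) & 0x0004 is non-zero, so WoL
 * is disabled for this port and the function returns false; wol_ctrl =
 * 0x0000 would return true for every port.
 */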
4120
4121 /**
4122 * ice_vsi_recfg_qs - Change the number of queues on a VSI
4123 * @vsi: VSI being changed
4124 * @new_rx: new number of Rx queues
4125 * @new_tx: new number of Tx queues
4126 * @locked: is adev device_lock held
4127 *
4128 * Only change the number of queues if new_tx or new_rx is non-zero.
4129 *
4130 * Returns 0 on success.
4131 */
4132 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
4133 {
4134 struct ice_pf *pf = vsi->back;
4135 int i, err = 0, timeout = 50;
4136
4137 if (!new_rx && !new_tx)
4138 return -EINVAL;
4139
4140 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
4141 timeout--;
4142 if (!timeout)
4143 return -EBUSY;
4144 usleep_range(1000, 2000);
4145 }
4146
4147 if (new_tx)
4148 vsi->req_txq = (u16)new_tx;
4149 if (new_rx)
4150 vsi->req_rxq = (u16)new_rx;
4151
4152 /* set for the next time the netdev is started */
4153 if (!netif_running(vsi->netdev)) {
4154 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4155 if (err)
4156 goto rebuild_err;
4157 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
4158 goto done;
4159 }
4160
4161 ice_vsi_close(vsi);
4162 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4163 if (err)
4164 goto rebuild_err;
4165
4166 ice_for_each_traffic_class(i) {
4167 if (vsi->tc_cfg.ena_tc & BIT(i))
4168 netdev_set_tc_queue(vsi->netdev,
4169 vsi->tc_cfg.tc_info[i].netdev_tc,
4170 vsi->tc_cfg.tc_info[i].qcount_tx,
4171 vsi->tc_cfg.tc_info[i].qoffset);
4172 }
4173 ice_pf_dcb_recfg(pf, locked);
4174 ice_vsi_open(vsi);
4175 goto done;
4176
4177 rebuild_err:
4178 dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n",
4179 err);
4180 done:
4181 clear_bit(ICE_CFG_BUSY, pf->state);
4182 return err;
4183 }
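
/* Note on the ICE_CFG_BUSY loop above: with timeout = 50 tries and
 * usleep_range(1000, 2000) per try, a contended caller backs off for
 * roughly 50-100 ms in total before giving up with -EBUSY.
 */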
4184
4185 /**
4186 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4187 * @pf: PF to configure
4188 *
4189 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4190 * VSI can still Tx/Rx VLAN tagged packets.
4191 */
4192 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4193 {
4194 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4195 struct ice_vsi_ctx *ctxt;
4196 struct ice_hw *hw;
4197 int status;
4198
4199 if (!vsi)
4200 return;
4201
4202 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4203 if (!ctxt)
4204 return;
4205
4206 hw = &pf->hw;
4207 ctxt->info = vsi->info;
4208
4209 ctxt->info.valid_sections =
4210 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4211 ICE_AQ_VSI_PROP_SECURITY_VALID |
4212 ICE_AQ_VSI_PROP_SW_VALID);
4213
4214 /* disable VLAN anti-spoof */
4215 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4216 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4217
4218 /* disable VLAN pruning and keep all other settings */
4219 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4220
4221 /* allow all VLANs on Tx and don't strip on Rx */
4222 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4223 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4224
4225 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4226 if (status) {
4227 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4228 status, libie_aq_str(hw->adminq.sq_last_status));
4229 } else {
4230 vsi->info.sec_flags = ctxt->info.sec_flags;
4231 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4232 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4233 }
4234
4235 kfree(ctxt);
4236 }
4237
4238 /**
4239 * ice_log_pkg_init - log result of DDP package load
4240 * @hw: pointer to hardware info
4241 * @state: state of package load
4242 */
4243 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4244 {
4245 struct ice_pf *pf = hw->back;
4246 struct device *dev;
4247
4248 dev = ice_pf_to_dev(pf);
4249
4250 switch (state) {
4251 case ICE_DDP_PKG_SUCCESS:
4252 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4253 hw->active_pkg_name,
4254 hw->active_pkg_ver.major,
4255 hw->active_pkg_ver.minor,
4256 hw->active_pkg_ver.update,
4257 hw->active_pkg_ver.draft);
4258 break;
4259 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4260 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4261 hw->active_pkg_name,
4262 hw->active_pkg_ver.major,
4263 hw->active_pkg_ver.minor,
4264 hw->active_pkg_ver.update,
4265 hw->active_pkg_ver.draft);
4266 break;
4267 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4268 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
4269 hw->active_pkg_name,
4270 hw->active_pkg_ver.major,
4271 hw->active_pkg_ver.minor,
4272 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4273 break;
4274 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4275 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4276 hw->active_pkg_name,
4277 hw->active_pkg_ver.major,
4278 hw->active_pkg_ver.minor,
4279 hw->active_pkg_ver.update,
4280 hw->active_pkg_ver.draft,
4281 hw->pkg_name,
4282 hw->pkg_ver.major,
4283 hw->pkg_ver.minor,
4284 hw->pkg_ver.update,
4285 hw->pkg_ver.draft);
4286 break;
4287 case ICE_DDP_PKG_FW_MISMATCH:
4288 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
4289 break;
4290 case ICE_DDP_PKG_INVALID_FILE:
4291 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4292 break;
4293 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4294 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
4295 break;
4296 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4297 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
4298 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4299 break;
4300 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4301 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
4302 break;
4303 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4304 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
4305 break;
4306 case ICE_DDP_PKG_LOAD_ERROR:
4307 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
4308 /* poll for reset to complete */
4309 if (ice_check_reset(hw))
4310 dev_err(dev, "Error resetting device. Please reload the driver\n");
4311 break;
4312 case ICE_DDP_PKG_ERR:
4313 default:
4314 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
4315 break;
4316 }
4317 }
4318
4319 /**
4320 * ice_load_pkg - load/reload the DDP Package file
4321 * @firmware: firmware structure when firmware requested or NULL for reload
4322 * @pf: pointer to the PF instance
4323 *
4324 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4325 * initialize HW tables.
4326 */
4327 static void
4328 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4329 {
4330 enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4331 struct device *dev = ice_pf_to_dev(pf);
4332 struct ice_hw *hw = &pf->hw;
4333
4334 /* Load DDP Package */
4335 if (firmware && !hw->pkg_copy) {
4336 state = ice_copy_and_init_pkg(hw, firmware->data,
4337 firmware->size);
4338 ice_log_pkg_init(hw, state);
4339 } else if (!firmware && hw->pkg_copy) {
4340 /* Reload package during rebuild after CORER/GLOBR reset */
4341 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4342 ice_log_pkg_init(hw, state);
4343 } else {
4344 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4345 }
4346
4347 if (!ice_is_init_pkg_successful(state)) {
4348 /* Safe Mode */
4349 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4350 return;
4351 }
4352
4353 /* A successful package download is the precondition for advanced
4354 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4355 */
4356 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4357 }
4358
4359 /**
4360 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4361 * @pf: pointer to the PF structure
4362 *
4363 * There is no error returned here because the driver should be able to handle
4364 * 128 Byte cache lines, so we only print a warning in case issues are seen,
4365 * specifically with Tx.
4366 */
4367 static void ice_verify_cacheline_size(struct ice_pf *pf)
4368 {
4369 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4370 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4371 ICE_CACHE_LINE_BYTES);
4372 }
4373
4374 /**
4375 * ice_send_version - update firmware with driver version
4376 * @pf: PF struct
4377 *
4378 * Returns 0 on success, else error code
4379 */
4380 static int ice_send_version(struct ice_pf *pf)
4381 {
4382 struct ice_driver_ver dv;
4383
4384 dv.major_ver = 0xff;
4385 dv.minor_ver = 0xff;
4386 dv.build_ver = 0xff;
4387 dv.subbuild_ver = 0;
4388 strscpy((char *)dv.driver_string, UTS_RELEASE,
4389 sizeof(dv.driver_string));
4390 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4391 }
4392
4393 /**
4394 * ice_init_fdir - Initialize flow director VSI and configuration
4395 * @pf: pointer to the PF instance
4396 *
4397 * returns 0 on success, negative on error
4398 */
4399 static int ice_init_fdir(struct ice_pf *pf)
4400 {
4401 struct device *dev = ice_pf_to_dev(pf);
4402 struct ice_vsi *ctrl_vsi;
4403 int err;
4404
4405 /* Side Band Flow Director needs to have a control VSI.
4406 * Allocate it and store it in the PF.
4407 */
4408 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4409 if (!ctrl_vsi) {
4410 dev_dbg(dev, "could not create control VSI\n");
4411 return -ENOMEM;
4412 }
4413
4414 err = ice_vsi_open_ctrl(ctrl_vsi);
4415 if (err) {
4416 dev_dbg(dev, "could not open control VSI\n");
4417 goto err_vsi_open;
4418 }
4419
4420 mutex_init(&pf->hw.fdir_fltr_lock);
4421
4422 err = ice_fdir_create_dflt_rules(pf);
4423 if (err)
4424 goto err_fdir_rule;
4425
4426 return 0;
4427
4428 err_fdir_rule:
4429 ice_fdir_release_flows(&pf->hw);
4430 ice_vsi_close(ctrl_vsi);
4431 err_vsi_open:
4432 ice_vsi_release(ctrl_vsi);
4433 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4434 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4435 pf->ctrl_vsi_idx = ICE_NO_VSI;
4436 }
4437 return err;
4438 }
4439
4440 static void ice_deinit_fdir(struct ice_pf *pf)
4441 {
4442 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4443
4444 if (!vsi)
4445 return;
4446
4447 ice_vsi_manage_fdir(vsi, false);
4448 ice_vsi_release(vsi);
4449 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4450 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4451 pf->ctrl_vsi_idx = ICE_NO_VSI;
4452 }
4453
4454 mutex_destroy(&pf->hw.fdir_fltr_lock);
4455 }
4456
4457 /**
4458 * ice_get_opt_fw_name - return optional firmware file name or NULL
4459 * @pf: pointer to the PF instance
4460 */
4461 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4462 {
4463 /* The optional firmware name is the same as the default, with an additional
4464 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
4465 */
4466 struct pci_dev *pdev = pf->pdev;
4467 char *opt_fw_filename;
4468 u64 dsn;
4469
4470 /* Determine the name of the optional file using the DSN (two
4471 * dwords following the start of the DSN Capability).
4472 */
4473 dsn = pci_get_dsn(pdev);
4474 if (!dsn)
4475 return NULL;
4476
4477 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4478 if (!opt_fw_filename)
4479 return NULL;
4480
4481 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4482 ICE_DDP_PKG_PATH, dsn);
4483
4484 return opt_fw_filename;
4485 }
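
/* Worked example (illustrative DSN): a device reporting DSN
 * 0x1122334455667788 makes the snprintf() above produce
 * "intel/ice/ddp/ice-1122334455667788.pkg", which is then looked up in
 * the regular firmware search paths.
 */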
4486
4487 /**
4488 * ice_request_fw - Device initialization routine
4489 * @pf: pointer to the PF instance
4490 * @firmware: double pointer to firmware struct
4491 *
4492 * Return: zero when successful, negative values otherwise.
4493 */
4494 static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware)
4495 {
4496 char *opt_fw_filename = ice_get_opt_fw_name(pf);
4497 struct device *dev = ice_pf_to_dev(pf);
4498 int err = 0;
4499
4500 /* An optional device-specific DDP file (if present) overrides the default
4501 * DDP package file. The kernel logs a debug message if the file doesn't
4502 * exist, and warning messages for other errors.
4503 */
4504 if (opt_fw_filename) {
4505 err = firmware_request_nowarn(firmware, opt_fw_filename, dev);
4506 kfree(opt_fw_filename);
4507 if (!err)
4508 return err;
4509 }
4510 err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev);
4511 if (err)
4512 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4513
4514 return err;
4515 }
4516
4517 /**
4518 * ice_init_tx_topology - performs Tx topology initialization
4519 * @hw: pointer to the hardware structure
4520 * @firmware: pointer to firmware structure
4521 *
4522 * Return: zero when init was successful, negative values otherwise.
4523 */
4524 static int
4525 ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
4526 {
4527 u8 num_tx_sched_layers = hw->num_tx_sched_layers;
4528 struct ice_pf *pf = hw->back;
4529 struct device *dev;
4530 int err;
4531
4532 dev = ice_pf_to_dev(pf);
4533 err = ice_cfg_tx_topo(hw, firmware->data, firmware->size);
4534 if (!err) {
4535 if (hw->num_tx_sched_layers > num_tx_sched_layers)
4536 dev_info(dev, "Tx scheduling layers switching feature disabled\n");
4537 else
4538 dev_info(dev, "Tx scheduling layers switching feature enabled\n");
4539 return 0;
4540 } else if (err == -ENODEV) {
4541 /* If we failed to re-initialize the device, we can no longer
4542 * continue loading.
4543 */
4544 dev_warn(dev, "Failed to initialize hardware after applying Tx scheduling configuration.\n");
4545 return err;
4546 } else if (err == -EIO) {
4547 dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
4548 return 0;
4549 } else if (err == -EEXIST) {
4550 return 0;
4551 }
4552
4553 /* Do not treat this as a fatal error. */
4554 dev_info(dev, "Failed to apply Tx scheduling configuration, err %pe\n",
4555 ERR_PTR(err));
4556 return 0;
4557 }
4558
4559 /**
4560 * ice_init_supported_rxdids - Initialize supported Rx descriptor IDs
4561 * @hw: pointer to the hardware structure
4562 * @pf: pointer to pf structure
4563 *
4564 * The pf->supported_rxdids bitmap is used to indicate to VFs which descriptor
4565 * formats the PF hardware supports. The exact list of supported RXDIDs
4566 * depends on the loaded DDP package. The IDs can be determined by reading the
4567 * GLFLXP_RXDID_FLAGS register after the DDP package is loaded.
4568 *
4569 * Note that the legacy 32-byte descriptor (RXDID 1) is always supported but
4570 * is not listed in the DDP package. The 16-byte legacy descriptor (RXDID 0)
4571 * is never supported by VFs.
4572 */
4573 static void ice_init_supported_rxdids(struct ice_hw *hw, struct ice_pf *pf)
4574 {
4575 pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1);
4576
4577 for (int i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
4578 u32 regval;
4579
4580 regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
4581 if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
4582 & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
4583 pf->supported_rxdids |= BIT(i);
4584 }
4585 }
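
/* Worked example: if reading GLFLXP_RXDID_FLAGS(2, 0) shows any
 * FLEXIFLAG_4N bits set, descriptor format 2 is usable and the loop above
 * sets BIT(2) in pf->supported_rxdids; the 32-byte legacy descriptor
 * (ICE_RXDID_LEGACY_1) is marked supported unconditionally before the loop.
 */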
4586
4587 /**
4588 * ice_init_ddp_config - DDP related configuration
4589 * @hw: pointer to the hardware structure
4590 * @pf: pointer to pf structure
4591 *
4592 * This function loads the DDP file from disk, then initializes the Tx
4593 * topology. Finally, the DDP package is loaded onto the device.
4594 *
4595 * Return: zero when init was successful, negative values otherwise.
4596 */
4597 static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
4598 {
4599 struct device *dev = ice_pf_to_dev(pf);
4600 const struct firmware *firmware = NULL;
4601 int err;
4602
4603 err = ice_request_fw(pf, &firmware);
4604 if (err) {
4605 dev_err(dev, "Fail during requesting FW: %d\n", err);
4606 return err;
4607 }
4608
4609 err = ice_init_tx_topology(hw, firmware);
4610 if (err) {
4611 dev_err(dev, "Fail during initialization of Tx topology: %d\n",
4612 err);
4613 release_firmware(firmware);
4614 return err;
4615 }
4616
4617 /* Download firmware to device */
4618 ice_load_pkg(firmware, pf);
4619 release_firmware(firmware);
4620
4621 /* Initialize the supported Rx descriptor IDs after loading DDP */
4622 ice_init_supported_rxdids(hw, pf);
4623
4624 return 0;
4625 }
4626
4627 /**
4628 * ice_print_wake_reason - show the wake up cause in the log
4629 * @pf: pointer to the PF struct
4630 */
4631 static void ice_print_wake_reason(struct ice_pf *pf)
4632 {
4633 u32 wus = pf->wakeup_reason;
4634 const char *wake_str;
4635
4636 /* if no wake event, nothing to print */
4637 if (!wus)
4638 return;
4639
4640 if (wus & PFPM_WUS_LNKC_M)
4641 wake_str = "Link\n";
4642 else if (wus & PFPM_WUS_MAG_M)
4643 wake_str = "Magic Packet\n";
4644 else if (wus & PFPM_WUS_MNG_M)
4645 wake_str = "Management\n";
4646 else if (wus & PFPM_WUS_FW_RST_WK_M)
4647 wake_str = "Firmware Reset\n";
4648 else
4649 wake_str = "Unknown\n";
4650
4651 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4652 }
4653
4654 /**
4655 * ice_pf_fwlog_update_module - update 1 module
4656 * @pf: pointer to the PF struct
4657 * @log_level: log_level to use for the @module
4658 * @module: module to update
4659 */
4660 void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
4661 {
4662 struct ice_hw *hw = &pf->hw;
4663
4664 hw->fwlog_cfg.module_entries[module].log_level = log_level;
4665 }
4666
4667 /**
4668 * ice_register_netdev - register netdev
4669 * @vsi: pointer to the VSI struct
4670 */
4671 static int ice_register_netdev(struct ice_vsi *vsi)
4672 {
4673 int err;
4674
4675 if (!vsi || !vsi->netdev)
4676 return -EIO;
4677
4678 err = register_netdev(vsi->netdev);
4679 if (err)
4680 return err;
4681
4682 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4683 netif_carrier_off(vsi->netdev);
4684 netif_tx_stop_all_queues(vsi->netdev);
4685
4686 return 0;
4687 }
4688
4689 static void ice_unregister_netdev(struct ice_vsi *vsi)
4690 {
4691 if (!vsi || !vsi->netdev)
4692 return;
4693
4694 unregister_netdev(vsi->netdev);
4695 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4696 }
4697
4698 /**
4699 * ice_cfg_netdev - Allocate, configure and register a netdev
4700 * @vsi: the VSI associated with the new netdev
4701 *
4702 * Returns 0 on success, negative value on failure
4703 */
4704 static int ice_cfg_netdev(struct ice_vsi *vsi)
4705 {
4706 struct ice_netdev_priv *np;
4707 struct net_device *netdev;
4708 u8 mac_addr[ETH_ALEN];
4709
4710 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4711 vsi->alloc_rxq);
4712 if (!netdev)
4713 return -ENOMEM;
4714
4715 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4716 vsi->netdev = netdev;
4717 np = netdev_priv(netdev);
4718 np->vsi = vsi;
4719
4720 ice_set_netdev_features(netdev);
4721 ice_set_ops(vsi);
4722
4723 if (vsi->type == ICE_VSI_PF) {
4724 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4725 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4726 eth_hw_addr_set(netdev, mac_addr);
4727 }
4728
4729 netdev->priv_flags |= IFF_UNICAST_FLT;
4730
4731 /* Setup netdev TC information */
4732 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4733
4734 netdev->max_mtu = ICE_MAX_MTU;
4735
4736 return 0;
4737 }
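
/* Note on the allocation pattern above: alloc_etherdev_mqs(sizeof(*np),
 * txqs, rxqs) reserves sizeof(struct ice_netdev_priv) bytes of private
 * space inside the net_device allocation itself, and netdev_priv()
 * returns a pointer into that area, so np needs no separate kzalloc()
 * and is freed together with the netdev by free_netdev().
 */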
4738
4739 static void ice_decfg_netdev(struct ice_vsi *vsi)
4740 {
4741 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4742 free_netdev(vsi->netdev);
4743 vsi->netdev = NULL;
4744 }
4745
4746 int ice_init_dev(struct ice_pf *pf)
4747 {
4748 struct device *dev = ice_pf_to_dev(pf);
4749 struct ice_hw *hw = &pf->hw;
4750 int err;
4751
4752 ice_init_feature_support(pf);
4753
4754 err = ice_init_ddp_config(hw, pf);
4755
4756 /* if ice_init_ddp_config fails, the ICE_FLAG_ADV_FEATURES bit won't be
4757 * set in pf->flags, which will cause ice_is_safe_mode to return
4758 * true
4759 */
4760 if (err || ice_is_safe_mode(pf)) {
4761 /* we already got function/device capabilities but these don't
4762 * reflect what the driver needs to do in safe mode. Instead of
4763 * adding conditional logic everywhere to ignore these
4764 * device/function capabilities, override them.
4765 */
4766 ice_set_safe_mode_caps(hw);
4767 }
4768
4769 err = ice_init_pf(pf);
4770 if (err) {
4771 dev_err(dev, "ice_init_pf failed: %d\n", err);
4772 return err;
4773 }
4774
4775 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4776 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4777 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4778 if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4779 pf->hw.udp_tunnel_nic.tables[0].n_entries =
4780 pf->hw.tnl.valid_count[TNL_VXLAN];
4781 pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
4782 UDP_TUNNEL_TYPE_VXLAN;
4783 }
4784 if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4785 pf->hw.udp_tunnel_nic.tables[1].n_entries =
4786 pf->hw.tnl.valid_count[TNL_GENEVE];
4787 pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
4788 UDP_TUNNEL_TYPE_GENEVE;
4789 }
4790
4791 err = ice_init_interrupt_scheme(pf);
4792 if (err) {
4793 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4794 err = -EIO;
4795 goto unroll_pf_init;
4796 }
4797
4798 /* In case of MSIX we are going to setup the misc vector right here
4799 * to handle admin queue events etc. In case of legacy and MSI
4800 * the misc functionality and queue processing is combined in
4801 * the same vector and that gets setup at open.
4802 */
4803 err = ice_req_irq_msix_misc(pf);
4804 if (err) {
4805 dev_err(dev, "setup of misc vector failed: %d\n", err);
4806 goto unroll_irq_scheme_init;
4807 }
4808
4809 return 0;
4810
4811 unroll_irq_scheme_init:
4812 ice_clear_interrupt_scheme(pf);
4813 unroll_pf_init:
4814 ice_deinit_pf(pf);
4815 return err;
4816 }
4817
4818 void ice_deinit_dev(struct ice_pf *pf)
4819 {
4820 ice_free_irq_msix_misc(pf);
4821 ice_deinit_pf(pf);
4822 ice_deinit_hw(&pf->hw);
4823
4824 /* Service task is already stopped, so call reset directly. */
4825 ice_reset(&pf->hw, ICE_RESET_PFR);
4826 pci_wait_for_pending_transaction(pf->pdev);
4827 ice_clear_interrupt_scheme(pf);
4828 }
4829
4830 static void ice_init_features(struct ice_pf *pf)
4831 {
4832 struct device *dev = ice_pf_to_dev(pf);
4833
4834 if (ice_is_safe_mode(pf))
4835 return;
4836
4837 /* initialize DDP driven features */
4838 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4839 ice_ptp_init(pf);
4840
4841 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4842 ice_gnss_init(pf);
4843
4844 if (ice_is_feature_supported(pf, ICE_F_CGU) ||
4845 ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
4846 ice_dpll_init(pf);
4847
4848 /* Note: Flow director init failure is non-fatal to load */
4849 if (ice_init_fdir(pf))
4850 dev_err(dev, "could not initialize flow director\n");
4851
4852 /* Note: DCB init failure is non-fatal to load */
4853 if (ice_init_pf_dcb(pf, false)) {
4854 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4855 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4856 } else {
4857 ice_cfg_lldp_mib_change(&pf->hw, true);
4858 }
4859
4860 if (ice_init_lag(pf))
4861 dev_warn(dev, "Failed to init link aggregation support\n");
4862
4863 ice_hwmon_init(pf);
4864 }
4865
4866 static void ice_deinit_features(struct ice_pf *pf)
4867 {
4868 if (ice_is_safe_mode(pf))
4869 return;
4870
4871 ice_deinit_lag(pf);
4872 if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4873 ice_cfg_lldp_mib_change(&pf->hw, false);
4874 ice_deinit_fdir(pf);
4875 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4876 ice_gnss_exit(pf);
4877 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4878 ice_ptp_release(pf);
4879 if (test_bit(ICE_FLAG_DPLL, pf->flags))
4880 ice_dpll_deinit(pf);
4881 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
4882 xa_destroy(&pf->eswitch.reprs);
4883 }
4884
4885 static void ice_init_wakeup(struct ice_pf *pf)
4886 {
4887 /* Save wakeup reason register for later use */
4888 pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4889
4890 /* check for a power management event */
4891 ice_print_wake_reason(pf);
4892
4893 /* clear wake status, all bits */
4894 wr32(&pf->hw, PFPM_WUS, U32_MAX);
4895
4896 /* Disable WoL at init, wait for user to enable */
4897 device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4898 }
4899
4900 static int ice_init_link(struct ice_pf *pf)
4901 {
4902 struct device *dev = ice_pf_to_dev(pf);
4903 int err;
4904
4905 err = ice_init_link_events(pf->hw.port_info);
4906 if (err) {
4907 dev_err(dev, "ice_init_link_events failed: %d\n", err);
4908 return err;
4909 }
4910
4911 /* not a fatal error if this fails */
4912 err = ice_init_nvm_phy_type(pf->hw.port_info);
4913 if (err)
4914 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4915
4916 /* not a fatal error if this fails */
4917 err = ice_update_link_info(pf->hw.port_info);
4918 if (err)
4919 dev_err(dev, "ice_update_link_info failed: %d\n", err);
4920
4921 ice_init_link_dflt_override(pf->hw.port_info);
4922
4923 ice_check_link_cfg_err(pf,
4924 pf->hw.port_info->phy.link_info.link_cfg_err);
4925
4926 /* if media available, initialize PHY settings */
4927 if (pf->hw.port_info->phy.link_info.link_info &
4928 ICE_AQ_MEDIA_AVAILABLE) {
4929 /* not a fatal error if this fails */
4930 err = ice_init_phy_user_cfg(pf->hw.port_info);
4931 if (err)
4932 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4933
4934 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4935 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4936
4937 if (vsi)
4938 ice_configure_phy(vsi);
4939 }
4940 } else {
4941 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4942 }
4943
4944 return err;
4945 }
4946
4947 static int ice_init_pf_sw(struct ice_pf *pf)
4948 {
4949 bool dvm = ice_is_dvm_ena(&pf->hw);
4950 struct ice_vsi *vsi;
4951 int err;
4952
4953 /* create switch struct for the switch element created by FW on boot */
4954 pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
4955 if (!pf->first_sw)
4956 return -ENOMEM;
4957
4958 if (pf->hw.evb_veb)
4959 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4960 else
4961 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4962
4963 pf->first_sw->pf = pf;
4964
4965 /* record the sw_id available for later use */
4966 pf->first_sw->sw_id = pf->hw.port_info->sw_id;
4967
4968 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
4969 if (err)
4970 goto err_aq_set_port_params;
4971
4972 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
4973 if (!vsi) {
4974 err = -ENOMEM;
4975 goto err_pf_vsi_setup;
4976 }
4977
4978 return 0;
4979
4980 err_pf_vsi_setup:
4981 err_aq_set_port_params:
4982 kfree(pf->first_sw);
4983 return err;
4984 }
4985
4986 static void ice_deinit_pf_sw(struct ice_pf *pf)
4987 {
4988 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4989
4990 if (!vsi)
4991 return;
4992
4993 ice_vsi_release(vsi);
4994 kfree(pf->first_sw);
4995 }
4996
4997 static int ice_alloc_vsis(struct ice_pf *pf)
4998 {
4999 struct device *dev = ice_pf_to_dev(pf);
5000
5001 pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
5002 if (!pf->num_alloc_vsi)
5003 return -EIO;
5004
5005 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
5006 dev_warn(dev,
5007 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
5008 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
5009 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
5010 }
5011
5012 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
5013 GFP_KERNEL);
5014 if (!pf->vsi)
5015 return -ENOMEM;
5016
5017 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
5018 sizeof(*pf->vsi_stats), GFP_KERNEL);
5019 if (!pf->vsi_stats) {
5020 devm_kfree(dev, pf->vsi);
5021 return -ENOMEM;
5022 }
5023
5024 return 0;
5025 }
5026
5027 static void ice_dealloc_vsis(struct ice_pf *pf)
5028 {
5029 devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
5030 pf->vsi_stats = NULL;
5031
5032 pf->num_alloc_vsi = 0;
5033 devm_kfree(ice_pf_to_dev(pf), pf->vsi);
5034 pf->vsi = NULL;
5035 }
5036
5037 static int ice_init_devlink(struct ice_pf *pf)
5038 {
5039 int err;
5040
5041 err = ice_devlink_register_params(pf);
5042 if (err)
5043 return err;
5044
5045 ice_devlink_init_regions(pf);
5046 ice_devlink_register(pf);
5047 ice_health_init(pf);
5048
5049 return 0;
5050 }
5051
5052 static void ice_deinit_devlink(struct ice_pf *pf)
5053 {
5054 ice_health_deinit(pf);
5055 ice_devlink_unregister(pf);
5056 ice_devlink_destroy_regions(pf);
5057 ice_devlink_unregister_params(pf);
5058 }
5059
5060 static int ice_init(struct ice_pf *pf)
5061 {
5062 int err;
5063
5064 err = ice_init_dev(pf);
5065 if (err)
5066 return err;
5067
5068 if (pf->hw.mac_type == ICE_MAC_E830) {
5069 err = pci_enable_ptm(pf->pdev, NULL);
5070 if (err)
5071 dev_dbg(ice_pf_to_dev(pf), "PCIe PTM not supported by PCIe bus/controller\n");
5072 }
5073
5074 err = ice_alloc_vsis(pf);
5075 if (err)
5076 goto err_alloc_vsis;
5077
5078 err = ice_init_pf_sw(pf);
5079 if (err)
5080 goto err_init_pf_sw;
5081
5082 ice_init_wakeup(pf);
5083
5084 err = ice_init_link(pf);
5085 if (err)
5086 goto err_init_link;
5087
5088 err = ice_send_version(pf);
5089 if (err)
5090 goto err_init_link;
5091
5092 ice_verify_cacheline_size(pf);
5093
5094 if (ice_is_safe_mode(pf))
5095 ice_set_safe_mode_vlan_cfg(pf);
5096 else
5097 /* print PCI link speed and width */
5098 pcie_print_link_status(pf->pdev);
5099
5100 /* ready to go, so clear down state bit */
5101 clear_bit(ICE_DOWN, pf->state);
5102 clear_bit(ICE_SERVICE_DIS, pf->state);
5103
5104 /* since everything is good, start the service timer */
5105 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5106
5107 return 0;
5108
5109 err_init_link:
5110 ice_deinit_pf_sw(pf);
5111 err_init_pf_sw:
5112 ice_dealloc_vsis(pf);
5113 err_alloc_vsis:
5114 ice_deinit_dev(pf);
5115 return err;
5116 }
5117
5118 static void ice_deinit(struct ice_pf *pf)
5119 {
5120 set_bit(ICE_SERVICE_DIS, pf->state);
5121 set_bit(ICE_DOWN, pf->state);
5122
5123 ice_deinit_pf_sw(pf);
5124 ice_dealloc_vsis(pf);
5125 ice_deinit_dev(pf);
5126 }
5127
5128 /**
5129 * ice_load - load the PF by initializing HW and starting the VSI
5130 * @pf: pointer to the pf instance
5131 *
5132 * This function has to be called under devl_lock.
5133 */
5134 int ice_load(struct ice_pf *pf)
5135 {
5136 struct ice_vsi *vsi;
5137 int err;
5138
5139 devl_assert_locked(priv_to_devlink(pf));
5140
5141 vsi = ice_get_main_vsi(pf);
5142
5143 /* init channel list */
5144 INIT_LIST_HEAD(&vsi->ch_list);
5145
5146 err = ice_cfg_netdev(vsi);
5147 if (err)
5148 return err;
5149
5150 /* Setup DCB netlink interface */
5151 ice_dcbnl_setup(vsi);
5152
5153 err = ice_init_mac_fltr(pf);
5154 if (err)
5155 goto err_init_mac_fltr;
5156
5157 err = ice_devlink_create_pf_port(pf);
5158 if (err)
5159 goto err_devlink_create_pf_port;
5160
5161 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
5162
5163 err = ice_register_netdev(vsi);
5164 if (err)
5165 goto err_register_netdev;
5166
5167 err = ice_tc_indir_block_register(vsi);
5168 if (err)
5169 goto err_tc_indir_block_register;
5170
5171 ice_napi_add(vsi);
5172
5173 ice_init_features(pf);
5174
5175 err = ice_init_rdma(pf);
5176 if (err)
5177 goto err_init_rdma;
5178
5179 ice_service_task_restart(pf);
5180
5181 clear_bit(ICE_DOWN, pf->state);
5182
5183 return 0;
5184
5185 err_init_rdma:
5186 ice_deinit_features(pf);
5187 ice_tc_indir_block_unregister(vsi);
5188 err_tc_indir_block_register:
5189 ice_unregister_netdev(vsi);
5190 err_register_netdev:
5191 ice_devlink_destroy_pf_port(pf);
5192 err_devlink_create_pf_port:
5193 err_init_mac_fltr:
5194 ice_decfg_netdev(vsi);
5195 return err;
5196 }
5197
5198 /**
5199 * ice_unload - unload the PF by stopping the VSI and de-initializing HW
5200 * @pf: pointer to the pf instance
5201 *
5202 * This function has to be called under devl_lock.
5203 */
5204 void ice_unload(struct ice_pf *pf)
5205 {
5206 struct ice_vsi *vsi = ice_get_main_vsi(pf);
5207
5208 devl_assert_locked(priv_to_devlink(pf));
5209
5210 ice_deinit_rdma(pf);
5211 ice_deinit_features(pf);
5212 ice_tc_indir_block_unregister(vsi);
5213 ice_unregister_netdev(vsi);
5214 ice_devlink_destroy_pf_port(pf);
5215 ice_decfg_netdev(vsi);
5216 }
5217
5218 static int ice_probe_recovery_mode(struct ice_pf *pf)
5219 {
5220 struct device *dev = ice_pf_to_dev(pf);
5221 int err;
5222
5223 dev_err(dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode\n");
5224
5225 INIT_HLIST_HEAD(&pf->aq_wait_list);
5226 spin_lock_init(&pf->aq_wait_lock);
5227 init_waitqueue_head(&pf->aq_wait_queue);
5228
5229 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
5230 pf->serv_tmr_period = HZ;
5231 INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode);
5232 clear_bit(ICE_SERVICE_SCHED, pf->state);
5233 err = ice_create_all_ctrlq(&pf->hw);
5234 if (err)
5235 return err;
5236
5237 scoped_guard(devl, priv_to_devlink(pf)) {
5238 err = ice_init_devlink(pf);
5239 if (err)
5240 return err;
5241 }
5242
5243 ice_service_task_restart(pf);
5244
5245 return 0;
5246 }
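
/* The scoped_guard(devl, ...) block above holds the devlink instance lock
 * for exactly the enclosed statements and drops it on scope exit, even on
 * the early return; it is shorthand for wrapping the call in
 * devl_lock(priv_to_devlink(pf)) / devl_unlock(priv_to_devlink(pf)).
 */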
5247
5248 /**
5249 * ice_probe - Device initialization routine
5250 * @pdev: PCI device information struct
5251 * @ent: entry in ice_pci_tbl
5252 *
5253 * Returns 0 on success, negative on failure
5254 */
5255 static int
5256 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
5257 {
5258 struct device *dev = &pdev->dev;
5259 struct ice_adapter *adapter;
5260 struct ice_pf *pf;
5261 struct ice_hw *hw;
5262 int err;
5263
5264 if (pdev->is_virtfn) {
5265 dev_err(dev, "can't probe a virtual function\n");
5266 return -EINVAL;
5267 }
5268
5269 /* when under a kdump kernel initiate a reset before enabling the
5270 * device in order to clear out any pending DMA transactions. These
5271 * transactions can cause some systems to machine check when doing
5272 * the pcim_enable_device() below.
5273 */
5274 if (is_kdump_kernel()) {
5275 pci_save_state(pdev);
5276 pci_clear_master(pdev);
5277 err = pcie_flr(pdev);
5278 if (err)
5279 return err;
5280 pci_restore_state(pdev);
5281 }
5282
5283 /* this driver uses devres, see
5284 * Documentation/driver-api/driver-model/devres.rst
5285 */
5286 err = pcim_enable_device(pdev);
5287 if (err)
5288 return err;
5289
5290 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
5291 if (err) {
5292 dev_err(dev, "BAR0 I/O map error %d\n", err);
5293 return err;
5294 }
5295
5296 pf = ice_allocate_pf(dev);
5297 if (!pf)
5298 return -ENOMEM;
5299
5300 /* initialize Auxiliary index to invalid value */
5301 pf->aux_idx = -1;
5302
5303 /* set up for high or low DMA */
5304 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
5305 if (err) {
5306 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
5307 return err;
5308 }
5309
5310 pci_set_master(pdev);
5311 pf->pdev = pdev;
5312 pci_set_drvdata(pdev, pf);
5313 set_bit(ICE_DOWN, pf->state);
5314 /* Disable service task until DOWN bit is cleared */
5315 set_bit(ICE_SERVICE_DIS, pf->state);
5316
5317 hw = &pf->hw;
5318 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
5319 pci_save_state(pdev);
5320
5321 hw->back = pf;
5322 hw->port_info = NULL;
5323 hw->vendor_id = pdev->vendor;
5324 hw->device_id = pdev->device;
5325 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
5326 hw->subsystem_vendor_id = pdev->subsystem_vendor;
5327 hw->subsystem_device_id = pdev->subsystem_device;
5328 hw->bus.device = PCI_SLOT(pdev->devfn);
5329 hw->bus.func = PCI_FUNC(pdev->devfn);
5330 ice_set_ctrlq_len(hw);
5331
5332 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
5333
5334 #ifndef CONFIG_DYNAMIC_DEBUG
5335 if (debug < -1)
5336 hw->debug_mask = debug;
5337 #endif
5338
5339 if (ice_is_recovery_mode(hw))
5340 return ice_probe_recovery_mode(pf);
5341
5342 err = ice_init_hw(hw);
5343 if (err) {
5344 dev_err(dev, "ice_init_hw failed: %d\n", err);
5345 return err;
5346 }
5347
5348 adapter = ice_adapter_get(pdev);
5349 if (IS_ERR(adapter)) {
5350 err = PTR_ERR(adapter);
5351 goto unroll_hw_init;
5352 }
5353 pf->adapter = adapter;
5354
5355 err = ice_init(pf);
5356 if (err)
5357 goto unroll_adapter;
5358
5359 devl_lock(priv_to_devlink(pf));
5360 err = ice_load(pf);
5361 if (err)
5362 goto unroll_init;
5363
5364 err = ice_init_devlink(pf);
5365 if (err)
5366 goto unroll_load;
5367 devl_unlock(priv_to_devlink(pf));
5368
5369 return 0;
5370
5371 unroll_load:
5372 ice_unload(pf);
5373 unroll_init:
5374 devl_unlock(priv_to_devlink(pf));
5375 ice_deinit(pf);
5376 unroll_adapter:
5377 ice_adapter_put(pdev);
5378 unroll_hw_init:
5379 ice_deinit_hw(hw);
5380 return err;
5381 }
5382
5383 /**
5384 * ice_set_wake - enable or disable Wake on LAN
5385 * @pf: pointer to the PF struct
5386 *
5387 * Simple helper for WoL control
5388 */
5389 static void ice_set_wake(struct ice_pf *pf)
5390 {
5391 struct ice_hw *hw = &pf->hw;
5392 bool wol = pf->wol_ena;
5393
5394 /* clear wake state, otherwise new wake events won't fire */
5395 wr32(hw, PFPM_WUS, U32_MAX);
5396
5397 /* enable / disable APM wake up, no RMW needed */
5398 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5399
5400 /* enable the magic-packet wake filter */
5401 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5402 }
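/* Usage sketch (editor's illustration, not driver code): pf->wol_ena is
 * normally set from user space through the ethtool WoL op before suspend or
 * remove reaches this helper; 'eth0' below is a hypothetical interface:
 *
 *   ethtool -s eth0 wol g    # request magic-packet wake
 *   ethtool -s eth0 wol d    # disable wake
 */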
5403
5404 /**
5405 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5406 * @pf: pointer to the PF struct
5407 *
5408 * Issue firmware command to enable multicast magic wake, making
5409 * sure that any locally administered address (LAA) is used for
5410 * wake, and that PF reset doesn't undo the LAA.
5411 */
5412 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5413 {
5414 struct device *dev = ice_pf_to_dev(pf);
5415 struct ice_hw *hw = &pf->hw;
5416 u8 mac_addr[ETH_ALEN];
5417 struct ice_vsi *vsi;
5418 int status;
5419 u8 flags;
5420
5421 if (!pf->wol_ena)
5422 return;
5423
5424 vsi = ice_get_main_vsi(pf);
5425 if (!vsi)
5426 return;
5427
5428 /* Get current MAC address in case it's an LAA */
5429 if (vsi->netdev)
5430 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5431 else
5432 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5433
5434 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5435 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5436 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5437
5438 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5439 if (status)
5440 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5441 status, libie_aq_str(hw->adminq.sq_last_status));
5442 }
5443
5444 /**
5445 * ice_remove - Device removal routine
5446 * @pdev: PCI device information struct
5447 */
5448 static void ice_remove(struct pci_dev *pdev)
5449 {
5450 struct ice_pf *pf = pci_get_drvdata(pdev);
5451 int i;
5452
5453 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5454 if (!ice_is_reset_in_progress(pf->state))
5455 break;
5456 msleep(100);
5457 }
5458
5459 if (ice_is_recovery_mode(&pf->hw)) {
5460 ice_service_task_stop(pf);
5461 scoped_guard(devl, priv_to_devlink(pf)) {
5462 ice_deinit_devlink(pf);
5463 }
5464 return;
5465 }
5466
5467 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5468 set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5469 ice_free_vfs(pf);
5470 }
5471
5472 ice_hwmon_exit(pf);
5473
5474 ice_service_task_stop(pf);
5475 ice_aq_cancel_waiting_tasks(pf);
5476 set_bit(ICE_DOWN, pf->state);
5477
5478 if (!ice_is_safe_mode(pf))
5479 ice_remove_arfs(pf);
5480
5481 devl_lock(priv_to_devlink(pf));
5482 ice_dealloc_all_dynamic_ports(pf);
5483 ice_deinit_devlink(pf);
5484
5485 ice_unload(pf);
5486 devl_unlock(priv_to_devlink(pf));
5487
5488 ice_deinit(pf);
5489 ice_vsi_release_all(pf);
5490
5491 ice_setup_mc_magic_wake(pf);
5492 ice_set_wake(pf);
5493
5494 ice_adapter_put(pdev);
5495 }
5496
5497 /**
5498 * ice_shutdown - PCI callback for shutting down device
5499 * @pdev: PCI device information struct
5500 */
5501 static void ice_shutdown(struct pci_dev *pdev)
5502 {
5503 struct ice_pf *pf = pci_get_drvdata(pdev);
5504
5505 ice_remove(pdev);
5506
5507 if (system_state == SYSTEM_POWER_OFF) {
5508 pci_wake_from_d3(pdev, pf->wol_ena);
5509 pci_set_power_state(pdev, PCI_D3hot);
5510 }
5511 }
5512
5513 /**
5514 * ice_prepare_for_shutdown - prep for PCI shutdown
5515 * @pf: board private structure
5516 *
5517 * Inform or close all dependent features in prep for PCI device shutdown
5518 */
5519 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5520 {
5521 struct ice_hw *hw = &pf->hw;
5522 u32 v;
5523
5524 /* Notify VFs of impending reset */
5525 if (ice_check_sq_alive(hw, &hw->mailboxq))
5526 ice_vc_notify_reset(pf);
5527
5528 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5529
5530 /* disable the VSIs and their queues that are not already DOWN */
5531 ice_pf_dis_all_vsi(pf, false);
5532
5533 ice_for_each_vsi(pf, v)
5534 if (pf->vsi[v])
5535 pf->vsi[v]->vsi_num = 0;
5536
5537 ice_shutdown_all_ctrlq(hw, true);
5538 }
5539
5540 /**
5541 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5542 * @pf: board private structure to reinitialize
5543 *
5544 * This routine reinitializes the interrupt scheme that was cleared during
5545 * the power management suspend callback.
5546 *
5547 * This should be called during resume routine to re-allocate the q_vectors
5548 * and reacquire interrupts.
5549 */
5550 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5551 {
5552 struct device *dev = ice_pf_to_dev(pf);
5553 int ret, v;
5554
5555 /* Since we clear MSIX flag during suspend, we need to
5556 * set it back during resume...
5557 */
5558
5559 ret = ice_init_interrupt_scheme(pf);
5560 if (ret) {
5561 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5562 return ret;
5563 }
5564
5565 /* Remap vectors and rings after successfully re-initializing interrupts */
5566 ice_for_each_vsi(pf, v) {
5567 if (!pf->vsi[v])
5568 continue;
5569
5570 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5571 if (ret)
5572 goto err_reinit;
5573 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5574 rtnl_lock();
5575 ice_vsi_set_napi_queues(pf->vsi[v]);
5576 rtnl_unlock();
5577 }
5578
5579 ret = ice_req_irq_msix_misc(pf);
5580 if (ret) {
5581 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5582 ret);
5583 goto err_reinit;
5584 }
5585
5586 return 0;
5587
5588 err_reinit:
5589 while (v--)
5590 if (pf->vsi[v]) {
5591 rtnl_lock();
5592 ice_vsi_clear_napi_queues(pf->vsi[v]);
5593 rtnl_unlock();
5594 ice_vsi_free_q_vectors(pf->vsi[v]);
5595 }
5596
5597 return ret;
5598 }
5599
5600 /**
5601 * ice_suspend
5602 * @dev: generic device information structure
5603 *
5604 * Power Management callback to quiesce the device and prepare
5605 * for D3 transition.
5606 */
5607 static int ice_suspend(struct device *dev)
5608 {
5609 struct pci_dev *pdev = to_pci_dev(dev);
5610 struct ice_pf *pf;
5611 int disabled, v;
5612
5613 pf = pci_get_drvdata(pdev);
5614
5615 if (!ice_pf_state_is_nominal(pf)) {
5616 dev_err(dev, "Device is not ready, no need to suspend it\n");
5617 return -EBUSY;
5618 }
5619
5620 /* Stop watchdog tasks until resume completion.
5621 * Even though it is most likely that the service task is
5622 * disabled if the device is suspended or down, the service task's
5623 * state is controlled by a different state bit, and we should
5624 * store and honor whatever state that bit is in at this point.
5625 */
5626 disabled = ice_service_task_stop(pf);
5627
5628 ice_deinit_rdma(pf);
5629
5630 /* Already suspended? Then there is nothing to do */
5631 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5632 if (!disabled)
5633 ice_service_task_restart(pf);
5634 return 0;
5635 }
5636
5637 if (test_bit(ICE_DOWN, pf->state) ||
5638 ice_is_reset_in_progress(pf->state)) {
5639 dev_err(dev, "can't suspend device in reset or already down\n");
5640 if (!disabled)
5641 ice_service_task_restart(pf);
5642 return 0;
5643 }
5644
5645 ice_setup_mc_magic_wake(pf);
5646
5647 ice_prepare_for_shutdown(pf);
5648
5649 ice_set_wake(pf);
5650
5651 /* Free vectors, clear the interrupt scheme and release IRQs
5652 * for proper hibernation, especially with large number of CPUs.
5653 * Otherwise hibernation might fail when mapping all the vectors back
5654 * to CPU0.
5655 */
5656 ice_free_irq_msix_misc(pf);
5657 ice_for_each_vsi(pf, v) {
5658 if (!pf->vsi[v])
5659 continue;
5660 rtnl_lock();
5661 ice_vsi_clear_napi_queues(pf->vsi[v]);
5662 rtnl_unlock();
5663 ice_vsi_free_q_vectors(pf->vsi[v]);
5664 }
5665 ice_clear_interrupt_scheme(pf);
5666
5667 pci_save_state(pdev);
5668 pci_wake_from_d3(pdev, pf->wol_ena);
5669 pci_set_power_state(pdev, PCI_D3hot);
5670 return 0;
5671 }
5672
5673 /**
5674 * ice_resume - PM callback for waking up from D3
5675 * @dev: generic device information structure
5676 */
5677 static int ice_resume(struct device *dev)
5678 {
5679 struct pci_dev *pdev = to_pci_dev(dev);
5680 enum ice_reset_req reset_type;
5681 struct ice_pf *pf;
5682 struct ice_hw *hw;
5683 int ret;
5684
5685 pci_set_power_state(pdev, PCI_D0);
5686 pci_restore_state(pdev);
5687 pci_save_state(pdev);
5688
5689 if (!pci_device_is_present(pdev))
5690 return -ENODEV;
5691
5692 ret = pci_enable_device_mem(pdev);
5693 if (ret) {
5694 dev_err(dev, "Cannot enable device after suspend\n");
5695 return ret;
5696 }
5697
5698 pf = pci_get_drvdata(pdev);
5699 hw = &pf->hw;
5700
5701 pf->wakeup_reason = rd32(hw, PFPM_WUS);
5702 ice_print_wake_reason(pf);
5703
5704 /* We cleared the interrupt scheme when we suspended, so we need to
5705 * restore it now to resume device functionality.
5706 */
5707 ret = ice_reinit_interrupt_scheme(pf);
5708 if (ret)
5709 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5710
5711 ret = ice_init_rdma(pf);
5712 if (ret)
5713 dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
5714 ret);
5715
5716 clear_bit(ICE_DOWN, pf->state);
5717 /* Now perform PF reset and rebuild */
5718 reset_type = ICE_RESET_PFR;
5719 /* re-enable service task for reset, but allow reset to schedule it */
5720 clear_bit(ICE_SERVICE_DIS, pf->state);
5721
5722 if (ice_schedule_reset(pf, reset_type))
5723 dev_err(dev, "Reset during resume failed.\n");
5724
5725 clear_bit(ICE_SUSPENDED, pf->state);
5726 ice_service_task_restart(pf);
5727
5728 /* Restart the service task */
5729 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5730
5731 return 0;
5732 }
5733
5734 /**
5735 * ice_pci_err_detected - warning that PCI error has been detected
5736 * @pdev: PCI device information struct
5737 * @err: the type of PCI error
5738 *
5739 * Called to warn that something happened on the PCI bus and the error handling
5740 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
5741 */
5742 static pci_ers_result_t
5743 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5744 {
5745 struct ice_pf *pf = pci_get_drvdata(pdev);
5746
5747 if (!pf) {
5748 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5749 __func__, err);
5750 return PCI_ERS_RESULT_DISCONNECT;
5751 }
5752
5753 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5754 ice_service_task_stop(pf);
5755
5756 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5757 set_bit(ICE_PFR_REQ, pf->state);
5758 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5759 }
5760 }
5761
5762 return PCI_ERS_RESULT_NEED_RESET;
5763 }
5764
5765 /**
5766 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5767 * @pdev: PCI device information struct
5768 *
5769 * Called to determine if the driver can recover from the PCI slot reset by
5770 * using a register read to determine if the device is recoverable.
5771 */
5772 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5773 {
5774 struct ice_pf *pf = pci_get_drvdata(pdev);
5775 pci_ers_result_t result;
5776 int err;
5777 u32 reg;
5778
5779 err = pci_enable_device_mem(pdev);
5780 if (err) {
5781 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5782 err);
5783 result = PCI_ERS_RESULT_DISCONNECT;
5784 } else {
5785 pci_set_master(pdev);
5786 pci_restore_state(pdev);
5787 pci_save_state(pdev);
5788 pci_wake_from_d3(pdev, false);
5789
5790 /* Check for life */
5791 reg = rd32(&pf->hw, GLGEN_RTRIG);
5792 if (!reg)
5793 result = PCI_ERS_RESULT_RECOVERED;
5794 else
5795 result = PCI_ERS_RESULT_DISCONNECT;
5796 }
5797
5798 return result;
5799 }
5800
5801 /**
5802 * ice_pci_err_resume - restart operations after PCI error recovery
5803 * @pdev: PCI device information struct
5804 *
5805 * Called to allow the driver to bring things back up after PCI error and/or
5806 * reset recovery have finished
5807 */
5808 static void ice_pci_err_resume(struct pci_dev *pdev)
5809 {
5810 struct ice_pf *pf = pci_get_drvdata(pdev);
5811
5812 if (!pf) {
5813 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5814 __func__);
5815 return;
5816 }
5817
5818 if (test_bit(ICE_SUSPENDED, pf->state)) {
5819 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5820 __func__);
5821 return;
5822 }
5823
5824 ice_restore_all_vfs_msi_state(pf);
5825
5826 ice_do_reset(pf, ICE_RESET_PFR);
5827 ice_service_task_restart(pf);
5828 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5829 }
5830
5831 /**
5832 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5833 * @pdev: PCI device information struct
5834 */
5835 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5836 {
5837 struct ice_pf *pf = pci_get_drvdata(pdev);
5838
5839 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5840 ice_service_task_stop(pf);
5841
5842 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5843 set_bit(ICE_PFR_REQ, pf->state);
5844 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5845 }
5846 }
5847 }
5848
5849 /**
5850 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5851 * @pdev: PCI device information struct
5852 */
5853 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5854 {
5855 ice_pci_err_resume(pdev);
5856 }
5857
5858 /* ice_pci_tbl - PCI Device ID Table
5859 *
5860 * Wildcard entries (PCI_ANY_ID) should come last
5861 * Last entry must be all 0s
5862 *
5863 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5864 * Class, Class Mask, private data (not used) }
5865 */
5866 static const struct pci_device_id ice_pci_tbl[] = {
5867 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) },
5868 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) },
5869 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) },
5870 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) },
5871 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) },
5872 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) },
5873 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) },
5874 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) },
5875 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) },
5876 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) },
5877 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) },
5878 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) },
5879 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) },
5880 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) },
5881 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) },
5882 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) },
5883 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) },
5884 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) },
5885 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) },
5886 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) },
5887 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) },
5888 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) },
5889 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) },
5890 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
5891 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
5892 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
5893 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), },
5894 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), },
5895 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), },
5896 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), },
5897 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) },
5898 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) },
5899 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) },
5900 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) },
5901 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), },
5902 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), },
5903 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), },
5904 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), },
5905 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), },
5906 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), },
5907 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_BACKPLANE), },
5908 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_QSFP56), },
5909 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_SFP), },
5910 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_BACKPLANE), },
5911 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_QSFP), },
5912 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_SFP), },
5913 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_BACKPLANE), },
5914 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_QSFP), },
5915 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_SFP), },
5916 /* required last entry */
5917 {}
5918 };
5919 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5920
5921 static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5922
5923 static const struct pci_error_handlers ice_pci_err_handler = {
5924 .error_detected = ice_pci_err_detected,
5925 .slot_reset = ice_pci_err_slot_reset,
5926 .reset_prepare = ice_pci_err_reset_prepare,
5927 .reset_done = ice_pci_err_reset_done,
5928 .resume = ice_pci_err_resume
5929 };
5930
5931 static struct pci_driver ice_driver = {
5932 .name = KBUILD_MODNAME,
5933 .id_table = ice_pci_tbl,
5934 .probe = ice_probe,
5935 .remove = ice_remove,
5936 .driver.pm = pm_sleep_ptr(&ice_pm_ops),
5937 .shutdown = ice_shutdown,
5938 .sriov_configure = ice_sriov_configure,
5939 .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
5940 .sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count,
5941 .err_handler = &ice_pci_err_handler
5942 };
5943
5944 /**
5945 * ice_module_init - Driver registration routine
5946 *
5947 * ice_module_init is the first routine called when the driver is
5948 * loaded. All it does is register with the PCI subsystem.
5949 */
5950 static int __init ice_module_init(void)
5951 {
5952 int status = -ENOMEM;
5953
5954 pr_info("%s\n", ice_driver_string);
5955 pr_info("%s\n", ice_copyright);
5956
5957 ice_adv_lnk_speed_maps_init();
5958
5959 ice_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, KBUILD_MODNAME);
5960 if (!ice_wq) {
5961 pr_err("Failed to create workqueue\n");
5962 return status;
5963 }
5964
5965 ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
5966 if (!ice_lag_wq) {
5967 pr_err("Failed to create LAG workqueue\n");
5968 goto err_dest_wq;
5969 }
5970
5971 ice_debugfs_init();
5972
5973 status = pci_register_driver(&ice_driver);
5974 if (status) {
5975 pr_err("failed to register PCI driver, err %d\n", status);
5976 goto err_dest_lag_wq;
5977 }
5978
5979 status = ice_sf_driver_register();
5980 if (status) {
5981 pr_err("Failed to register SF driver, err %d\n", status);
5982 goto err_sf_driver;
5983 }
5984
5985 return 0;
5986
5987 err_sf_driver:
5988 pci_unregister_driver(&ice_driver);
5989 err_dest_lag_wq:
5990 destroy_workqueue(ice_lag_wq);
5991 ice_debugfs_exit();
5992 err_dest_wq:
5993 destroy_workqueue(ice_wq);
5994 return status;
5995 }
5996 module_init(ice_module_init);
5997
5998 /**
5999 * ice_module_exit - Driver exit cleanup routine
6000 *
6001 * ice_module_exit is called just before the driver is removed
6002 * from memory.
6003 */
6004 static void __exit ice_module_exit(void)
6005 {
6006 ice_sf_driver_unregister();
6007 pci_unregister_driver(&ice_driver);
6008 ice_debugfs_exit();
6009 destroy_workqueue(ice_wq);
6010 destroy_workqueue(ice_lag_wq);
6011 pr_info("module unloaded\n");
6012 }
6013 module_exit(ice_module_exit);
6014
6015 /**
6016 * ice_set_mac_address - NDO callback to set MAC address
6017 * @netdev: network interface device structure
6018 * @pi: pointer to an address structure
6019 *
6020 * Returns 0 on success, negative on failure
6021 */
6022 static int ice_set_mac_address(struct net_device *netdev, void *pi)
6023 {
6024 struct ice_netdev_priv *np = netdev_priv(netdev);
6025 struct ice_vsi *vsi = np->vsi;
6026 struct ice_pf *pf = vsi->back;
6027 struct ice_hw *hw = &pf->hw;
6028 struct sockaddr *addr = pi;
6029 u8 old_mac[ETH_ALEN];
6030 u8 flags = 0;
6031 u8 *mac;
6032 int err;
6033
6034 mac = (u8 *)addr->sa_data;
6035
6036 if (!is_valid_ether_addr(mac))
6037 return -EADDRNOTAVAIL;
6038
6039 if (test_bit(ICE_DOWN, pf->state) ||
6040 ice_is_reset_in_progress(pf->state)) {
6041 netdev_err(netdev, "can't set mac %pM. device not ready\n",
6042 mac);
6043 return -EBUSY;
6044 }
6045
6046 if (ice_chnl_dmac_fltr_cnt(pf)) {
6047 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
6048 mac);
6049 return -EAGAIN;
6050 }
6051
6052 netif_addr_lock_bh(netdev);
6053 ether_addr_copy(old_mac, netdev->dev_addr);
6054 /* change the netdev's MAC address */
6055 eth_hw_addr_set(netdev, mac);
6056 netif_addr_unlock_bh(netdev);
6057
6058 /* Clean up old MAC filter. Not an error if old filter doesn't exist */
6059 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
6060 if (err && err != -ENOENT) {
6061 err = -EADDRNOTAVAIL;
6062 goto err_update_filters;
6063 }
6064
6065 /* Add filter for new MAC. If filter exists, return success */
6066 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
6067 if (err == -EEXIST) {
6068 /* Although this MAC filter is already present in hardware it's
6069 * possible in some cases (e.g. bonding) that dev_addr was
6070 * modified outside of the driver and needs to be restored back
6071 * to this value.
6072 */
6073 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
6074
6075 return 0;
6076 } else if (err) {
6077 /* error if the new filter addition failed */
6078 err = -EADDRNOTAVAIL;
6079 }
6080
6081 err_update_filters:
6082 if (err) {
6083 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
6084 mac);
6085 netif_addr_lock_bh(netdev);
6086 eth_hw_addr_set(netdev, old_mac);
6087 netif_addr_unlock_bh(netdev);
6088 return err;
6089 }
6090
6091 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
6092 netdev->dev_addr);
6093
6094 /* write new MAC address to the firmware */
6095 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
6096 err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
6097 if (err) {
6098 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
6099 mac, err);
6100 }
6101 return 0;
6102 }
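/* Usage sketch (editor's illustration): this NDO is reached through the
 * standard rtnetlink path; 'eth0' and the address are hypothetical:
 *
 *   ip link set dev eth0 address 00:11:22:33:44:55
 *
 * A non-unicast address is rejected above with -EADDRNOTAVAIL.
 */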
6103
6104 /**
6105 * ice_set_rx_mode - NDO callback to set the netdev filters
6106 * @netdev: network interface device structure
6107 */
6108 static void ice_set_rx_mode(struct net_device *netdev)
6109 {
6110 struct ice_netdev_priv *np = netdev_priv(netdev);
6111 struct ice_vsi *vsi = np->vsi;
6112
6113 if (!vsi || ice_is_switchdev_running(vsi->back))
6114 return;
6115
6116 /* Set the flags to synchronize filters
6117 * ndo_set_rx_mode may be triggered even without a change in netdev
6118 * flags
6119 */
6120 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
6121 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
6122 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
6123
6124 /* schedule our worker thread which will take care of
6125 * applying the new filter changes
6126 */
6127 ice_service_task_schedule(vsi->back);
6128 }
6129
6130 /**
6131 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
6132 * @netdev: network interface device structure
6133 * @queue_index: Queue ID
6134 * @maxrate: maximum bandwidth in Mbps
6135 */
6136 static int
6137 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
6138 {
6139 struct ice_netdev_priv *np = netdev_priv(netdev);
6140 struct ice_vsi *vsi = np->vsi;
6141 u16 q_handle;
6142 int status;
6143 u8 tc;
6144
6145 /* Validate maxrate requested is within permitted range */
6146 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
6147 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
6148 maxrate, queue_index);
6149 return -EINVAL;
6150 }
6151
6152 q_handle = vsi->tx_rings[queue_index]->q_handle;
6153 tc = ice_dcb_get_tc(vsi, queue_index);
6154
6155 vsi = ice_locate_vsi_using_queue(vsi, queue_index);
6156 if (!vsi) {
6157 netdev_err(netdev, "Invalid VSI for given queue %d\n",
6158 queue_index);
6159 return -EINVAL;
6160 }
6161
6162 /* Set BW back to default, when user set maxrate to 0 */
6163 if (!maxrate)
6164 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
6165 q_handle, ICE_MAX_BW);
6166 else
6167 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
6168 q_handle, ICE_MAX_BW, maxrate * 1000);
6169 if (status)
6170 netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
6171 status);
6172
6173 return status;
6174 }
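/* Usage sketch (editor's illustration): ndo_set_tx_maxrate is driven per
 * queue from sysfs; e.g. to cap Tx queue 0 of a hypothetical eth0 at
 * 1000 Mbps:
 *
 *   echo 1000 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * Writing 0 restores the default (unlimited) queue bandwidth.
 */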
6175
6176 /**
6177 * ice_fdb_add - add an entry to the hardware database
6178 * @ndm: the input from the stack
6179 * @tb: pointer to array of nladdr (unused)
6180 * @dev: the net device pointer
6181 * @addr: the MAC address entry being added
6182 * @vid: VLAN ID
6183 * @flags: instructions from stack about fdb operation
6184 * @notified: whether notification was emitted
6185 * @extack: netlink extended ack
6186 */
6187 static int
6188 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
6189 struct net_device *dev, const unsigned char *addr, u16 vid,
6190 u16 flags, bool *notified,
6191 struct netlink_ext_ack __always_unused *extack)
6192 {
6193 int err;
6194
6195 if (vid) {
6196 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
6197 return -EINVAL;
6198 }
6199 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
6200 netdev_err(dev, "FDB only supports static addresses\n");
6201 return -EINVAL;
6202 }
6203
6204 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
6205 err = dev_uc_add_excl(dev, addr);
6206 else if (is_multicast_ether_addr(addr))
6207 err = dev_mc_add_excl(dev, addr);
6208 else
6209 err = -EINVAL;
6210
6211 /* Only return duplicate errors if NLM_F_EXCL is set */
6212 if (err == -EEXIST && !(flags & NLM_F_EXCL))
6213 err = 0;
6214
6215 return err;
6216 }
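/* Usage sketch (editor's illustration): FDB entries arrive here via
 * RTM_NEWNEIGH, typically from iproute2; 'eth0' is hypothetical:
 *
 *   bridge fdb add 01:00:5e:00:00:42 dev eth0
 *
 * iproute2 defaults to a permanent entry, which satisfies the NUD_PERMANENT
 * check above; a VLAN-qualified add is rejected with -EINVAL.
 */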
6217
6218 /**
6219 * ice_fdb_del - delete an entry from the hardware database
6220 * @ndm: the input from the stack
6221 * @tb: pointer to array of nladdr (unused)
6222 * @dev: the net device pointer
6223 * @addr: the MAC address entry being removed
6224 * @vid: VLAN ID
6225 * @notified: whether notification was emitted
6226 * @extack: netlink extended ack
6227 */
6228 static int
6229 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
6230 struct net_device *dev, const unsigned char *addr,
6231 __always_unused u16 vid, bool *notified,
6232 struct netlink_ext_ack *extack)
6233 {
6234 int err;
6235
6236 if (ndm->ndm_state & NUD_PERMANENT) {
6237 netdev_err(dev, "FDB only supports static addresses\n");
6238 return -EINVAL;
6239 }
6240
6241 if (is_unicast_ether_addr(addr))
6242 err = dev_uc_del(dev, addr);
6243 else if (is_multicast_ether_addr(addr))
6244 err = dev_mc_del(dev, addr);
6245 else
6246 err = -EINVAL;
6247
6248 return err;
6249 }
6250
6251 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
6252 NETIF_F_HW_VLAN_CTAG_TX | \
6253 NETIF_F_HW_VLAN_STAG_RX | \
6254 NETIF_F_HW_VLAN_STAG_TX)
6255
6256 #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
6257 NETIF_F_HW_VLAN_STAG_RX)
6258
6259 #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
6260 NETIF_F_HW_VLAN_STAG_FILTER)
6261
6262 /**
6263 * ice_fix_features - fix the netdev features flags based on device limitations
6264 * @netdev: ptr to the netdev that flags are being fixed on
6265 * @features: features that need to be checked and possibly fixed
6266 *
6267 * Make sure any fixups are made to features in this callback. This enables the
6268 * driver to not have to check unsupported configurations throughout the driver
6269 * because that is the responsibility of this callback.
6270 *
6271 * Single VLAN Mode (SVM) Supported Features:
6272 * NETIF_F_HW_VLAN_CTAG_FILTER
6273 * NETIF_F_HW_VLAN_CTAG_RX
6274 * NETIF_F_HW_VLAN_CTAG_TX
6275 *
6276 * Double VLAN Mode (DVM) Supported Features:
6277 * NETIF_F_HW_VLAN_CTAG_FILTER
6278 * NETIF_F_HW_VLAN_CTAG_RX
6279 * NETIF_F_HW_VLAN_CTAG_TX
6280 *
6281 * NETIF_F_HW_VLAN_STAG_FILTER
6282 * NETIF_F_HW_VLAN_STAG_RX
6283 * NETIF_F_HW_VLAN_STAG_TX
6284 *
6285 * Features that need fixing:
6286 * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
6287 * These are mutually exclusive as the VSI context cannot support multiple
6288 * VLAN ethertypes simultaneously for stripping and/or insertion. If this
6289 * is not done, then default to clearing the requested STAG offload
6290 * settings.
6291 *
6292 * All supported filtering has to be enabled or disabled together. For
6293 * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
6294 * together. If this is not done, then default to VLAN filtering disabled.
6295 * These are mutually exclusive as there is currently no way to
6296 * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
6297 * prune rules.
6298 */
6299 static netdev_features_t
6300 ice_fix_features(struct net_device *netdev, netdev_features_t features)
6301 {
6302 struct ice_netdev_priv *np = netdev_priv(netdev);
6303 netdev_features_t req_vlan_fltr, cur_vlan_fltr;
6304 bool cur_ctag, cur_stag, req_ctag, req_stag;
6305
6306 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
6307 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6308 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6309
6310 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
6311 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6312 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6313
6314 if (req_vlan_fltr != cur_vlan_fltr) {
6315 if (ice_is_dvm_ena(&np->vsi->back->hw)) {
6316 if (req_ctag && req_stag) {
6317 features |= NETIF_VLAN_FILTERING_FEATURES;
6318 } else if (!req_ctag && !req_stag) {
6319 features &= ~NETIF_VLAN_FILTERING_FEATURES;
6320 } else if ((!cur_ctag && req_ctag && !cur_stag) ||
6321 (!cur_stag && req_stag && !cur_ctag)) {
6322 features |= NETIF_VLAN_FILTERING_FEATURES;
6323 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
6324 } else if ((cur_ctag && !req_ctag && cur_stag) ||
6325 (cur_stag && !req_stag && cur_ctag)) {
6326 features &= ~NETIF_VLAN_FILTERING_FEATURES;
6327 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
6328 }
6329 } else {
6330 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
6331 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
6332
6333 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
6334 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6335 }
6336 }
6337
6338 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
6339 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
6340 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
6341 features &= ~(NETIF_F_HW_VLAN_STAG_RX |
6342 NETIF_F_HW_VLAN_STAG_TX);
6343 }
6344
6345 if (!(netdev->features & NETIF_F_RXFCS) &&
6346 (features & NETIF_F_RXFCS) &&
6347 (features & NETIF_VLAN_STRIPPING_FEATURES) &&
6348 !ice_vsi_has_non_zero_vlans(np->vsi)) {
6349 netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
6350 features &= ~NETIF_VLAN_STRIPPING_FEATURES;
6351 }
6352
6353 return features;
6354 }
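/* Behavior sketch (editor's illustration): with DVM active and both filters
 * currently off, requesting only the CTAG filter on a hypothetical eth0:
 *
 *   ethtool -K eth0 rx-vlan-filter on
 *
 * is coerced so rx-vlan-stag-filter comes on as well (with the warning
 * above), since CTAG and STAG pruning cannot be toggled independently.
 */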
6355
6356 /**
6357 * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
6358 * @vsi: PF's VSI
6359 * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order
6360 *
6361 * Store current stripped VLAN proto in ring packet context,
6362 * so it can be accessed more efficiently by packet processing code.
6363 */
6364 static void
6365 ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype)
6366 {
6367 u16 i;
6368
6369 ice_for_each_alloc_rxq(vsi, i)
6370 vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype;
6371 }
6372
6373 /**
6374 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6375 * @vsi: PF's VSI
6376 * @features: features used to determine VLAN offload settings
6377 *
6378 * First, determine the vlan_ethertype based on the VLAN offload bits in
6379 * features. Then determine if stripping and insertion should be enabled or
6380 * disabled. Finally enable or disable VLAN stripping and insertion.
6381 */
6382 static int
6383 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6384 {
6385 bool enable_stripping = true, enable_insertion = true;
6386 struct ice_vsi_vlan_ops *vlan_ops;
6387 int strip_err = 0, insert_err = 0;
6388 u16 vlan_ethertype = 0;
6389
6390 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6391
6392 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
6393 vlan_ethertype = ETH_P_8021AD;
6394 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
6395 vlan_ethertype = ETH_P_8021Q;
6396
6397 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
6398 enable_stripping = false;
6399 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
6400 enable_insertion = false;
6401
6402 if (enable_stripping)
6403 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6404 else
6405 strip_err = vlan_ops->dis_stripping(vsi);
6406
6407 if (enable_insertion)
6408 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6409 else
6410 insert_err = vlan_ops->dis_insertion(vsi);
6411
6412 if (strip_err || insert_err)
6413 return -EIO;
6414
6415 ice_set_rx_rings_vlan_proto(vsi, enable_stripping ?
6416 htons(vlan_ethertype) : 0);
6417
6418 return 0;
6419 }
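/* Behavior sketch (editor's illustration): any STAG offload bit selects
 * ethertype 0x88A8 (802.1ad), otherwise CTAG bits select 0x8100 (802.1Q);
 * e.g. on a hypothetical eth0 this configures 0x8100 stripping/insertion:
 *
 *   ethtool -K eth0 rxvlan on txvlan on
 */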
6420
6421 /**
6422 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6423 * @vsi: PF's VSI
6424 * @features: features used to determine VLAN filtering settings
6425 *
6426 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
6427 * features.
6428 */
6429 static int
6430 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6431 {
6432 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6433 int err = 0;
6434
6435 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
6436 * if either bit is set. In switchdev mode Rx filtering should never be
6437 * enabled.
6438 */
6439 if ((features &
6440 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) &&
6441 !ice_is_eswitch_mode_switchdev(vsi->back))
6442 err = vlan_ops->ena_rx_filtering(vsi);
6443 else
6444 err = vlan_ops->dis_rx_filtering(vsi);
6445
6446 return err;
6447 }
6448
6449 /**
6450 * ice_set_vlan_features - set VLAN settings based on suggested feature set
6451 * @netdev: ptr to the netdev being adjusted
6452 * @features: the feature set that the stack is suggesting
6453 *
6454 * Only update VLAN settings if the requested_vlan_features are different from
6455 * the current_vlan_features.
6456 */
6457 static int
6458 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6459 {
6460 netdev_features_t current_vlan_features, requested_vlan_features;
6461 struct ice_netdev_priv *np = netdev_priv(netdev);
6462 struct ice_vsi *vsi = np->vsi;
6463 int err;
6464
6465 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6466 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6467 if (current_vlan_features ^ requested_vlan_features) {
6468 if ((features & NETIF_F_RXFCS) &&
6469 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6470 dev_err(ice_pf_to_dev(vsi->back),
6471 "To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6472 return -EIO;
6473 }
6474
6475 err = ice_set_vlan_offload_features(vsi, features);
6476 if (err)
6477 return err;
6478 }
6479
6480 current_vlan_features = netdev->features &
6481 NETIF_VLAN_FILTERING_FEATURES;
6482 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6483 if (current_vlan_features ^ requested_vlan_features) {
6484 err = ice_set_vlan_filtering_features(vsi, features);
6485 if (err)
6486 return err;
6487 }
6488
6489 return 0;
6490 }
6491
6492 /**
6493 * ice_set_loopback - turn on/off loopback mode on underlying PF
6494 * @vsi: ptr to VSI
6495 * @ena: flag to indicate the on/off setting
6496 */
6497 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6498 {
6499 bool if_running = netif_running(vsi->netdev);
6500 int ret;
6501
6502 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6503 ret = ice_down(vsi);
6504 if (ret) {
6505 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6506 return ret;
6507 }
6508 }
6509 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6510 if (ret)
6511 netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6512 if (if_running)
6513 ret = ice_up(vsi);
6514
6515 return ret;
6516 }
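/* Usage sketch (editor's illustration): MAC loopback rides on the
 * NETIF_F_LOOPBACK feature bit; 'eth0' is hypothetical:
 *
 *   ethtool -K eth0 loopback on
 *
 * A running interface is briefly brought down and back up around the
 * firmware call above.
 */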
6517
6518 /**
6519 * ice_set_features - set the netdev feature flags
6520 * @netdev: ptr to the netdev being adjusted
6521 * @features: the feature set that the stack is suggesting
6522 */
6523 static int
6524 ice_set_features(struct net_device *netdev, netdev_features_t features)
6525 {
6526 netdev_features_t changed = netdev->features ^ features;
6527 struct ice_netdev_priv *np = netdev_priv(netdev);
6528 struct ice_vsi *vsi = np->vsi;
6529 struct ice_pf *pf = vsi->back;
6530 int ret = 0;
6531
6532 /* Don't set any netdev advanced features with device in Safe Mode */
6533 if (ice_is_safe_mode(pf)) {
6534 dev_err(ice_pf_to_dev(pf),
6535 "Device is in Safe Mode - not enabling advanced netdev features\n");
6536 return ret;
6537 }
6538
6539 /* Do not change setting during reset */
6540 if (ice_is_reset_in_progress(pf->state)) {
6541 dev_err(ice_pf_to_dev(pf),
6542 "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6543 return -EBUSY;
6544 }
6545
6546 /* Multiple features can be changed in one call so keep features in
6547 * separate if/else statements to guarantee each feature is checked
6548 */
6549 if (changed & NETIF_F_RXHASH)
6550 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6551
6552 ret = ice_set_vlan_features(netdev, features);
6553 if (ret)
6554 return ret;
6555
6556 /* Turn on reception of the FCS (aka CRC); after setting this
6557 * flag the packet data will have the 4-byte CRC appended
6558 */
6559 if (changed & NETIF_F_RXFCS) {
6560 if ((features & NETIF_F_RXFCS) &&
6561 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6562 dev_err(ice_pf_to_dev(vsi->back),
6563 "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6564 return -EIO;
6565 }
6566
6567 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6568 ret = ice_down_up(vsi);
6569 if (ret)
6570 return ret;
6571 }
6572
6573 if (changed & NETIF_F_NTUPLE) {
6574 bool ena = !!(features & NETIF_F_NTUPLE);
6575
6576 ice_vsi_manage_fdir(vsi, ena);
6577 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6578 }
6579
6580 /* don't turn off hw_tc_offload when ADQ is already enabled */
6581 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6582 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6583 return -EACCES;
6584 }
6585
6586 if (changed & NETIF_F_HW_TC) {
6587 bool ena = !!(features & NETIF_F_HW_TC);
6588
6589 assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena);
6590 }
6591
6592 if (changed & NETIF_F_LOOPBACK)
6593 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6594
6595 /* Due to E830 hardware limitations, TSO (NETIF_F_ALL_TSO) with GCS
6596 * (NETIF_F_HW_CSUM) is not supported.
6597 */
6598 if (ice_is_feature_supported(pf, ICE_F_GCS) &&
6599 ((features & NETIF_F_HW_CSUM) && (features & NETIF_F_ALL_TSO))) {
6600 if (netdev->features & NETIF_F_HW_CSUM)
6601 dev_err(ice_pf_to_dev(pf), "To enable TSO, you must first disable HW checksum.\n");
6602 else
6603 dev_err(ice_pf_to_dev(pf), "To enable HW checksum, you must first disable TSO.\n");
6604 return -EIO;
6605 }
6606
6607 return ret;
6608 }
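/* Usage sketch (editor's illustration): the feature bits handled above map to
 * the usual ethtool toggles on a hypothetical eth0:
 *
 *   ethtool -K eth0 ntuple on          # NETIF_F_NTUPLE: Flow Director + aRFS
 *   ethtool -K eth0 hw-tc-offload on   # NETIF_F_HW_TC: tc-flower offload
 */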
6609
6610 /**
6611 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6612 * @vsi: VSI to setup VLAN properties for
6613 */
6614 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6615 {
6616 int err;
6617
6618 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6619 if (err)
6620 return err;
6621
6622 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6623 if (err)
6624 return err;
6625
6626 return ice_vsi_add_vlan_zero(vsi);
6627 }
6628
6629 /**
6630 * ice_vsi_cfg_lan - Set up the VSI LAN-related config
6631 * @vsi: the VSI being configured
6632 *
6633 * Return 0 on success and negative value on error
6634 */
6635 int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6636 {
6637 int err;
6638
6639 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6640 ice_set_rx_mode(vsi->netdev);
6641
6642 err = ice_vsi_vlan_setup(vsi);
6643 if (err)
6644 return err;
6645 }
6646 ice_vsi_cfg_dcb_rings(vsi);
6647
6648 err = ice_vsi_cfg_lan_txqs(vsi);
6649 if (!err && ice_is_xdp_ena_vsi(vsi))
6650 err = ice_vsi_cfg_xdp_txqs(vsi);
6651 if (!err)
6652 err = ice_vsi_cfg_rxqs(vsi);
6653
6654 return err;
6655 }
6656
6657 /* THEORY OF MODERATION:
6658 * The ice driver hardware works differently than the hardware that DIMLIB was
6659 * originally made for. ice hardware doesn't have packet count limits that
6660 * can trigger an interrupt, but it *does* have interrupt rate limit support,
6661 * which is hard-coded to a limit of 250,000 ints/second.
6662 * If not using dynamic moderation, the INTRL value can be modified
6663 * by ethtool rx-usecs-high.
6664 */
6665 struct ice_dim {
6666 /* the throttle rate for interrupts, basically worst case delay before
6667 * an initial interrupt fires, value is stored in microseconds.
6668 */
6669 u16 itr;
6670 };
6671
6672 /* Make a different profile for Rx that doesn't allow quite so aggressive
6673 * moderation at the high end (it maxes out at 126us or about 8k interrupts a
6674 * second).
6675 */
6676 static const struct ice_dim rx_profile[] = {
6677 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6678 {8}, /* 125,000 ints/s */
6679 {16}, /* 62,500 ints/s */
6680 {62}, /* 16,129 ints/s */
6681 {126} /* 7,936 ints/s */
6682 };
6683
6684 /* The transmit profile, which has the same sorts of values
6685 * as the previous struct
6686 */
6687 static const struct ice_dim tx_profile[] = {
6688 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6689 {8}, /* 125,000 ints/s */
6690 {40}, /* 25,000 ints/s */
6691 {128}, /* 7,812 ints/s */
6692 {256} /* 3,906 ints/s */
6693 };
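/* Editor's note: the profile-to-rate mapping is just
 *
 *   ints/s = 1,000,000 / itr (itr in microseconds)
 *
 * e.g. 8 us -> 125,000 ints/s, 40 us -> 25,000 ints/s. DIMLIB picks
 * dim->profile_ix; the work handlers below turn that index into an ITR write.
 */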
6694
6695 static void ice_tx_dim_work(struct work_struct *work)
6696 {
6697 struct ice_ring_container *rc;
6698 struct dim *dim;
6699 u16 itr;
6700
6701 dim = container_of(work, struct dim, work);
6702 rc = dim->priv;
6703
6704 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6705
6706 /* look up the values in our local table */
6707 itr = tx_profile[dim->profile_ix].itr;
6708
6709 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6710 ice_write_itr(rc, itr);
6711
6712 dim->state = DIM_START_MEASURE;
6713 }
6714
6715 static void ice_rx_dim_work(struct work_struct *work)
6716 {
6717 struct ice_ring_container *rc;
6718 struct dim *dim;
6719 u16 itr;
6720
6721 dim = container_of(work, struct dim, work);
6722 rc = dim->priv;
6723
6724 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6725
6726 /* look up the values in our local table */
6727 itr = rx_profile[dim->profile_ix].itr;
6728
6729 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6730 ice_write_itr(rc, itr);
6731
6732 dim->state = DIM_START_MEASURE;
6733 }
6734
6735 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6736
6737 /**
6738 * ice_init_moderation - set up interrupt moderation
6739 * @q_vector: the vector containing rings to be configured
6740 *
6741 * Set up interrupt moderation registers, with the intent to do the right thing
6742 * when called from reset or from probe, and whether or not dynamic moderation
6743 * is enabled or not. Take special care to write all the registers in both
6744 * dynamic moderation mode or not in order to make sure hardware is in a known
6745 * state.
6746 */
6747 static void ice_init_moderation(struct ice_q_vector *q_vector)
6748 {
6749 struct ice_ring_container *rc;
6750 bool tx_dynamic, rx_dynamic;
6751
6752 rc = &q_vector->tx;
6753 INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6754 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6755 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6756 rc->dim.priv = rc;
6757 tx_dynamic = ITR_IS_DYNAMIC(rc);
6758
6759 /* set the initial TX ITR to match the above */
6760 ice_write_itr(rc, tx_dynamic ?
6761 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6762
6763 rc = &q_vector->rx;
6764 INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6765 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6766 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6767 rc->dim.priv = rc;
6768 rx_dynamic = ITR_IS_DYNAMIC(rc);
6769
6770 /* set the initial RX ITR to match the above */
6771 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6772 rc->itr_setting);
6773
6774 ice_set_q_vector_intrl(q_vector);
6775 }
6776
6777 /**
6778 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6779 * @vsi: the VSI being configured
6780 */
6781 static void ice_napi_enable_all(struct ice_vsi *vsi)
6782 {
6783 int q_idx;
6784
6785 if (!vsi->netdev)
6786 return;
6787
6788 ice_for_each_q_vector(vsi, q_idx) {
6789 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6790
6791 ice_init_moderation(q_vector);
6792
6793 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6794 napi_enable(&q_vector->napi);
6795 }
6796 }
6797
6798 /**
6799 * ice_up_complete - Finish the last steps of bringing up a connection
6800 * @vsi: The VSI being configured
6801 *
6802 * Return 0 on success and negative value on error
6803 */
6804 static int ice_up_complete(struct ice_vsi *vsi)
6805 {
6806 struct ice_pf *pf = vsi->back;
6807 int err;
6808
6809 ice_vsi_cfg_msix(vsi);
6810
6811 /* Enable only Rx rings, Tx rings were enabled by the FW when the
6812 * Tx queue group list was configured and the context bits were
6813 * programmed using ice_vsi_cfg_txqs
6814 */
6815 err = ice_vsi_start_all_rx_rings(vsi);
6816 if (err)
6817 return err;
6818
6819 clear_bit(ICE_VSI_DOWN, vsi->state);
6820 ice_napi_enable_all(vsi);
6821 ice_vsi_ena_irq(vsi);
6822
6823 if (vsi->port_info &&
6824 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6825 ((vsi->netdev && (vsi->type == ICE_VSI_PF ||
6826 vsi->type == ICE_VSI_SF)))) {
6827 ice_print_link_msg(vsi, true);
6828 netif_tx_start_all_queues(vsi->netdev);
6829 netif_carrier_on(vsi->netdev);
6830 ice_ptp_link_change(pf, true);
6831 }
6832
6833 /* Perform an initial read of the statistics registers now to
6834 * set the baseline so counters are ready when interface is up
6835 */
6836 ice_update_eth_stats(vsi);
6837
6838 if (vsi->type == ICE_VSI_PF)
6839 ice_service_task_schedule(pf);
6840
6841 return 0;
6842 }
6843
6844 /**
6845 * ice_up - Bring the connection back up after being down
6846 * @vsi: VSI being configured
6847 */
6848 int ice_up(struct ice_vsi *vsi)
6849 {
6850 int err;
6851
6852 err = ice_vsi_cfg_lan(vsi);
6853 if (!err)
6854 err = ice_up_complete(vsi);
6855
6856 return err;
6857 }
6858
6859 /**
6860 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6861 * @syncp: pointer to u64_stats_sync
6862 * @stats: stats that pkts and bytes count will be taken from
6863 * @pkts: packets stats counter
6864 * @bytes: bytes stats counter
6865 *
6866 * This function fetches stats from the ring considering the atomic operations
6867 * that need to be performed to read u64 values on a 32-bit machine.
6868 */
6869 void
6870 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6871 struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6872 {
6873 unsigned int start;
6874
6875 do {
6876 start = u64_stats_fetch_begin(syncp);
6877 *pkts = stats.pkts;
6878 *bytes = stats.bytes;
6879 } while (u64_stats_fetch_retry(syncp, start));
6880 }
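/* Editor's sketch (assumed, for illustration) of the matching writer side:
 * the ring hot path bumps the counters under the same syncp so that 32-bit
 * readers in the loop above never observe a torn u64:
 *
 *   u64_stats_update_begin(&ring->ring_stats->syncp);
 *   ring->ring_stats->stats.pkts += total_pkts;
 *   ring->ring_stats->stats.bytes += total_bytes;
 *   u64_stats_update_end(&ring->ring_stats->syncp);
 */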
6881
6882 /**
6883 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6884 * @vsi: the VSI to be updated
6885 * @vsi_stats: the stats struct to be updated
6886 * @rings: rings to work on
6887 * @count: number of rings
6888 */
6889 static void
6890 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6891 struct rtnl_link_stats64 *vsi_stats,
6892 struct ice_tx_ring **rings, u16 count)
6893 {
6894 u16 i;
6895
6896 for (i = 0; i < count; i++) {
6897 struct ice_tx_ring *ring;
6898 u64 pkts = 0, bytes = 0;
6899
6900 ring = READ_ONCE(rings[i]);
6901 if (!ring || !ring->ring_stats)
6902 continue;
6903 ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6904 ring->ring_stats->stats, &pkts,
6905 &bytes);
6906 vsi_stats->tx_packets += pkts;
6907 vsi_stats->tx_bytes += bytes;
6908 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6909 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6910 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6911 }
6912 }
6913
6914 /**
6915 * ice_update_vsi_ring_stats - Update VSI stats counters
6916 * @vsi: the VSI to be updated
6917 */
6918 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6919 {
6920 struct rtnl_link_stats64 *net_stats, *stats_prev;
6921 struct rtnl_link_stats64 *vsi_stats;
6922 struct ice_pf *pf = vsi->back;
6923 u64 pkts, bytes;
6924 int i;
6925
6926 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6927 if (!vsi_stats)
6928 return;
6929
6930 /* reset non-netdev (extended) stats */
6931 vsi->tx_restart = 0;
6932 vsi->tx_busy = 0;
6933 vsi->tx_linearize = 0;
6934 vsi->rx_buf_failed = 0;
6935 vsi->rx_page_failed = 0;
6936
6937 rcu_read_lock();
6938
6939 /* update Tx rings counters */
6940 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6941 vsi->num_txq);
6942
6943 /* update Rx rings counters */
6944 ice_for_each_rxq(vsi, i) {
6945 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6946 struct ice_ring_stats *ring_stats;
6947
6948 ring_stats = ring->ring_stats;
6949 ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6950 ring_stats->stats, &pkts,
6951 &bytes);
6952 vsi_stats->rx_packets += pkts;
6953 vsi_stats->rx_bytes += bytes;
6954 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6955 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6956 }
6957
6958 /* update XDP Tx rings counters */
6959 if (ice_is_xdp_ena_vsi(vsi))
6960 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6961 vsi->num_xdp_txq);
6962
6963 rcu_read_unlock();
6964
6965 net_stats = &vsi->net_stats;
6966 stats_prev = &vsi->net_stats_prev;
6967
6968 /* Update netdev counters, but keep in mind that values could start at a
6969 * random value after PF reset. And as we increase the reported stat by the
6970 * diff of Cur - Prev, we need to be sure that Prev is valid. If it's not,
6971 * let's skip this round.
6972 */
6973 if (likely(pf->stat_prev_loaded)) {
6974 net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6975 net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6976 net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6977 net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6978 }
6979
6980 stats_prev->tx_packets = vsi_stats->tx_packets;
6981 stats_prev->tx_bytes = vsi_stats->tx_bytes;
6982 stats_prev->rx_packets = vsi_stats->rx_packets;
6983 stats_prev->rx_bytes = vsi_stats->rx_bytes;
6984
6985 kfree(vsi_stats);
6986 }
6987
6988 /**
6989 * ice_update_vsi_stats - Update VSI stats counters
6990 * @vsi: the VSI to be updated
6991 */
6992 void ice_update_vsi_stats(struct ice_vsi *vsi)
6993 {
6994 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6995 struct ice_eth_stats *cur_es = &vsi->eth_stats;
6996 struct ice_pf *pf = vsi->back;
6997
6998 if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6999 test_bit(ICE_CFG_BUSY, pf->state))
7000 return;
7001
7002 /* get stats as recorded by Tx/Rx rings */
7003 ice_update_vsi_ring_stats(vsi);
7004
7005 /* get VSI stats as recorded by the hardware */
7006 ice_update_eth_stats(vsi);
7007
7008 cur_ns->tx_errors = cur_es->tx_errors;
7009 cur_ns->rx_dropped = cur_es->rx_discards;
7010 cur_ns->tx_dropped = cur_es->tx_discards;
7011 cur_ns->multicast = cur_es->rx_multicast;
7012
7013 /* update some more netdev stats if this is main VSI */
7014 if (vsi->type == ICE_VSI_PF) {
7015 cur_ns->rx_crc_errors = pf->stats.crc_errors;
7016 cur_ns->rx_errors = pf->stats.crc_errors +
7017 pf->stats.illegal_bytes +
7018 pf->stats.rx_undersize +
7019 pf->hw_csum_rx_error +
7020 pf->stats.rx_jabber +
7021 pf->stats.rx_fragments +
7022 pf->stats.rx_oversize;
7023 /* record drops from the port level */
7024 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
7025 }
7026 }
7027
7028 /**
7029 * ice_update_pf_stats - Update PF port stats counters
7030 * @pf: PF whose stats need to be updated
7031 */
7032 void ice_update_pf_stats(struct ice_pf *pf)
7033 {
7034 struct ice_hw_port_stats *prev_ps, *cur_ps;
7035 struct ice_hw *hw = &pf->hw;
7036 u16 fd_ctr_base;
7037 u8 port;
7038
7039 port = hw->port_info->lport;
7040 prev_ps = &pf->stats_prev;
7041 cur_ps = &pf->stats;
7042
7043 if (ice_is_reset_in_progress(pf->state))
7044 pf->stat_prev_loaded = false;
7045
7046 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
7047 &prev_ps->eth.rx_bytes,
7048 &cur_ps->eth.rx_bytes);
7049
7050 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
7051 &prev_ps->eth.rx_unicast,
7052 &cur_ps->eth.rx_unicast);
7053
7054 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
7055 &prev_ps->eth.rx_multicast,
7056 &cur_ps->eth.rx_multicast);
7057
7058 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
7059 &prev_ps->eth.rx_broadcast,
7060 &cur_ps->eth.rx_broadcast);
7061
7062 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
7063 &prev_ps->eth.rx_discards,
7064 &cur_ps->eth.rx_discards);
7065
7066 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
7067 &prev_ps->eth.tx_bytes,
7068 &cur_ps->eth.tx_bytes);
7069
7070 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
7071 &prev_ps->eth.tx_unicast,
7072 &cur_ps->eth.tx_unicast);
7073
7074 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
7075 &prev_ps->eth.tx_multicast,
7076 &cur_ps->eth.tx_multicast);
7077
7078 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
7079 &prev_ps->eth.tx_broadcast,
7080 &cur_ps->eth.tx_broadcast);
7081
7082 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
7083 &prev_ps->tx_dropped_link_down,
7084 &cur_ps->tx_dropped_link_down);
7085
7086 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
7087 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
7088
7089 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
7090 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
7091
7092 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
7093 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
7094
7095 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
7096 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
7097
7098 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
7099 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
7100
7101 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
7102 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
7103
7104 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
7105 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
7106
7107 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
7108 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
7109
7110 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
7111 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
7112
7113 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
7114 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
7115
7116 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
7117 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
7118
7119 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
7120 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
7121
7122 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
7123 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
7124
7125 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
7126 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
7127
7128 fd_ctr_base = hw->fd_ctr_base;
7129
7130 ice_stat_update40(hw,
7131 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
7132 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
7133 &cur_ps->fd_sb_match);
7134 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
7135 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
7136
7137 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
7138 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
7139
7140 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
7141 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
7142
7143 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
7144 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
7145
7146 ice_update_dcb_stats(pf);
7147
7148 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
7149 &prev_ps->crc_errors, &cur_ps->crc_errors);
7150
7151 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
7152 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
7153
7154 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
7155 &prev_ps->mac_local_faults,
7156 &cur_ps->mac_local_faults);
7157
7158 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
7159 &prev_ps->mac_remote_faults,
7160 &cur_ps->mac_remote_faults);
7161
7162 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
7163 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
7164
7165 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
7166 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
7167
7168 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
7169 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
7170
7171 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
7172 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
7173
7174 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
7175
7176 pf->stat_prev_loaded = true;
7177 }
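/* Illustrative sketch (compiled out): the wrap-safe delta logic a
 * helper like ice_stat_update40() typically implements. The real
 * helper lives elsewhere in the driver; treat names and details here
 * as an assumption-based outline.
 */
#if 0
static void example_stat_update40(struct ice_hw *hw, u32 reg,
				  bool prev_loaded, u64 *prev, u64 *cur)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	if (!prev_loaded)
		*prev = new_data;	/* first read: establish baseline */
	if (new_data >= *prev)
		*cur += new_data - *prev;
	else
		/* 40-bit counter wrapped since the previous read */
		*cur += (new_data + BIT_ULL(40)) - *prev;
	*prev = new_data;
}
#endif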
7178
7179 /**
7180 * ice_get_stats64 - get statistics for network device structure
7181 * @netdev: network interface device structure
7182 * @stats: main device statistics structure
7183 */
7184 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
7185 {
7186 struct ice_netdev_priv *np = netdev_priv(netdev);
7187 struct rtnl_link_stats64 *vsi_stats;
7188 struct ice_vsi *vsi = np->vsi;
7189
7190 vsi_stats = &vsi->net_stats;
7191
7192 if (!vsi->num_txq || !vsi->num_rxq)
7193 return;
7194
7195 /* netdev packet/byte stats come from the ring counters, which are
7196 * summed up by ice_update_vsi_ring_stats(). But only call the
7197 * update routine, and thus read the registers, if the VSI is
7198 * not down.
7199 */
7200 if (!test_bit(ICE_VSI_DOWN, vsi->state))
7201 ice_update_vsi_ring_stats(vsi);
7202 stats->tx_packets = vsi_stats->tx_packets;
7203 stats->tx_bytes = vsi_stats->tx_bytes;
7204 stats->rx_packets = vsi_stats->rx_packets;
7205 stats->rx_bytes = vsi_stats->rx_bytes;
7206
7207 /* The rest of the stats can be read from the hardware but instead we
7208 * just return values that the watchdog task has already obtained from
7209 * the hardware.
7210 */
7211 stats->multicast = vsi_stats->multicast;
7212 stats->tx_errors = vsi_stats->tx_errors;
7213 stats->tx_dropped = vsi_stats->tx_dropped;
7214 stats->rx_errors = vsi_stats->rx_errors;
7215 stats->rx_dropped = vsi_stats->rx_dropped;
7216 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
7217 stats->rx_length_errors = vsi_stats->rx_length_errors;
7218 }
7219
7220 /**
7221 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
7222 * @vsi: VSI having NAPI disabled
7223 */
7224 static void ice_napi_disable_all(struct ice_vsi *vsi)
7225 {
7226 int q_idx;
7227
7228 if (!vsi->netdev)
7229 return;
7230
7231 ice_for_each_q_vector(vsi, q_idx) {
7232 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
7233
7234 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
7235 napi_disable(&q_vector->napi);
7236
7237 cancel_work_sync(&q_vector->tx.dim.work);
7238 cancel_work_sync(&q_vector->rx.dim.work);
7239 }
7240 }
7241
7242 /**
7243 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
7244 * @vsi: the VSI being un-configured
7245 */
7246 static void ice_vsi_dis_irq(struct ice_vsi *vsi)
7247 {
7248 struct ice_pf *pf = vsi->back;
7249 struct ice_hw *hw = &pf->hw;
7250 u32 val;
7251 int i;
7252
7253 /* disable interrupt causation from each Rx queue; Tx queues are
7254 * handled in ice_vsi_stop_tx_ring()
7255 */
7256 if (vsi->rx_rings) {
7257 ice_for_each_rxq(vsi, i) {
7258 if (vsi->rx_rings[i]) {
7259 u16 reg;
7260
7261 reg = vsi->rx_rings[i]->reg_idx;
7262 val = rd32(hw, QINT_RQCTL(reg));
7263 val &= ~QINT_RQCTL_CAUSE_ENA_M;
7264 wr32(hw, QINT_RQCTL(reg), val);
7265 }
7266 }
7267 }
7268
7269 /* disable each interrupt */
7270 ice_for_each_q_vector(vsi, i) {
7271 if (!vsi->q_vectors[i])
7272 continue;
7273 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
7274 }
7275
7276 ice_flush(hw);
7277
7278 /* don't call synchronize_irq() for VF's from the host */
7279 if (vsi->type == ICE_VSI_VF)
7280 return;
7281
7282 ice_for_each_q_vector(vsi, i)
7283 synchronize_irq(vsi->q_vectors[i]->irq.virq);
7284 }
7285
7286 /**
7287 * ice_down - Shutdown the connection
7288 * @vsi: The VSI being stopped
7289 *
7290 * Caller of this function is expected to set the vsi->state ICE_VSI_DOWN bit
7291 */
7292 int ice_down(struct ice_vsi *vsi)
7293 {
7294 int i, tx_err, rx_err, vlan_err = 0;
7295
7296 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
7297
7298 if (vsi->netdev) {
7299 vlan_err = ice_vsi_del_vlan_zero(vsi);
7300 ice_ptp_link_change(vsi->back, false);
7301 netif_carrier_off(vsi->netdev);
7302 netif_tx_disable(vsi->netdev);
7303 }
7304
7305 ice_vsi_dis_irq(vsi);
7306
7307 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
7308 if (tx_err)
7309 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
7310 vsi->vsi_num, tx_err);
7311 if (!tx_err && vsi->xdp_rings) {
7312 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
7313 if (tx_err)
7314 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
7315 vsi->vsi_num, tx_err);
7316 }
7317
7318 rx_err = ice_vsi_stop_all_rx_rings(vsi);
7319 if (rx_err)
7320 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
7321 vsi->vsi_num, rx_err);
7322
7323 ice_napi_disable_all(vsi);
7324
7325 ice_for_each_txq(vsi, i)
7326 ice_clean_tx_ring(vsi->tx_rings[i]);
7327
7328 if (vsi->xdp_rings)
7329 ice_for_each_xdp_txq(vsi, i)
7330 ice_clean_tx_ring(vsi->xdp_rings[i]);
7331
7332 ice_for_each_rxq(vsi, i)
7333 ice_clean_rx_ring(vsi->rx_rings[i]);
7334
7335 if (tx_err || rx_err || vlan_err) {
7336 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
7337 vsi->vsi_num, vsi->vsw->sw_id);
7338 return -EIO;
7339 }
7340
7341 return 0;
7342 }
7343
7344 /**
7345 * ice_down_up - shutdown the VSI connection and bring it up
7346 * @vsi: the VSI to be reconnected
7347 */
7348 int ice_down_up(struct ice_vsi *vsi)
7349 {
7350 int ret;
7351
7352 /* if DOWN already set, nothing to do */
7353 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
7354 return 0;
7355
7356 ret = ice_down(vsi);
7357 if (ret)
7358 return ret;
7359
7360 ret = ice_up(vsi);
7361 if (ret) {
7362 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
7363 return ret;
7364 }
7365
7366 return 0;
7367 }
7368
7369 /**
7370 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
7371 * @vsi: VSI having resources allocated
7372 *
7373 * Return 0 on success, negative on failure
7374 */
7375 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
7376 {
7377 int i, err = 0;
7378
7379 if (!vsi->num_txq) {
7380 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
7381 vsi->vsi_num);
7382 return -EINVAL;
7383 }
7384
7385 ice_for_each_txq(vsi, i) {
7386 struct ice_tx_ring *ring = vsi->tx_rings[i];
7387
7388 if (!ring)
7389 return -EINVAL;
7390
7391 if (vsi->netdev)
7392 ring->netdev = vsi->netdev;
7393 err = ice_setup_tx_ring(ring);
7394 if (err)
7395 break;
7396 }
7397
7398 return err;
7399 }
7400
7401 /**
7402 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
7403 * @vsi: VSI having resources allocated
7404 *
7405 * Return 0 on success, negative on failure
7406 */
7407 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
7408 {
7409 int i, err = 0;
7410
7411 if (!vsi->num_rxq) {
7412 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
7413 vsi->vsi_num);
7414 return -EINVAL;
7415 }
7416
7417 ice_for_each_rxq(vsi, i) {
7418 struct ice_rx_ring *ring = vsi->rx_rings[i];
7419
7420 if (!ring)
7421 return -EINVAL;
7422
7423 if (vsi->netdev)
7424 ring->netdev = vsi->netdev;
7425 err = ice_setup_rx_ring(ring);
7426 if (err)
7427 break;
7428 }
7429
7430 return err;
7431 }
7432
7433 /**
7434 * ice_vsi_open_ctrl - open control VSI for use
7435 * @vsi: the VSI to open
7436 *
7437 * Initialization of the Control VSI
7438 *
7439 * Returns 0 on success, negative value on error
7440 */
7441 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
7442 {
7443 char int_name[ICE_INT_NAME_STR_LEN];
7444 struct ice_pf *pf = vsi->back;
7445 struct device *dev;
7446 int err;
7447
7448 dev = ice_pf_to_dev(pf);
7449 /* allocate descriptors */
7450 err = ice_vsi_setup_tx_rings(vsi);
7451 if (err)
7452 goto err_setup_tx;
7453
7454 err = ice_vsi_setup_rx_rings(vsi);
7455 if (err)
7456 goto err_setup_rx;
7457
7458 err = ice_vsi_cfg_lan(vsi);
7459 if (err)
7460 goto err_setup_rx;
7461
7462 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
7463 dev_driver_string(dev), dev_name(dev));
7464 err = ice_vsi_req_irq_msix(vsi, int_name);
7465 if (err)
7466 goto err_setup_rx;
7467
7468 ice_vsi_cfg_msix(vsi);
7469
7470 err = ice_vsi_start_all_rx_rings(vsi);
7471 if (err)
7472 goto err_up_complete;
7473
7474 clear_bit(ICE_VSI_DOWN, vsi->state);
7475 ice_vsi_ena_irq(vsi);
7476
7477 return 0;
7478
7479 err_up_complete:
7480 ice_down(vsi);
7481 err_setup_rx:
7482 ice_vsi_free_rx_rings(vsi);
7483 err_setup_tx:
7484 ice_vsi_free_tx_rings(vsi);
7485
7486 return err;
7487 }
7488
7489 /**
7490 * ice_vsi_open - Called when a network interface is made active
7491 * @vsi: the VSI to open
7492 *
7493 * Initialization of the VSI
7494 *
7495 * Returns 0 on success, negative value on error
7496 */
7497 int ice_vsi_open(struct ice_vsi *vsi)
7498 {
7499 char int_name[ICE_INT_NAME_STR_LEN];
7500 struct ice_pf *pf = vsi->back;
7501 int err;
7502
7503 /* allocate descriptors */
7504 err = ice_vsi_setup_tx_rings(vsi);
7505 if (err)
7506 goto err_setup_tx;
7507
7508 err = ice_vsi_setup_rx_rings(vsi);
7509 if (err)
7510 goto err_setup_rx;
7511
7512 err = ice_vsi_cfg_lan(vsi);
7513 if (err)
7514 goto err_setup_rx;
7515
7516 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7517 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7518 err = ice_vsi_req_irq_msix(vsi, int_name);
7519 if (err)
7520 goto err_setup_rx;
7521
7522 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7523
7524 if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) {
7525 /* Notify the stack of the actual queue counts. */
7526 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7527 if (err)
7528 goto err_set_qs;
7529
7530 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7531 if (err)
7532 goto err_set_qs;
7533
7534 ice_vsi_set_napi_queues(vsi);
7535 }
7536
7537 err = ice_up_complete(vsi);
7538 if (err)
7539 goto err_up_complete;
7540
7541 return 0;
7542
7543 err_up_complete:
7544 ice_down(vsi);
7545 err_set_qs:
7546 ice_vsi_free_irq(vsi);
7547 err_setup_rx:
7548 ice_vsi_free_rx_rings(vsi);
7549 err_setup_tx:
7550 ice_vsi_free_tx_rings(vsi);
7551
7552 return err;
7553 }
7554
7555 /**
7556 * ice_vsi_release_all - Delete all VSIs
7557 * @pf: PF from which all VSIs are being removed
7558 */
7559 static void ice_vsi_release_all(struct ice_pf *pf)
7560 {
7561 int err, i;
7562
7563 if (!pf->vsi)
7564 return;
7565
7566 ice_for_each_vsi(pf, i) {
7567 if (!pf->vsi[i])
7568 continue;
7569
7570 if (pf->vsi[i]->type == ICE_VSI_CHNL)
7571 continue;
7572
7573 err = ice_vsi_release(pf->vsi[i]);
7574 if (err)
7575 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7576 i, err, pf->vsi[i]->vsi_num);
7577 }
7578 }
7579
7580 /**
7581 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7582 * @pf: pointer to the PF instance
7583 * @type: VSI type to rebuild
7584 *
7585 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7586 */
7587 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7588 {
7589 struct device *dev = ice_pf_to_dev(pf);
7590 int i, err;
7591
7592 ice_for_each_vsi(pf, i) {
7593 struct ice_vsi *vsi = pf->vsi[i];
7594
7595 if (!vsi || vsi->type != type)
7596 continue;
7597
7598 /* rebuild the VSI */
7599 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7600 if (err) {
7601 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7602 err, vsi->idx, ice_vsi_type_str(type));
7603 return err;
7604 }
7605
7606 /* replay filters for the VSI */
7607 err = ice_replay_vsi(&pf->hw, vsi->idx);
7608 if (err) {
7609 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7610 err, vsi->idx, ice_vsi_type_str(type));
7611 return err;
7612 }
7613
7614 /* Re-map HW VSI number, using VSI handle that has been
7615 * previously validated in ice_replay_vsi() call above
7616 */
7617 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7618
7619 /* enable the VSI */
7620 err = ice_ena_vsi(vsi, false);
7621 if (err) {
7622 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7623 err, vsi->idx, ice_vsi_type_str(type));
7624 return err;
7625 }
7626
7627 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7628 ice_vsi_type_str(type));
7629 }
7630
7631 return 0;
7632 }
7633
7634 /**
7635 * ice_update_pf_netdev_link - Update PF netdev link status
7636 * @pf: pointer to the PF instance
7637 */
7638 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7639 {
7640 bool link_up;
7641 int i;
7642
7643 ice_for_each_vsi(pf, i) {
7644 struct ice_vsi *vsi = pf->vsi[i];
7645
7646 if (!vsi || vsi->type != ICE_VSI_PF)
7647 return;
7648
7649 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7650 if (link_up) {
7651 netif_carrier_on(pf->vsi[i]->netdev);
7652 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7653 } else {
7654 netif_carrier_off(pf->vsi[i]->netdev);
7655 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7656 }
7657 }
7658 }
7659
7660 /**
7661 * ice_rebuild - rebuild after reset
7662 * @pf: PF to rebuild
7663 * @reset_type: type of reset
7664 *
7665 * Do not rebuild VF VSI in this flow because that is already handled via
7666 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
7667 * PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we don't want
7668 * to reset/rebuild all the VF VSIs twice.
7669 */
7670 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7671 {
7672 struct ice_vsi *vsi = ice_get_main_vsi(pf);
7673 struct device *dev = ice_pf_to_dev(pf);
7674 struct ice_hw *hw = &pf->hw;
7675 bool dvm;
7676 int err;
7677
7678 if (test_bit(ICE_DOWN, pf->state))
7679 goto clear_recovery;
7680
7681 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7682
7683 #define ICE_EMP_RESET_SLEEP_MS 5000
7684 if (reset_type == ICE_RESET_EMPR) {
7685 /* If an EMP reset has occurred, any previously pending flash
7686 * update will have completed. We no longer know whether or
7687 * not the NVM update EMP reset is restricted.
7688 */
7689 pf->fw_emp_reset_disabled = false;
7690
7691 msleep(ICE_EMP_RESET_SLEEP_MS);
7692 }
7693
7694 err = ice_init_all_ctrlq(hw);
7695 if (err) {
7696 dev_err(dev, "control queues init failed %d\n", err);
7697 goto err_init_ctrlq;
7698 }
7699
7700 /* if DDP was previously loaded successfully */
7701 if (!ice_is_safe_mode(pf)) {
7702 /* reload the SW DB of filter tables */
7703 if (reset_type == ICE_RESET_PFR)
7704 ice_fill_blk_tbls(hw);
7705 else
7706 /* Reload DDP Package after CORER/GLOBR reset */
7707 ice_load_pkg(NULL, pf);
7708 }
7709
7710 err = ice_clear_pf_cfg(hw);
7711 if (err) {
7712 dev_err(dev, "clear PF configuration failed %d\n", err);
7713 goto err_init_ctrlq;
7714 }
7715
7716 ice_clear_pxe_mode(hw);
7717
7718 err = ice_init_nvm(hw);
7719 if (err) {
7720 dev_err(dev, "ice_init_nvm failed %d\n", err);
7721 goto err_init_ctrlq;
7722 }
7723
7724 err = ice_get_caps(hw);
7725 if (err) {
7726 dev_err(dev, "ice_get_caps failed %d\n", err);
7727 goto err_init_ctrlq;
7728 }
7729
7730 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7731 if (err) {
7732 dev_err(dev, "set_mac_cfg failed %d\n", err);
7733 goto err_init_ctrlq;
7734 }
7735
7736 dvm = ice_is_dvm_ena(hw);
7737
7738 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7739 if (err)
7740 goto err_init_ctrlq;
7741
7742 err = ice_sched_init_port(hw->port_info);
7743 if (err)
7744 goto err_sched_init_port;
7745
7746 /* start misc vector */
7747 err = ice_req_irq_msix_misc(pf);
7748 if (err) {
7749 dev_err(dev, "misc vector setup failed: %d\n", err);
7750 goto err_sched_init_port;
7751 }
7752
7753 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7754 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7755 if (!rd32(hw, PFQF_FD_SIZE)) {
7756 u16 unused, guar, b_effort;
7757
7758 guar = hw->func_caps.fd_fltr_guar;
7759 b_effort = hw->func_caps.fd_fltr_best_effort;
7760
7761 /* force guaranteed filter pool for PF */
7762 ice_alloc_fd_guar_item(hw, &unused, guar);
7763 /* force shared filter pool for PF */
7764 ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7765 }
7766 }
7767
7768 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7769 ice_dcb_rebuild(pf);
7770
7771 /* If the PF previously had enabled PTP, PTP init needs to happen before
7772 * the VSI rebuild. If not, this causes the PTP link status events to
7773 * fail.
7774 */
7775 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7776 ice_ptp_rebuild(pf, reset_type);
7777
7778 if (ice_is_feature_supported(pf, ICE_F_GNSS))
7779 ice_gnss_init(pf);
7780
7781 /* rebuild PF VSI */
7782 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7783 if (err) {
7784 dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7785 goto err_vsi_rebuild;
7786 }
7787
7788 if (reset_type == ICE_RESET_PFR) {
7789 err = ice_rebuild_channels(pf);
7790 if (err) {
7791 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7792 err);
7793 goto err_vsi_rebuild;
7794 }
7795 }
7796
7797 /* If Flow Director is active */
7798 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7799 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7800 if (err) {
7801 dev_err(dev, "control VSI rebuild failed: %d\n", err);
7802 goto err_vsi_rebuild;
7803 }
7804
7805 /* replay HW Flow Director recipes */
7806 if (hw->fdir_prof)
7807 ice_fdir_replay_flows(hw);
7808
7809 /* replay Flow Director filters */
7810 ice_fdir_replay_fltrs(pf);
7811
7812 ice_rebuild_arfs(pf);
7813 }
7814
7815 if (vsi && vsi->netdev)
7816 netif_device_attach(vsi->netdev);
7817
7818 ice_update_pf_netdev_link(pf);
7819
7820 /* tell the firmware we are up */
7821 err = ice_send_version(pf);
7822 if (err) {
7823 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7824 err);
7825 goto err_vsi_rebuild;
7826 }
7827
7828 ice_replay_post(hw);
7829
7830 /* if we get here, reset flow is successful */
7831 clear_bit(ICE_RESET_FAILED, pf->state);
7832
7833 ice_health_clear(pf);
7834
7835 ice_plug_aux_dev(pf);
7836 if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
7837 ice_lag_rebuild(pf);
7838
7839 /* Restore timestamp mode settings after VSI rebuild */
7840 ice_ptp_restore_timestamp_mode(pf);
7841 return;
7842
7843 err_vsi_rebuild:
7844 err_sched_init_port:
7845 ice_sched_cleanup_all(hw);
7846 err_init_ctrlq:
7847 ice_shutdown_all_ctrlq(hw, false);
7848 set_bit(ICE_RESET_FAILED, pf->state);
7849 clear_recovery:
7850 /* set this bit in PF state to control service task scheduling */
7851 set_bit(ICE_NEEDS_RESTART, pf->state);
7852 dev_err(dev, "Rebuild failed, unload and reload driver\n");
7853 }
7854
7855 /**
7856 * ice_change_mtu - NDO callback to change the MTU
7857 * @netdev: network interface device structure
7858 * @new_mtu: new value for maximum frame size
7859 *
7860 * Returns 0 on success, negative on failure
7861 */
7862 int ice_change_mtu(struct net_device *netdev, int new_mtu)
7863 {
7864 struct ice_netdev_priv *np = netdev_priv(netdev);
7865 struct ice_vsi *vsi = np->vsi;
7866 struct ice_pf *pf = vsi->back;
7867 struct bpf_prog *prog;
7868 u8 count = 0;
7869 int err = 0;
7870
7871 if (new_mtu == (int)netdev->mtu) {
7872 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7873 return 0;
7874 }
7875
7876 prog = vsi->xdp_prog;
7877 if (prog && !prog->aux->xdp_has_frags) {
7878 int frame_size = ice_max_xdp_frame_size(vsi);
7879
7880 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7881 netdev_err(netdev, "max MTU for XDP usage is %d\n",
7882 frame_size - ICE_ETH_PKT_HDR_PAD);
7883 return -EINVAL;
7884 }
7885 } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7886 if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
7887 netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
7888 ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
7889 return -EINVAL;
7890 }
7891 }
7892
7893 /* if a reset is in progress, wait for some time for it to complete */
7894 do {
7895 if (ice_is_reset_in_progress(pf->state)) {
7896 count++;
7897 usleep_range(1000, 2000);
7898 } else {
7899 break;
7900 }
7901
7902 } while (count < 100);
7903
7904 if (count == 100) {
7905 netdev_err(netdev, "can't change MTU. Device is busy\n");
7906 return -EBUSY;
7907 }
7908
7909 WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu);
7910 err = ice_down_up(vsi);
7911 if (err)
7912 return err;
7913
7914 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7915 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7916
7917 return err;
7918 }
7919
7920 /**
7921 * ice_set_rss_lut - Set RSS LUT
7922 * @vsi: Pointer to VSI structure
7923 * @lut: Lookup table
7924 * @lut_size: Lookup table size
7925 *
7926 * Returns 0 on success, negative on failure
7927 */
7928 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7929 {
7930 struct ice_aq_get_set_rss_lut_params params = {};
7931 struct ice_hw *hw = &vsi->back->hw;
7932 int status;
7933
7934 if (!lut)
7935 return -EINVAL;
7936
7937 params.vsi_handle = vsi->idx;
7938 params.lut_size = lut_size;
7939 params.lut_type = vsi->rss_lut_type;
7940 params.lut = lut;
7941
7942 status = ice_aq_set_rss_lut(hw, &params);
7943 if (status)
7944 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7945 status, libie_aq_str(hw->adminq.sq_last_status));
7946
7947 return status;
7948 }
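/* Hypothetical caller sketch (compiled out): allocate a LUT, spread the
 * first vsi->rss_size queues across it round-robin, and program it via
 * ice_set_rss_lut(). The helper name and fill policy are illustrative
 * assumptions, not driver code.
 */
#if 0
static int example_program_default_lut(struct ice_vsi *vsi)
{
	int err, i;
	u8 *lut;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	for (i = 0; i < vsi->rss_table_size; i++)
		lut[i] = i % vsi->rss_size;	/* even queue spread */

	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	kfree(lut);
	return err;
}
#endif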
7949
7950 /**
7951 * ice_set_rss_key - Set RSS key
7952 * @vsi: Pointer to the VSI structure
7953 * @seed: RSS hash seed
7954 *
7955 * Returns 0 on success, negative on failure
7956 */
7957 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7958 {
7959 struct ice_hw *hw = &vsi->back->hw;
7960 int status;
7961
7962 if (!seed)
7963 return -EINVAL;
7964
7965 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7966 if (status)
7967 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7968 status, libie_aq_str(hw->adminq.sq_last_status));
7969
7970 return status;
7971 }
7972
7973 /**
7974 * ice_get_rss_lut - Get RSS LUT
7975 * @vsi: Pointer to VSI structure
7976 * @lut: Buffer to store the lookup table entries
7977 * @lut_size: Size of buffer to store the lookup table entries
7978 *
7979 * Returns 0 on success, negative on failure
7980 */
7981 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7982 {
7983 struct ice_aq_get_set_rss_lut_params params = {};
7984 struct ice_hw *hw = &vsi->back->hw;
7985 int status;
7986
7987 if (!lut)
7988 return -EINVAL;
7989
7990 params.vsi_handle = vsi->idx;
7991 params.lut_size = lut_size;
7992 params.lut_type = vsi->rss_lut_type;
7993 params.lut = lut;
7994
7995 status = ice_aq_get_rss_lut(hw, &params);
7996 if (status)
7997 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7998 status, libie_aq_str(hw->adminq.sq_last_status));
7999
8000 return status;
8001 }
8002
8003 /**
8004 * ice_get_rss_key - Get RSS key
8005 * @vsi: Pointer to VSI structure
8006 * @seed: Buffer to store the key in
8007 *
8008 * Returns 0 on success, negative on failure
8009 */
8010 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
8011 {
8012 struct ice_hw *hw = &vsi->back->hw;
8013 int status;
8014
8015 if (!seed)
8016 return -EINVAL;
8017
8018 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
8019 if (status)
8020 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
8021 status, libie_aq_str(hw->adminq.sq_last_status));
8022
8023 return status;
8024 }
8025
8026 /**
8027 * ice_set_rss_hfunc - Set RSS HASH function
8028 * @vsi: Pointer to VSI structure
8029 * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*)
8030 *
8031 * Returns 0 on success, negative on failure
8032 */
8033 int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc)
8034 {
8035 struct ice_hw *hw = &vsi->back->hw;
8036 struct ice_vsi_ctx *ctx;
8037 bool symm;
8038 int err;
8039
8040 if (hfunc == vsi->rss_hfunc)
8041 return 0;
8042
8043 if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ &&
8044 hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
8045 return -EOPNOTSUPP;
8046
8047 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8048 if (!ctx)
8049 return -ENOMEM;
8050
8051 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
8052 ctx->info.q_opt_rss = vsi->info.q_opt_rss;
8053 ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
8054 ctx->info.q_opt_rss |=
8055 FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc);
8056 ctx->info.q_opt_tc = vsi->info.q_opt_tc;
8057 ctx->info.q_opt_flags = vsi->info.q_opt_rss;
8058
8059 err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
8060 if (err) {
8061 dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n",
8062 vsi->vsi_num, err);
8063 } else {
8064 vsi->info.q_opt_rss = ctx->info.q_opt_rss;
8065 vsi->rss_hfunc = hfunc;
8066 netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n",
8067 hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ?
8068 "Symmetric " : "");
8069 }
8070 kfree(ctx);
8071 if (err)
8072 return err;
8073
8074 /* Fix the symmetry setting for all existing RSS configurations */
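/* Symmetric Toeplitz hashes a flow and its reversed tuple to the same
 * value, so both directions of a connection land on the same Rx queue.
 */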
8075 symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
8076 return ice_set_rss_cfg_symm(hw, vsi, symm);
8077 }
8078
8079 /**
8080 * ice_bridge_getlink - Get the hardware bridge mode
8081 * @skb: skb buff
8082 * @pid: process ID
8083 * @seq: RTNL message seq
8084 * @dev: the netdev being configured
8085 * @filter_mask: filter mask passed in
8086 * @nlflags: netlink flags passed in
8087 *
8088 * Return the bridge mode (VEB/VEPA)
8089 */
8090 static int
8091 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8092 struct net_device *dev, u32 filter_mask, int nlflags)
8093 {
8094 struct ice_netdev_priv *np = netdev_priv(dev);
8095 struct ice_vsi *vsi = np->vsi;
8096 struct ice_pf *pf = vsi->back;
8097 u16 bmode;
8098
8099 bmode = pf->first_sw->bridge_mode;
8100
8101 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
8102 filter_mask, NULL);
8103 }
8104
8105 /**
8106 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
8107 * @vsi: Pointer to VSI structure
8108 * @bmode: Hardware bridge mode (VEB/VEPA)
8109 *
8110 * Returns 0 on success, negative on failure
8111 */
8112 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
8113 {
8114 struct ice_aqc_vsi_props *vsi_props;
8115 struct ice_hw *hw = &vsi->back->hw;
8116 struct ice_vsi_ctx *ctxt;
8117 int ret;
8118
8119 vsi_props = &vsi->info;
8120
8121 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
8122 if (!ctxt)
8123 return -ENOMEM;
8124
8125 ctxt->info = vsi->info;
8126
8127 if (bmode == BRIDGE_MODE_VEB)
8128 /* change from VEPA to VEB mode */
8129 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
8130 else
8131 /* change from VEB to VEPA mode */
8132 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
8133 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
8134
8135 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
8136 if (ret) {
8137 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
8138 bmode, ret, libie_aq_str(hw->adminq.sq_last_status));
8139 goto out;
8140 }
8141 /* Update sw flags for bookkeeping */
8142 vsi_props->sw_flags = ctxt->info.sw_flags;
8143
8144 out:
8145 kfree(ctxt);
8146 return ret;
8147 }
8148
8149 /**
8150 * ice_bridge_setlink - Set the hardware bridge mode
8151 * @dev: the netdev being configured
8152 * @nlh: RTNL message
8153 * @flags: bridge setlink flags
8154 * @extack: netlink extended ack
8155 *
8156 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
8157 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
8158 * not already set) for all VSIs connected to this switch, and also updates the
8159 * unicast switch filter rules for the corresponding switch of the netdev.
8160 */
8161 static int
8162 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
8163 u16 __always_unused flags,
8164 struct netlink_ext_ack __always_unused *extack)
8165 {
8166 struct ice_netdev_priv *np = netdev_priv(dev);
8167 struct ice_pf *pf = np->vsi->back;
8168 struct nlattr *attr, *br_spec;
8169 struct ice_hw *hw = &pf->hw;
8170 struct ice_sw *pf_sw;
8171 int rem, v, err = 0;
8172
8173 pf_sw = pf->first_sw;
8174 /* find the attribute in the netlink message */
8175 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8176 if (!br_spec)
8177 return -EINVAL;
8178
8179 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
8180 __u16 mode = nla_get_u16(attr);
8181
8182 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
8183 return -EINVAL;
8184 /* Continue if bridge mode is not being flipped */
8185 if (mode == pf_sw->bridge_mode)
8186 continue;
8187 /* Iterates through the PF VSI list and update the loopback
8188 * mode of the VSI
8189 */
8190 ice_for_each_vsi(pf, v) {
8191 if (!pf->vsi[v])
8192 continue;
8193 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
8194 if (err)
8195 return err;
8196 }
8197
8198 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
8199 /* Update the unicast switch filter rules for the corresponding
8200 * switch of the netdev
8201 */
8202 err = ice_update_sw_rule_bridge_mode(hw);
8203 if (err) {
8204 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
8205 mode, err,
8206 libie_aq_str(hw->adminq.sq_last_status));
8207 /* revert hw->evb_veb */
8208 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
8209 return err;
8210 }
8211
8212 pf_sw->bridge_mode = mode;
8213 }
8214
8215 return 0;
8216 }
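/* For reference: userspace typically toggles this through iproute2,
 * e.g. "bridge link set dev <pf-netdev> hwmode vepa" (illustrative
 * invocation; see the bridge(8) man page for the exact syntax).
 */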
8217
8218 /**
8219 * ice_tx_timeout - Respond to a Tx Hang
8220 * @netdev: network interface device structure
8221 * @txqueue: Tx queue
8222 */
8223 void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
8224 {
8225 struct ice_netdev_priv *np = netdev_priv(netdev);
8226 struct ice_tx_ring *tx_ring = NULL;
8227 struct ice_vsi *vsi = np->vsi;
8228 struct ice_pf *pf = vsi->back;
8229 u32 i;
8230
8231 pf->tx_timeout_count++;
8232
8233 /* Check if PFC is enabled for the TC to which the queue belongs.
8234 * If yes, the Tx timeout is not caused by a hung queue and there
8235 * is no need to reset and rebuild
8236 */
8237 if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
8238 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
8239 txqueue);
8240 return;
8241 }
8242
8243 /* now that we have an index, find the tx_ring struct */
8244 ice_for_each_txq(vsi, i)
8245 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
8246 if (txqueue == vsi->tx_rings[i]->q_index) {
8247 tx_ring = vsi->tx_rings[i];
8248 break;
8249 }
8250
8251 /* Reset recovery level if enough time has elapsed after last timeout.
8252 * Also ensure no new reset action happens before next timeout period.
8253 */
8254 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
8255 pf->tx_timeout_recovery_level = 1;
8256 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
8257 netdev->watchdog_timeo)))
8258 return;
8259
8260 if (tx_ring) {
8261 struct ice_hw *hw = &pf->hw;
8262 u32 head, intr = 0;
8263
8264 head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
8265 rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
8266 /* Read interrupt register */
8267 intr = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
8268
8269 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
8270 vsi->vsi_num, txqueue, tx_ring->next_to_clean,
8271 head, tx_ring->next_to_use, intr);
8272
8273 ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr);
8274 }
8275
8276 pf->tx_timeout_last_recovery = jiffies;
8277 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
8278 pf->tx_timeout_recovery_level, txqueue);
8279
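/* Escalate one level per timeout: level 1 requests a PF reset,
 * level 2 a core reset, level 3 a global reset; anything beyond
 * that leaves the device down as unrecoverable.
 */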
8280 switch (pf->tx_timeout_recovery_level) {
8281 case 1:
8282 set_bit(ICE_PFR_REQ, pf->state);
8283 break;
8284 case 2:
8285 set_bit(ICE_CORER_REQ, pf->state);
8286 break;
8287 case 3:
8288 set_bit(ICE_GLOBR_REQ, pf->state);
8289 break;
8290 default:
8291 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
8292 set_bit(ICE_DOWN, pf->state);
8293 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
8294 set_bit(ICE_SERVICE_DIS, pf->state);
8295 break;
8296 }
8297
8298 ice_service_task_schedule(pf);
8299 pf->tx_timeout_recovery_level++;
8300 }
8301
8302 /**
8303 * ice_setup_tc_cls_flower - flower classifier offloads
8304 * @np: net device to configure
8305 * @filter_dev: device on which filter is added
8306 * @cls_flower: offload data
8307 * @ingress: if the rule is added to an ingress block
8308 *
8309 * Return: 0 if the flower was successfully added or deleted,
8310 * negative error code otherwise.
8311 */
8312 static int
8313 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
8314 struct net_device *filter_dev,
8315 struct flow_cls_offload *cls_flower,
8316 bool ingress)
8317 {
8318 struct ice_vsi *vsi = np->vsi;
8319
8320 if (cls_flower->common.chain_index)
8321 return -EOPNOTSUPP;
8322
8323 switch (cls_flower->command) {
8324 case FLOW_CLS_REPLACE:
8325 return ice_add_cls_flower(filter_dev, vsi, cls_flower, ingress);
8326 case FLOW_CLS_DESTROY:
8327 return ice_del_cls_flower(vsi, cls_flower);
8328 default:
8329 return -EINVAL;
8330 }
8331 }
8332
8333 /**
8334 * ice_setup_tc_block_cb_ingress - callback handler for ingress TC block
8335 * @type: TC SETUP type
8336 * @type_data: TC flower offload data that contains user input
8337 * @cb_priv: netdev private data
8338 *
8339 * Return: 0 if the setup was successful, negative error code otherwise.
8340 */
8341 static int
8342 ice_setup_tc_block_cb_ingress(enum tc_setup_type type, void *type_data,
8343 void *cb_priv)
8344 {
8345 struct ice_netdev_priv *np = cb_priv;
8346
8347 switch (type) {
8348 case TC_SETUP_CLSFLOWER:
8349 return ice_setup_tc_cls_flower(np, np->vsi->netdev,
8350 type_data, true);
8351 default:
8352 return -EOPNOTSUPP;
8353 }
8354 }
8355
8356 /**
8357 * ice_setup_tc_block_cb_egress - callback handler for egress TC block
8358 * @type: TC SETUP type
8359 * @type_data: TC flower offload data that contains user input
8360 * @cb_priv: netdev private data
8361 *
8362 * Return: 0 if the setup was successful, negative error code otherwise.
8363 */
8364 static int
8365 ice_setup_tc_block_cb_egress(enum tc_setup_type type, void *type_data,
8366 void *cb_priv)
8367 {
8368 struct ice_netdev_priv *np = cb_priv;
8369
8370 switch (type) {
8371 case TC_SETUP_CLSFLOWER:
8372 return ice_setup_tc_cls_flower(np, np->vsi->netdev,
8373 type_data, false);
8374 default:
8375 return -EOPNOTSUPP;
8376 }
8377 }
8378
8379 /**
8380 * ice_validate_mqprio_qopt - Validate TCF input parameters
8381 * @vsi: Pointer to VSI
8382 * @mqprio_qopt: input parameters for mqprio queue configuration
8383 *
8384 * This function validates MQPRIO params, such as qcount (a power of 2
8385 * wherever needed), and makes sure the user doesn't specify a qcount or BW
8386 * rate limit for more TCs than "num_tc"
8387 */
8388 static int
8389 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
8390 struct tc_mqprio_qopt_offload *mqprio_qopt)
8391 {
8392 int non_power_of_2_qcount = 0;
8393 struct ice_pf *pf = vsi->back;
8394 int max_rss_q_cnt = 0;
8395 u64 sum_min_rate = 0;
8396 struct device *dev;
8397 int i, speed;
8398 u8 num_tc;
8399
8400 if (vsi->type != ICE_VSI_PF)
8401 return -EINVAL;
8402
8403 if (mqprio_qopt->qopt.offset[0] != 0 ||
8404 mqprio_qopt->qopt.num_tc < 1 ||
8405 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
8406 return -EINVAL;
8407
8408 dev = ice_pf_to_dev(pf);
8409 vsi->ch_rss_size = 0;
8410 num_tc = mqprio_qopt->qopt.num_tc;
8411 speed = ice_get_link_speed_kbps(vsi);
8412
8413 for (i = 0; num_tc; i++) {
8414 int qcount = mqprio_qopt->qopt.count[i];
8415 u64 max_rate, min_rate, rem;
8416
8417 if (!qcount)
8418 return -EINVAL;
8419
8420 if (is_power_of_2(qcount)) {
8421 if (non_power_of_2_qcount &&
8422 qcount > non_power_of_2_qcount) {
8423 dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
8424 qcount, non_power_of_2_qcount);
8425 return -EINVAL;
8426 }
8427 if (qcount > max_rss_q_cnt)
8428 max_rss_q_cnt = qcount;
8429 } else {
8430 if (non_power_of_2_qcount &&
8431 qcount != non_power_of_2_qcount) {
8432 dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
8433 qcount, non_power_of_2_qcount);
8434 return -EINVAL;
8435 }
8436 if (qcount < max_rss_q_cnt) {
8437 dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
8438 qcount, max_rss_q_cnt);
8439 return -EINVAL;
8440 }
8441 max_rss_q_cnt = qcount;
8442 non_power_of_2_qcount = qcount;
8443 }
8444
8445 /* TC command takes input in K/N/Gbps or K/M/Gbit etc but
8446 * converts the bandwidth rate limit into Bytes/s when
8447 * passing it down to the driver. So convert input bandwidth
8448 * from Bytes/s to Kbps
8449 */
8450 max_rate = mqprio_qopt->max_rate[i];
8451 max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
8452
8453 /* min_rate is minimum guaranteed rate and it can't be zero */
8454 min_rate = mqprio_qopt->min_rate[i];
8455 min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
8456 sum_min_rate += min_rate;
8457
8458 if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
8459 dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
8460 min_rate, ICE_MIN_BW_LIMIT);
8461 return -EINVAL;
8462 }
8463
8464 if (max_rate && max_rate > speed) {
8465 dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
8466 i, max_rate, speed);
8467 return -EINVAL;
8468 }
8469
8470 iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
8471 if (rem) {
8472 dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
8473 i, ICE_MIN_BW_LIMIT);
8474 return -EINVAL;
8475 }
8476
8477 iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
8478 if (rem) {
8479 dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
8480 i, ICE_MIN_BW_LIMIT);
8481 return -EINVAL;
8482 }
8483
8484 /* min_rate can't be more than max_rate, except when max_rate
8485 * is zero (implies max_rate sought is max line rate). In such
8486 * a case min_rate can be more than max.
8487 */
8488 if (max_rate && min_rate > max_rate) {
8489 dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
8490 min_rate, max_rate);
8491 return -EINVAL;
8492 }
8493
8494 if (i >= mqprio_qopt->qopt.num_tc - 1)
8495 break;
8496 if (mqprio_qopt->qopt.offset[i + 1] !=
8497 (mqprio_qopt->qopt.offset[i] + qcount))
8498 return -EINVAL;
8499 }
8500 if (vsi->num_rxq <
8501 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8502 return -EINVAL;
8503 if (vsi->num_txq <
8504 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8505 return -EINVAL;
8506
8507 if (sum_min_rate && sum_min_rate > (u64)speed) {
8508 dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
8509 sum_min_rate, speed);
8510 return -EINVAL;
8511 }
8512
8513 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8514 vsi->ch_rss_size = max_rss_q_cnt;
8515
8516 return 0;
8517 }
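/* For reference, an (illustrative) tc invocation that exercises this
 * validation in channel mode; queue counts and rates are arbitrary:
 *   tc qdisc add dev <pf-netdev> root mqprio num_tc 2 \
 *      map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel \
 *      shaper bw_rlimit min_rate 1Gbit 2Gbit max_rate 4Gbit 8Gbit
 */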
8518
8519 /**
8520 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8521 * @pf: ptr to PF device
8522 * @vsi: ptr to VSI
8523 */
8524 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8525 {
8526 struct device *dev = ice_pf_to_dev(pf);
8527 bool added = false;
8528 struct ice_hw *hw;
8529 int flow;
8530
8531 if (!(vsi->num_gfltr || vsi->num_bfltr))
8532 return -EINVAL;
8533
8534 hw = &pf->hw;
8535 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8536 struct ice_fd_hw_prof *prof;
8537 int tun, status;
8538 u64 entry_h;
8539
8540 if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8541 hw->fdir_prof[flow]->cnt))
8542 continue;
8543
8544 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8545 enum ice_flow_priority prio;
8546
8547 /* add this VSI to FDir profile for this flow */
8548 prio = ICE_FLOW_PRIO_NORMAL;
8549 prof = hw->fdir_prof[flow];
8550 status = ice_flow_add_entry(hw, ICE_BLK_FD,
8551 prof->prof_id[tun],
8552 prof->vsi_h[0], vsi->idx,
8553 prio, prof->fdir_seg[tun],
8554 &entry_h);
8555 if (status) {
8556 dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8557 vsi->idx, flow);
8558 continue;
8559 }
8560
8561 prof->entry_h[prof->cnt][tun] = entry_h;
8562 }
8563
8564 /* store VSI for filter replay and delete */
8565 prof->vsi_h[prof->cnt] = vsi->idx;
8566 prof->cnt++;
8567
8568 added = true;
8569 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8570 flow);
8571 }
8572
8573 if (!added)
8574 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8575
8576 return 0;
8577 }
8578
8579 /**
8580 * ice_add_channel - add a channel by adding VSI
8581 * @pf: ptr to PF device
8582 * @sw_id: underlying HW switching element ID
8583 * @ch: ptr to channel structure
8584 *
8585 * Add a channel (VSI) using add_vsi and queue_map
8586 */
8587 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8588 {
8589 struct device *dev = ice_pf_to_dev(pf);
8590 struct ice_vsi *vsi;
8591
8592 if (ch->type != ICE_VSI_CHNL) {
8593 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8594 return -EINVAL;
8595 }
8596
8597 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8598 if (!vsi || vsi->type != ICE_VSI_CHNL) {
8599 dev_err(dev, "create chnl VSI failure\n");
8600 return -EINVAL;
8601 }
8602
8603 ice_add_vsi_to_fdir(pf, vsi);
8604
8605 ch->sw_id = sw_id;
8606 ch->vsi_num = vsi->vsi_num;
8607 ch->info.mapping_flags = vsi->info.mapping_flags;
8608 ch->ch_vsi = vsi;
8609 /* set the back pointer of channel for newly created VSI */
8610 vsi->ch = ch;
8611
8612 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8613 sizeof(vsi->info.q_mapping));
8614 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8615 sizeof(vsi->info.tc_mapping));
8616
8617 return 0;
8618 }
8619
8620 /**
8621 * ice_chnl_cfg_res
8622 * @vsi: the VSI being setup
8623 * @ch: ptr to channel structure
8624 *
8625 * Configure channel specific resources such as rings, vector.
8626 */
8627 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8628 {
8629 int i;
8630
8631 for (i = 0; i < ch->num_txq; i++) {
8632 struct ice_q_vector *tx_q_vector, *rx_q_vector;
8633 struct ice_ring_container *rc;
8634 struct ice_tx_ring *tx_ring;
8635 struct ice_rx_ring *rx_ring;
8636
8637 tx_ring = vsi->tx_rings[ch->base_q + i];
8638 rx_ring = vsi->rx_rings[ch->base_q + i];
8639 if (!tx_ring || !rx_ring)
8640 continue;
8641
8642 /* setup ring being channel enabled */
8643 tx_ring->ch = ch;
8644 rx_ring->ch = ch;
8645
8646 /* following code block sets up vector specific attributes */
8647 tx_q_vector = tx_ring->q_vector;
8648 rx_q_vector = rx_ring->q_vector;
8649 if (!tx_q_vector && !rx_q_vector)
8650 continue;
8651
8652 if (tx_q_vector) {
8653 tx_q_vector->ch = ch;
8654 /* setup Tx and Rx ITR setting if DIM is off */
8655 rc = &tx_q_vector->tx;
8656 if (!ITR_IS_DYNAMIC(rc))
8657 ice_write_itr(rc, rc->itr_setting);
8658 }
8659 if (rx_q_vector) {
8660 rx_q_vector->ch = ch;
8661 /* setup Tx and Rx ITR setting if DIM is off */
8662 rc = &rx_q_vector->rx;
8663 if (!ITR_IS_DYNAMIC(rc))
8664 ice_write_itr(rc, rc->itr_setting);
8665 }
8666 }
8667
8668 /* it is safe to assume that, if the channel has a non-zero num_txq or
8669 * num_rxq, the GLINT_ITR register has been written to perform an
8670 * in-context update, hence perform a flush
8671 */
8672 if (ch->num_txq || ch->num_rxq)
8673 ice_flush(&vsi->back->hw);
8674 }
8675
8676 /**
8677 * ice_cfg_chnl_all_res - configure channel resources
8678 * @vsi: ptr to the main VSI
8679 * @ch: ptr to channel structure
8680 *
8681 * This function configures channel specific resources such as flow-director
8682 * counter index, and other resources such as queues, vectors, ITR settings
8683 */
8684 static void
8685 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8686 {
8687 /* configure channel (aka ADQ) resources such as queues, vectors,
8688 * ITR settings for channel specific vectors and anything else
8689 */
8690 ice_chnl_cfg_res(vsi, ch);
8691 }
8692
8693 /**
8694 * ice_setup_hw_channel - setup new channel
8695 * @pf: ptr to PF device
8696 * @vsi: the VSI being setup
8697 * @ch: ptr to channel structure
8698 * @sw_id: underlying HW switching element ID
8699 * @type: type of channel to be created (VMDq2/VF)
8700 *
8701 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8702 * and configures Tx rings accordingly
8703 */
8704 static int
8705 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8706 struct ice_channel *ch, u16 sw_id, u8 type)
8707 {
8708 struct device *dev = ice_pf_to_dev(pf);
8709 int ret;
8710
8711 ch->base_q = vsi->next_base_q;
8712 ch->type = type;
8713
8714 ret = ice_add_channel(pf, sw_id, ch);
8715 if (ret) {
8716 dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8717 return ret;
8718 }
8719
8720 /* configure/setup ADQ specific resources */
8721 ice_cfg_chnl_all_res(vsi, ch);
8722
8723 /* make sure to update the next_base_q so that subsequent channel's
8724 * (aka ADQ) VSI queue map is correct
8725 */
8726 vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8727 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8728 ch->num_rxq);
8729
8730 return 0;
8731 }
8732
8733 /**
8734 * ice_setup_channel - setup new channel using uplink element
8735 * @pf: ptr to PF device
8736 * @vsi: the VSI being setup
8737 * @ch: ptr to channel structure
8738 *
8739 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8740 * and uplink switching element
8741 */
8742 static bool
8743 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8744 struct ice_channel *ch)
8745 {
8746 struct device *dev = ice_pf_to_dev(pf);
8747 u16 sw_id;
8748 int ret;
8749
8750 if (vsi->type != ICE_VSI_PF) {
8751 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8752 return false;
8753 }
8754
8755 sw_id = pf->first_sw->sw_id;
8756
8757 /* create channel (VSI) */
8758 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8759 if (ret) {
8760 dev_err(dev, "failed to setup hw_channel\n");
8761 return false;
8762 }
8763 dev_dbg(dev, "successfully created channel()\n");
8764
8765 return ch->ch_vsi ? true : false;
8766 }
8767
8768 /**
8769 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8770 * @vsi: VSI to be configured
8771 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8772 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8773 */
8774 static int
8775 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8776 {
8777 int err;
8778
8779 err = ice_set_min_bw_limit(vsi, min_tx_rate);
8780 if (err)
8781 return err;
8782
8783 return ice_set_max_bw_limit(vsi, max_tx_rate);
8784 }
8785
8786 /**
8787 * ice_create_q_channel - function to create channel
8788 * @vsi: VSI to be configured
8789 * @ch: ptr to channel (it contains channel specific params)
8790 *
8791 * This function creates channel (VSI) using num_queues specified by user,
8792 * reconfigs RSS if needed.
8793 */
8794 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8795 {
8796 struct ice_pf *pf = vsi->back;
8797 struct device *dev;
8798
8799 if (!ch)
8800 return -EINVAL;
8801
8802 dev = ice_pf_to_dev(pf);
8803 if (!ch->num_txq || !ch->num_rxq) {
8804 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8805 return -EINVAL;
8806 }
8807
8808 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8809 dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8810 vsi->cnt_q_avail, ch->num_txq);
8811 return -EINVAL;
8812 }
8813
8814 if (!ice_setup_channel(pf, vsi, ch)) {
8815 dev_info(dev, "Failed to setup channel\n");
8816 return -EINVAL;
8817 }
8818 /* configure BW rate limit */
8819 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8820 int ret;
8821
8822 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8823 ch->min_tx_rate);
8824 if (ret)
8825 dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8826 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8827 else
8828 dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8829 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8830 }
8831
8832 vsi->cnt_q_avail -= ch->num_txq;
8833
8834 return 0;
8835 }
8836
8837 /**
8838 * ice_rem_all_chnl_fltrs - removes all channel filters
8839 * @pf: ptr to PF, TC-flower based filter are tracked at PF level
8840 *
8841 * Remove all advanced switch filters, but only the channel-specific
8842 * tc-flower based filters
8843 */
8844 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8845 {
8846 struct ice_tc_flower_fltr *fltr;
8847 struct hlist_node *node;
8848
8849 /* to remove all channel filters, iterate an ordered list of filters */
8850 hlist_for_each_entry_safe(fltr, node,
8851 &pf->tc_flower_fltr_list,
8852 tc_flower_node) {
8853 struct ice_rule_query_data rule;
8854 int status;
8855
8856 /* for now process only channel specific filters */
8857 if (!ice_is_chnl_fltr(fltr))
8858 continue;
8859
8860 rule.rid = fltr->rid;
8861 rule.rule_id = fltr->rule_id;
8862 rule.vsi_handle = fltr->dest_vsi_handle;
8863 status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8864 if (status) {
8865 if (status == -ENOENT)
8866 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8867 rule.rule_id);
8868 else
8869 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8870 status);
8871 } else if (fltr->dest_vsi) {
8872 /* update advanced switch filter count */
8873 if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8874 u32 flags = fltr->flags;
8875
8876 fltr->dest_vsi->num_chnl_fltr--;
8877 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8878 ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8879 pf->num_dmac_chnl_fltrs--;
8880 }
8881 }
8882
8883 hlist_del(&fltr->tc_flower_node);
8884 kfree(fltr);
8885 }
8886 }
8887
8888 /**
8889 * ice_remove_q_channels - Remove queue channels for the TCs
8890 * @vsi: VSI to be configured
8891 * @rem_fltr: delete advanced switch filter or not
8892 *
8893 * Remove queue channels for the TCs
8894 */
8895 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8896 {
8897 struct ice_channel *ch, *ch_tmp;
8898 struct ice_pf *pf = vsi->back;
8899 int i;
8900
8901 /* remove all TC-flower based filters, but only channel-specific ones */
8902 if (rem_fltr)
8903 ice_rem_all_chnl_fltrs(pf);
8904
8905 /* remove ntuple filters since queue configuration is being changed */
8906 if (vsi->netdev->features & NETIF_F_NTUPLE) {
8907 struct ice_hw *hw = &pf->hw;
8908
8909 mutex_lock(&hw->fdir_fltr_lock);
8910 ice_fdir_del_all_fltrs(vsi);
8911 mutex_unlock(&hw->fdir_fltr_lock);
8912 }
8913
8914 /* perform cleanup for channels if they exist */
8915 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8916 struct ice_vsi *ch_vsi;
8917
8918 list_del(&ch->list);
8919 ch_vsi = ch->ch_vsi;
8920 if (!ch_vsi) {
8921 kfree(ch);
8922 continue;
8923 }
8924
8925 /* Reset queue contexts */
8926 for (i = 0; i < ch->num_rxq; i++) {
8927 struct ice_tx_ring *tx_ring;
8928 struct ice_rx_ring *rx_ring;
8929
8930 tx_ring = vsi->tx_rings[ch->base_q + i];
8931 rx_ring = vsi->rx_rings[ch->base_q + i];
8932 if (tx_ring) {
8933 tx_ring->ch = NULL;
8934 if (tx_ring->q_vector)
8935 tx_ring->q_vector->ch = NULL;
8936 }
8937 if (rx_ring) {
8938 rx_ring->ch = NULL;
8939 if (rx_ring->q_vector)
8940 rx_ring->q_vector->ch = NULL;
8941 }
8942 }
8943
8944 /* Release FD resources for the channel VSI */
8945 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8946
8947 /* clear the VSI from scheduler tree */
8948 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8949
8950 /* Delete VSI from FW, PF and HW VSI arrays */
8951 ice_vsi_delete(ch->ch_vsi);
8952
8953 /* free the channel */
8954 kfree(ch);
8955 }
8956
8957 /* clear the channel VSI map which is stored in main VSI */
8958 ice_for_each_chnl_tc(i)
8959 vsi->tc_map_vsi[i] = NULL;
8960
8961 /* reset main VSI's all TC information */
8962 vsi->all_enatc = 0;
8963 vsi->all_numtc = 0;
8964 }
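/* The call sites in this file illustrate the @rem_fltr contract:
 * ice_setup_tc_mqprio_qdisc() passes true when tearing channels down so
 * channel-specific TC-flower filters are deleted as well; the error unwind
 * in ice_create_q_channels() passes false because no filters have been
 * attached to the partially created channels yet; and ice_rebuild_channels()
 * passes false once a filter replay has already failed.
 */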
8965
8966 /**
8967 * ice_rebuild_channels - rebuild channel VSIs
8968 * @pf: ptr to PF
8969 *
8970 * Recreate channel VSIs and replay filters
8971 */
8972 static int ice_rebuild_channels(struct ice_pf *pf)
8973 {
8974 struct device *dev = ice_pf_to_dev(pf);
8975 struct ice_vsi *main_vsi;
8976 bool rem_adv_fltr = true;
8977 struct ice_channel *ch;
8978 struct ice_vsi *vsi;
8979 int tc_idx = 1;
8980 int i, err;
8981
8982 main_vsi = ice_get_main_vsi(pf);
8983 if (!main_vsi)
8984 return 0;
8985
8986 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8987 main_vsi->old_numtc == 1)
8988 return 0; /* nothing to be done */
8989
8990 /* reconfigure main VSI based on old value of TC and cached values
8991 * for MQPRIO opts
8992 */
8993 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8994 if (err) {
8995 dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8996 main_vsi->old_ena_tc, main_vsi->vsi_num);
8997 return err;
8998 }
8999
9000 /* rebuild ADQ VSIs */
9001 ice_for_each_vsi(pf, i) {
9002 enum ice_vsi_type type;
9003
9004 vsi = pf->vsi[i];
9005 if (!vsi || vsi->type != ICE_VSI_CHNL)
9006 continue;
9007
9008 type = vsi->type;
9009
9010 /* rebuild ADQ VSI */
9011 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
9012 if (err) {
9013 dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
9014 ice_vsi_type_str(type), vsi->idx, err);
9015 goto cleanup;
9016 }
9017
9018 /* Re-map HW VSI number, using the VSI handle that was
9019 * validated by the ice_vsi_rebuild() call above
9020 */
9021 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
9022
9023 /* replay filters for the VSI */
9024 err = ice_replay_vsi(&pf->hw, vsi->idx);
9025 if (err) {
9026 dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
9027 ice_vsi_type_str(type), err, vsi->idx);
9028 rem_adv_fltr = false;
9029 goto cleanup;
9030 }
9031 dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
9032 ice_vsi_type_str(type), vsi->idx);
9033
9034 /* store ADQ VSI at correct TC index in main VSI's
9035 * map of TC to VSI
9036 */
9037 main_vsi->tc_map_vsi[tc_idx++] = vsi;
9038 }
9039
9040 /* ADQ VSI(s) have been rebuilt successfully, so set up the
9041 * channel for the main VSI's Tx and Rx rings
9042 */
9043 list_for_each_entry(ch, &main_vsi->ch_list, list) {
9044 struct ice_vsi *ch_vsi;
9045
9046 ch_vsi = ch->ch_vsi;
9047 if (!ch_vsi)
9048 continue;
9049
9050 /* reconfig channel resources */
9051 ice_cfg_chnl_all_res(main_vsi, ch);
9052
9053 /* replay BW rate limit if it is non-zero */
9054 if (!ch->max_tx_rate && !ch->min_tx_rate)
9055 continue;
9056
9057 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
9058 ch->min_tx_rate);
9059 if (err)
9060 dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
9061 err, ch->max_tx_rate, ch->min_tx_rate,
9062 ch_vsi->vsi_num);
9063 else
9064 dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
9065 ch->max_tx_rate, ch->min_tx_rate,
9066 ch_vsi->vsi_num);
9067 }
9068
9069 /* reconfig RSS for main VSI */
9070 if (main_vsi->ch_rss_size)
9071 ice_vsi_cfg_rss_lut_key(main_vsi);
9072
9073 return 0;
9074
9075 cleanup:
9076 ice_remove_q_channels(main_vsi, rem_adv_fltr);
9077 return err;
9078 }
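/* Note on ordering, as derived from the flow above: the main VSI is
 * re-programmed for the old TC layout first, each ICE_VSI_CHNL VSI is then
 * rebuilt before its filters are replayed, and a replay failure clears
 * rem_adv_fltr so the cleanup path does not try to delete filters that were
 * never re-added to hardware.
 */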
9079
9080 /**
9081 * ice_create_q_channels - Add queue channel for the given TCs
9082 * @vsi: VSI to be configured
9083 *
9084 * Configures queue channel mapping to the given TCs
9085 */
9086 static int ice_create_q_channels(struct ice_vsi *vsi)
9087 {
9088 struct ice_pf *pf = vsi->back;
9089 struct ice_channel *ch;
9090 int ret = 0, i;
9091
9092 ice_for_each_chnl_tc(i) {
9093 if (!(vsi->all_enatc & BIT(i)))
9094 continue;
9095
9096 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
9097 if (!ch) {
9098 ret = -ENOMEM;
9099 goto err_free;
9100 }
9101 INIT_LIST_HEAD(&ch->list);
9102 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
9103 ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
9104 ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
9105 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
9106 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
9107
9108 /* mqprio rates are passed in bytes/s; convert to Kbit/s */
9109 if (ch->max_tx_rate)
9110 ch->max_tx_rate = div_u64(ch->max_tx_rate,
9111 ICE_BW_KBPS_DIVISOR);
9112 if (ch->min_tx_rate)
9113 ch->min_tx_rate = div_u64(ch->min_tx_rate,
9114 ICE_BW_KBPS_DIVISOR);
9115
9116 ret = ice_create_q_channel(vsi, ch);
9117 if (ret) {
9118 dev_err(ice_pf_to_dev(pf),
9119 "failed creating channel TC:%d\n", i);
9120 kfree(ch);
9121 goto err_free;
9122 }
9123 list_add_tail(&ch->list, &vsi->ch_list);
9124 vsi->tc_map_vsi[i] = ch->ch_vsi;
9125 dev_dbg(ice_pf_to_dev(pf),
9126 "successfully created channel: VSI %pK\n", ch->ch_vsi);
9127 }
9128 return 0;
9129
9130 err_free:
9131 ice_remove_q_channels(vsi, false);
9132
9133 return ret;
9134 }
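/* Worked example with hypothetical values: an mqprio config of num_tc 2,
 * queues "4@0 4@4" and a 500 Mbit/s max rate on TC1 reaches this loop with
 * qopt.count[1] = 4, qopt.offset[1] = 4 and max_rate[1] = 62500000 bytes/s,
 * so the TC1 channel gets num_txq = num_rxq = 4, base_q = 4 and, assuming
 * ICE_BW_KBPS_DIVISOR is 125 (bytes/s to Kbit/s), max_tx_rate =
 * 62500000 / 125 = 500000 Kbit/s.
 */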
9135
9136 /**
9137 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
9138 * @netdev: net device to configure
9139 * @type_data: TC offload data
9140 */
9141 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
9142 {
9143 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
9144 struct ice_netdev_priv *np = netdev_priv(netdev);
9145 struct ice_vsi *vsi = np->vsi;
9146 struct ice_pf *pf = vsi->back;
9147 u16 mode, ena_tc_qdisc = 0;
9148 int cur_txq, cur_rxq;
9149 u8 hw = 0, num_tcf;
9150 struct device *dev;
9151 int ret, i;
9152
9153 dev = ice_pf_to_dev(pf);
9154 num_tcf = mqprio_qopt->qopt.num_tc;
9155 hw = mqprio_qopt->qopt.hw;
9156 mode = mqprio_qopt->mode;
9157 if (!hw) {
9158 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
9159 vsi->ch_rss_size = 0;
9160 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
9161 goto config_tcf;
9162 }
9163
9164 /* Generate queue region map for number of TCF requested */
9165 for (i = 0; i < num_tcf; i++)
9166 ena_tc_qdisc |= BIT(i);
9167
9168 switch (mode) {
9169 case TC_MQPRIO_MODE_CHANNEL:
9170
9171 if (pf->hw.port_info->is_custom_tx_enabled) {
9172 dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
9173 return -EBUSY;
9174 }
9175 ice_tear_down_devlink_rate_tree(pf);
9176
9177 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
9178 if (ret) {
9179 netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
9180 ret);
9181 return ret;
9182 }
9183 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
9184 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
9185 /* don't assume the state of hw_tc_offload during driver load;
9186 * set the TC flower filter flag if hw_tc_offload is
9187 * already ON
9188 */
9189 if (vsi->netdev->features & NETIF_F_HW_TC)
9190 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
9191 break;
9192 default:
9193 return -EINVAL;
9194 }
9195
9196 config_tcf:
9197
9198 /* Requesting same TCF configuration as already enabled */
9199 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
9200 mode != TC_MQPRIO_MODE_CHANNEL)
9201 return 0;
9202
9203 /* Pause VSI queues */
9204 ice_dis_vsi(vsi, true);
9205
9206 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
9207 ice_remove_q_channels(vsi, true);
9208
9209 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9210 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
9211 num_online_cpus());
9212 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
9213 num_online_cpus());
9214 } else {
9215 /* logic to rebuild VSI, same as for ethtool -L */
9216 u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
9217
9218 for (i = 0; i < num_tcf; i++) {
9219 if (!(ena_tc_qdisc & BIT(i)))
9220 continue;
9221
9222 offset = vsi->mqprio_qopt.qopt.offset[i];
9223 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
9224 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
9225 }
9226 vsi->req_txq = offset + qcount_tx;
9227 vsi->req_rxq = offset + qcount_rx;
9228
9229 /* store away original rss_size info, so that it gets reused
9230 * from ice_vsi_rebuild during tc-qdisc delete stage - to
9231 * determine what the rss_size for the main VSI should be
9232 */
9233 vsi->orig_rss_size = vsi->rss_size;
9234 }
9235
9236 /* save current values of Tx and Rx queues before calling VSI rebuild
9237 * for fallback option
9238 */
9239 cur_txq = vsi->num_txq;
9240 cur_rxq = vsi->num_rxq;
9241
9242 /* proceed with rebuilding the main VSI using the correct number of queues */
9243 ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
9244 if (ret) {
9245 /* fallback to current number of queues */
9246 dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
9247 vsi->req_txq = cur_txq;
9248 vsi->req_rxq = cur_rxq;
9249 clear_bit(ICE_RESET_FAILED, pf->state);
9250 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
9251 dev_err(dev, "Rebuild of main VSI failed again\n");
9252 return ret;
9253 }
9254 }
9255
9256 vsi->all_numtc = num_tcf;
9257 vsi->all_enatc = ena_tc_qdisc;
9258 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
9259 if (ret) {
9260 netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
9261 vsi->vsi_num);
9262 goto exit;
9263 }
9264
9265 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9266 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
9267 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
9268
9269 /* set TC0 rate limit if specified */
9270 if (max_tx_rate || min_tx_rate) {
9271 /* convert to Kbits/s */
9272 if (max_tx_rate)
9273 max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
9274 if (min_tx_rate)
9275 min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
9276
9277 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
9278 if (!ret) {
9279 dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
9280 max_tx_rate, min_tx_rate, vsi->vsi_num);
9281 } else {
9282 dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
9283 max_tx_rate, min_tx_rate, vsi->vsi_num);
9284 goto exit;
9285 }
9286 }
9287 ret = ice_create_q_channels(vsi);
9288 if (ret) {
9289 netdev_err(netdev, "failed configuring queue channels\n");
9290 goto exit;
9291 } else {
9292 netdev_dbg(netdev, "successfully configured channels\n");
9293 }
9294 }
9295
9296 if (vsi->ch_rss_size)
9297 ice_vsi_cfg_rss_lut_key(vsi);
9298
9299 exit:
9300 /* if error, reset the all_numtc and all_enatc */
9301 if (ret) {
9302 vsi->all_numtc = 0;
9303 vsi->all_enatc = 0;
9304 }
9305 /* resume VSI */
9306 ice_ena_vsi(vsi, true);
9307
9308 return ret;
9309 }
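/* Example (assumed iproute2 syntax, not taken from this driver): a
 * channel-mode mqprio qdisc that exercises the path above might look like
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 \
 *		hw 1 mode channel shaper bw_rlimit max_rate 4Gbit 500Mbit
 *
 * where "hw 1" requests the offload, "mode channel" selects
 * TC_MQPRIO_MODE_CHANNEL, and the per-TC shaper rates arrive via
 * mqprio_qopt->max_rate.
 */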
9310
9311 static LIST_HEAD(ice_block_cb_list);
9312
9313 static int
9314 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
9315 void *type_data)
9316 {
9317 struct ice_netdev_priv *np = netdev_priv(netdev);
9318 enum flow_block_binder_type binder_type;
9319 struct iidc_rdma_core_dev_info *cdev;
9320 struct ice_pf *pf = np->vsi->back;
9321 flow_setup_cb_t *flower_handler;
9322 bool locked = false;
9323 int err;
9324
9325 switch (type) {
9326 case TC_SETUP_BLOCK:
9327 binder_type =
9328 ((struct flow_block_offload *)type_data)->binder_type;
9329
9330 switch (binder_type) {
9331 case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
9332 flower_handler = ice_setup_tc_block_cb_ingress;
9333 break;
9334 case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
9335 flower_handler = ice_setup_tc_block_cb_egress;
9336 break;
9337 default:
9338 return -EOPNOTSUPP;
9339 }
9340
9341 return flow_block_cb_setup_simple(type_data,
9342 &ice_block_cb_list,
9343 flower_handler,
9344 np, np, false);
9345 case TC_SETUP_QDISC_MQPRIO:
9346 if (ice_is_eswitch_mode_switchdev(pf)) {
9347 netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
9348 return -EOPNOTSUPP;
9349 }
9350
9351 cdev = pf->cdev_info;
9352 if (cdev && cdev->adev) {
9353 mutex_lock(&pf->adev_mutex);
9354 device_lock(&cdev->adev->dev);
9355 locked = true;
9356 if (cdev->adev->dev.driver) {
9357 netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
9358 err = -EBUSY;
9359 goto adev_unlock;
9360 }
9361 }
9362
9363 /* setup traffic classifier for receive side */
9364 mutex_lock(&pf->tc_mutex);
9365 err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
9366 mutex_unlock(&pf->tc_mutex);
9367
9368 adev_unlock:
9369 if (locked) {
9370 device_unlock(&cdev->adev->dev);
9371 mutex_unlock(&pf->adev_mutex);
9372 }
9373 return err;
9374 default:
9375 return -EOPNOTSUPP;
9376 }
9377 return -EOPNOTSUPP;
9378 }
9379
9380 static struct ice_indr_block_priv *
9381 ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
9382 struct net_device *netdev)
9383 {
9384 struct ice_indr_block_priv *cb_priv;
9385
9386 list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
9387 if (!cb_priv->netdev)
9388 return NULL;
9389 if (cb_priv->netdev == netdev)
9390 return cb_priv;
9391 }
9392 return NULL;
9393 }
9394
9395 static int
9396 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
9397 void *indr_priv)
9398 {
9399 struct ice_indr_block_priv *priv = indr_priv;
9400 struct ice_netdev_priv *np = priv->np;
9401
9402 switch (type) {
9403 case TC_SETUP_CLSFLOWER:
9404 return ice_setup_tc_cls_flower(np, priv->netdev,
9405 (struct flow_cls_offload *)
9406 type_data, false);
9407 default:
9408 return -EOPNOTSUPP;
9409 }
9410 }
9411
9412 static int
9413 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
9414 struct ice_netdev_priv *np,
9415 struct flow_block_offload *f, void *data,
9416 void (*cleanup)(struct flow_block_cb *block_cb))
9417 {
9418 struct ice_indr_block_priv *indr_priv;
9419 struct flow_block_cb *block_cb;
9420
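	/* Indirect blocks are accepted only from netdevs this PF can offload
	 * for: supported tunnel devices, or a VLAN stacked directly on this
	 * PF's own uplink netdev.
	 */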
9421 if (!ice_is_tunnel_supported(netdev) &&
9422 !(is_vlan_dev(netdev) &&
9423 vlan_dev_real_dev(netdev) == np->vsi->netdev))
9424 return -EOPNOTSUPP;
9425
9426 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9427 return -EOPNOTSUPP;
9428
9429 switch (f->command) {
9430 case FLOW_BLOCK_BIND:
9431 indr_priv = ice_indr_block_priv_lookup(np, netdev);
9432 if (indr_priv)
9433 return -EEXIST;
9434
9435 indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
9436 if (!indr_priv)
9437 return -ENOMEM;
9438
9439 indr_priv->netdev = netdev;
9440 indr_priv->np = np;
9441 list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
9442
9443 block_cb =
9444 flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
9445 indr_priv, indr_priv,
9446 ice_rep_indr_tc_block_unbind,
9447 f, netdev, sch, data, np,
9448 cleanup);
9449
9450 if (IS_ERR(block_cb)) {
9451 list_del(&indr_priv->list);
9452 kfree(indr_priv);
9453 return PTR_ERR(block_cb);
9454 }
9455 flow_block_cb_add(block_cb, f);
9456 list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
9457 break;
9458 case FLOW_BLOCK_UNBIND:
9459 indr_priv = ice_indr_block_priv_lookup(np, netdev);
9460 if (!indr_priv)
9461 return -ENOENT;
9462
9463 block_cb = flow_block_cb_lookup(f->block,
9464 ice_indr_setup_block_cb,
9465 indr_priv);
9466 if (!block_cb)
9467 return -ENOENT;
9468
9469 flow_indr_block_cb_remove(block_cb, f);
9470
9471 list_del(&block_cb->driver_list);
9472 break;
9473 default:
9474 return -EOPNOTSUPP;
9475 }
9476 return 0;
9477 }
9478
9479 static int
9480 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
9481 void *cb_priv, enum tc_setup_type type, void *type_data,
9482 void *data,
9483 void (*cleanup)(struct flow_block_cb *block_cb))
9484 {
9485 switch (type) {
9486 case TC_SETUP_BLOCK:
9487 return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
9488 data, cleanup);
9489
9490 default:
9491 return -EOPNOTSUPP;
9492 }
9493 }
9494
9495 /**
9496 * ice_open - Called when a network interface becomes active
9497 * @netdev: network interface device structure
9498 *
9499 * The open entry point is called when a network interface is made
9500 * active by the system (IFF_UP). At this point all resources needed
9501 * for transmit and receive operations are allocated, the interrupt
9502 * handler is registered with the OS, the netdev watchdog is enabled,
9503 * and the stack is notified that the interface is ready.
9504 *
9505 * Returns 0 on success, negative value on failure
9506 */
9507 int ice_open(struct net_device *netdev)
9508 {
9509 struct ice_netdev_priv *np = netdev_priv(netdev);
9510 struct ice_pf *pf = np->vsi->back;
9511
9512 if (ice_is_reset_in_progress(pf->state)) {
9513 netdev_err(netdev, "can't open net device while reset is in progress\n");
9514 return -EBUSY;
9515 }
9516
9517 return ice_open_internal(netdev);
9518 }
9519
9520 /**
9521 * ice_open_internal - Called when a network interface becomes active
9522 * @netdev: network interface device structure
9523 *
9524 * Internal ice_open implementation. Should not be called directly except
9525 * by ice_open and the reset handling routine
9526 *
9527 * Returns 0 on success, negative value on failure
9528 */
9529 int ice_open_internal(struct net_device *netdev)
9530 {
9531 struct ice_netdev_priv *np = netdev_priv(netdev);
9532 struct ice_vsi *vsi = np->vsi;
9533 struct ice_pf *pf = vsi->back;
9534 struct ice_port_info *pi;
9535 int err;
9536
9537 if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
9538 netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
9539 return -EIO;
9540 }
9541
9542 netif_carrier_off(netdev);
9543
9544 pi = vsi->port_info;
9545 err = ice_update_link_info(pi);
9546 if (err) {
9547 netdev_err(netdev, "Failed to get link info, error %d\n", err);
9548 return err;
9549 }
9550
9551 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
9552
9553 /* Set PHY if there is media, otherwise, turn off PHY */
9554 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
9555 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9556 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
9557 err = ice_init_phy_user_cfg(pi);
9558 if (err) {
9559 netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
9560 err);
9561 return err;
9562 }
9563 }
9564
9565 err = ice_configure_phy(vsi);
9566 if (err) {
9567 netdev_err(netdev, "Failed to set physical link up, error %d\n",
9568 err);
9569 return err;
9570 }
9571 } else {
9572 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9573 ice_set_link(vsi, false);
9574 }
9575
9576 err = ice_vsi_open(vsi);
9577 if (err)
9578 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
9579 vsi->vsi_num, vsi->vsw->sw_id);
9580
9581 /* Update existing tunnels information */
9582 udp_tunnel_get_rx_info(netdev);
9583
9584 return err;
9585 }
9586
9587 /**
9588 * ice_stop - Disables a network interface
9589 * @netdev: network interface device structure
9590 *
9591 * The stop entry point is called when an interface is de-activated by the OS,
9592 * and the netdevice enters the DOWN state. The hardware is still under the
9593 * driver's control, but the netdev interface is disabled.
9594 *
9595 * Returns success only - not allowed to fail
9596 */
9597 int ice_stop(struct net_device *netdev)
9598 {
9599 struct ice_netdev_priv *np = netdev_priv(netdev);
9600 struct ice_vsi *vsi = np->vsi;
9601 struct ice_pf *pf = vsi->back;
9602
9603 if (ice_is_reset_in_progress(pf->state)) {
9604 netdev_err(netdev, "can't stop net device while reset is in progress\n");
9605 return -EBUSY;
9606 }
9607
9608 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
9609 int link_err = ice_force_phys_link_state(vsi, false);
9610
9611 if (link_err) {
9612 if (link_err == -ENOMEDIUM)
9613 netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
9614 vsi->vsi_num);
9615 else
9616 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
9617 vsi->vsi_num, link_err);
9618
9619 ice_vsi_close(vsi);
9620 return -EIO;
9621 }
9622 }
9623
9624 ice_vsi_close(vsi);
9625
9626 return 0;
9627 }
9628
9629 /**
9630 * ice_features_check - Validate encapsulated packet conforms to limits
9631 * @skb: skb buffer
9632 * @netdev: This port's netdev
9633 * @features: Offload features that the stack believes apply
9634 */
9635 static netdev_features_t
9636 ice_features_check(struct sk_buff *skb,
9637 struct net_device __always_unused *netdev,
9638 netdev_features_t features)
9639 {
9640 bool gso = skb_is_gso(skb);
9641 size_t len;
9642
9643 /* No point in doing any of this if neither checksum nor GSO are
9644 * being requested for this frame. We can rule out both by just
9645 * checking for CHECKSUM_PARTIAL
9646 */
9647 if (skb->ip_summed != CHECKSUM_PARTIAL)
9648 return features;
9649
9650 /* We cannot support GSO if the MSS is going to be less than
9651 * 64 bytes. If it is then we need to drop support for GSO.
9652 */
9653 if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
9654 features &= ~NETIF_F_GSO_MASK;
9655
9656 len = skb_network_offset(skb);
9657 if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
9658 goto out_rm_features;
9659
9660 len = skb_network_header_len(skb);
9661 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9662 goto out_rm_features;
9663
9664 if (skb->encapsulation) {
9665 /* this must work for VXLAN frames AND IPIP/SIT frames, and in
9666 * the case of IPIP frames, the transport header pointer is
9667 * after the inner header! So check to make sure that this
9668 * is a GRE or UDP_TUNNEL frame before doing that math.
9669 */
9670 if (gso && (skb_shinfo(skb)->gso_type &
9671 (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
9672 len = skb_inner_network_header(skb) -
9673 skb_transport_header(skb);
9674 if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
9675 goto out_rm_features;
9676 }
9677
9678 len = skb_inner_network_header_len(skb);
9679 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9680 goto out_rm_features;
9681 }
9682
9683 return features;
9684 out_rm_features:
9685 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9686 }
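/* Worked example with a hypothetical VXLAN-encapsulated TCP frame: the outer
 * MAC header gives skb_network_offset() = 14 (even, within limits), the
 * outer IPv4 header gives skb_network_header_len() = 20, the gap from the
 * transport header (outer UDP) to the inner network header spans
 * UDP + VXLAN + inner MAC = 8 + 8 + 14 = 30 bytes, and the inner IPv4
 * header is 20 bytes - every length is even and in range, so the frame
 * keeps its checksum and GSO features.
 */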
9687
9688 static const struct net_device_ops ice_netdev_safe_mode_ops = {
9689 .ndo_open = ice_open,
9690 .ndo_stop = ice_stop,
9691 .ndo_start_xmit = ice_start_xmit,
9692 .ndo_set_mac_address = ice_set_mac_address,
9693 .ndo_validate_addr = eth_validate_addr,
9694 .ndo_change_mtu = ice_change_mtu,
9695 .ndo_get_stats64 = ice_get_stats64,
9696 .ndo_tx_timeout = ice_tx_timeout,
9697 .ndo_bpf = ice_xdp_safe_mode,
9698 };
9699
9700 static const struct net_device_ops ice_netdev_ops = {
9701 .ndo_open = ice_open,
9702 .ndo_stop = ice_stop,
9703 .ndo_start_xmit = ice_start_xmit,
9704 .ndo_select_queue = ice_select_queue,
9705 .ndo_features_check = ice_features_check,
9706 .ndo_fix_features = ice_fix_features,
9707 .ndo_set_rx_mode = ice_set_rx_mode,
9708 .ndo_set_mac_address = ice_set_mac_address,
9709 .ndo_validate_addr = eth_validate_addr,
9710 .ndo_change_mtu = ice_change_mtu,
9711 .ndo_get_stats64 = ice_get_stats64,
9712 .ndo_set_tx_maxrate = ice_set_tx_maxrate,
9713 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
9714 .ndo_set_vf_mac = ice_set_vf_mac,
9715 .ndo_get_vf_config = ice_get_vf_cfg,
9716 .ndo_set_vf_trust = ice_set_vf_trust,
9717 .ndo_set_vf_vlan = ice_set_vf_port_vlan,
9718 .ndo_set_vf_link_state = ice_set_vf_link_state,
9719 .ndo_get_vf_stats = ice_get_vf_stats,
9720 .ndo_set_vf_rate = ice_set_vf_bw,
9721 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
9722 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
9723 .ndo_setup_tc = ice_setup_tc,
9724 .ndo_set_features = ice_set_features,
9725 .ndo_bridge_getlink = ice_bridge_getlink,
9726 .ndo_bridge_setlink = ice_bridge_setlink,
9727 .ndo_fdb_add = ice_fdb_add,
9728 .ndo_fdb_del = ice_fdb_del,
9729 #ifdef CONFIG_RFS_ACCEL
9730 .ndo_rx_flow_steer = ice_rx_flow_steer,
9731 #endif
9732 .ndo_tx_timeout = ice_tx_timeout,
9733 .ndo_bpf = ice_xdp,
9734 .ndo_xdp_xmit = ice_xdp_xmit,
9735 .ndo_xsk_wakeup = ice_xsk_wakeup,
9736 .ndo_hwtstamp_get = ice_ptp_hwtstamp_get,
9737 .ndo_hwtstamp_set = ice_ptp_hwtstamp_set,
9738 };
9739