// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
#include "devlink/devlink_port.h"
#include "ice_sf_eth.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
#include <net/xdp_sock_drv.h>

#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH "intel/ice/ddp/"
#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"

MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);

/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}

static struct workqueue_struct *ice_wq;
struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(const struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

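	/* Account for ring wraparound: next_to_use may have wrapped past the
	 * end of the ring while next_to_clean has not caught up yet.
	 */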
	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

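	/* Locate the PF's main VSI; its Tx queues are the ones checked for
	 * stalls below.
	 */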
	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}
/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, the netdevice
 * will be unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for the given VSI
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

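	/* With non-zero VLANs configured, promiscuous rules are installed
	 * per VLAN, so include the VLAN promiscuous bits.
	 */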
	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;

	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return 0;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for the given VSI
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}

	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return status;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

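	/* Serialize with any other flow currently configuring this VSI */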
	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If the filter was added successfully or already exists, don't
	 * treat it as an error; continue processing the rest of the
	 * function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);

				/* promiscuous mode implies allmulticast so
				 * that VSIs that are in promiscuous mode are
				 * subscribed to multicast packets coming to
				 * the port
				 */
				err = ice_set_promisc(vsi,
						      ICE_MCAST_PROMISC_BITS);
				if (err)
					goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}

			/* disable allmulti here, but only if allmulti is not
			 * still enabled for the netdev
			 */
			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
				err = ice_clear_promisc(vsi,
							ICE_MCAST_PROMISC_BITS);
				if (err) {
					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
						   err, vsi->vsi_num);
				}
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

522
523 /**
524 * ice_clear_sw_switch_recipes - clear switch recipes
525 * @pf: board private structure
526 *
527 * Mark switch recipes as not created in sw structures. There are cases where
528 * rules (especially advanced rules) need to be restored, either re-read from
529 * hardware or added again. For example after the reset. 'recp_created' flag
530 * prevents from doing that and need to be cleared upfront.
531 */
ice_clear_sw_switch_recipes(struct ice_pf * pf)532 static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
533 {
534 struct ice_sw_recipe *recp;
535 u8 i;
536
537 recp = pf->hw.switch_info->recp_list;
538 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
539 recp[i].recp_created = false;
540 }

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	synchronize_irq(pf->oicr_irq.virq);

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on the safe side, reset orig_rss_size so that the normal
	 * flow of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* other reset types do not support channel rebuild,
			 * so reset the related info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}

	if (vsi->netdev)
		netif_device_detach(vsi->netdev);
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf, reset_type);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw, false);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
		reset_type = ICE_RESET_CORER;
	}

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. If the latter bit is set, prepare for
	 * the pending reset unless this PF is already prepared. For PF
	 * software-initiated global resets, ICE_PREPARED_FOR_RESET indicates
	 * the software is already prepared; for global resets initiated by
	 * firmware or by software on other PFs, that bit is not set, so
	 * prepare now. Then poll for reset completion, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state)) {
		reset_type = ICE_RESET_PFR;
		if (pf->lag && pf->lag->bonded) {
			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
			reset_type = ICE_RESET_CORER;
		}
	}
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel(R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: true if the link is now up, false if it is down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_200GB:
		speed = "200 G";
		break;
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false if autoneg is not
	 * supported
	 */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether the link is going up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message and continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Point buf at this TLV's payload; otherwise it would still
	 * reference the ETS REC TLV. Octet 0 advertises the PFC capability
	 * (8 TCs); the PFC enable octet (octet 1) is left as all zeros -
	 * PFC disabled.
	 */
	buf = tlv->tlvinfo;
	buf[0] = 0x08;
	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}

/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events;
	 * don't bail on failure, since other bookkeeping is still needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

/**
 * ice_get_fwlog_data - copy the FW log data from ARQ event
 * @pf: PF that the FW log event is associated with
 * @event: event structure containing FW log data
 */
static void
ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_fwlog_data *fwlog;
	struct ice_hw *hw = &pf->hw;

	fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];

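	/* Copy the event payload into the slot at the current ring tail */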
	memset(fwlog->data, 0, PAGE_SIZE);
	fwlog->data_size = le16_to_cpu(event->desc.datalen);

	memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
	ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);

	if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
		/* the rings are full so bump the head to create room */
		ice_fwlog_ring_increment(&hw->fwlog_ring.head,
					 hw->fwlog_ring.size);
	}
}

/**
 * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: intermediate helper storage and identifier for waiting
 * @opcode: the opcode to wait for
 *
 * Prepares to wait for a specific AdminQ completion event on the ARQ for
 * a given PF. The actual wait is done by a call to ice_aq_wait_for_event().
 *
 * The calls are separated so the caller can register for the event before
 * sending the command, which mitigates a race between registering and the
 * FW responding.
 *
 * To obtain only the descriptor contents, pass a task->event with a null
 * msg_buf. If the complete data buffer is desired, allocate the
 * task->event.msg_buf with enough space ahead of time.
 */
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode)
{
	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->state = ICE_AQ_TASK_WAITING;

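	/* Add the task to the wait list under the lock so a completion
	 * processed on another CPU can find it immediately.
	 */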
	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);
}

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: ptr prepared by ice_aq_prep_for_event()
 * @timeout: how long to wait, in jiffies
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout)
{
	enum ice_aq_task_state *state = &task->state;
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long start = jiffies;
	long ret;
	int err;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
					       *state != ICE_AQ_TASK_WAITING,
					       timeout);
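	/* A negative ret means the wait itself was interrupted; fold that
	 * into the error reported for whichever state the task ended in.
	 */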
	switch (*state) {
	case ICE_AQ_TASK_NOT_PREPARED:
		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
		err = -EINVAL;
		break;
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", *state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		task->opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);

	return err;
}

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_rq_event_info *task_ev;
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state != ICE_AQ_TASK_WAITING)
			continue;
		if (task->opcode != opcode)
			continue;

		task_ev = &task->event;
		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
		task_ev->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
			memcpy(task_ev->msg_buf, event->msg_buf,
			       event->buf_len);
			task_ev->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

#define ICE_MBX_OVERFLOW_WATERMARK 64

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

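	/* Process received events until the queue is drained or the
	 * ICE_DFLT_IRQ_WORK budget is exhausted.
	 */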
	do {
		struct ice_mbx_data data = {};
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			data.num_msg_proc = i;
			data.num_pending_arq = pending;
			data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
			data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;

			ice_vc_process_vf_msg(pf, &event, &data);
			break;
		case ice_aqc_opc_fw_logs_event:
			ice_get_fwlog_data(pf, &event);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

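	/* A nonzero return tells the caller the work budget was exhausted
	 * with events still pending, so the EVENT_PENDING bit must stay set.
	 */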
	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* if mac_type is not generic, sideband is not supported
	 * and there's nothing to do here
	 */
	if (!ice_is_generic_mac(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for the suspend and resume flows (e.g. the WoL
 * scenario).
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

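	/* Re-arm the timer for the next period, then kick the service task */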
1754 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1755 ice_service_task_schedule(pf);
1756 }
1757
1758 /**
1759 * ice_mdd_maybe_reset_vf - reset VF after MDD event
1760 * @pf: pointer to the PF structure
1761 * @vf: pointer to the VF structure
1762 * @reset_vf_tx: whether Tx MDD has occurred
1763 * @reset_vf_rx: whether Rx MDD has occurred
1764 *
1765 * Since the queue can get stuck on VF MDD events, the PF can be configured to
1766 * automatically reset the VF by enabling the private ethtool flag
1767 * mdd-auto-reset-vf.
1768 */
1769 static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf,
1770 bool reset_vf_tx, bool reset_vf_rx)
1771 {
1772 struct device *dev = ice_pf_to_dev(pf);
1773
1774 if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
1775 return;
1776
1777 /* VF MDD event counters will be cleared by reset, so print the event
1778 * prior to reset.
1779 */
1780 if (reset_vf_tx)
1781 ice_print_vf_tx_mdd_event(vf);
1782
1783 if (reset_vf_rx)
1784 ice_print_vf_rx_mdd_event(vf);
1785
1786 dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n",
1787 pf->hw.pf_id, vf->vf_id);
1788 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
1789 }
1790
1791 /**
1792 * ice_handle_mdd_event - handle malicious driver detect event
1793 * @pf: pointer to the PF structure
1794 *
1795 * Called from service task. OICR interrupt handler indicates MDD event.
1796 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1797 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1798 * disable the queue, the PF can be configured to reset the VF using ethtool
1799 * private flag mdd-auto-reset-vf.
1800 */
1801 static void ice_handle_mdd_event(struct ice_pf *pf)
1802 {
1803 struct device *dev = ice_pf_to_dev(pf);
1804 struct ice_hw *hw = &pf->hw;
1805 struct ice_vf *vf;
1806 unsigned int bkt;
1807 u32 reg;
1808
1809 if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1810 /* Since the VF MDD event logging is rate limited, check if
1811 * there are pending MDD events.
1812 */
1813 ice_print_vfs_mdd_events(pf);
1814 return;
1815 }
1816
1817 /* find what triggered an MDD event */
1818 reg = rd32(hw, GL_MDET_TX_PQM);
1819 if (reg & GL_MDET_TX_PQM_VALID_M) {
1820 u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
1821 u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
1822 u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
1823 u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);
1824
1825 if (netif_msg_tx_err(pf))
1826 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1827 event, queue, pf_num, vf_num);
1828 wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1829 }
1830
1831 reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
1832 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1833 u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg);
1834 u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg);
1835 u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg);
1836 u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg);
1837
1838 if (netif_msg_tx_err(pf))
1839 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1840 event, queue, pf_num, vf_num);
1841 wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
1842 }
1843
1844 reg = rd32(hw, GL_MDET_RX);
1845 if (reg & GL_MDET_RX_VALID_M) {
1846 u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg);
1847 u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg);
1848 u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg);
1849 u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg);
1850
1851 if (netif_msg_rx_err(pf))
1852 dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1853 event, queue, pf_num, vf_num);
1854 wr32(hw, GL_MDET_RX, 0xffffffff);
1855 }
1856
1857 /* check to see if this PF caused an MDD event */
1858 reg = rd32(hw, PF_MDET_TX_PQM);
1859 if (reg & PF_MDET_TX_PQM_VALID_M) {
1860 wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1861 if (netif_msg_tx_err(pf))
1862 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1863 }
1864
1865 reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw));
1866 if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1867 wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff);
1868 if (netif_msg_tx_err(pf))
1869 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1870 }
1871
1872 reg = rd32(hw, PF_MDET_RX);
1873 if (reg & PF_MDET_RX_VALID_M) {
1874 wr32(hw, PF_MDET_RX, 0xFFFF);
1875 if (netif_msg_rx_err(pf))
1876 dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1877 }
1878
1879 /* Check to see if one of the VFs caused an MDD event, and then
1880 * increment counters and set print pending
1881 */
1882 mutex_lock(&pf->vfs.table_lock);
1883 ice_for_each_vf(pf, bkt, vf) {
1884 bool reset_vf_tx = false, reset_vf_rx = false;
1885
1886 reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
1887 if (reg & VP_MDET_TX_PQM_VALID_M) {
1888 wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
1889 vf->mdd_tx_events.count++;
1890 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1891 if (netif_msg_tx_err(pf))
1892 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1893 vf->vf_id);
1894
1895 reset_vf_tx = true;
1896 }
1897
1898 reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
1899 if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1900 wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
1901 vf->mdd_tx_events.count++;
1902 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1903 if (netif_msg_tx_err(pf))
1904 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1905 vf->vf_id);
1906
1907 reset_vf_tx = true;
1908 }
1909
1910 reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
1911 if (reg & VP_MDET_TX_TDPU_VALID_M) {
1912 wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
1913 vf->mdd_tx_events.count++;
1914 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1915 if (netif_msg_tx_err(pf))
1916 dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1917 vf->vf_id);
1918
1919 reset_vf_tx = true;
1920 }
1921
1922 reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1923 if (reg & VP_MDET_RX_VALID_M) {
1924 wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1925 vf->mdd_rx_events.count++;
1926 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1927 if (netif_msg_rx_err(pf))
1928 dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1929 vf->vf_id);
1930
1931 reset_vf_rx = true;
1932 }
1933
1934 if (reset_vf_tx || reset_vf_rx)
1935 ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx,
1936 reset_vf_rx);
1937 }
1938 mutex_unlock(&pf->vfs.table_lock);
1939
1940 ice_print_vfs_mdd_events(pf);
1941 }
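/* FIELD_GET() (<linux/bitfield.h>) used above extracts a register field by
 * its mask and shifts it down automatically; for example,
 * FIELD_GET(GENMASK(15, 8), 0xABCD) evaluates to 0xAB.
 */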
1942
1943 /**
1944 * ice_force_phys_link_state - Force the physical link state
1945 * @vsi: VSI to force the physical link state to up/down
1946 * @link_up: true/false indicates to set the physical link to up/down
1947 *
1948 * Force the physical link state by getting the current PHY capabilities from
1949 * hardware and setting the PHY config based on the determined capabilities. If
1950 * the link changes, a link event will be triggered because both the Enable Automatic
1951 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1952 *
1953 * Returns 0 on success, negative on failure
1954 */
1955 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1956 {
1957 struct ice_aqc_get_phy_caps_data *pcaps;
1958 struct ice_aqc_set_phy_cfg_data *cfg;
1959 struct ice_port_info *pi;
1960 struct device *dev;
1961 int retcode;
1962
1963 if (!vsi || !vsi->port_info || !vsi->back)
1964 return -EINVAL;
1965 if (vsi->type != ICE_VSI_PF)
1966 return 0;
1967
1968 dev = ice_pf_to_dev(vsi->back);
1969
1970 pi = vsi->port_info;
1971
1972 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1973 if (!pcaps)
1974 return -ENOMEM;
1975
1976 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1977 NULL);
1978 if (retcode) {
1979 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1980 vsi->vsi_num, retcode);
1981 retcode = -EIO;
1982 goto out;
1983 }
1984
1985 /* No change in link */
1986 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1987 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1988 goto out;
1989
1990 /* Use the current user PHY configuration. The current user PHY
1991 * configuration is initialized during probe from PHY capabilities
1992 * software mode, and updated on set PHY configuration.
1993 */
1994 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1995 if (!cfg) {
1996 retcode = -ENOMEM;
1997 goto out;
1998 }
1999
2000 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2001 if (link_up)
2002 cfg->caps |= ICE_AQ_PHY_ENA_LINK;
2003 else
2004 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
2005
2006 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
2007 if (retcode) {
2008 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2009 vsi->vsi_num, retcode);
2010 retcode = -EIO;
2011 }
2012
2013 kfree(cfg);
2014 out:
2015 kfree(pcaps);
2016 return retcode;
2017 }
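/* Within this file, the link-down-on-close and total-port-shutdown paths use
 * this helper directly; for instance, ice_configure_phy() below short-circuits
 * to ice_force_phys_link_state(vsi, true) when
 * ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA is set.
 */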
2018
2019 /**
2020 * ice_init_nvm_phy_type - Initialize the NVM PHY type
2021 * @pi: port info structure
2022 *
2023 * Initialize nvm_phy_type_[low|high] for link lenient mode support
2024 */
2025 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
2026 {
2027 struct ice_aqc_get_phy_caps_data *pcaps;
2028 struct ice_pf *pf = pi->hw->back;
2029 int err;
2030
2031 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2032 if (!pcaps)
2033 return -ENOMEM;
2034
2035 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
2036 pcaps, NULL);
2037
2038 if (err) {
2039 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2040 goto out;
2041 }
2042
2043 pf->nvm_phy_type_hi = pcaps->phy_type_high;
2044 pf->nvm_phy_type_lo = pcaps->phy_type_low;
2045
2046 out:
2047 kfree(pcaps);
2048 return err;
2049 }
2050
2051 /**
2052 * ice_init_link_dflt_override - Initialize link default override
2053 * @pi: port info structure
2054 *
2055 * Initialize link default override and PHY total port shutdown during probe
2056 */
2057 static void ice_init_link_dflt_override(struct ice_port_info *pi)
2058 {
2059 struct ice_link_default_override_tlv *ldo;
2060 struct ice_pf *pf = pi->hw->back;
2061
2062 ldo = &pf->link_dflt_override;
2063 if (ice_get_link_default_override(ldo, pi))
2064 return;
2065
2066 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
2067 return;
2068
2069 /* Enable Total Port Shutdown (override/replace link-down-on-close
2070 * ethtool private flag) for ports with Port Disable bit set.
2071 */
2072 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
2073 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
2074 }
2075
2076 /**
2077 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2078 * @pi: port info structure
2079 *
2080 * If default override is enabled, initialize the user PHY cfg speed and FEC
2081 * settings using the default override mask from the NVM.
2082 *
2083 * The PHY should only be configured with the default override settings the
2084 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2085 * is used to indicate that the user PHY cfg default override is initialized
2086 * and the PHY has not been configured with the default override settings. The
2087 * state is set here, and cleared in ice_configure_phy the first time the PHY is
2088 * configured.
2089 *
2090 * This function should be called only if the FW doesn't support default
2091 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2092 */
2093 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2094 {
2095 struct ice_link_default_override_tlv *ldo;
2096 struct ice_aqc_set_phy_cfg_data *cfg;
2097 struct ice_phy_info *phy = &pi->phy;
2098 struct ice_pf *pf = pi->hw->back;
2099
2100 ldo = &pf->link_dflt_override;
2101
2102 /* If link default override is enabled, use it to mask the NVM PHY capabilities
2103 * for the speed and FEC default configuration.
2104 */
2105 cfg = &phy->curr_user_phy_cfg;
2106
2107 if (ldo->phy_type_low || ldo->phy_type_high) {
2108 cfg->phy_type_low = pf->nvm_phy_type_lo &
2109 cpu_to_le64(ldo->phy_type_low);
2110 cfg->phy_type_high = pf->nvm_phy_type_hi &
2111 cpu_to_le64(ldo->phy_type_high);
2112 }
2113 cfg->link_fec_opt = ldo->fec_options;
2114 phy->curr_user_fec_req = ICE_FEC_AUTO;
2115
2116 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2117 }
2118
2119 /**
2120 * ice_init_phy_user_cfg - Initialize the PHY user configuration
2121 * @pi: port info structure
2122 *
2123 * Initialize the current user PHY configuration, speed, FEC, and FC requested
2124 * mode to default. The PHY defaults come from the get PHY capabilities topology
2125 * with media, so call this only when media is first available. An error is returned if
2126 * called when media is not available. The PHY initialization completed state is
2127 * set here.
2128 *
2129 * These configurations are used when setting the PHY
2130 * configuration, and the user PHY configuration is updated on each set PHY
2131 * configuration call. Returns 0 on success, negative on failure
2132 */
2133 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2134 {
2135 struct ice_aqc_get_phy_caps_data *pcaps;
2136 struct ice_phy_info *phy = &pi->phy;
2137 struct ice_pf *pf = pi->hw->back;
2138 int err;
2139
2140 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2141 return -EIO;
2142
2143 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2144 if (!pcaps)
2145 return -ENOMEM;
2146
2147 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2148 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2149 pcaps, NULL);
2150 else
2151 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2152 pcaps, NULL);
2153 if (err) {
2154 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2155 goto err_out;
2156 }
2157
2158 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2159
2160 /* check if lenient mode is supported and enabled */
2161 if (ice_fw_supports_link_override(pi->hw) &&
2162 !(pcaps->module_compliance_enforcement &
2163 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2164 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2165
2166 /* if the FW supports default PHY configuration mode, then the driver
2167 * does not have to apply link override settings. If not,
2168 * initialize user PHY configuration with link override values
2169 */
2170 if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2171 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2172 ice_init_phy_cfg_dflt_override(pi);
2173 goto out;
2174 }
2175 }
2176
2177 /* if link default override is not enabled, set user flow control and
2178 * FEC settings based on what get_phy_caps returned
2179 */
2180 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2181 pcaps->link_fec_options);
2182 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2183
2184 out:
2185 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2186 set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2187 err_out:
2188 kfree(pcaps);
2189 return err;
2190 }
2191
2192 /**
2193 * ice_configure_phy - configure PHY
2194 * @vsi: VSI of PHY
2195 *
2196 * Set the PHY configuration. If the current PHY configuration is the same as
2197 * the curr_user_phy_cfg, then do nothing to avoid a link flap. Otherwise,
2198 * configure the PHY based on the get PHY capabilities for the topology with media.
2199 */
2200 static int ice_configure_phy(struct ice_vsi *vsi)
2201 {
2202 struct device *dev = ice_pf_to_dev(vsi->back);
2203 struct ice_port_info *pi = vsi->port_info;
2204 struct ice_aqc_get_phy_caps_data *pcaps;
2205 struct ice_aqc_set_phy_cfg_data *cfg;
2206 struct ice_phy_info *phy = &pi->phy;
2207 struct ice_pf *pf = vsi->back;
2208 int err;
2209
2210 /* Ensure we have media as we cannot configure a medialess port */
2211 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2212 return -ENOMEDIUM;
2213
2214 ice_print_topo_conflict(vsi);
2215
2216 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2217 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2218 return -EPERM;
2219
2220 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2221 return ice_force_phys_link_state(vsi, true);
2222
2223 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2224 if (!pcaps)
2225 return -ENOMEM;
2226
2227 /* Get current PHY config */
2228 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2229 NULL);
2230 if (err) {
2231 dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2232 vsi->vsi_num, err);
2233 goto done;
2234 }
2235
2236 /* If PHY enable link is configured and configuration has not changed,
2237 * there's nothing to do
2238 */
2239 if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2240 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2241 goto done;
2242
2243 /* Use PHY topology as baseline for configuration */
2244 memset(pcaps, 0, sizeof(*pcaps));
2245 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2246 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2247 pcaps, NULL);
2248 else
2249 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2250 pcaps, NULL);
2251 if (err) {
2252 dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2253 vsi->vsi_num, err);
2254 goto done;
2255 }
2256
2257 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2258 if (!cfg) {
2259 err = -ENOMEM;
2260 goto done;
2261 }
2262
2263 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2264
2265 /* Speed - If default override pending, use curr_user_phy_cfg set in
2266 * ice_init_phy_cfg_dflt_override().
2267 */
2268 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2269 vsi->back->state)) {
2270 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2271 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2272 } else {
2273 u64 phy_low = 0, phy_high = 0;
2274
2275 ice_update_phy_type(&phy_low, &phy_high,
2276 pi->phy.curr_user_speed_req);
2277 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2278 cfg->phy_type_high = pcaps->phy_type_high &
2279 cpu_to_le64(phy_high);
2280 }
2281
2282 /* Can't provide what was requested; use PHY capabilities */
2283 if (!cfg->phy_type_low && !cfg->phy_type_high) {
2284 cfg->phy_type_low = pcaps->phy_type_low;
2285 cfg->phy_type_high = pcaps->phy_type_high;
2286 }
2287
2288 /* FEC */
2289 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2290
2291 /* Can't provide what was requested; use PHY capabilities */
2292 if (cfg->link_fec_opt !=
2293 (cfg->link_fec_opt & pcaps->link_fec_options)) {
2294 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2295 cfg->link_fec_opt = pcaps->link_fec_options;
2296 }
2297
2298 /* Flow Control - always supported; no need to check against
2299 * capabilities
2300 */
2301 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2302
2303 /* Enable link and link update */
2304 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2305
2306 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2307 if (err)
2308 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2309 vsi->vsi_num, err);
2310
2311 kfree(cfg);
2312 done:
2313 kfree(pcaps);
2314 return err;
2315 }
2316
2317 /**
2318 * ice_check_media_subtask - Check for media
2319 * @pf: pointer to PF struct
2320 *
2321 * If media is available, then initialize the PHY user configuration if it has
2322 * not been initialized yet, and configure the PHY if the interface is up.
2323 */
2324 static void ice_check_media_subtask(struct ice_pf *pf)
2325 {
2326 struct ice_port_info *pi;
2327 struct ice_vsi *vsi;
2328 int err;
2329
2330 /* No need to check for media if it's already present */
2331 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2332 return;
2333
2334 vsi = ice_get_main_vsi(pf);
2335 if (!vsi)
2336 return;
2337
2338 /* Refresh link info and check if media is present */
2339 pi = vsi->port_info;
2340 err = ice_update_link_info(pi);
2341 if (err)
2342 return;
2343
2344 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2345
2346 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2347 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2348 ice_init_phy_user_cfg(pi);
2349
2350 /* PHY settings are reset on media insertion, reconfigure
2351 * PHY to preserve settings.
2352 */
2353 if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2354 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2355 return;
2356
2357 err = ice_configure_phy(vsi);
2358 if (!err)
2359 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2360
2361 /* A Link Status Event will be generated; the event handler
2362 * will complete bringing the interface up
2363 */
2364 }
2365 }
2366
2367 /**
2368 * ice_service_task - manage and run subtasks
2369 * @work: pointer to work_struct contained by the PF struct
2370 */
2371 static void ice_service_task(struct work_struct *work)
2372 {
2373 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2374 unsigned long start_time = jiffies;
2375
2376 /* subtasks */
2377
2378 /* process reset requests first */
2379 ice_reset_subtask(pf);
2380
2381 /* bail if a reset/recovery cycle is pending or rebuild failed */
2382 if (ice_is_reset_in_progress(pf->state) ||
2383 test_bit(ICE_SUSPENDED, pf->state) ||
2384 test_bit(ICE_NEEDS_RESTART, pf->state)) {
2385 ice_service_task_complete(pf);
2386 return;
2387 }
2388
2389 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2390 struct iidc_event *event;
2391
2392 event = kzalloc(sizeof(*event), GFP_KERNEL);
2393 if (event) {
2394 set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2395 /* report the entire OICR value to AUX driver */
2396 swap(event->reg, pf->oicr_err_reg);
2397 ice_send_event_to_aux(pf, event);
2398 kfree(event);
2399 }
2400 }
2401
2402 /* unplug the aux dev on request; if an unplug request came in
2403 * while a plug request was being processed, this will handle it
2404 */
2405 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2406 ice_unplug_aux_dev(pf);
2407
2408 /* Plug aux device per request */
2409 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2410 ice_plug_aux_dev(pf);
2411
2412 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2413 struct iidc_event *event;
2414
2415 event = kzalloc(sizeof(*event), GFP_KERNEL);
2416 if (event) {
2417 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2418 ice_send_event_to_aux(pf, event);
2419 kfree(event);
2420 }
2421 }
2422
2423 ice_clean_adminq_subtask(pf);
2424 ice_check_media_subtask(pf);
2425 ice_check_for_hang_subtask(pf);
2426 ice_sync_fltr_subtask(pf);
2427 ice_handle_mdd_event(pf);
2428 ice_watchdog_subtask(pf);
2429
2430 if (ice_is_safe_mode(pf)) {
2431 ice_service_task_complete(pf);
2432 return;
2433 }
2434
2435 ice_process_vflr_event(pf);
2436 ice_clean_mailboxq_subtask(pf);
2437 ice_clean_sbq_subtask(pf);
2438 ice_sync_arfs_fltrs(pf);
2439 ice_flush_fdir_ctx(pf);
2440
2441 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2442 ice_service_task_complete(pf);
2443
2444 /* If the tasks have taken longer than one service timer period
2445 * or there is more work to be done, reset the service timer to
2446 * schedule the service task now.
2447 */
2448 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2449 test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2450 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2451 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2452 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2453 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2454 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2455 mod_timer(&pf->serv_tmr, jiffies);
2456 }
2457
2458 /**
2459 * ice_set_ctrlq_len - helper function to set controlq length
2460 * @hw: pointer to the HW instance
2461 */
2462 static void ice_set_ctrlq_len(struct ice_hw *hw)
2463 {
2464 hw->adminq.num_rq_entries = ICE_AQ_LEN;
2465 hw->adminq.num_sq_entries = ICE_AQ_LEN;
2466 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2467 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2468 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2469 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2470 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2471 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2472 hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2473 hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2474 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2475 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2476 }
2477
2478 /**
2479 * ice_schedule_reset - schedule a reset
2480 * @pf: board private structure
2481 * @reset: reset being requested
2482 */
2483 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2484 {
2485 struct device *dev = ice_pf_to_dev(pf);
2486
2487 /* bail out if earlier reset has failed */
2488 if (test_bit(ICE_RESET_FAILED, pf->state)) {
2489 dev_dbg(dev, "earlier reset has failed\n");
2490 return -EIO;
2491 }
2492 /* bail if reset/recovery already in progress */
2493 if (ice_is_reset_in_progress(pf->state)) {
2494 dev_dbg(dev, "Reset already in progress\n");
2495 return -EBUSY;
2496 }
2497
2498 switch (reset) {
2499 case ICE_RESET_PFR:
2500 set_bit(ICE_PFR_REQ, pf->state);
2501 break;
2502 case ICE_RESET_CORER:
2503 set_bit(ICE_CORER_REQ, pf->state);
2504 break;
2505 case ICE_RESET_GLOBR:
2506 set_bit(ICE_GLOBR_REQ, pf->state);
2507 break;
2508 default:
2509 return -EINVAL;
2510 }
2511
2512 ice_service_task_schedule(pf);
2513 return 0;
2514 }
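/* A typical caller sketch: request the reset and let the service task drive
 * the actual reset/rebuild asynchronously:
 *
 *	err = ice_schedule_reset(pf, ICE_RESET_PFR);
 *	if (err)
 *		dev_dbg(ice_pf_to_dev(pf), "PFR not scheduled, error %d\n", err);
 */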
2515
2516 /**
2517 * ice_irq_affinity_notify - Callback for affinity changes
2518 * @notify: context as to what irq was changed
2519 * @mask: the new affinity mask
2520 *
2521 * This is a callback function used by the irq_set_affinity_notifier function
2522 * so that we may register to receive changes to the irq affinity masks.
2523 */
2524 static void
2525 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2526 const cpumask_t *mask)
2527 {
2528 struct ice_q_vector *q_vector =
2529 container_of(notify, struct ice_q_vector, affinity_notify);
2530
2531 cpumask_copy(&q_vector->affinity_mask, mask);
2532 }
2533
2534 /**
2535 * ice_irq_affinity_release - Callback for affinity notifier release
2536 * @ref: internal core kernel usage
2537 *
2538 * This is a callback function used by the irq_set_affinity_notifier function
2539 * to inform the current notification subscriber that they will no longer
2540 * receive notifications.
2541 */
2542 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2543
2544 /**
2545 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2546 * @vsi: the VSI being configured
2547 */
2548 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2549 {
2550 struct ice_hw *hw = &vsi->back->hw;
2551 int i;
2552
2553 ice_for_each_q_vector(vsi, i)
2554 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2555
2556 ice_flush(hw);
2557 return 0;
2558 }
2559
2560 /**
2561 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2562 * @vsi: the VSI being configured
2563 * @basename: name for the vector
2564 */
2565 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2566 {
2567 int q_vectors = vsi->num_q_vectors;
2568 struct ice_pf *pf = vsi->back;
2569 struct device *dev;
2570 int rx_int_idx = 0;
2571 int tx_int_idx = 0;
2572 int vector, err;
2573 int irq_num;
2574
2575 dev = ice_pf_to_dev(pf);
2576 for (vector = 0; vector < q_vectors; vector++) {
2577 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2578
2579 irq_num = q_vector->irq.virq;
2580
2581 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2582 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2583 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2584 tx_int_idx++;
2585 } else if (q_vector->rx.rx_ring) {
2586 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2587 "%s-%s-%d", basename, "rx", rx_int_idx++);
2588 } else if (q_vector->tx.tx_ring) {
2589 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2590 "%s-%s-%d", basename, "tx", tx_int_idx++);
2591 } else {
2592 /* skip this unused q_vector */
2593 continue;
2594 }
2595 if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2596 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2597 IRQF_SHARED, q_vector->name,
2598 q_vector);
2599 else
2600 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2601 0, q_vector->name, q_vector);
2602 if (err) {
2603 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2604 err);
2605 goto free_q_irqs;
2606 }
2607
2608 /* register for affinity change notifications */
2609 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2610 struct irq_affinity_notify *affinity_notify;
2611
2612 affinity_notify = &q_vector->affinity_notify;
2613 affinity_notify->notify = ice_irq_affinity_notify;
2614 affinity_notify->release = ice_irq_affinity_release;
2615 irq_set_affinity_notifier(irq_num, affinity_notify);
2616 }
2617
2618 /* assign the mask for this irq */
2619 irq_update_affinity_hint(irq_num, &q_vector->affinity_mask);
2620 }
2621
2622 err = ice_set_cpu_rx_rmap(vsi);
2623 if (err) {
2624 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2625 vsi->vsi_num, ERR_PTR(err));
2626 goto free_q_irqs;
2627 }
2628
2629 vsi->irqs_ready = true;
2630 return 0;
2631
2632 free_q_irqs:
2633 while (vector--) {
2634 irq_num = vsi->q_vectors[vector]->irq.virq;
2635 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2636 irq_set_affinity_notifier(irq_num, NULL);
2637 irq_update_affinity_hint(irq_num, NULL);
2638 devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2639 }
2640 return err;
2641 }
2642
2643 /**
2644 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2645 * @vsi: VSI to setup Tx rings used by XDP
2646 *
2647 * Return 0 on success and negative value on error
2648 */
2649 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2650 {
2651 struct device *dev = ice_pf_to_dev(vsi->back);
2652 struct ice_tx_desc *tx_desc;
2653 int i, j;
2654
2655 ice_for_each_xdp_txq(vsi, i) {
2656 u16 xdp_q_idx = vsi->alloc_txq + i;
2657 struct ice_ring_stats *ring_stats;
2658 struct ice_tx_ring *xdp_ring;
2659
2660 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2661 if (!xdp_ring)
2662 goto free_xdp_rings;
2663
2664 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2665 if (!ring_stats) {
2666 ice_free_tx_ring(xdp_ring);
2667 goto free_xdp_rings;
2668 }
2669
2670 xdp_ring->ring_stats = ring_stats;
2671 xdp_ring->q_index = xdp_q_idx;
2672 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2673 xdp_ring->vsi = vsi;
2674 xdp_ring->netdev = NULL;
2675 xdp_ring->dev = dev;
2676 xdp_ring->count = vsi->num_tx_desc;
2677 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2678 if (ice_setup_tx_ring(xdp_ring))
2679 goto free_xdp_rings;
2680 ice_set_ring_xdp(xdp_ring);
2681 spin_lock_init(&xdp_ring->tx_lock);
2682 for (j = 0; j < xdp_ring->count; j++) {
2683 tx_desc = ICE_TX_DESC(xdp_ring, j);
2684 tx_desc->cmd_type_offset_bsz = 0;
2685 }
2686 }
2687
2688 return 0;
2689
2690 free_xdp_rings:
2691 for (; i >= 0; i--) {
2692 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2693 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2694 vsi->xdp_rings[i]->ring_stats = NULL;
2695 ice_free_tx_ring(vsi->xdp_rings[i]);
2696 }
2697 }
2698 return -ENOMEM;
2699 }
2700
2701 /**
2702 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2703 * @vsi: VSI to set the bpf prog on
2704 * @prog: the bpf prog pointer
2705 */
2706 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2707 {
2708 struct bpf_prog *old_prog;
2709 int i;
2710
2711 old_prog = xchg(&vsi->xdp_prog, prog);
2712 ice_for_each_rxq(vsi, i)
2713 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2714
2715 if (old_prog)
2716 bpf_prog_put(old_prog);
2717 }
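/* The xchg() above swaps the program pointer atomically and WRITE_ONCE()
 * publishes it to each Rx ring; datapath readers are expected to sample it
 * once per cleaning cycle, roughly:
 *
 *	struct bpf_prog *xdp_prog = READ_ONCE(rx_ring->xdp_prog);
 *
 * so the swap needs no lock, and bpf_prog_put() drops the reference held on
 * the old program.
 */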
2718
2719 static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
2720 {
2721 struct ice_q_vector *q_vector;
2722 struct ice_tx_ring *ring;
2723
2724 if (static_key_enabled(&ice_xdp_locking_key))
2725 return vsi->xdp_rings[qid % vsi->num_xdp_txq];
2726
2727 q_vector = vsi->rx_rings[qid]->q_vector;
2728 ice_for_each_tx_ring(ring, q_vector->tx)
2729 if (ice_ring_is_xdp(ring))
2730 return ring;
2731
2732 return NULL;
2733 }
2734
2735 /**
2736 * ice_map_xdp_rings - Map XDP rings to interrupt vectors
2737 * @vsi: the VSI with XDP rings being configured
2738 *
2739 * Map XDP rings to interrupt vectors and perform the configuration steps
2740 * dependent on the mapping.
2741 */
2742 void ice_map_xdp_rings(struct ice_vsi *vsi)
2743 {
2744 int xdp_rings_rem = vsi->num_xdp_txq;
2745 int v_idx, q_idx;
2746
2747 /* follow the logic from ice_vsi_map_rings_to_vectors */
2748 ice_for_each_q_vector(vsi, v_idx) {
2749 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2750 int xdp_rings_per_v, q_id, q_base;
2751
2752 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2753 vsi->num_q_vectors - v_idx);
2754 q_base = vsi->num_xdp_txq - xdp_rings_rem;
2755
2756 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2757 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2758
2759 xdp_ring->q_vector = q_vector;
2760 xdp_ring->next = q_vector->tx.tx_ring;
2761 q_vector->tx.tx_ring = xdp_ring;
2762 }
2763 xdp_rings_rem -= xdp_rings_per_v;
2764 }
2765
2766 ice_for_each_rxq(vsi, q_idx) {
2767 vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
2768 q_idx);
2769 ice_tx_xsk_pool(vsi, q_idx);
2770 }
2771 }
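/* Worked example of the distribution above: with num_xdp_txq = 10 and
 * num_q_vectors = 4, DIV_ROUND_UP hands out 3, 3, 2, 2 rings to vectors
 * 0..3 (ceil(10/4), ceil(7/3), ceil(4/2), ceil(2/1)).
 */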
2772
2773 /**
2774 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2775 * @vsi: VSI to bring up Tx rings used by XDP
2776 * @prog: bpf program that will be assigned to VSI
2777 * @cfg_type: create from scratch or restore the existing configuration
2778 *
2779 * Return 0 on success and negative value on error
2780 */
2781 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
2782 enum ice_xdp_cfg cfg_type)
2783 {
2784 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2785 struct ice_pf *pf = vsi->back;
2786 struct ice_qs_cfg xdp_qs_cfg = {
2787 .qs_mutex = &pf->avail_q_mutex,
2788 .pf_map = pf->avail_txqs,
2789 .pf_map_size = pf->max_pf_txqs,
2790 .q_count = vsi->num_xdp_txq,
2791 .scatter_count = ICE_MAX_SCATTER_TXQS,
2792 .vsi_map = vsi->txq_map,
2793 .vsi_map_offset = vsi->alloc_txq,
2794 .mapping_mode = ICE_VSI_MAP_CONTIG
2795 };
2796 struct device *dev;
2797 int status, i;
2798
2799 dev = ice_pf_to_dev(pf);
2800 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2801 sizeof(*vsi->xdp_rings), GFP_KERNEL);
2802 if (!vsi->xdp_rings)
2803 return -ENOMEM;
2804
2805 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2806 if (__ice_vsi_get_qs(&xdp_qs_cfg))
2807 goto err_map_xdp;
2808
2809 if (static_key_enabled(&ice_xdp_locking_key))
2810 netdev_warn(vsi->netdev,
2811 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2812
2813 if (ice_xdp_alloc_setup_rings(vsi))
2814 goto clear_xdp_rings;
2815
2816 /* omit the scheduler update if in reset path; XDP queues will be
2817 * taken into account at the end of ice_vsi_rebuild, where
2818 * ice_cfg_vsi_lan is being called
2819 */
2820 if (cfg_type == ICE_XDP_CFG_PART)
2821 return 0;
2822
2823 ice_map_xdp_rings(vsi);
2824
2825 /* tell the Tx scheduler that right now we have
2826 * additional queues
2827 */
2828 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2829 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2830
2831 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2832 max_txqs);
2833 if (status) {
2834 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2835 status);
2836 goto clear_xdp_rings;
2837 }
2838
2839 /* assign the prog only when it's not already present on VSI;
2840 * this flow is reached from both the ethtool -L and ndo_bpf flows;
2841 * VSI rebuild that happens under ethtool -L can expose us to
2842 * the bpf_prog refcount issues as we would be swapping same
2843 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2844 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2845 * this is not harmful as dev_xdp_install bumps the refcount
2846 * before calling the op exposed by the driver;
2847 */
2848 if (!ice_is_xdp_ena_vsi(vsi))
2849 ice_vsi_assign_bpf_prog(vsi, prog);
2850
2851 return 0;
2852 clear_xdp_rings:
2853 ice_for_each_xdp_txq(vsi, i)
2854 if (vsi->xdp_rings[i]) {
2855 kfree_rcu(vsi->xdp_rings[i], rcu);
2856 vsi->xdp_rings[i] = NULL;
2857 }
2858
2859 err_map_xdp:
2860 mutex_lock(&pf->avail_q_mutex);
2861 ice_for_each_xdp_txq(vsi, i) {
2862 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2863 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2864 }
2865 mutex_unlock(&pf->avail_q_mutex);
2866
2867 devm_kfree(dev, vsi->xdp_rings);
2868 return -ENOMEM;
2869 }
2870
2871 /**
2872 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2873 * @vsi: VSI to remove XDP rings
2874 * @cfg_type: disable XDP permanently or allow it to be restored later
2875 *
2876 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2877 * resources
2878 */
2879 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
2880 {
2881 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2882 struct ice_pf *pf = vsi->back;
2883 int i, v_idx;
2884
2885 /* q_vectors are freed in reset path so there's no point in detaching
2886 * rings
2887 */
2888 if (cfg_type == ICE_XDP_CFG_PART)
2889 goto free_qmap;
2890
2891 ice_for_each_q_vector(vsi, v_idx) {
2892 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2893 struct ice_tx_ring *ring;
2894
2895 ice_for_each_tx_ring(ring, q_vector->tx)
2896 if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2897 break;
2898
2899 /* restore the value of last node prior to XDP setup */
2900 q_vector->tx.tx_ring = ring;
2901 }
2902
2903 free_qmap:
2904 mutex_lock(&pf->avail_q_mutex);
2905 ice_for_each_xdp_txq(vsi, i) {
2906 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2907 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2908 }
2909 mutex_unlock(&pf->avail_q_mutex);
2910
2911 ice_for_each_xdp_txq(vsi, i)
2912 if (vsi->xdp_rings[i]) {
2913 if (vsi->xdp_rings[i]->desc) {
2914 synchronize_rcu();
2915 ice_free_tx_ring(vsi->xdp_rings[i]);
2916 }
2917 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2918 vsi->xdp_rings[i]->ring_stats = NULL;
2919 kfree_rcu(vsi->xdp_rings[i], rcu);
2920 vsi->xdp_rings[i] = NULL;
2921 }
2922
2923 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2924 vsi->xdp_rings = NULL;
2925
2926 if (static_key_enabled(&ice_xdp_locking_key))
2927 static_branch_dec(&ice_xdp_locking_key);
2928
2929 if (cfg_type == ICE_XDP_CFG_PART)
2930 return 0;
2931
2932 ice_vsi_assign_bpf_prog(vsi, NULL);
2933
2934 /* notify Tx scheduler that we destroyed XDP queues and bring
2935 * back the old number of child nodes
2936 */
2937 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2938 max_txqs[i] = vsi->num_txq;
2939
2940 /* change number of XDP Tx queues to 0 */
2941 vsi->num_xdp_txq = 0;
2942
2943 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2944 max_txqs);
2945 }
2946
2947 /**
2948 * ice_vsi_rx_napi_schedule - Schedule NAPI on the Rx queues of the VSI
2949 * @vsi: VSI to schedule napi on
2950 */
2951 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2952 {
2953 int i;
2954
2955 ice_for_each_rxq(vsi, i) {
2956 struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2957
2958 if (READ_ONCE(rx_ring->xsk_pool))
2959 napi_schedule(&rx_ring->q_vector->napi);
2960 }
2961 }
2962
2963 /**
2964 * ice_vsi_determine_xdp_res - figure out how many Tx queues XDP can have
2965 * @vsi: VSI to determine the count of XDP Tx queues for
2966 *
2967 * Returns 0 if the available Tx queue count is at least half the CPU count,
2968 * -ENOMEM otherwise
2969 */
2970 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2971 {
2972 u16 avail = ice_get_avail_txq_count(vsi->back);
2973 u16 cpus = num_possible_cpus();
2974
2975 if (avail < cpus / 2)
2976 return -ENOMEM;
2977
2978 if (vsi->type == ICE_VSI_SF)
2979 avail = vsi->alloc_txq;
2980
2981 vsi->num_xdp_txq = min_t(u16, avail, cpus);
2982
2983 if (vsi->num_xdp_txq < cpus)
2984 static_branch_inc(&ice_xdp_locking_key);
2985
2986 return 0;
2987 }
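/* Worked example: with 8 possible CPUs and 6 available Tx queues, the check
 * passes (6 >= 8 / 2), num_xdp_txq becomes 6, and because 6 < 8 the
 * ice_xdp_locking_key static branch is enabled, so XDP rings are shared
 * between CPUs under the per-ring tx_lock.
 */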
2988
2989 /**
2990 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2991 * @vsi: Pointer to VSI structure
2992 */
2993 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
2994 {
2995 if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
2996 return ICE_RXBUF_1664;
2997 else
2998 return ICE_RXBUF_3072;
2999 }
3000
3001 /**
3002 * ice_xdp_setup_prog - Add or remove XDP eBPF program
3003 * @vsi: VSI to setup XDP for
3004 * @prog: XDP program
3005 * @extack: netlink extended ack
3006 */
3007 static int
3008 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
3009 struct netlink_ext_ack *extack)
3010 {
3011 unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
3012 int ret = 0, xdp_ring_err = 0;
3013 bool if_running;
3014
3015 if (prog && !prog->aux->xdp_has_frags) {
3016 if (frame_size > ice_max_xdp_frame_size(vsi)) {
3017 NL_SET_ERR_MSG_MOD(extack,
3018 "MTU is too large for linear frames and XDP prog does not support frags");
3019 return -EOPNOTSUPP;
3020 }
3021 }
3022
3023 /* hot swap progs and avoid toggling link */
3024 if (ice_is_xdp_ena_vsi(vsi) == !!prog ||
3025 test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) {
3026 ice_vsi_assign_bpf_prog(vsi, prog);
3027 return 0;
3028 }
3029
3030 if_running = netif_running(vsi->netdev) &&
3031 !test_and_set_bit(ICE_VSI_DOWN, vsi->state);
3032
3033 /* need to stop netdev while setting up the program for Rx rings */
3034 if (if_running) {
3035 ret = ice_down(vsi);
3036 if (ret) {
3037 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
3038 return ret;
3039 }
3040 }
3041
3042 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
3043 xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
3044 if (xdp_ring_err) {
3045 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
3046 } else {
3047 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
3048 ICE_XDP_CFG_FULL);
3049 if (xdp_ring_err)
3050 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
3051 }
3052 xdp_features_set_redirect_target(vsi->netdev, true);
3053 /* reallocate Rx queues that are used for zero-copy */
3054 xdp_ring_err = ice_realloc_zc_buf(vsi, true);
3055 if (xdp_ring_err)
3056 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
3057 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
3058 xdp_features_clear_redirect_target(vsi->netdev);
3059 xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
3060 if (xdp_ring_err)
3061 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
3062 /* reallocate Rx queues that were used for zero-copy */
3063 xdp_ring_err = ice_realloc_zc_buf(vsi, false);
3064 if (xdp_ring_err)
3065 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
3066 }
3067
3068 if (if_running)
3069 ret = ice_up(vsi);
3070
3071 if (!ret && prog)
3072 ice_vsi_rx_napi_schedule(vsi);
3073
3074 return (ret || xdp_ring_err) ? -ENOMEM : 0;
3075 }
3076
3077 /**
3078 * ice_xdp_safe_mode - XDP handler for safe mode
3079 * @dev: netdevice
3080 * @xdp: XDP command
3081 */
3082 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
3083 struct netdev_bpf *xdp)
3084 {
3085 NL_SET_ERR_MSG_MOD(xdp->extack,
3086 "Please provide working DDP firmware package in order to use XDP\n"
3087 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
3088 return -EOPNOTSUPP;
3089 }
3090
3091 /**
3092 * ice_xdp - implements XDP handler
3093 * @dev: netdevice
3094 * @xdp: XDP command
3095 */
3096 int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3097 {
3098 struct ice_netdev_priv *np = netdev_priv(dev);
3099 struct ice_vsi *vsi = np->vsi;
3100 int ret;
3101
3102 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) {
3103 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI");
3104 return -EINVAL;
3105 }
3106
3107 mutex_lock(&vsi->xdp_state_lock);
3108
3109 switch (xdp->command) {
3110 case XDP_SETUP_PROG:
3111 ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3112 break;
3113 case XDP_SETUP_XSK_POOL:
3114 ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);
3115 break;
3116 default:
3117 ret = -EINVAL;
3118 }
3119
3120 mutex_unlock(&vsi->xdp_state_lock);
3121 return ret;
3122 }
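/* From user space this ndo is reached when a program is attached in native
 * mode, e.g. with iproute2 (interface name and object file are placeholders):
 *
 *	ip link set dev eth0 xdpdrv obj xdp_prog.o sec xdp
 */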
3123
3124 /**
3125 * ice_ena_misc_vector - enable the non-queue interrupts
3126 * @pf: board private structure
3127 */
3128 static void ice_ena_misc_vector(struct ice_pf *pf)
3129 {
3130 struct ice_hw *hw = &pf->hw;
3131 u32 pf_intr_start_offset;
3132 u32 val;
3133
3134 /* Disable anti-spoof detection interrupt to prevent spurious event
3135 * interrupts during a function reset. Anti-spoof functionality is
3136 * still supported.
3137 */
3138 val = rd32(hw, GL_MDCK_TX_TDPU);
3139 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3140 wr32(hw, GL_MDCK_TX_TDPU, val);
3141
3142 /* clear things first */
3143 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
3144 rd32(hw, PFINT_OICR); /* read to clear */
3145
3146 val = (PFINT_OICR_ECC_ERR_M |
3147 PFINT_OICR_MAL_DETECT_M |
3148 PFINT_OICR_GRST_M |
3149 PFINT_OICR_PCI_EXCEPTION_M |
3150 PFINT_OICR_VFLR_M |
3151 PFINT_OICR_HMC_ERR_M |
3152 PFINT_OICR_PE_PUSH_M |
3153 PFINT_OICR_PE_CRITERR_M);
3154
3155 wr32(hw, PFINT_OICR_ENA, val);
3156
3157 /* SW_ITR_IDX = 0, but don't change INTENA */
3158 wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3159 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3160
3161 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3162 return;
3163 pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3164 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3165 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3166 }
3167
3168 /**
3169 * ice_ll_ts_intr - ll_ts interrupt handler
3170 * @irq: interrupt number
3171 * @data: pointer to the PF structure
3172 */
3173 static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
3174 {
3175 struct ice_pf *pf = data;
3176 u32 pf_intr_start_offset;
3177 struct ice_ptp_tx *tx;
3178 unsigned long flags;
3179 struct ice_hw *hw;
3180 u32 val;
3181 u8 idx;
3182
3183 hw = &pf->hw;
3184 tx = &pf->ptp.port.tx;
3185 spin_lock_irqsave(&tx->lock, flags);
3186 ice_ptp_complete_tx_single_tstamp(tx);
3187
3188 idx = find_next_bit_wrap(tx->in_use, tx->len,
3189 tx->last_ll_ts_idx_read + 1);
3190 if (idx != tx->len)
3191 ice_ptp_req_tx_single_tstamp(tx, idx);
3192 spin_unlock_irqrestore(&tx->lock, flags);
3193
3194 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
3195 (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
3196 pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3197 wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3198 val);
3199
3200 return IRQ_HANDLED;
3201 }
3202
3203 /**
3204 * ice_misc_intr - misc interrupt handler
3205 * @irq: interrupt number
3206 * @data: pointer to the PF structure
3207 */
3208 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3209 {
3210 struct ice_pf *pf = (struct ice_pf *)data;
3211 irqreturn_t ret = IRQ_HANDLED;
3212 struct ice_hw *hw = &pf->hw;
3213 struct device *dev;
3214 u32 oicr, ena_mask;
3215
3216 dev = ice_pf_to_dev(pf);
3217 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3218 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3219 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3220
3221 oicr = rd32(hw, PFINT_OICR);
3222 ena_mask = rd32(hw, PFINT_OICR_ENA);
3223
3224 if (oicr & PFINT_OICR_SWINT_M) {
3225 ena_mask &= ~PFINT_OICR_SWINT_M;
3226 pf->sw_int_count++;
3227 }
3228
3229 if (oicr & PFINT_OICR_MAL_DETECT_M) {
3230 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3231 set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3232 }
3233 if (oicr & PFINT_OICR_VFLR_M) {
3234 /* disable any further VFLR event notifications */
3235 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3236 u32 reg = rd32(hw, PFINT_OICR_ENA);
3237
3238 reg &= ~PFINT_OICR_VFLR_M;
3239 wr32(hw, PFINT_OICR_ENA, reg);
3240 } else {
3241 ena_mask &= ~PFINT_OICR_VFLR_M;
3242 set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3243 }
3244 }
3245
3246 if (oicr & PFINT_OICR_GRST_M) {
3247 u32 reset;
3248
3249 /* we have a reset warning */
3250 ena_mask &= ~PFINT_OICR_GRST_M;
3251 reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M,
3252 rd32(hw, GLGEN_RSTAT));
3253
3254 if (reset == ICE_RESET_CORER)
3255 pf->corer_count++;
3256 else if (reset == ICE_RESET_GLOBR)
3257 pf->globr_count++;
3258 else if (reset == ICE_RESET_EMPR)
3259 pf->empr_count++;
3260 else
3261 dev_dbg(dev, "Invalid reset type %d\n", reset);
3262
3263 /* If a reset cycle isn't already in progress, we set a bit in
3264 * pf->state so that the service task can start a reset/rebuild.
3265 */
3266 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3267 if (reset == ICE_RESET_CORER)
3268 set_bit(ICE_CORER_RECV, pf->state);
3269 else if (reset == ICE_RESET_GLOBR)
3270 set_bit(ICE_GLOBR_RECV, pf->state);
3271 else
3272 set_bit(ICE_EMPR_RECV, pf->state);
3273
3274 /* There are a couple of different bits at play here.
3275 * hw->reset_ongoing indicates whether the hardware is
3276 * in reset. This is set to true when a reset interrupt
3277 * is received and set back to false after the driver
3278 * has determined that the hardware is out of reset.
3279 *
3280 * ICE_RESET_OICR_RECV in pf->state indicates
3281 * that a post reset rebuild is required before the
3282 * driver is operational again. This is set above.
3283 *
3284 * As this is the start of the reset/rebuild cycle, set
3285 * both to indicate that.
3286 */
3287 hw->reset_ongoing = true;
3288 }
3289 }
3290
3291 if (oicr & PFINT_OICR_TSYN_TX_M) {
3292 ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3293 if (ice_pf_state_is_nominal(pf) &&
3294 pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
3295 struct ice_ptp_tx *tx = &pf->ptp.port.tx;
3296 unsigned long flags;
3297 u8 idx;
3298
3299 spin_lock_irqsave(&tx->lock, flags);
3300 idx = find_next_bit_wrap(tx->in_use, tx->len,
3301 tx->last_ll_ts_idx_read + 1);
3302 if (idx != tx->len)
3303 ice_ptp_req_tx_single_tstamp(tx, idx);
3304 spin_unlock_irqrestore(&tx->lock, flags);
3305 } else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
3306 set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
3307 ret = IRQ_WAKE_THREAD;
3308 }
3309 }
3310
3311 if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3312 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3313 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3314
3315 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3316
3317 if (ice_pf_src_tmr_owned(pf)) {
3318 /* Save EVENTs from GLTSYN register */
3319 pf->ptp.ext_ts_irq |= gltsyn_stat &
3320 (GLTSYN_STAT_EVENT0_M |
3321 GLTSYN_STAT_EVENT1_M |
3322 GLTSYN_STAT_EVENT2_M);
3323
3324 ice_ptp_extts_event(pf);
3325 }
3326 }
3327
3328 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3329 if (oicr & ICE_AUX_CRIT_ERR) {
3330 pf->oicr_err_reg |= oicr;
3331 set_bit(ICE_AUX_ERR_PENDING, pf->state);
3332 ena_mask &= ~ICE_AUX_CRIT_ERR;
3333 }
3334
3335 /* Report any remaining unexpected interrupts */
3336 oicr &= ena_mask;
3337 if (oicr) {
3338 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3339 /* If a critical error is pending there is no choice but to
3340 * reset the device.
3341 */
3342 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3343 PFINT_OICR_ECC_ERR_M)) {
3344 set_bit(ICE_PFR_REQ, pf->state);
3345 }
3346 }
3347 ice_service_task_schedule(pf);
3348 if (ret == IRQ_HANDLED)
3349 ice_irq_dynamic_ena(hw, NULL, NULL);
3350
3351 return ret;
3352 }
3353
3354 /**
3355 * ice_misc_intr_thread_fn - misc interrupt thread function
3356 * @irq: interrupt number
3357 * @data: pointer to the PF structure
3358 */
3359 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3360 {
3361 struct ice_pf *pf = data;
3362 struct ice_hw *hw;
3363
3364 hw = &pf->hw;
3365
3366 if (ice_is_reset_in_progress(pf->state))
3367 goto skip_irq;
3368
3369 if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
3370 /* Process outstanding Tx timestamps. If there is more work,
3371 * re-arm the interrupt to trigger again.
3372 */
3373 if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
3374 wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
3375 ice_flush(hw);
3376 }
3377 }
3378
3379 skip_irq:
3380 ice_irq_dynamic_ena(hw, NULL, NULL);
3381
3382 return IRQ_HANDLED;
3383 }
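/* This thread function runs only when the hard handler returns
 * IRQ_WAKE_THREAD; the pairing is set up where the vector is requested in
 * ice_req_irq_msix_misc():
 *
 *	devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
 *				  ice_misc_intr_thread_fn, 0, pf->int_name, pf);
 */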
3384
3385 /**
3386 * ice_dis_ctrlq_interrupts - disable control queue interrupts
3387 * @hw: pointer to HW structure
3388 */
3389 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3390 {
3391 /* disable Admin queue Interrupt causes */
3392 wr32(hw, PFINT_FW_CTL,
3393 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3394
3395 /* disable Mailbox queue Interrupt causes */
3396 wr32(hw, PFINT_MBX_CTL,
3397 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3398
3399 wr32(hw, PFINT_SB_CTL,
3400 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3401
3402 /* disable Control queue Interrupt causes */
3403 wr32(hw, PFINT_OICR_CTL,
3404 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3405
3406 ice_flush(hw);
3407 }
3408
3409 /**
3410 * ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup
3411 * @pf: board private structure
3412 */
3413 static void ice_free_irq_msix_ll_ts(struct ice_pf *pf)
3414 {
3415 int irq_num = pf->ll_ts_irq.virq;
3416
3417 synchronize_irq(irq_num);
3418 devm_free_irq(ice_pf_to_dev(pf), irq_num, pf);
3419
3420 ice_free_irq(pf, pf->ll_ts_irq);
3421 }
3422
3423 /**
3424 * ice_free_irq_msix_misc - Unroll misc vector setup
3425 * @pf: board private structure
3426 */
3427 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3428 {
3429 int misc_irq_num = pf->oicr_irq.virq;
3430 struct ice_hw *hw = &pf->hw;
3431
3432 ice_dis_ctrlq_interrupts(hw);
3433
3434 /* disable OICR interrupt */
3435 wr32(hw, PFINT_OICR_ENA, 0);
3436 ice_flush(hw);
3437
3438 synchronize_irq(misc_irq_num);
3439 devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3440
3441 ice_free_irq(pf, pf->oicr_irq);
3442 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3443 ice_free_irq_msix_ll_ts(pf);
3444 }
3445
3446 /**
3447 * ice_ena_ctrlq_interrupts - enable control queue interrupts
3448 * @hw: pointer to HW structure
3449 * @reg_idx: HW vector index to associate the control queue interrupts with
3450 */
3451 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3452 {
3453 u32 val;
3454
3455 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3456 PFINT_OICR_CTL_CAUSE_ENA_M);
3457 wr32(hw, PFINT_OICR_CTL, val);
3458
3459 /* enable Admin queue Interrupt causes */
3460 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3461 PFINT_FW_CTL_CAUSE_ENA_M);
3462 wr32(hw, PFINT_FW_CTL, val);
3463
3464 /* enable Mailbox queue Interrupt causes */
3465 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3466 PFINT_MBX_CTL_CAUSE_ENA_M);
3467 wr32(hw, PFINT_MBX_CTL, val);
3468
3469 if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) {
3470 /* enable Sideband queue Interrupt causes */
3471 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3472 PFINT_SB_CTL_CAUSE_ENA_M);
3473 wr32(hw, PFINT_SB_CTL, val);
3474 }
3475
3476 ice_flush(hw);
3477 }
3478
3479 /**
3480 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3481 * @pf: board private structure
3482 *
3483 * This sets up the handler for MSIX 0, which is used to manage the
3484 * non-queue interrupts, e.g. AdminQ and errors. This is not used
3485 * when in MSI or Legacy interrupt mode.
3486 */
3487 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3488 {
3489 struct device *dev = ice_pf_to_dev(pf);
3490 struct ice_hw *hw = &pf->hw;
3491 u32 pf_intr_start_offset;
3492 struct msi_map irq;
3493 int err = 0;
3494
3495 if (!pf->int_name[0])
3496 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3497 dev_driver_string(dev), dev_name(dev));
3498
3499 if (!pf->int_name_ll_ts[0])
3500 snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1,
3501 "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev));
3502 /* Do not request IRQ but do enable OICR interrupt since settings are
3503 * lost during reset. Note that this function is called only during
3504 * rebuild path and not while reset is in progress.
3505 */
3506 if (ice_is_reset_in_progress(pf->state))
3507 goto skip_req_irq;
3508
3509 /* reserve one vector in irq_tracker for misc interrupts */
3510 irq = ice_alloc_irq(pf, false);
3511 if (irq.index < 0)
3512 return irq.index;
3513
3514 pf->oicr_irq = irq;
3515 err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3516 ice_misc_intr_thread_fn, 0,
3517 pf->int_name, pf);
3518 if (err) {
3519 dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3520 pf->int_name, err);
3521 ice_free_irq(pf, pf->oicr_irq);
3522 return err;
3523 }
3524
3525 /* reserve one vector in irq_tracker for ll_ts interrupt */
3526 if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3527 goto skip_req_irq;
3528
3529 irq = ice_alloc_irq(pf, false);
3530 if (irq.index < 0)
3531 return irq.index;
3532
3533 pf->ll_ts_irq = irq;
3534 err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0,
3535 pf->int_name_ll_ts, pf);
3536 if (err) {
3537 dev_err(dev, "devm_request_irq for %s failed: %d\n",
3538 pf->int_name_ll_ts, err);
3539 ice_free_irq(pf, pf->ll_ts_irq);
3540 return err;
3541 }
3542
3543 skip_req_irq:
3544 ice_ena_misc_vector(pf);
3545
3546 ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3547 /* This enables LL TS interrupt */
3548 pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3549 if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3550 wr32(hw, PFINT_SB_CTL,
3551 ((pf->ll_ts_irq.index + pf_intr_start_offset) &
3552 PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M);
3553 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3554 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3555
3556 ice_flush(hw);
3557 ice_irq_dynamic_ena(hw, NULL, NULL);
3558
3559 return 0;
3560 }
3561
3562 /**
3563 * ice_set_ops - set netdev and ethtool ops for the given netdev
3564 * @vsi: the VSI associated with the new netdev
3565 */
3566 static void ice_set_ops(struct ice_vsi *vsi)
3567 {
3568 struct net_device *netdev = vsi->netdev;
3569 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3570
3571 if (ice_is_safe_mode(pf)) {
3572 netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3573 ice_set_ethtool_safe_mode_ops(netdev);
3574 return;
3575 }
3576
3577 netdev->netdev_ops = &ice_netdev_ops;
3578 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3579 netdev->xdp_metadata_ops = &ice_xdp_md_ops;
3580 ice_set_ethtool_ops(netdev);
3581
3582 if (vsi->type != ICE_VSI_PF)
3583 return;
3584
3585 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
3586 NETDEV_XDP_ACT_XSK_ZEROCOPY |
3587 NETDEV_XDP_ACT_RX_SG;
3588 netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
3589 }
3590
3591 /**
3592 * ice_set_netdev_features - set features for the given netdev
3593 * @netdev: netdev instance
3594 */
3595 void ice_set_netdev_features(struct net_device *netdev)
3596 {
3597 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3598 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3599 netdev_features_t csumo_features;
3600 netdev_features_t vlano_features;
3601 netdev_features_t dflt_features;
3602 netdev_features_t tso_features;
3603
3604 if (ice_is_safe_mode(pf)) {
3605 /* safe mode */
3606 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3607 netdev->hw_features = netdev->features;
3608 return;
3609 }
3610
3611 dflt_features = NETIF_F_SG |
3612 NETIF_F_HIGHDMA |
3613 NETIF_F_NTUPLE |
3614 NETIF_F_RXHASH;
3615
3616 csumo_features = NETIF_F_RXCSUM |
3617 NETIF_F_IP_CSUM |
3618 NETIF_F_SCTP_CRC |
3619 NETIF_F_IPV6_CSUM;
3620
3621 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3622 NETIF_F_HW_VLAN_CTAG_TX |
3623 NETIF_F_HW_VLAN_CTAG_RX;
3624
3625 /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3626 if (is_dvm_ena)
3627 vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3628
3629 tso_features = NETIF_F_TSO |
3630 NETIF_F_TSO_ECN |
3631 NETIF_F_TSO6 |
3632 NETIF_F_GSO_GRE |
3633 NETIF_F_GSO_UDP_TUNNEL |
3634 NETIF_F_GSO_GRE_CSUM |
3635 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3636 NETIF_F_GSO_PARTIAL |
3637 NETIF_F_GSO_IPXIP4 |
3638 NETIF_F_GSO_IPXIP6 |
3639 NETIF_F_GSO_UDP_L4;
3640
3641 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3642 NETIF_F_GSO_GRE_CSUM;
3643 /* set features that user can change */
3644 netdev->hw_features = dflt_features | csumo_features |
3645 vlano_features | tso_features;
3646
3647 /* add support for HW_CSUM on packets with MPLS header */
3648 netdev->mpls_features = NETIF_F_HW_CSUM |
3649 NETIF_F_TSO |
3650 NETIF_F_TSO6;
3651
3652 /* enable features */
3653 netdev->features |= netdev->hw_features;
3654
3655 netdev->hw_features |= NETIF_F_HW_TC;
3656 netdev->hw_features |= NETIF_F_LOOPBACK;
3657
3658 /* encap and VLAN devices inherit default, csumo and tso features */
3659 netdev->hw_enc_features |= dflt_features | csumo_features |
3660 tso_features;
3661 netdev->vlan_features |= dflt_features | csumo_features |
3662 tso_features;
3663
3664 /* advertise support but don't enable by default since only one type of
3665 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3666 * type turns on the other has to be turned off. This is enforced by the
3667 * ice_fix_features() ndo callback.
3668 */
3669 if (is_dvm_ena)
3670 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3671 NETIF_F_HW_VLAN_STAG_TX;
3672
3673 /* Leave CRC / FCS stripping enabled by default, but allow the value to
3674 * be changed at runtime
3675 */
3676 netdev->hw_features |= NETIF_F_RXFCS;
3677
3678 netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
3679 }
3680
3681 /**
3682 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3683 * @lut: Lookup table
3684 * @rss_table_size: Lookup table size
3685 * @rss_size: Range of queue number for hashing
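 *
 * For example, with rss_table_size = 8 and rss_size = 3 the LUT becomes
 * { 0, 1, 2, 0, 1, 2, 0, 1 }, spreading hash buckets round-robin across
 * the first 3 queues.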
3686 */
3687 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3688 {
3689 u16 i;
3690
3691 for (i = 0; i < rss_table_size; i++)
3692 lut[i] = i % rss_size;
3693 }
3694
3695 /**
3696 * ice_pf_vsi_setup - Set up a PF VSI
3697 * @pf: board private structure
3698 * @pi: pointer to the port_info instance
3699 *
3700 * Returns pointer to the successfully allocated VSI software struct
3701 * on success, otherwise returns NULL on failure.
3702 */
3703 static struct ice_vsi *
3704 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3705 {
3706 struct ice_vsi_cfg_params params = {};
3707
3708 params.type = ICE_VSI_PF;
3709 params.port_info = pi;
3710 params.flags = ICE_VSI_FLAG_INIT;
3711
3712 return ice_vsi_setup(pf, &params);
3713 }
3714
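/**
 * ice_chnl_vsi_setup - Set up a channel VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @ch: pointer to the channel structure
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */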
3715 static struct ice_vsi *
3716 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3717 struct ice_channel *ch)
3718 {
3719 struct ice_vsi_cfg_params params = {};
3720
3721 params.type = ICE_VSI_CHNL;
3722 params.port_info = pi;
3723 params.ch = ch;
3724 params.flags = ICE_VSI_FLAG_INIT;
3725
3726 return ice_vsi_setup(pf, &params);
3727 }
3728
3729 /**
3730 * ice_ctrl_vsi_setup - Set up a control VSI
3731 * @pf: board private structure
3732 * @pi: pointer to the port_info instance
3733 *
3734 * Returns pointer to the successfully allocated VSI software struct
3735 * on success, otherwise returns NULL on failure.
3736 */
3737 static struct ice_vsi *
3738 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3739 {
3740 struct ice_vsi_cfg_params params = {};
3741
3742 params.type = ICE_VSI_CTRL;
3743 params.port_info = pi;
3744 params.flags = ICE_VSI_FLAG_INIT;
3745
3746 return ice_vsi_setup(pf, &params);
3747 }
3748
3749 /**
3750 * ice_lb_vsi_setup - Set up a loopback VSI
3751 * @pf: board private structure
3752 * @pi: pointer to the port_info instance
3753 *
3754 * Returns pointer to the successfully allocated VSI software struct
3755 * on success, otherwise returns NULL on failure.
3756 */
3757 struct ice_vsi *
3758 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3759 {
3760 struct ice_vsi_cfg_params params = {};
3761
3762 params.type = ICE_VSI_LB;
3763 params.port_info = pi;
3764 params.flags = ICE_VSI_FLAG_INIT;
3765
3766 return ice_vsi_setup(pf, &params);
3767 }
3768
3769 /**
3770 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3771 * @netdev: network interface to be adjusted
3772 * @proto: VLAN TPID
3773 * @vid: VLAN ID to be added
3774 *
3775 * net_device_ops implementation for adding VLAN IDs
3776 */
3777 int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3778 {
3779 struct ice_netdev_priv *np = netdev_priv(netdev);
3780 struct ice_vsi_vlan_ops *vlan_ops;
3781 struct ice_vsi *vsi = np->vsi;
3782 struct ice_vlan vlan;
3783 int ret;
3784
3785 /* VLAN 0 is added by default during load/reset */
3786 if (!vid)
3787 return 0;
3788
3789 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3790 usleep_range(1000, 2000);
3791
3792 /* Add multicast promisc rule for the VLAN ID to be added if
3793 * all-multicast is currently enabled.
3794 */
3795 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3796 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3797 ICE_MCAST_VLAN_PROMISC_BITS,
3798 vid);
3799 if (ret)
3800 goto finish;
3801 }
3802
3803 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3804
3805 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3806 * packets aren't pruned by the device's internal switch on Rx
3807 */
3808 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3809 ret = vlan_ops->add_vlan(vsi, &vlan);
3810 if (ret)
3811 goto finish;
3812
3813 /* If all-multicast is currently enabled and this VLAN ID is the only
3814 * one besides VLAN 0, we have to update the look-up type of the multicast
3815 * promisc rule for VLAN 0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3816 */
3817 if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3818 ice_vsi_num_non_zero_vlans(vsi) == 1) {
3819 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3820 ICE_MCAST_PROMISC_BITS, 0);
3821 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3822 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3823 }
3824
3825 finish:
3826 clear_bit(ICE_CFG_BUSY, vsi->state);
3827
3828 return ret;
3829 }
3830
3831 /**
3832 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3833 * @netdev: network interface to be adjusted
3834 * @proto: VLAN TPID
3835 * @vid: VLAN ID to be removed
3836 *
3837 * net_device_ops implementation for removing VLAN IDs
3838 */
3839 int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3840 {
3841 struct ice_netdev_priv *np = netdev_priv(netdev);
3842 struct ice_vsi_vlan_ops *vlan_ops;
3843 struct ice_vsi *vsi = np->vsi;
3844 struct ice_vlan vlan;
3845 int ret;
3846
3847 /* don't allow removal of VLAN 0 */
3848 if (!vid)
3849 return 0;
3850
3851 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3852 usleep_range(1000, 2000);
3853
3854 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3855 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3856 if (ret) {
3857 netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3858 vsi->vsi_num);
3859 vsi->current_netdev_flags |= IFF_ALLMULTI;
3860 }
3861
3862 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3863
3864 /* Make sure VLAN delete is successful before updating VLAN
3865 * information
3866 */
3867 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3868 ret = vlan_ops->del_vlan(vsi, &vlan);
3869 if (ret)
3870 goto finish;
3871
3872 /* Remove multicast promisc rule for the removed VLAN ID if
3873 * all-multicast is enabled.
3874 */
3875 if (vsi->current_netdev_flags & IFF_ALLMULTI)
3876 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3877 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3878
3879 if (!ice_vsi_has_non_zero_vlans(vsi)) {
3880 /* Update look-up type of multicast promisc rule for VLAN 0
3881 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3882 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3883 */
3884 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3885 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3886 ICE_MCAST_VLAN_PROMISC_BITS,
3887 0);
3888 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3889 ICE_MCAST_PROMISC_BITS, 0);
3890 }
3891 }
3892
3893 finish:
3894 clear_bit(ICE_CFG_BUSY, vsi->state);
3895
3896 return ret;
3897 }
3898
3899 /**
3900 * ice_rep_indr_tc_block_unbind - unbind an indirect TC block and free its private data
3901 * @cb_priv: indirection block private data
3902 */
3903 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3904 {
3905 struct ice_indr_block_priv *indr_priv = cb_priv;
3906
3907 list_del(&indr_priv->list);
3908 kfree(indr_priv);
3909 }
3910
3911 /**
3912 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3913 * @vsi: VSI struct which has the netdev
3914 */
3915 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3916 {
3917 struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3918
3919 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3920 ice_rep_indr_tc_block_unbind);
3921 }
3922
3923 /**
3924 * ice_tc_indir_block_register - Register TC indirect block notifications
3925 * @vsi: VSI struct which has the netdev
3926 *
3927 * Returns 0 on success, negative value on failure
3928 */
3929 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3930 {
3931 struct ice_netdev_priv *np;
3932
3933 if (!vsi || !vsi->netdev)
3934 return -EINVAL;
3935
3936 np = netdev_priv(vsi->netdev);
3937
3938 INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3939 return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3940 }
3941
3942 /**
3943 * ice_get_avail_q_count - Get count of queues available for use
3944 * @pf_qmap: bitmap to get queue use count from
3945 * @lock: pointer to a mutex that protects access to pf_qmap
3946 * @size: size of the bitmap
3947 */
3948 static u16
3949 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3950 {
3951 unsigned long bit;
3952 u16 count = 0;
3953
3954 mutex_lock(lock);
3955 for_each_clear_bit(bit, pf_qmap, size)
3956 count++;
3957 mutex_unlock(lock);
3958
3959 return count;
3960 }
3961
3962 /**
3963 * ice_get_avail_txq_count - Get count of available Tx queues
3964 * @pf: pointer to an ice_pf instance
3965 */
3966 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3967 {
3968 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3969 pf->max_pf_txqs);
3970 }
3971
3972 /**
3973 * ice_get_avail_rxq_count - Get count of available Rx queues
3974 * @pf: pointer to an ice_pf instance
3975 */
3976 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3977 {
3978 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3979 pf->max_pf_rxqs);
3980 }
3981
3982 /**
3983 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3984 * @pf: board private structure to initialize
3985 */
3986 static void ice_deinit_pf(struct ice_pf *pf)
3987 {
3988 ice_service_task_stop(pf);
3989 mutex_destroy(&pf->lag_mutex);
3990 mutex_destroy(&pf->adev_mutex);
3991 mutex_destroy(&pf->sw_mutex);
3992 mutex_destroy(&pf->tc_mutex);
3993 mutex_destroy(&pf->avail_q_mutex);
3994 mutex_destroy(&pf->vfs.table_lock);
3995
3996 if (pf->avail_txqs) {
3997 bitmap_free(pf->avail_txqs);
3998 pf->avail_txqs = NULL;
3999 }
4000
4001 if (pf->avail_rxqs) {
4002 bitmap_free(pf->avail_rxqs);
4003 pf->avail_rxqs = NULL;
4004 }
4005
4006 if (pf->ptp.clock)
4007 ptp_clock_unregister(pf->ptp.clock);
4008
4009 xa_destroy(&pf->dyn_ports);
4010 xa_destroy(&pf->sf_nums);
4011 }
4012
4013 /**
4014 * ice_set_pf_caps - set PF's capability flags
4015 * @pf: pointer to the PF instance
4016 */
4017 static void ice_set_pf_caps(struct ice_pf *pf)
4018 {
4019 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
4020
4021 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
4022 if (func_caps->common_cap.rdma)
4023 set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
4024 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4025 if (func_caps->common_cap.dcb)
4026 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4027 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
4028 if (func_caps->common_cap.sr_iov_1_1) {
4029 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
4030 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
4031 ICE_MAX_SRIOV_VFS);
4032 }
4033 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
4034 if (func_caps->common_cap.rss_table_size)
4035 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
4036
4037 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
4038 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
4039 u16 unused;
4040
4041 /* ctrl_vsi_idx will be set to a valid value when flow director
4042 * is setup by ice_init_fdir
4043 */
4044 pf->ctrl_vsi_idx = ICE_NO_VSI;
4045 set_bit(ICE_FLAG_FD_ENA, pf->flags);
4046 /* force guaranteed filter pool for PF */
4047 ice_alloc_fd_guar_item(&pf->hw, &unused,
4048 func_caps->fd_fltr_guar);
4049 /* force shared filter pool for PF */
4050 ice_alloc_fd_shrd_item(&pf->hw, &unused,
4051 func_caps->fd_fltr_best_effort);
4052 }
4053
4054 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
4055 if (func_caps->common_cap.ieee_1588 &&
4056 !(pf->hw.mac_type == ICE_MAC_E830))
4057 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
4058
4059 pf->max_pf_txqs = func_caps->common_cap.num_txq;
4060 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
4061 }
4062
4063 /**
4064 * ice_init_pf - Initialize general software structures (struct ice_pf)
4065 * @pf: board private structure to initialize
4066 */
4067 static int ice_init_pf(struct ice_pf *pf)
4068 {
4069 ice_set_pf_caps(pf);
4070
4071 mutex_init(&pf->sw_mutex);
4072 mutex_init(&pf->tc_mutex);
4073 mutex_init(&pf->adev_mutex);
4074 mutex_init(&pf->lag_mutex);
4075
4076 INIT_HLIST_HEAD(&pf->aq_wait_list);
4077 spin_lock_init(&pf->aq_wait_lock);
4078 init_waitqueue_head(&pf->aq_wait_queue);
4079
4080 init_waitqueue_head(&pf->reset_wait_queue);
4081
4082 /* setup service timer and periodic service task */
4083 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
4084 pf->serv_tmr_period = HZ;
4085 INIT_WORK(&pf->serv_task, ice_service_task);
4086 clear_bit(ICE_SERVICE_SCHED, pf->state);
4087
4088 mutex_init(&pf->avail_q_mutex);
4089 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
4090 if (!pf->avail_txqs)
4091 return -ENOMEM;
4092
4093 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
4094 if (!pf->avail_rxqs) {
4095 bitmap_free(pf->avail_txqs);
4096 pf->avail_txqs = NULL;
4097 return -ENOMEM;
4098 }
4099
4100 mutex_init(&pf->vfs.table_lock);
4101 hash_init(pf->vfs.table);
4102 ice_mbx_init_snapshot(&pf->hw);
4103
4104 xa_init(&pf->dyn_ports);
4105 xa_init(&pf->sf_nums);
4106
4107 return 0;
4108 }
4109
4110 /**
4111 * ice_is_wol_supported - check if WoL is supported
4112 * @hw: pointer to hardware info
4113 *
4114 * Check if WoL is supported based on the HW configuration.
4115 * Returns true if NVM supports and enables WoL for this port, false otherwise
4116 */
4117 bool ice_is_wol_supported(struct ice_hw *hw)
4118 {
4119 u16 wol_ctrl;
4120
4121 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
4122 * word) indicates WoL is not supported on the corresponding PF ID.
4123 */
4124 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
4125 return false;
4126
4127 return !(BIT(hw->port_info->lport) & wol_ctrl);
4128 }
4129
4130 /**
4131 * ice_vsi_recfg_qs - Change the number of queues on a VSI
4132 * @vsi: VSI being changed
4133 * @new_rx: new number of Rx queues
4134 * @new_tx: new number of Tx queues
4135 * @locked: is adev device_lock held
4136 *
4137 * Only change the number of queues if new_tx or new_rx is non-zero.
4138 *
4139 * Returns 0 on success.
4140 */
4141 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
4142 {
4143 struct ice_pf *pf = vsi->back;
4144 int i, err = 0, timeout = 50;
4145
4146 if (!new_rx && !new_tx)
4147 return -EINVAL;
4148
4149 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
4150 timeout--;
4151 if (!timeout)
4152 return -EBUSY;
4153 usleep_range(1000, 2000);
4154 }
4155
4156 if (new_tx)
4157 vsi->req_txq = (u16)new_tx;
4158 if (new_rx)
4159 vsi->req_rxq = (u16)new_rx;
4160
4161 /* set for the next time the netdev is started */
4162 if (!netif_running(vsi->netdev)) {
4163 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4164 if (err)
4165 goto rebuild_err;
4166 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
4167 goto done;
4168 }
4169
4170 ice_vsi_close(vsi);
4171 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4172 if (err)
4173 goto rebuild_err;
4174
4175 ice_for_each_traffic_class(i) {
4176 if (vsi->tc_cfg.ena_tc & BIT(i))
4177 netdev_set_tc_queue(vsi->netdev,
4178 vsi->tc_cfg.tc_info[i].netdev_tc,
4179 vsi->tc_cfg.tc_info[i].qcount_tx,
4180 vsi->tc_cfg.tc_info[i].qoffset);
4181 }
4182 ice_pf_dcb_recfg(pf, locked);
4183 ice_vsi_open(vsi);
4184 goto done;
4185
4186 rebuild_err:
4187 dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n",
4188 err);
4189 done:
4190 clear_bit(ICE_CFG_BUSY, pf->state);
4191 return err;
4192 }
4193
4194 /**
4195 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4196 * @pf: PF to configure
4197 *
4198 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4199 * VSI can still Tx/Rx VLAN tagged packets.
4200 */
4201 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4202 {
4203 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4204 struct ice_vsi_ctx *ctxt;
4205 struct ice_hw *hw;
4206 int status;
4207
4208 if (!vsi)
4209 return;
4210
4211 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4212 if (!ctxt)
4213 return;
4214
4215 hw = &pf->hw;
4216 ctxt->info = vsi->info;
4217
4218 ctxt->info.valid_sections =
4219 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4220 ICE_AQ_VSI_PROP_SECURITY_VALID |
4221 ICE_AQ_VSI_PROP_SW_VALID);
4222
4223 /* disable VLAN anti-spoof */
4224 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4225 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4226
4227 /* disable VLAN pruning and keep all other settings */
4228 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4229
4230 /* allow all VLANs on Tx and don't strip on Rx */
4231 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4232 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4233
4234 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4235 if (status) {
4236 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4237 status, ice_aq_str(hw->adminq.sq_last_status));
4238 } else {
4239 vsi->info.sec_flags = ctxt->info.sec_flags;
4240 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4241 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4242 }
4243
4244 kfree(ctxt);
4245 }
4246
4247 /**
4248 * ice_log_pkg_init - log result of DDP package load
4249 * @hw: pointer to hardware info
4250 * @state: state of package load
4251 */
4252 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4253 {
4254 struct ice_pf *pf = hw->back;
4255 struct device *dev;
4256
4257 dev = ice_pf_to_dev(pf);
4258
4259 switch (state) {
4260 case ICE_DDP_PKG_SUCCESS:
4261 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4262 hw->active_pkg_name,
4263 hw->active_pkg_ver.major,
4264 hw->active_pkg_ver.minor,
4265 hw->active_pkg_ver.update,
4266 hw->active_pkg_ver.draft);
4267 break;
4268 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4269 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4270 hw->active_pkg_name,
4271 hw->active_pkg_ver.major,
4272 hw->active_pkg_ver.minor,
4273 hw->active_pkg_ver.update,
4274 hw->active_pkg_ver.draft);
4275 break;
4276 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4277 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
4278 hw->active_pkg_name,
4279 hw->active_pkg_ver.major,
4280 hw->active_pkg_ver.minor,
4281 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4282 break;
4283 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4284 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4285 hw->active_pkg_name,
4286 hw->active_pkg_ver.major,
4287 hw->active_pkg_ver.minor,
4288 hw->active_pkg_ver.update,
4289 hw->active_pkg_ver.draft,
4290 hw->pkg_name,
4291 hw->pkg_ver.major,
4292 hw->pkg_ver.minor,
4293 hw->pkg_ver.update,
4294 hw->pkg_ver.draft);
4295 break;
4296 case ICE_DDP_PKG_FW_MISMATCH:
4297 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
4298 break;
4299 case ICE_DDP_PKG_INVALID_FILE:
4300 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4301 break;
4302 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4303 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
4304 break;
4305 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4306 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
4307 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4308 break;
4309 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4310 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
4311 break;
4312 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4313 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
4314 break;
4315 case ICE_DDP_PKG_LOAD_ERROR:
4316 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
4317 /* poll for reset to complete */
4318 if (ice_check_reset(hw))
4319 dev_err(dev, "Error resetting device. Please reload the driver\n");
4320 break;
4321 case ICE_DDP_PKG_ERR:
4322 default:
4323 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
4324 break;
4325 }
4326 }
4327
4328 /**
4329 * ice_load_pkg - load/reload the DDP Package file
4330 * @firmware: firmware structure when firmware requested or NULL for reload
4331 * @pf: pointer to the PF instance
4332 *
4333 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4334 * initialize HW tables.
4335 */
4336 static void
4337 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4338 {
4339 enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4340 struct device *dev = ice_pf_to_dev(pf);
4341 struct ice_hw *hw = &pf->hw;
4342
4343 /* Load DDP Package */
4344 if (firmware && !hw->pkg_copy) {
4345 state = ice_copy_and_init_pkg(hw, firmware->data,
4346 firmware->size);
4347 ice_log_pkg_init(hw, state);
4348 } else if (!firmware && hw->pkg_copy) {
4349 /* Reload package during rebuild after CORER/GLOBR reset */
4350 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4351 ice_log_pkg_init(hw, state);
4352 } else {
4353 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4354 }
4355
4356 if (!ice_is_init_pkg_successful(state)) {
4357 /* Safe Mode */
4358 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4359 return;
4360 }
4361
4362 /* Successful download package is the precondition for advanced
4363 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4364 */
4365 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4366 }
4367
4368 /**
4369 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4370 * @pf: pointer to the PF structure
4371 *
4372 * There is no error returned here because the driver should be able to handle
4373 * 128 Byte cache lines, so we only print a warning in case issues are seen,
4374 * specifically with Tx.
4375 */
4376 static void ice_verify_cacheline_size(struct ice_pf *pf)
4377 {
4378 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4379 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4380 ICE_CACHE_LINE_BYTES);
4381 }
4382
4383 /**
4384 * ice_send_version - update firmware with driver version
4385 * @pf: PF struct
4386 *
4387 * Returns 0 on success, else error code
4388 */
4389 static int ice_send_version(struct ice_pf *pf)
4390 {
4391 struct ice_driver_ver dv;
4392
4393 dv.major_ver = 0xff;
4394 dv.minor_ver = 0xff;
4395 dv.build_ver = 0xff;
4396 dv.subbuild_ver = 0;
4397 strscpy((char *)dv.driver_string, UTS_RELEASE,
4398 sizeof(dv.driver_string));
4399 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4400 }
4401
4402 /**
4403 * ice_init_fdir - Initialize flow director VSI and configuration
4404 * @pf: pointer to the PF instance
4405 *
4406 * returns 0 on success, negative on error
4407 */
4408 static int ice_init_fdir(struct ice_pf *pf)
4409 {
4410 struct device *dev = ice_pf_to_dev(pf);
4411 struct ice_vsi *ctrl_vsi;
4412 int err;
4413
4414 /* Side Band Flow Director needs to have a control VSI.
4415 * Allocate it and store it in the PF.
4416 */
4417 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4418 if (!ctrl_vsi) {
4419 dev_dbg(dev, "could not create control VSI\n");
4420 return -ENOMEM;
4421 }
4422
4423 err = ice_vsi_open_ctrl(ctrl_vsi);
4424 if (err) {
4425 dev_dbg(dev, "could not open control VSI\n");
4426 goto err_vsi_open;
4427 }
4428
4429 mutex_init(&pf->hw.fdir_fltr_lock);
4430
4431 err = ice_fdir_create_dflt_rules(pf);
4432 if (err)
4433 goto err_fdir_rule;
4434
4435 return 0;
4436
4437 err_fdir_rule:
4438 ice_fdir_release_flows(&pf->hw);
4439 ice_vsi_close(ctrl_vsi);
4440 err_vsi_open:
4441 ice_vsi_release(ctrl_vsi);
4442 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4443 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4444 pf->ctrl_vsi_idx = ICE_NO_VSI;
4445 }
4446 return err;
4447 }
4448
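/**
 * ice_deinit_fdir - release the flow director control VSI and resources
 * @pf: pointer to the PF instance
 */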
4449 static void ice_deinit_fdir(struct ice_pf *pf)
4450 {
4451 struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4452
4453 if (!vsi)
4454 return;
4455
4456 ice_vsi_manage_fdir(vsi, false);
4457 ice_vsi_release(vsi);
4458 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4459 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4460 pf->ctrl_vsi_idx = ICE_NO_VSI;
4461 }
4462
4463 mutex_destroy(&pf->hw.fdir_fltr_lock);
4464 }
4465
4466 /**
4467 * ice_get_opt_fw_name - return optional firmware file name or NULL
4468 * @pf: pointer to the PF instance
4469 */
4470 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4471 {
4472 /* Optional firmware name same as default with additional dash
4473 * followed by an EUI-64 identifier (PCIe Device Serial Number)
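 * e.g. "intel/ice/ddp/ice-0123456789abcdef.pkg" for a (hypothetical) DSN
 * of 0x0123456789abcdef.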
4474 */
4475 struct pci_dev *pdev = pf->pdev;
4476 char *opt_fw_filename;
4477 u64 dsn;
4478
4479 /* Determine the name of the optional file using the DSN (two
4480 * dwords following the start of the DSN Capability).
4481 */
4482 dsn = pci_get_dsn(pdev);
4483 if (!dsn)
4484 return NULL;
4485
4486 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4487 if (!opt_fw_filename)
4488 return NULL;
4489
4490 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4491 ICE_DDP_PKG_PATH, dsn);
4492
4493 return opt_fw_filename;
4494 }
4495
4496 /**
4497 * ice_request_fw - request the DDP package file from the filesystem
4498 * @pf: pointer to the PF instance
4499 * @firmware: double pointer to firmware struct
4500 *
4501 * Return: zero when successful, negative values otherwise.
4502 */
4503 static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware)
4504 {
4505 char *opt_fw_filename = ice_get_opt_fw_name(pf);
4506 struct device *dev = ice_pf_to_dev(pf);
4507 int err = 0;
4508
4509 /* The optional device-specific DDP package (if present) overrides the
4510 * default DDP package file. The kernel logs a debug message if the file
4511 * doesn't exist, and warning messages for other errors.
4512 */
4513 if (opt_fw_filename) {
4514 err = firmware_request_nowarn(firmware, opt_fw_filename, dev);
4515 kfree(opt_fw_filename);
4516 if (!err)
4517 return err;
4518 }
4519 err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev);
4520 if (err)
4521 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4522
4523 return err;
4524 }
4525
4526 /**
4527 * ice_init_tx_topology - performs Tx topology initialization
4528 * @hw: pointer to the hardware structure
4529 * @firmware: pointer to firmware structure
4530 *
4531 * Return: zero when init was successful, negative values otherwise.
4532 */
4533 static int
4534 ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
4535 {
4536 u8 num_tx_sched_layers = hw->num_tx_sched_layers;
4537 struct ice_pf *pf = hw->back;
4538 struct device *dev;
4539 u8 *buf_copy;
4540 int err;
4541
4542 dev = ice_pf_to_dev(pf);
4543 /* ice_cfg_tx_topo buf argument is not a constant,
4544 * so we have to make a copy
4545 */
4546 buf_copy = kmemdup(firmware->data, firmware->size, GFP_KERNEL);
if (!buf_copy)
	return -ENOMEM;
4547
4548 err = ice_cfg_tx_topo(hw, buf_copy, firmware->size);
4549 if (!err) {
4550 if (hw->num_tx_sched_layers > num_tx_sched_layers)
4551 dev_info(dev, "Tx scheduling layers switching feature disabled\n");
4552 else
4553 dev_info(dev, "Tx scheduling layers switching feature enabled\n");
4554 /* If there was a change in topology, ice_cfg_tx_topo() triggered
4555 * a CORER and we need to re-init HW
4556 */
4557 ice_deinit_hw(hw);
4558 err = ice_init_hw(hw);
4559
4560 return err;
4561 } else if (err == -EIO) {
4562 dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
4563 }
4564
4565 return 0;
4566 }
4567
4568 /**
4569 * ice_init_ddp_config - DDP related configuration
4570 * @hw: pointer to the hardware structure
4571 * @pf: pointer to pf structure
4572 *
4573 * This function loads the DDP file from disk, then initializes the Tx
4574 * topology. At the end, the DDP package is loaded onto the card.
4575 *
4576 * Return: zero when init was successful, negative values otherwise.
4577 */
4578 static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
4579 {
4580 struct device *dev = ice_pf_to_dev(pf);
4581 const struct firmware *firmware = NULL;
4582 int err;
4583
4584 err = ice_request_fw(pf, &firmware);
4585 if (err) {
4586 dev_err(dev, "Fail during requesting FW: %d\n", err);
4587 return err;
4588 }
4589
4590 err = ice_init_tx_topology(hw, firmware);
4591 if (err) {
4592 dev_err(dev, "Fail during initialization of Tx topology: %d\n",
4593 err);
4594 release_firmware(firmware);
4595 return err;
4596 }
4597
4598 /* Download firmware to device */
4599 ice_load_pkg(firmware, pf);
4600 release_firmware(firmware);
4601
4602 return 0;
4603 }
4604
4605 /**
4606 * ice_print_wake_reason - show the wake up cause in the log
4607 * @pf: pointer to the PF struct
4608 */
4609 static void ice_print_wake_reason(struct ice_pf *pf)
4610 {
4611 u32 wus = pf->wakeup_reason;
4612 const char *wake_str;
4613
4614 /* if no wake event, nothing to print */
4615 if (!wus)
4616 return;
4617
4618 if (wus & PFPM_WUS_LNKC_M)
4619 wake_str = "Link\n";
4620 else if (wus & PFPM_WUS_MAG_M)
4621 wake_str = "Magic Packet\n";
4622 else if (wus & PFPM_WUS_MNG_M)
4623 wake_str = "Management\n";
4624 else if (wus & PFPM_WUS_FW_RST_WK_M)
4625 wake_str = "Firmware Reset\n";
4626 else
4627 wake_str = "Unknown\n";
4628
4629 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4630 }
4631
4632 /**
4633 * ice_pf_fwlog_update_module - update 1 module
4634 * @pf: pointer to the PF struct
4635 * @log_level: log_level to use for the @module
4636 * @module: module to update
4637 */
4638 void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
4639 {
4640 struct ice_hw *hw = &pf->hw;
4641
4642 hw->fwlog_cfg.module_entries[module].log_level = log_level;
4643 }
4644
4645 /**
4646 * ice_register_netdev - register netdev
4647 * @vsi: pointer to the VSI struct
4648 */
4649 static int ice_register_netdev(struct ice_vsi *vsi)
4650 {
4651 int err;
4652
4653 if (!vsi || !vsi->netdev)
4654 return -EIO;
4655
4656 err = register_netdev(vsi->netdev);
4657 if (err)
4658 return err;
4659
4660 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4661 netif_carrier_off(vsi->netdev);
4662 netif_tx_stop_all_queues(vsi->netdev);
4663
4664 return 0;
4665 }
4666
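/**
 * ice_unregister_netdev - unregister the netdev associated with a VSI
 * @vsi: pointer to the VSI struct
 */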
4667 static void ice_unregister_netdev(struct ice_vsi *vsi)
4668 {
4669 if (!vsi || !vsi->netdev)
4670 return;
4671
4672 unregister_netdev(vsi->netdev);
4673 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4674 }
4675
4676 /**
4677 * ice_cfg_netdev - Allocate, configure and register a netdev
4678 * @vsi: the VSI associated with the new netdev
4679 *
4680 * Returns 0 on success, negative value on failure
4681 */
4682 static int ice_cfg_netdev(struct ice_vsi *vsi)
4683 {
4684 struct ice_netdev_priv *np;
4685 struct net_device *netdev;
4686 u8 mac_addr[ETH_ALEN];
4687
4688 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4689 vsi->alloc_rxq);
4690 if (!netdev)
4691 return -ENOMEM;
4692
4693 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4694 vsi->netdev = netdev;
4695 np = netdev_priv(netdev);
4696 np->vsi = vsi;
4697
4698 ice_set_netdev_features(netdev);
4699 ice_set_ops(vsi);
4700
4701 if (vsi->type == ICE_VSI_PF) {
4702 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4703 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4704 eth_hw_addr_set(netdev, mac_addr);
4705 }
4706
4707 netdev->priv_flags |= IFF_UNICAST_FLT;
4708
4709 /* Setup netdev TC information */
4710 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4711
4712 netdev->max_mtu = ICE_MAX_MTU;
4713
4714 return 0;
4715 }
4716
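/**
 * ice_decfg_netdev - free the netdev allocated by ice_cfg_netdev
 * @vsi: the VSI whose netdev is being freed
 */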
4717 static void ice_decfg_netdev(struct ice_vsi *vsi)
4718 {
4719 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4720 free_netdev(vsi->netdev);
4721 vsi->netdev = NULL;
4722 }
4723
4724 /**
4725 * ice_wait_for_fw - wait for full FW readiness
4726 * @hw: pointer to the hardware structure
4727 * @timeout: milliseconds that can elapse before timing out
4728 */
4729 static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
4730 {
4731 int fw_loading;
4732 u32 elapsed = 0;
4733
4734 while (elapsed <= timeout) {
4735 fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
4736
4737 /* firmware was not yet loaded, we have to wait more */
4738 if (fw_loading) {
4739 elapsed += 100;
4740 msleep(100);
4741 continue;
4742 }
4743 return 0;
4744 }
4745
4746 return -ETIMEDOUT;
4747 }
4748
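/**
 * ice_init_dev - initialize HW, the DDP package, PF structures and interrupts
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative error code on failure.
 */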
4749 int ice_init_dev(struct ice_pf *pf)
4750 {
4751 struct device *dev = ice_pf_to_dev(pf);
4752 struct ice_hw *hw = &pf->hw;
4753 int err;
4754
4755 err = ice_init_hw(hw);
4756 if (err) {
4757 dev_err(dev, "ice_init_hw failed: %d\n", err);
4758 return err;
4759 }
4760
4761 /* Some cards require longer initialization times
4762 * due to the need to load FW from an external source.
4763 * This can take up to half a minute.
4764 */
4765 if (ice_is_pf_c827(hw)) {
4766 err = ice_wait_for_fw(hw, 30000);
4767 if (err) {
4768 dev_err(dev, "ice_wait_for_fw timed out");
4769 return err;
4770 }
4771 }
4772
4773 ice_init_feature_support(pf);
4774
4775 err = ice_init_ddp_config(hw, pf);
4776 if (err)
4777 return err;
4778
4779 /* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be
4780 * set in pf->flags, which will cause ice_is_safe_mode to return
4781 * true
4782 */
4783 if (ice_is_safe_mode(pf)) {
4784 /* we already got function/device capabilities but these don't
4785 * reflect what the driver needs to do in safe mode. Instead of
4786 * adding conditional logic everywhere to ignore these
4787 * device/function capabilities, override them.
4788 */
4789 ice_set_safe_mode_caps(hw);
4790 }
4791
4792 err = ice_init_pf(pf);
4793 if (err) {
4794 dev_err(dev, "ice_init_pf failed: %d\n", err);
4795 goto err_init_pf;
4796 }
4797
4798 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4799 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4800 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4801 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4802 if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4803 pf->hw.udp_tunnel_nic.tables[0].n_entries =
4804 pf->hw.tnl.valid_count[TNL_VXLAN];
4805 pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
4806 UDP_TUNNEL_TYPE_VXLAN;
4807 }
4808 if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4809 pf->hw.udp_tunnel_nic.tables[1].n_entries =
4810 pf->hw.tnl.valid_count[TNL_GENEVE];
4811 pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
4812 UDP_TUNNEL_TYPE_GENEVE;
4813 }
4814
4815 err = ice_init_interrupt_scheme(pf);
4816 if (err) {
4817 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4818 err = -EIO;
4819 goto err_init_interrupt_scheme;
4820 }
4821
4822 /* In case of MSIX we are going to setup the misc vector right here
4823 * to handle admin queue events etc. In case of legacy and MSI
4824 * the misc functionality and queue processing is combined in
4825 * the same vector and that gets setup at open.
4826 */
4827 err = ice_req_irq_msix_misc(pf);
4828 if (err) {
4829 dev_err(dev, "setup of misc vector failed: %d\n", err);
4830 goto err_req_irq_msix_misc;
4831 }
4832
4833 return 0;
4834
4835 err_req_irq_msix_misc:
4836 ice_clear_interrupt_scheme(pf);
4837 err_init_interrupt_scheme:
4838 ice_deinit_pf(pf);
4839 err_init_pf:
4840 ice_deinit_hw(hw);
4841 return err;
4842 }
4843
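/**
 * ice_deinit_dev - unroll ice_init_dev and reset the device
 * @pf: pointer to the PF instance
 */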
4844 void ice_deinit_dev(struct ice_pf *pf)
4845 {
4846 ice_free_irq_msix_misc(pf);
4847 ice_deinit_pf(pf);
4848 ice_deinit_hw(&pf->hw);
4849
4850 /* Service task is already stopped, so call reset directly. */
4851 ice_reset(&pf->hw, ICE_RESET_PFR);
4852 pci_wait_for_pending_transaction(pf->pdev);
4853 ice_clear_interrupt_scheme(pf);
4854 }
4855
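/**
 * ice_init_features - initialize DDP-driven and optional PF features
 * @pf: pointer to the PF instance
 */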
4856 static void ice_init_features(struct ice_pf *pf)
4857 {
4858 struct device *dev = ice_pf_to_dev(pf);
4859
4860 if (ice_is_safe_mode(pf))
4861 return;
4862
4863 /* initialize DDP driven features */
4864 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4865 ice_ptp_init(pf);
4866
4867 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4868 ice_gnss_init(pf);
4869
4870 if (ice_is_feature_supported(pf, ICE_F_CGU) ||
4871 ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
4872 ice_dpll_init(pf);
4873
4874 /* Note: Flow director init failure is non-fatal to load */
4875 if (ice_init_fdir(pf))
4876 dev_err(dev, "could not initialize flow director\n");
4877
4878 /* Note: DCB init failure is non-fatal to load */
4879 if (ice_init_pf_dcb(pf, false)) {
4880 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4881 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4882 } else {
4883 ice_cfg_lldp_mib_change(&pf->hw, true);
4884 }
4885
4886 if (ice_init_lag(pf))
4887 dev_warn(dev, "Failed to init link aggregation support\n");
4888
4889 ice_hwmon_init(pf);
4890 }
4891
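/**
 * ice_deinit_features - release the features set up by ice_init_features
 * @pf: pointer to the PF instance
 */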
4892 static void ice_deinit_features(struct ice_pf *pf)
4893 {
4894 if (ice_is_safe_mode(pf))
4895 return;
4896
4897 ice_deinit_lag(pf);
4898 if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4899 ice_cfg_lldp_mib_change(&pf->hw, false);
4900 ice_deinit_fdir(pf);
4901 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4902 ice_gnss_exit(pf);
4903 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4904 ice_ptp_release(pf);
4905 if (test_bit(ICE_FLAG_DPLL, pf->flags))
4906 ice_dpll_deinit(pf);
4907 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
4908 xa_destroy(&pf->eswitch.reprs);
4909 }
4910
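/**
 * ice_init_wakeup - record the wake reason and disable WoL until enabled
 * @pf: pointer to the PF instance
 */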
4911 static void ice_init_wakeup(struct ice_pf *pf)
4912 {
4913 /* Save wakeup reason register for later use */
4914 pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4915
4916 /* check for a power management event */
4917 ice_print_wake_reason(pf);
4918
4919 /* clear wake status, all bits */
4920 wr32(&pf->hw, PFPM_WUS, U32_MAX);
4921
4922 /* Disable WoL at init, wait for user to enable */
4923 device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4924 }
4925
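/**
 * ice_init_link - set up link events and initial PHY configuration
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative error code on failure.
 */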
4926 static int ice_init_link(struct ice_pf *pf)
4927 {
4928 struct device *dev = ice_pf_to_dev(pf);
4929 int err;
4930
4931 err = ice_init_link_events(pf->hw.port_info);
4932 if (err) {
4933 dev_err(dev, "ice_init_link_events failed: %d\n", err);
4934 return err;
4935 }
4936
4937 /* not a fatal error if this fails */
4938 err = ice_init_nvm_phy_type(pf->hw.port_info);
4939 if (err)
4940 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4941
4942 /* not a fatal error if this fails */
4943 err = ice_update_link_info(pf->hw.port_info);
4944 if (err)
4945 dev_err(dev, "ice_update_link_info failed: %d\n", err);
4946
4947 ice_init_link_dflt_override(pf->hw.port_info);
4948
4949 ice_check_link_cfg_err(pf,
4950 pf->hw.port_info->phy.link_info.link_cfg_err);
4951
4952 /* if media available, initialize PHY settings */
4953 if (pf->hw.port_info->phy.link_info.link_info &
4954 ICE_AQ_MEDIA_AVAILABLE) {
4955 /* not a fatal error if this fails */
4956 err = ice_init_phy_user_cfg(pf->hw.port_info);
4957 if (err)
4958 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4959
4960 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4961 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4962
4963 if (vsi)
4964 ice_configure_phy(vsi);
4965 }
4966 } else {
4967 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4968 }
4969
4970 return err;
4971 }
4972
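/**
 * ice_init_pf_sw - set up the PF switch structure and the main VSI
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative error code on failure.
 */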
4973 static int ice_init_pf_sw(struct ice_pf *pf)
4974 {
4975 bool dvm = ice_is_dvm_ena(&pf->hw);
4976 struct ice_vsi *vsi;
4977 int err;
4978
4979 /* create switch struct for the switch element created by FW on boot */
4980 pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
4981 if (!pf->first_sw)
4982 return -ENOMEM;
4983
4984 if (pf->hw.evb_veb)
4985 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4986 else
4987 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4988
4989 pf->first_sw->pf = pf;
4990
4991 /* record the sw_id available for later use */
4992 pf->first_sw->sw_id = pf->hw.port_info->sw_id;
4993
4994 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
4995 if (err)
4996 goto err_aq_set_port_params;
4997
4998 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
4999 if (!vsi) {
5000 err = -ENOMEM;
5001 goto err_pf_vsi_setup;
5002 }
5003
5004 return 0;
5005
5006 err_pf_vsi_setup:
5007 err_aq_set_port_params:
5008 kfree(pf->first_sw);
5009 return err;
5010 }
5011
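/**
 * ice_deinit_pf_sw - release the main VSI and the PF switch structure
 * @pf: pointer to the PF instance
 */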
5012 static void ice_deinit_pf_sw(struct ice_pf *pf)
5013 {
5014 struct ice_vsi *vsi = ice_get_main_vsi(pf);
5015
5016 if (!vsi)
5017 return;
5018
5019 ice_vsi_release(vsi);
5020 kfree(pf->first_sw);
5021 }
5022
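/**
 * ice_alloc_vsis - allocate the PF's VSI and VSI stats arrays
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative error code on failure.
 */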
5023 static int ice_alloc_vsis(struct ice_pf *pf)
5024 {
5025 struct device *dev = ice_pf_to_dev(pf);
5026
5027 pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
5028 if (!pf->num_alloc_vsi)
5029 return -EIO;
5030
5031 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
5032 dev_warn(dev,
5033 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
5034 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
5035 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
5036 }
5037
5038 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
5039 GFP_KERNEL);
5040 if (!pf->vsi)
5041 return -ENOMEM;
5042
5043 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
5044 sizeof(*pf->vsi_stats), GFP_KERNEL);
5045 if (!pf->vsi_stats) {
5046 devm_kfree(dev, pf->vsi);
5047 return -ENOMEM;
5048 }
5049
5050 return 0;
5051 }
5052
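/**
 * ice_dealloc_vsis - free the PF's VSI and VSI stats arrays
 * @pf: pointer to the PF instance
 */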
5053 static void ice_dealloc_vsis(struct ice_pf *pf)
5054 {
5055 devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
5056 pf->vsi_stats = NULL;
5057
5058 pf->num_alloc_vsi = 0;
5059 devm_kfree(ice_pf_to_dev(pf), pf->vsi);
5060 pf->vsi = NULL;
5061 }
5062
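/**
 * ice_init_devlink - register devlink params, regions and the instance
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative error code on failure.
 */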
5063 static int ice_init_devlink(struct ice_pf *pf)
5064 {
5065 int err;
5066
5067 err = ice_devlink_register_params(pf);
5068 if (err)
5069 return err;
5070
5071 ice_devlink_init_regions(pf);
5072 ice_devlink_register(pf);
5073
5074 return 0;
5075 }
5076
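/**
 * ice_deinit_devlink - unregister the devlink instance, regions and params
 * @pf: pointer to the PF instance
 */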
5077 static void ice_deinit_devlink(struct ice_pf *pf)
5078 {
5079 ice_devlink_unregister(pf);
5080 ice_devlink_destroy_regions(pf);
5081 ice_devlink_unregister_params(pf);
5082 }
5083
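/**
 * ice_init - perform one-time initialization of the PF at probe
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative error code on failure.
 */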
5084 static int ice_init(struct ice_pf *pf)
5085 {
5086 int err;
5087
5088 err = ice_init_dev(pf);
5089 if (err)
5090 return err;
5091
5092 err = ice_alloc_vsis(pf);
5093 if (err)
5094 goto err_alloc_vsis;
5095
5096 err = ice_init_pf_sw(pf);
5097 if (err)
5098 goto err_init_pf_sw;
5099
5100 ice_init_wakeup(pf);
5101
5102 err = ice_init_link(pf);
5103 if (err)
5104 goto err_init_link;
5105
5106 err = ice_send_version(pf);
5107 if (err)
5108 goto err_init_link;
5109
5110 ice_verify_cacheline_size(pf);
5111
5112 if (ice_is_safe_mode(pf))
5113 ice_set_safe_mode_vlan_cfg(pf);
5114 else
5115 /* print PCI link speed and width */
5116 pcie_print_link_status(pf->pdev);
5117
5118 /* ready to go, so clear down state bit */
5119 clear_bit(ICE_DOWN, pf->state);
5120 clear_bit(ICE_SERVICE_DIS, pf->state);
5121
5122 /* since everything is good, start the service timer */
5123 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5124
5125 return 0;
5126
5127 err_init_link:
5128 ice_deinit_pf_sw(pf);
5129 err_init_pf_sw:
5130 ice_dealloc_vsis(pf);
5131 err_alloc_vsis:
5132 ice_deinit_dev(pf);
5133 return err;
5134 }
5135
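/**
 * ice_deinit - unroll the initialization done by ice_init
 * @pf: pointer to the PF instance
 */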
5136 static void ice_deinit(struct ice_pf *pf)
5137 {
5138 set_bit(ICE_SERVICE_DIS, pf->state);
5139 set_bit(ICE_DOWN, pf->state);
5140
5141 ice_deinit_pf_sw(pf);
5142 ice_dealloc_vsis(pf);
5143 ice_deinit_dev(pf);
5144 }
5145
5146 /**
5147 * ice_load - load the PF by initializing HW and starting the VSI
5148 * @pf: pointer to the pf instance
5149 *
5150 * This function has to be called under devl_lock.
5151 */
5152 int ice_load(struct ice_pf *pf)
5153 {
5154 struct ice_vsi *vsi;
5155 int err;
5156
5157 devl_assert_locked(priv_to_devlink(pf));
5158
5159 vsi = ice_get_main_vsi(pf);
5160
5161 /* init channel list */
5162 INIT_LIST_HEAD(&vsi->ch_list);
5163
5164 err = ice_cfg_netdev(vsi);
5165 if (err)
5166 return err;
5167
5168 /* Setup DCB netlink interface */
5169 ice_dcbnl_setup(vsi);
5170
5171 err = ice_init_mac_fltr(pf);
5172 if (err)
5173 goto err_init_mac_fltr;
5174
5175 err = ice_devlink_create_pf_port(pf);
5176 if (err)
5177 goto err_devlink_create_pf_port;
5178
5179 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
5180
5181 err = ice_register_netdev(vsi);
5182 if (err)
5183 goto err_register_netdev;
5184
5185 err = ice_tc_indir_block_register(vsi);
5186 if (err)
5187 goto err_tc_indir_block_register;
5188
5189 ice_napi_add(vsi);
5190
5191 err = ice_init_rdma(pf);
5192 if (err)
5193 goto err_init_rdma;
5194
5195 ice_init_features(pf);
5196 ice_service_task_restart(pf);
5197
5198 clear_bit(ICE_DOWN, pf->state);
5199
5200 return 0;
5201
5202 err_init_rdma:
5203 ice_tc_indir_block_unregister(vsi);
5204 err_tc_indir_block_register:
5205 ice_unregister_netdev(vsi);
5206 err_register_netdev:
5207 ice_devlink_destroy_pf_port(pf);
5208 err_devlink_create_pf_port:
5209 err_init_mac_fltr:
5210 ice_decfg_netdev(vsi);
5211 return err;
5212 }
5213
5214 /**
5215 * ice_unload - unload the PF by stopping the VSI and deinitializing HW
5216 * @pf: pointer to the pf instance
5217 *
5218 * This function has to be called under devl_lock.
5219 */
5220 void ice_unload(struct ice_pf *pf)
5221 {
5222 struct ice_vsi *vsi = ice_get_main_vsi(pf);
5223
5224 devl_assert_locked(priv_to_devlink(pf));
5225
5226 ice_deinit_features(pf);
5227 ice_deinit_rdma(pf);
5228 ice_tc_indir_block_unregister(vsi);
5229 ice_unregister_netdev(vsi);
5230 ice_devlink_destroy_pf_port(pf);
5231 ice_decfg_netdev(vsi);
5232 }
5233
5234 /**
5235 * ice_probe - Device initialization routine
5236 * @pdev: PCI device information struct
5237 * @ent: entry in ice_pci_tbl
5238 *
5239 * Returns 0 on success, negative on failure
5240 */
5241 static int
5242 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
5243 {
5244 struct device *dev = &pdev->dev;
5245 struct ice_adapter *adapter;
5246 struct ice_pf *pf;
5247 struct ice_hw *hw;
5248 int err;
5249
5250 if (pdev->is_virtfn) {
5251 dev_err(dev, "can't probe a virtual function\n");
5252 return -EINVAL;
5253 }
5254
5255 /* when under a kdump kernel initiate a reset before enabling the
5256 * device in order to clear out any pending DMA transactions. These
5257 * transactions can cause some systems to machine check when doing
5258 * the pcim_enable_device() below.
5259 */
5260 if (is_kdump_kernel()) {
5261 pci_save_state(pdev);
5262 pci_clear_master(pdev);
5263 err = pcie_flr(pdev);
5264 if (err)
5265 return err;
5266 pci_restore_state(pdev);
5267 }
5268
5269 /* this driver uses devres, see
5270 * Documentation/driver-api/driver-model/devres.rst
5271 */
5272 err = pcim_enable_device(pdev);
5273 if (err)
5274 return err;
5275
5276 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
5277 if (err) {
5278 dev_err(dev, "BAR0 I/O map error %d\n", err);
5279 return err;
5280 }
5281
5282 pf = ice_allocate_pf(dev);
5283 if (!pf)
5284 return -ENOMEM;
5285
5286 /* initialize Auxiliary index to invalid value */
5287 pf->aux_idx = -1;
5288
5289 /* set up for high or low DMA */
5290 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
5291 if (err) {
5292 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
5293 return err;
5294 }
5295
5296 pci_set_master(pdev);
5297
5298 adapter = ice_adapter_get(pdev);
5299 if (IS_ERR(adapter))
5300 return PTR_ERR(adapter);
5301
5302 pf->pdev = pdev;
5303 pf->adapter = adapter;
5304 pci_set_drvdata(pdev, pf);
5305 set_bit(ICE_DOWN, pf->state);
5306 /* Disable service task until DOWN bit is cleared */
5307 set_bit(ICE_SERVICE_DIS, pf->state);
5308
5309 hw = &pf->hw;
5310 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
5311 pci_save_state(pdev);
5312
5313 hw->back = pf;
5314 hw->port_info = NULL;
5315 hw->vendor_id = pdev->vendor;
5316 hw->device_id = pdev->device;
5317 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
5318 hw->subsystem_vendor_id = pdev->subsystem_vendor;
5319 hw->subsystem_device_id = pdev->subsystem_device;
5320 hw->bus.device = PCI_SLOT(pdev->devfn);
5321 hw->bus.func = PCI_FUNC(pdev->devfn);
5322 ice_set_ctrlq_len(hw);
5323
5324 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
5325
5326 #ifndef CONFIG_DYNAMIC_DEBUG
5327 if (debug < -1)
5328 hw->debug_mask = debug;
5329 #endif
5330
5331 err = ice_init(pf);
5332 if (err)
5333 goto err_init;
5334
5335 devl_lock(priv_to_devlink(pf));
5336 err = ice_load(pf);
5337 if (err)
5338 goto err_load;
5339
5340 err = ice_init_devlink(pf);
5341 if (err)
5342 goto err_init_devlink;
5343 devl_unlock(priv_to_devlink(pf));
5344
5345 return 0;
5346
5347 err_init_devlink:
5348 ice_unload(pf);
5349 err_load:
5350 devl_unlock(priv_to_devlink(pf));
5351 ice_deinit(pf);
5352 err_init:
5353 ice_adapter_put(pdev);
5354 return err;
5355 }
5356
5357 /**
5358 * ice_set_wake - enable or disable Wake on LAN
5359 * @pf: pointer to the PF struct
5360 *
5361 * Simple helper for WoL control
5362 */
5363 static void ice_set_wake(struct ice_pf *pf)
5364 {
5365 struct ice_hw *hw = &pf->hw;
5366 bool wol = pf->wol_ena;
5367
5368 /* clear wake state, otherwise new wake events won't fire */
5369 wr32(hw, PFPM_WUS, U32_MAX);
5370
5371 /* enable / disable APM wake up, no RMW needed */
5372 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5373
5374 /* set magic packet filter enabled */
5375 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5376 }
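
/* Example (illustrative only, not part of the driver): pf->wol_ena is
 * normally driven from the ethtool set_wol callback, so the register writes
 * above are typically reached from user space, e.g.:
 *
 *   ethtool -s <ifname> wol g    # request magic-packet wake
 *   ethtool -s <ifname> wol d    # disable wake
 *
 * where the "g" flag corresponds to the PFPM_WUFC magic packet filter
 * programmed here.
 */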
5377
5378 /**
5379 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5380 * @pf: pointer to the PF struct
5381 *
5382 * Issue firmware command to enable multicast magic wake, making
5383 * sure that any locally administered address (LAA) is used for
5384 * wake, and that PF reset doesn't undo the LAA.
5385 */
5386 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5387 {
5388 struct device *dev = ice_pf_to_dev(pf);
5389 struct ice_hw *hw = &pf->hw;
5390 u8 mac_addr[ETH_ALEN];
5391 struct ice_vsi *vsi;
5392 int status;
5393 u8 flags;
5394
5395 if (!pf->wol_ena)
5396 return;
5397
5398 vsi = ice_get_main_vsi(pf);
5399 if (!vsi)
5400 return;
5401
5402 /* Get current MAC address in case it's an LAA */
5403 if (vsi->netdev)
5404 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5405 else
5406 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5407
5408 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5409 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5410 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5411
5412 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5413 if (status)
5414 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5415 status, ice_aq_str(hw->adminq.sq_last_status));
5416 }
5417
5418 /**
5419 * ice_remove - Device removal routine
5420 * @pdev: PCI device information struct
5421 */
5422 static void ice_remove(struct pci_dev *pdev)
5423 {
5424 struct ice_pf *pf = pci_get_drvdata(pdev);
5425 int i;
5426
5427 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5428 if (!ice_is_reset_in_progress(pf->state))
5429 break;
5430 msleep(100);
5431 }
5432
5433 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5434 set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5435 ice_free_vfs(pf);
5436 }
5437
5438 ice_hwmon_exit(pf);
5439
5440 ice_service_task_stop(pf);
5441 ice_aq_cancel_waiting_tasks(pf);
5442 set_bit(ICE_DOWN, pf->state);
5443
5444 if (!ice_is_safe_mode(pf))
5445 ice_remove_arfs(pf);
5446
5447 devl_lock(priv_to_devlink(pf));
5448 ice_dealloc_all_dynamic_ports(pf);
5449 ice_deinit_devlink(pf);
5450
5451 ice_unload(pf);
5452 devl_unlock(priv_to_devlink(pf));
5453
5454 ice_deinit(pf);
5455 ice_vsi_release_all(pf);
5456
5457 ice_setup_mc_magic_wake(pf);
5458 ice_set_wake(pf);
5459
5460 ice_adapter_put(pdev);
5461 }
5462
5463 /**
5464 * ice_shutdown - PCI callback for shutting down device
5465 * @pdev: PCI device information struct
5466 */
5467 static void ice_shutdown(struct pci_dev *pdev)
5468 {
5469 struct ice_pf *pf = pci_get_drvdata(pdev);
5470
5471 ice_remove(pdev);
5472
5473 if (system_state == SYSTEM_POWER_OFF) {
5474 pci_wake_from_d3(pdev, pf->wol_ena);
5475 pci_set_power_state(pdev, PCI_D3hot);
5476 }
5477 }
5478
5479 /**
5480 * ice_prepare_for_shutdown - prep for PCI shutdown
5481 * @pf: board private structure
5482 *
5483 * Inform or close all dependent features in prep for PCI device shutdown
5484 */
5485 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5486 {
5487 struct ice_hw *hw = &pf->hw;
5488 u32 v;
5489
5490 /* Notify VFs of impending reset */
5491 if (ice_check_sq_alive(hw, &hw->mailboxq))
5492 ice_vc_notify_reset(pf);
5493
5494 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5495
5496 /* disable the VSIs and their queues that are not already DOWN */
5497 ice_pf_dis_all_vsi(pf, false);
5498
5499 ice_for_each_vsi(pf, v)
5500 if (pf->vsi[v])
5501 pf->vsi[v]->vsi_num = 0;
5502
5503 ice_shutdown_all_ctrlq(hw, true);
5504 }
5505
5506 /**
5507 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5508 * @pf: board private structure to reinitialize
5509 *
5510  * This routine reinitializes the interrupt scheme that was cleared during
5511  * the power management suspend callback.
5512 *
5513 * This should be called during resume routine to re-allocate the q_vectors
5514 * and reacquire interrupts.
5515 */
5516 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5517 {
5518 struct device *dev = ice_pf_to_dev(pf);
5519 int ret, v;
5520
5521 /* Since we clear MSIX flag during suspend, we need to
5522 * set it back during resume...
5523 */
5524
5525 ret = ice_init_interrupt_scheme(pf);
5526 if (ret) {
5527 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5528 return ret;
5529 }
5530
5531 /* Remap vectors and rings, after successful re-init interrupts */
5532 ice_for_each_vsi(pf, v) {
5533 if (!pf->vsi[v])
5534 continue;
5535
5536 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5537 if (ret)
5538 goto err_reinit;
5539 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5540 rtnl_lock();
5541 ice_vsi_set_napi_queues(pf->vsi[v]);
5542 rtnl_unlock();
5543 }
5544
5545 ret = ice_req_irq_msix_misc(pf);
5546 if (ret) {
5547 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5548 ret);
5549 goto err_reinit;
5550 }
5551
5552 return 0;
5553
5554 err_reinit:
5555 while (v--)
5556 if (pf->vsi[v]) {
5557 rtnl_lock();
5558 ice_vsi_clear_napi_queues(pf->vsi[v]);
5559 rtnl_unlock();
5560 ice_vsi_free_q_vectors(pf->vsi[v]);
5561 }
5562
5563 return ret;
5564 }
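
/* Note: ice_suspend() below performs the inverse teardown (free the misc
 * vector, clear the NAPI queue mappings, free the q_vectors, then clear the
 * interrupt scheme), so the two paths must stay symmetric.
 */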
5565
5566 /**
5567 * ice_suspend
5568 * @dev: generic device information structure
5569 *
5570 * Power Management callback to quiesce the device and prepare
5571 * for D3 transition.
5572 */
5573 static int ice_suspend(struct device *dev)
5574 {
5575 struct pci_dev *pdev = to_pci_dev(dev);
5576 struct ice_pf *pf;
5577 int disabled, v;
5578
5579 pf = pci_get_drvdata(pdev);
5580
5581 if (!ice_pf_state_is_nominal(pf)) {
5582 dev_err(dev, "Device is not ready, no need to suspend it\n");
5583 return -EBUSY;
5584 }
5585
5586 /* Stop watchdog tasks until resume completion.
5587 * Even though it is most likely that the service task is
5588 * disabled if the device is suspended or down, the service task's
5589 * state is controlled by a different state bit, and we should
5590 * store and honor whatever state that bit is in at this point.
5591 */
5592 disabled = ice_service_task_stop(pf);
5593
5594 ice_deinit_rdma(pf);
5595
5596 	/* Already suspended? Then there is nothing to do */
5597 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5598 if (!disabled)
5599 ice_service_task_restart(pf);
5600 return 0;
5601 }
5602
5603 if (test_bit(ICE_DOWN, pf->state) ||
5604 ice_is_reset_in_progress(pf->state)) {
5605 dev_err(dev, "can't suspend device in reset or already down\n");
5606 if (!disabled)
5607 ice_service_task_restart(pf);
5608 return 0;
5609 }
5610
5611 ice_setup_mc_magic_wake(pf);
5612
5613 ice_prepare_for_shutdown(pf);
5614
5615 ice_set_wake(pf);
5616
5617 /* Free vectors, clear the interrupt scheme and release IRQs
5618 * for proper hibernation, especially with large number of CPUs.
5619 * Otherwise hibernation might fail when mapping all the vectors back
5620 * to CPU0.
5621 */
5622 ice_free_irq_msix_misc(pf);
5623 ice_for_each_vsi(pf, v) {
5624 if (!pf->vsi[v])
5625 continue;
5626 rtnl_lock();
5627 ice_vsi_clear_napi_queues(pf->vsi[v]);
5628 rtnl_unlock();
5629 ice_vsi_free_q_vectors(pf->vsi[v]);
5630 }
5631 ice_clear_interrupt_scheme(pf);
5632
5633 pci_save_state(pdev);
5634 pci_wake_from_d3(pdev, pf->wol_ena);
5635 pci_set_power_state(pdev, PCI_D3hot);
5636 return 0;
5637 }
5638
5639 /**
5640 * ice_resume - PM callback for waking up from D3
5641 * @dev: generic device information structure
5642 */
5643 static int ice_resume(struct device *dev)
5644 {
5645 struct pci_dev *pdev = to_pci_dev(dev);
5646 enum ice_reset_req reset_type;
5647 struct ice_pf *pf;
5648 struct ice_hw *hw;
5649 int ret;
5650
5651 pci_set_power_state(pdev, PCI_D0);
5652 pci_restore_state(pdev);
5653 pci_save_state(pdev);
5654
5655 if (!pci_device_is_present(pdev))
5656 return -ENODEV;
5657
5658 ret = pci_enable_device_mem(pdev);
5659 if (ret) {
5660 dev_err(dev, "Cannot enable device after suspend\n");
5661 return ret;
5662 }
5663
5664 pf = pci_get_drvdata(pdev);
5665 hw = &pf->hw;
5666
5667 pf->wakeup_reason = rd32(hw, PFPM_WUS);
5668 ice_print_wake_reason(pf);
5669
5670 /* We cleared the interrupt scheme when we suspended, so we need to
5671 * restore it now to resume device functionality.
5672 */
5673 ret = ice_reinit_interrupt_scheme(pf);
5674 if (ret)
5675 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5676
5677 ret = ice_init_rdma(pf);
5678 if (ret)
5679 dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
5680 ret);
5681
5682 clear_bit(ICE_DOWN, pf->state);
5683 /* Now perform PF reset and rebuild */
5684 reset_type = ICE_RESET_PFR;
5685 /* re-enable service task for reset, but allow reset to schedule it */
5686 clear_bit(ICE_SERVICE_DIS, pf->state);
5687
5688 if (ice_schedule_reset(pf, reset_type))
5689 dev_err(dev, "Reset during resume failed.\n");
5690
5691 clear_bit(ICE_SUSPENDED, pf->state);
5692 ice_service_task_restart(pf);
5693
5694 /* Restart the service task */
5695 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5696
5697 return 0;
5698 }
5699
5700 /**
5701 * ice_pci_err_detected - warning that PCI error has been detected
5702 * @pdev: PCI device information struct
5703 * @err: the type of PCI error
5704 *
5705 * Called to warn that something happened on the PCI bus and the error handling
5706 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
5707 */
5708 static pci_ers_result_t
5709 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5710 {
5711 struct ice_pf *pf = pci_get_drvdata(pdev);
5712
5713 if (!pf) {
5714 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5715 __func__, err);
5716 return PCI_ERS_RESULT_DISCONNECT;
5717 }
5718
5719 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5720 ice_service_task_stop(pf);
5721
5722 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5723 set_bit(ICE_PFR_REQ, pf->state);
5724 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5725 }
5726 }
5727
5728 return PCI_ERS_RESULT_NEED_RESET;
5729 }
5730
5731 /**
5732 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5733 * @pdev: PCI device information struct
5734 *
5735 * Called to determine if the driver can recover from the PCI slot reset by
5736 * using a register read to determine if the device is recoverable.
5737 */
5738 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5739 {
5740 struct ice_pf *pf = pci_get_drvdata(pdev);
5741 pci_ers_result_t result;
5742 int err;
5743 u32 reg;
5744
5745 err = pci_enable_device_mem(pdev);
5746 if (err) {
5747 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5748 err);
5749 result = PCI_ERS_RESULT_DISCONNECT;
5750 } else {
5751 pci_set_master(pdev);
5752 pci_restore_state(pdev);
5753 pci_save_state(pdev);
5754 pci_wake_from_d3(pdev, false);
5755
5756 /* Check for life */
5757 reg = rd32(&pf->hw, GLGEN_RTRIG);
5758 if (!reg)
5759 result = PCI_ERS_RESULT_RECOVERED;
5760 else
5761 result = PCI_ERS_RESULT_DISCONNECT;
5762 }
5763
5764 return result;
5765 }
5766
5767 /**
5768 * ice_pci_err_resume - restart operations after PCI error recovery
5769 * @pdev: PCI device information struct
5770 *
5771 * Called to allow the driver to bring things back up after PCI error and/or
5772 * reset recovery have finished
5773 */
5774 static void ice_pci_err_resume(struct pci_dev *pdev)
5775 {
5776 struct ice_pf *pf = pci_get_drvdata(pdev);
5777
5778 if (!pf) {
5779 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5780 __func__);
5781 return;
5782 }
5783
5784 if (test_bit(ICE_SUSPENDED, pf->state)) {
5785 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5786 __func__);
5787 return;
5788 }
5789
5790 ice_restore_all_vfs_msi_state(pf);
5791
5792 ice_do_reset(pf, ICE_RESET_PFR);
5793 ice_service_task_restart(pf);
5794 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5795 }
5796
5797 /**
5798 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5799 * @pdev: PCI device information struct
5800 */
5801 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5802 {
5803 struct ice_pf *pf = pci_get_drvdata(pdev);
5804
5805 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5806 ice_service_task_stop(pf);
5807
5808 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5809 set_bit(ICE_PFR_REQ, pf->state);
5810 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5811 }
5812 }
5813 }
5814
5815 /**
5816 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5817 * @pdev: PCI device information struct
5818 */
5819 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5820 {
5821 ice_pci_err_resume(pdev);
5822 }
5823
5824 /* ice_pci_tbl - PCI Device ID Table
5825 *
5826 * Wildcard entries (PCI_ANY_ID) should come last
5827 * Last entry must be all 0s
5828 *
5829 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5830 * Class, Class Mask, private data (not used) }
5831 */
5832 static const struct pci_device_id ice_pci_tbl[] = {
5833 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) },
5834 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) },
5835 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) },
5836 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) },
5837 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) },
5838 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) },
5839 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) },
5840 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) },
5841 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) },
5842 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) },
5843 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) },
5844 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) },
5845 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) },
5846 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) },
5847 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) },
5848 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) },
5849 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) },
5850 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) },
5851 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) },
5852 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) },
5853 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) },
5854 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) },
5855 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) },
5856 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
5857 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
5858 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
5859 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), },
5860 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), },
5861 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), },
5862 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), },
5863 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) },
5864 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) },
5865 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) },
5866 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) },
5867 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), },
5868 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), },
5869 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), },
5870 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), },
5871 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), },
5872 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), },
5873 /* required last entry */
5874 {}
5875 };
5876 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5877
5878 static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
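
/* DEFINE_SIMPLE_DEV_PM_OPS() wires ice_suspend/ice_resume to the
 * suspend/resume, freeze/thaw and poweroff/restore system-sleep hooks;
 * pm_sleep_ptr() in ice_driver below drops the table entirely when
 * CONFIG_PM_SLEEP is not set.
 */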
5879
5880 static const struct pci_error_handlers ice_pci_err_handler = {
5881 .error_detected = ice_pci_err_detected,
5882 .slot_reset = ice_pci_err_slot_reset,
5883 .reset_prepare = ice_pci_err_reset_prepare,
5884 .reset_done = ice_pci_err_reset_done,
5885 .resume = ice_pci_err_resume
5886 };
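
/* These callbacks follow the AER recovery sequence described in
 * Documentation/PCI/pci-error-recovery.rst: .error_detected quiesces the PF
 * and requests a reset, .slot_reset re-enables the device and reports
 * whether it survived, and .resume restarts normal operation via a PF reset
 * once recovery completes.
 */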
5887
5888 static struct pci_driver ice_driver = {
5889 .name = KBUILD_MODNAME,
5890 .id_table = ice_pci_tbl,
5891 .probe = ice_probe,
5892 .remove = ice_remove,
5893 .driver.pm = pm_sleep_ptr(&ice_pm_ops),
5894 .shutdown = ice_shutdown,
5895 .sriov_configure = ice_sriov_configure,
5896 .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
5897 .sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count,
5898 .err_handler = &ice_pci_err_handler
5899 };
5900
5901 /**
5902 * ice_module_init - Driver registration routine
5903 *
5904 * ice_module_init is the first routine called when the driver is
5905 * loaded. All it does is register with the PCI subsystem.
5906 */
5907 static int __init ice_module_init(void)
5908 {
5909 int status = -ENOMEM;
5910
5911 pr_info("%s\n", ice_driver_string);
5912 pr_info("%s\n", ice_copyright);
5913
5914 ice_adv_lnk_speed_maps_init();
5915
5916 ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
5917 if (!ice_wq) {
5918 pr_err("Failed to create workqueue\n");
5919 return status;
5920 }
5921
5922 ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
5923 if (!ice_lag_wq) {
5924 pr_err("Failed to create LAG workqueue\n");
5925 goto err_dest_wq;
5926 }
5927
5928 ice_debugfs_init();
5929
5930 status = pci_register_driver(&ice_driver);
5931 if (status) {
5932 pr_err("failed to register PCI driver, err %d\n", status);
5933 goto err_dest_lag_wq;
5934 }
5935
5936 status = ice_sf_driver_register();
5937 if (status) {
5938 pr_err("Failed to register SF driver, err %d\n", status);
5939 goto err_sf_driver;
5940 }
5941
5942 return 0;
5943
5944 err_sf_driver:
5945 pci_unregister_driver(&ice_driver);
5946 err_dest_lag_wq:
5947 destroy_workqueue(ice_lag_wq);
5948 ice_debugfs_exit();
5949 err_dest_wq:
5950 destroy_workqueue(ice_wq);
5951 return status;
5952 }
5953 module_init(ice_module_init);
5954
5955 /**
5956 * ice_module_exit - Driver exit cleanup routine
5957 *
5958 * ice_module_exit is called just before the driver is removed
5959 * from memory.
5960 */
5961 static void __exit ice_module_exit(void)
5962 {
5963 ice_sf_driver_unregister();
5964 pci_unregister_driver(&ice_driver);
5965 ice_debugfs_exit();
5966 destroy_workqueue(ice_wq);
5967 destroy_workqueue(ice_lag_wq);
5968 pr_info("module unloaded\n");
5969 }
5970 module_exit(ice_module_exit);
5971
5972 /**
5973 * ice_set_mac_address - NDO callback to set MAC address
5974 * @netdev: network interface device structure
5975 * @pi: pointer to an address structure
5976 *
5977 * Returns 0 on success, negative on failure
5978 */
5979 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5980 {
5981 struct ice_netdev_priv *np = netdev_priv(netdev);
5982 struct ice_vsi *vsi = np->vsi;
5983 struct ice_pf *pf = vsi->back;
5984 struct ice_hw *hw = &pf->hw;
5985 struct sockaddr *addr = pi;
5986 u8 old_mac[ETH_ALEN];
5987 u8 flags = 0;
5988 u8 *mac;
5989 int err;
5990
5991 mac = (u8 *)addr->sa_data;
5992
5993 if (!is_valid_ether_addr(mac))
5994 return -EADDRNOTAVAIL;
5995
5996 if (test_bit(ICE_DOWN, pf->state) ||
5997 ice_is_reset_in_progress(pf->state)) {
5998 netdev_err(netdev, "can't set mac %pM. device not ready\n",
5999 mac);
6000 return -EBUSY;
6001 }
6002
6003 if (ice_chnl_dmac_fltr_cnt(pf)) {
6004 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
6005 mac);
6006 return -EAGAIN;
6007 }
6008
6009 netif_addr_lock_bh(netdev);
6010 ether_addr_copy(old_mac, netdev->dev_addr);
6011 /* change the netdev's MAC address */
6012 eth_hw_addr_set(netdev, mac);
6013 netif_addr_unlock_bh(netdev);
6014
6015 /* Clean up old MAC filter. Not an error if old filter doesn't exist */
6016 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
6017 if (err && err != -ENOENT) {
6018 err = -EADDRNOTAVAIL;
6019 goto err_update_filters;
6020 }
6021
6022 /* Add filter for new MAC. If filter exists, return success */
6023 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
6024 if (err == -EEXIST) {
6025 /* Although this MAC filter is already present in hardware it's
6026 * possible in some cases (e.g. bonding) that dev_addr was
6027 * modified outside of the driver and needs to be restored back
6028 * to this value.
6029 */
6030 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
6031
6032 return 0;
6033 } else if (err) {
6034 /* error if the new filter addition failed */
6035 err = -EADDRNOTAVAIL;
6036 }
6037
6038 err_update_filters:
6039 if (err) {
6040 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
6041 mac);
6042 netif_addr_lock_bh(netdev);
6043 eth_hw_addr_set(netdev, old_mac);
6044 netif_addr_unlock_bh(netdev);
6045 return err;
6046 }
6047
6048 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
6049 netdev->dev_addr);
6050
6051 /* write new MAC address to the firmware */
6052 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
6053 err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
6054 if (err) {
6055 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
6056 mac, err);
6057 }
6058 return 0;
6059 }
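
/* Example (illustrative only): this ndo is reached through
 * dev_set_mac_address(), e.g. from user space:
 *
 *   ip link set dev <ifname> address 00:11:22:33:44:55
 */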
6060
6061 /**
6062 * ice_set_rx_mode - NDO callback to set the netdev filters
6063 * @netdev: network interface device structure
6064 */
6065 static void ice_set_rx_mode(struct net_device *netdev)
6066 {
6067 struct ice_netdev_priv *np = netdev_priv(netdev);
6068 struct ice_vsi *vsi = np->vsi;
6069
6070 if (!vsi || ice_is_switchdev_running(vsi->back))
6071 return;
6072
6073 	/* Set the flags to synchronize filters;
6074 * ndo_set_rx_mode may be triggered even without a change in netdev
6075 * flags
6076 */
6077 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
6078 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
6079 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
6080
6081 /* schedule our worker thread which will take care of
6082 * applying the new filter changes
6083 */
6084 ice_service_task_schedule(vsi->back);
6085 }
6086
6087 /**
6088 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
6089 * @netdev: network interface device structure
6090 * @queue_index: Queue ID
6091 * @maxrate: maximum bandwidth in Mbps
6092 */
6093 static int
6094 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
6095 {
6096 struct ice_netdev_priv *np = netdev_priv(netdev);
6097 struct ice_vsi *vsi = np->vsi;
6098 u16 q_handle;
6099 int status;
6100 u8 tc;
6101
6102 /* Validate maxrate requested is within permitted range */
6103 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
6104 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
6105 maxrate, queue_index);
6106 return -EINVAL;
6107 }
6108
6109 q_handle = vsi->tx_rings[queue_index]->q_handle;
6110 tc = ice_dcb_get_tc(vsi, queue_index);
6111
6112 vsi = ice_locate_vsi_using_queue(vsi, queue_index);
6113 if (!vsi) {
6114 netdev_err(netdev, "Invalid VSI for given queue %d\n",
6115 queue_index);
6116 return -EINVAL;
6117 }
6118
6119 /* Set BW back to default, when user set maxrate to 0 */
6120 if (!maxrate)
6121 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
6122 q_handle, ICE_MAX_BW);
6123 else
6124 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
6125 q_handle, ICE_MAX_BW, maxrate * 1000);
6126 if (status)
6127 netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
6128 status);
6129
6130 return status;
6131 }
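
/* Example (illustrative only): .ndo_set_tx_maxrate is invoked from the
 * per-queue sysfs attribute; e.g. to cap Tx queue 0 at 500 Mbps:
 *
 *   echo 500 > /sys/class/net/<ifname>/queues/tx-0/tx_maxrate
 */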
6132
6133 /**
6134 * ice_fdb_add - add an entry to the hardware database
6135 * @ndm: the input from the stack
6136 * @tb: pointer to array of nladdr (unused)
6137 * @dev: the net device pointer
6138 * @addr: the MAC address entry being added
6139 * @vid: VLAN ID
6140 * @flags: instructions from stack about fdb operation
6141 * @extack: netlink extended ack
6142 */
6143 static int
6144 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
6145 struct net_device *dev, const unsigned char *addr, u16 vid,
6146 u16 flags, struct netlink_ext_ack __always_unused *extack)
6147 {
6148 int err;
6149
6150 if (vid) {
6151 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
6152 return -EINVAL;
6153 }
6154 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
6155 netdev_err(dev, "FDB only supports static addresses\n");
6156 return -EINVAL;
6157 }
6158
6159 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
6160 err = dev_uc_add_excl(dev, addr);
6161 else if (is_multicast_ether_addr(addr))
6162 err = dev_mc_add_excl(dev, addr);
6163 else
6164 err = -EINVAL;
6165
6166 /* Only return duplicate errors if NLM_F_EXCL is set */
6167 if (err == -EEXIST && !(flags & NLM_F_EXCL))
6168 err = 0;
6169
6170 return err;
6171 }
6172
6173 /**
6174 * ice_fdb_del - delete an entry from the hardware database
6175 * @ndm: the input from the stack
6176 * @tb: pointer to array of nladdr (unused)
6177 * @dev: the net device pointer
6178 * @addr: the MAC address entry being added
6179 * @vid: VLAN ID
6180 * @extack: netlink extended ack
6181 */
6182 static int
6183 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
6184 struct net_device *dev, const unsigned char *addr,
6185 __always_unused u16 vid, struct netlink_ext_ack *extack)
6186 {
6187 int err;
6188
6189 if (ndm->ndm_state & NUD_PERMANENT) {
6190 netdev_err(dev, "FDB only supports static addresses\n");
6191 return -EINVAL;
6192 }
6193
6194 if (is_unicast_ether_addr(addr))
6195 err = dev_uc_del(dev, addr);
6196 else if (is_multicast_ether_addr(addr))
6197 err = dev_mc_del(dev, addr);
6198 else
6199 err = -EINVAL;
6200
6201 return err;
6202 }
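
/* Example (illustrative only): the fdb_add/fdb_del ndos above are reached
 * through the "self" (device-managed) FDB path, e.g.:
 *
 *   bridge fdb add 01:00:5e:00:00:42 dev <ifname> self permanent
 *   bridge fdb del 01:00:5e:00:00:42 dev <ifname> self
 *
 * The "permanent" keyword maps to NUD_PERMANENT, which ice_fdb_add()
 * requires and ice_fdb_del() rejects.
 */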
6203
6204 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
6205 NETIF_F_HW_VLAN_CTAG_TX | \
6206 NETIF_F_HW_VLAN_STAG_RX | \
6207 NETIF_F_HW_VLAN_STAG_TX)
6208
6209 #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
6210 NETIF_F_HW_VLAN_STAG_RX)
6211
6212 #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
6213 NETIF_F_HW_VLAN_STAG_FILTER)
6214
6215 /**
6216 * ice_fix_features - fix the netdev features flags based on device limitations
6217 * @netdev: ptr to the netdev that flags are being fixed on
6218 * @features: features that need to be checked and possibly fixed
6219 *
6220 * Make sure any fixups are made to features in this callback. This enables the
6221 * driver to not have to check unsupported configurations throughout the driver
6222  * because that's the responsibility of this callback.
6223 *
6224 * Single VLAN Mode (SVM) Supported Features:
6225 * NETIF_F_HW_VLAN_CTAG_FILTER
6226 * NETIF_F_HW_VLAN_CTAG_RX
6227 * NETIF_F_HW_VLAN_CTAG_TX
6228 *
6229 * Double VLAN Mode (DVM) Supported Features:
6230 * NETIF_F_HW_VLAN_CTAG_FILTER
6231 * NETIF_F_HW_VLAN_CTAG_RX
6232 * NETIF_F_HW_VLAN_CTAG_TX
6233 *
6234 * NETIF_F_HW_VLAN_STAG_FILTER
6235  * NETIF_F_HW_VLAN_STAG_RX
6236  * NETIF_F_HW_VLAN_STAG_TX
6237 *
6238 * Features that need fixing:
6239 * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
6240  * These are mutually exclusive as the VSI context cannot support multiple
6241 * VLAN ethertypes simultaneously for stripping and/or insertion. If this
6242 * is not done, then default to clearing the requested STAG offload
6243 * settings.
6244 *
6245 * All supported filtering has to be enabled or disabled together. For
6246 * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
6247 * together. If this is not done, then default to VLAN filtering disabled.
6248 * These are mutually exclusive as there is currently no way to
6249 * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
6250 * prune rules.
6251 */
6252 static netdev_features_t
6253 ice_fix_features(struct net_device *netdev, netdev_features_t features)
6254 {
6255 struct ice_netdev_priv *np = netdev_priv(netdev);
6256 netdev_features_t req_vlan_fltr, cur_vlan_fltr;
6257 bool cur_ctag, cur_stag, req_ctag, req_stag;
6258
6259 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
6260 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6261 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6262
6263 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
6264 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6265 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6266
6267 if (req_vlan_fltr != cur_vlan_fltr) {
6268 if (ice_is_dvm_ena(&np->vsi->back->hw)) {
6269 if (req_ctag && req_stag) {
6270 features |= NETIF_VLAN_FILTERING_FEATURES;
6271 } else if (!req_ctag && !req_stag) {
6272 features &= ~NETIF_VLAN_FILTERING_FEATURES;
6273 } else if ((!cur_ctag && req_ctag && !cur_stag) ||
6274 (!cur_stag && req_stag && !cur_ctag)) {
6275 features |= NETIF_VLAN_FILTERING_FEATURES;
6276 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
6277 } else if ((cur_ctag && !req_ctag && cur_stag) ||
6278 (cur_stag && !req_stag && cur_ctag)) {
6279 features &= ~NETIF_VLAN_FILTERING_FEATURES;
6280 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
6281 }
6282 } else {
6283 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
6284 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
6285
6286 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
6287 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6288 }
6289 }
6290
6291 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
6292 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
6293 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
6294 features &= ~(NETIF_F_HW_VLAN_STAG_RX |
6295 NETIF_F_HW_VLAN_STAG_TX);
6296 }
6297
6298 if (!(netdev->features & NETIF_F_RXFCS) &&
6299 (features & NETIF_F_RXFCS) &&
6300 (features & NETIF_VLAN_STRIPPING_FEATURES) &&
6301 !ice_vsi_has_non_zero_vlans(np->vsi)) {
6302 netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
6303 features &= ~NETIF_VLAN_STRIPPING_FEATURES;
6304 }
6305
6306 return features;
6307 }
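
/* Example (illustrative only): in DVM the two filtering bits are coupled,
 * so requesting only one of them via ethtool ends up toggling both:
 *
 *   ethtool -K <ifname> rx-vlan-filter on
 *   # in DVM this also turns on rx-vlan-stag-filter, with a warning
 *
 * The feature strings correspond to NETIF_F_HW_VLAN_CTAG_FILTER and
 * NETIF_F_HW_VLAN_STAG_FILTER respectively.
 */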
6308
6309 /**
6310 * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
6311 * @vsi: PF's VSI
6312 * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order
6313 *
6314 * Store current stripped VLAN proto in ring packet context,
6315 * so it can be accessed more efficiently by packet processing code.
6316 */
6317 static void
6318 ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype)
6319 {
6320 u16 i;
6321
6322 ice_for_each_alloc_rxq(vsi, i)
6323 vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype;
6324 }
6325
6326 /**
6327 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6328 * @vsi: PF's VSI
6329 * @features: features used to determine VLAN offload settings
6330 *
6331 * First, determine the vlan_ethertype based on the VLAN offload bits in
6332 * features. Then determine if stripping and insertion should be enabled or
6333 * disabled. Finally enable or disable VLAN stripping and insertion.
6334 */
6335 static int
6336 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6337 {
6338 bool enable_stripping = true, enable_insertion = true;
6339 struct ice_vsi_vlan_ops *vlan_ops;
6340 int strip_err = 0, insert_err = 0;
6341 u16 vlan_ethertype = 0;
6342
6343 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6344
6345 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
6346 vlan_ethertype = ETH_P_8021AD;
6347 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
6348 vlan_ethertype = ETH_P_8021Q;
6349
6350 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
6351 enable_stripping = false;
6352 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
6353 enable_insertion = false;
6354
6355 if (enable_stripping)
6356 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6357 else
6358 strip_err = vlan_ops->dis_stripping(vsi);
6359
6360 if (enable_insertion)
6361 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6362 else
6363 insert_err = vlan_ops->dis_insertion(vsi);
6364
6365 if (strip_err || insert_err)
6366 return -EIO;
6367
6368 ice_set_rx_rings_vlan_proto(vsi, enable_stripping ?
6369 htons(vlan_ethertype) : 0);
6370
6371 return 0;
6372 }
6373
6374 /**
6375 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6376 * @vsi: PF's VSI
6377 * @features: features used to determine VLAN filtering settings
6378 *
6379 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
6380 * features.
6381 */
6382 static int
6383 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6384 {
6385 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6386 int err = 0;
6387
6388 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
6389 * if either bit is set
6390 */
6391 if (features &
6392 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
6393 err = vlan_ops->ena_rx_filtering(vsi);
6394 else
6395 err = vlan_ops->dis_rx_filtering(vsi);
6396
6397 return err;
6398 }
6399
6400 /**
6401 * ice_set_vlan_features - set VLAN settings based on suggested feature set
6402 * @netdev: ptr to the netdev being adjusted
6403 * @features: the feature set that the stack is suggesting
6404 *
6405 * Only update VLAN settings if the requested_vlan_features are different than
6406 * the current_vlan_features.
6407 */
6408 static int
6409 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6410 {
6411 netdev_features_t current_vlan_features, requested_vlan_features;
6412 struct ice_netdev_priv *np = netdev_priv(netdev);
6413 struct ice_vsi *vsi = np->vsi;
6414 int err;
6415
6416 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6417 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6418 if (current_vlan_features ^ requested_vlan_features) {
6419 if ((features & NETIF_F_RXFCS) &&
6420 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6421 dev_err(ice_pf_to_dev(vsi->back),
6422 "To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6423 return -EIO;
6424 }
6425
6426 err = ice_set_vlan_offload_features(vsi, features);
6427 if (err)
6428 return err;
6429 }
6430
6431 current_vlan_features = netdev->features &
6432 NETIF_VLAN_FILTERING_FEATURES;
6433 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6434 if (current_vlan_features ^ requested_vlan_features) {
6435 err = ice_set_vlan_filtering_features(vsi, features);
6436 if (err)
6437 return err;
6438 }
6439
6440 return 0;
6441 }
6442
6443 /**
6444 * ice_set_loopback - turn on/off loopback mode on underlying PF
6445 * @vsi: ptr to VSI
6446 * @ena: flag to indicate the on/off setting
6447 */
6448 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6449 {
6450 bool if_running = netif_running(vsi->netdev);
6451 int ret;
6452
6453 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6454 ret = ice_down(vsi);
6455 if (ret) {
6456 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6457 return ret;
6458 }
6459 }
6460 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6461 if (ret)
6462 netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6463 if (if_running)
6464 ret = ice_up(vsi);
6465
6466 return ret;
6467 }
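
/* Example (illustrative only): loopback is toggled through the netdev
 * feature bit, e.g.:
 *
 *   ethtool -K <ifname> loopback on
 *
 * which lands in ice_set_features() below and ends up calling this helper.
 */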
6468
6469 /**
6470 * ice_set_features - set the netdev feature flags
6471 * @netdev: ptr to the netdev being adjusted
6472 * @features: the feature set that the stack is suggesting
6473 */
6474 static int
6475 ice_set_features(struct net_device *netdev, netdev_features_t features)
6476 {
6477 netdev_features_t changed = netdev->features ^ features;
6478 struct ice_netdev_priv *np = netdev_priv(netdev);
6479 struct ice_vsi *vsi = np->vsi;
6480 struct ice_pf *pf = vsi->back;
6481 int ret = 0;
6482
6483 /* Don't set any netdev advanced features with device in Safe Mode */
6484 if (ice_is_safe_mode(pf)) {
6485 dev_err(ice_pf_to_dev(pf),
6486 "Device is in Safe Mode - not enabling advanced netdev features\n");
6487 return ret;
6488 }
6489
6490 /* Do not change setting during reset */
6491 if (ice_is_reset_in_progress(pf->state)) {
6492 dev_err(ice_pf_to_dev(pf),
6493 "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6494 return -EBUSY;
6495 }
6496
6497 /* Multiple features can be changed in one call so keep features in
6498 * separate if/else statements to guarantee each feature is checked
6499 */
6500 if (changed & NETIF_F_RXHASH)
6501 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6502
6503 ret = ice_set_vlan_features(netdev, features);
6504 if (ret)
6505 return ret;
6506
6507 /* Turn on receive of FCS aka CRC, and after setting this
6508 * flag the packet data will have the 4 byte CRC appended
6509 */
6510 if (changed & NETIF_F_RXFCS) {
6511 if ((features & NETIF_F_RXFCS) &&
6512 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6513 dev_err(ice_pf_to_dev(vsi->back),
6514 "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6515 return -EIO;
6516 }
6517
6518 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6519 ret = ice_down_up(vsi);
6520 if (ret)
6521 return ret;
6522 }
6523
6524 if (changed & NETIF_F_NTUPLE) {
6525 bool ena = !!(features & NETIF_F_NTUPLE);
6526
6527 ice_vsi_manage_fdir(vsi, ena);
6528 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6529 }
6530
6531 /* don't turn off hw_tc_offload when ADQ is already enabled */
6532 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6533 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6534 return -EACCES;
6535 }
6536
6537 if (changed & NETIF_F_HW_TC) {
6538 bool ena = !!(features & NETIF_F_HW_TC);
6539
6540 ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6541 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6542 }
6543
6544 if (changed & NETIF_F_LOOPBACK)
6545 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6546
6547 return ret;
6548 }
6549
6550 /**
6551 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6552 * @vsi: VSI to setup VLAN properties for
6553 */
6554 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6555 {
6556 int err;
6557
6558 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6559 if (err)
6560 return err;
6561
6562 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6563 if (err)
6564 return err;
6565
6566 return ice_vsi_add_vlan_zero(vsi);
6567 }
6568
6569 /**
6570 * ice_vsi_cfg_lan - Setup the VSI lan related config
6571 * @vsi: the VSI being configured
6572 *
6573 * Return 0 on success and negative value on error
6574 */
6575 int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6576 {
6577 int err;
6578
6579 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6580 ice_set_rx_mode(vsi->netdev);
6581
6582 err = ice_vsi_vlan_setup(vsi);
6583 if (err)
6584 return err;
6585 }
6586 ice_vsi_cfg_dcb_rings(vsi);
6587
6588 err = ice_vsi_cfg_lan_txqs(vsi);
6589 if (!err && ice_is_xdp_ena_vsi(vsi))
6590 err = ice_vsi_cfg_xdp_txqs(vsi);
6591 if (!err)
6592 err = ice_vsi_cfg_rxqs(vsi);
6593
6594 return err;
6595 }
6596
6597 /* THEORY OF MODERATION:
6598  * The hardware driven by the ice driver works differently from the hardware
6599  * DIMLIB was originally made for. ice hardware doesn't have packet count limits that
6600 * can trigger an interrupt, but it *does* have interrupt rate limit support,
6601 * which is hard-coded to a limit of 250,000 ints/second.
6602 * If not using dynamic moderation, the INTRL value can be modified
6603 * by ethtool rx-usecs-high.
6604 */
6605 struct ice_dim {
6606 	/* the throttle rate for interrupts: basically the worst-case delay before
6607 	 * an initial interrupt fires; the value is stored in microseconds.
6608 */
6609 u16 itr;
6610 };
6611
6612 /* Make a different profile for Rx that doesn't allow quite so aggressive
6613 * moderation at the high end (it maxes out at 126us or about 8k interrupts a
6614  * second).
6615 */
6616 static const struct ice_dim rx_profile[] = {
6617 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6618 {8}, /* 125,000 ints/s */
6619 {16}, /* 62,500 ints/s */
6620 {62}, /* 16,129 ints/s */
6621 {126} /* 7,936 ints/s */
6622 };
6623
6624 /* The transmit profile, which has the same sorts of values
6625 * as the previous struct
6626 */
6627 static const struct ice_dim tx_profile[] = {
6628 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6629 {8}, /* 125,000 ints/s */
6630 {40}, /* 16,125 ints/s */
6631 {128}, /* 7,812 ints/s */
6632 {256} /* 3,906 ints/s */
6633 };
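
/* The profile entries translate to interrupt rates as roughly
 * rate = 1,000,000 / itr_us; e.g. {8} -> 1,000,000 / 8 = 125,000 ints/s and
 * {62} -> ~16,129 ints/s, matching the annotations above. The first entry
 * would allow 500,000 ints/s but is capped at 250,000 by the hard-coded
 * INTRL rate limit.
 */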
6634
6635 static void ice_tx_dim_work(struct work_struct *work)
6636 {
6637 struct ice_ring_container *rc;
6638 struct dim *dim;
6639 u16 itr;
6640
6641 dim = container_of(work, struct dim, work);
6642 rc = dim->priv;
6643
6644 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6645
6646 /* look up the values in our local table */
6647 itr = tx_profile[dim->profile_ix].itr;
6648
6649 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6650 ice_write_itr(rc, itr);
6651
6652 dim->state = DIM_START_MEASURE;
6653 }
6654
6655 static void ice_rx_dim_work(struct work_struct *work)
6656 {
6657 struct ice_ring_container *rc;
6658 struct dim *dim;
6659 u16 itr;
6660
6661 dim = container_of(work, struct dim, work);
6662 rc = dim->priv;
6663
6664 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6665
6666 /* look up the values in our local table */
6667 itr = rx_profile[dim->profile_ix].itr;
6668
6669 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6670 ice_write_itr(rc, itr);
6671
6672 dim->state = DIM_START_MEASURE;
6673 }
6674
6675 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6676
6677 /**
6678 * ice_init_moderation - set up interrupt moderation
6679 * @q_vector: the vector containing rings to be configured
6680 *
6681 * Set up interrupt moderation registers, with the intent to do the right thing
6682 * when called from reset or from probe, and whether or not dynamic moderation
6683 * is enabled or not. Take special care to write all the registers in both
6684 * dynamic moderation mode or not in order to make sure hardware is in a known
6685 * state.
6686 */
6687 static void ice_init_moderation(struct ice_q_vector *q_vector)
6688 {
6689 struct ice_ring_container *rc;
6690 bool tx_dynamic, rx_dynamic;
6691
6692 rc = &q_vector->tx;
6693 INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6694 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6695 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6696 rc->dim.priv = rc;
6697 tx_dynamic = ITR_IS_DYNAMIC(rc);
6698
6699 /* set the initial TX ITR to match the above */
6700 ice_write_itr(rc, tx_dynamic ?
6701 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6702
6703 rc = &q_vector->rx;
6704 INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6705 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6706 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6707 rc->dim.priv = rc;
6708 rx_dynamic = ITR_IS_DYNAMIC(rc);
6709
6710 /* set the initial RX ITR to match the above */
6711 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6712 rc->itr_setting);
6713
6714 ice_set_q_vector_intrl(q_vector);
6715 }
6716
6717 /**
6718 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6719 * @vsi: the VSI being configured
6720 */
6721 static void ice_napi_enable_all(struct ice_vsi *vsi)
6722 {
6723 int q_idx;
6724
6725 if (!vsi->netdev)
6726 return;
6727
6728 ice_for_each_q_vector(vsi, q_idx) {
6729 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6730
6731 ice_init_moderation(q_vector);
6732
6733 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6734 napi_enable(&q_vector->napi);
6735 }
6736 }
6737
6738 /**
6739 * ice_up_complete - Finish the last steps of bringing up a connection
6740 * @vsi: The VSI being configured
6741 *
6742 * Return 0 on success and negative value on error
6743 */
6744 static int ice_up_complete(struct ice_vsi *vsi)
6745 {
6746 struct ice_pf *pf = vsi->back;
6747 int err;
6748
6749 ice_vsi_cfg_msix(vsi);
6750
6751 /* Enable only Rx rings, Tx rings were enabled by the FW when the
6752 * Tx queue group list was configured and the context bits were
6753 * programmed using ice_vsi_cfg_txqs
6754 */
6755 err = ice_vsi_start_all_rx_rings(vsi);
6756 if (err)
6757 return err;
6758
6759 clear_bit(ICE_VSI_DOWN, vsi->state);
6760 ice_napi_enable_all(vsi);
6761 ice_vsi_ena_irq(vsi);
6762
6763 if (vsi->port_info &&
6764 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6765 ((vsi->netdev && (vsi->type == ICE_VSI_PF ||
6766 vsi->type == ICE_VSI_SF)))) {
6767 ice_print_link_msg(vsi, true);
6768 netif_tx_start_all_queues(vsi->netdev);
6769 netif_carrier_on(vsi->netdev);
6770 ice_ptp_link_change(pf, pf->hw.pf_id, true);
6771 }
6772
6773 /* Perform an initial read of the statistics registers now to
6774 * set the baseline so counters are ready when interface is up
6775 */
6776 ice_update_eth_stats(vsi);
6777
6778 if (vsi->type == ICE_VSI_PF)
6779 ice_service_task_schedule(pf);
6780
6781 return 0;
6782 }
6783
6784 /**
6785 * ice_up - Bring the connection back up after being down
6786 * @vsi: VSI being configured
6787 */
6788 int ice_up(struct ice_vsi *vsi)
6789 {
6790 int err;
6791
6792 err = ice_vsi_cfg_lan(vsi);
6793 if (!err)
6794 err = ice_up_complete(vsi);
6795
6796 return err;
6797 }
6798
6799 /**
6800 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6801 * @syncp: pointer to u64_stats_sync
6802  * @stats: stats that the pkts and bytes counts will be taken from
6803 * @pkts: packets stats counter
6804 * @bytes: bytes stats counter
6805 *
6806  * This function fetches stats from the ring considering the atomic operations
6807  * that need to be performed to read u64 values on a 32-bit machine.
6808 */
6809 void
6810 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6811 struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6812 {
6813 unsigned int start;
6814
6815 do {
6816 start = u64_stats_fetch_begin(syncp);
6817 *pkts = stats.pkts;
6818 *bytes = stats.bytes;
6819 } while (u64_stats_fetch_retry(syncp, start));
6820 }
6821
6822 /**
6823 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6824 * @vsi: the VSI to be updated
6825 * @vsi_stats: the stats struct to be updated
6826 * @rings: rings to work on
6827 * @count: number of rings
6828 */
6829 static void
6830 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6831 struct rtnl_link_stats64 *vsi_stats,
6832 struct ice_tx_ring **rings, u16 count)
6833 {
6834 u16 i;
6835
6836 for (i = 0; i < count; i++) {
6837 struct ice_tx_ring *ring;
6838 u64 pkts = 0, bytes = 0;
6839
6840 ring = READ_ONCE(rings[i]);
6841 if (!ring || !ring->ring_stats)
6842 continue;
6843 ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6844 ring->ring_stats->stats, &pkts,
6845 &bytes);
6846 vsi_stats->tx_packets += pkts;
6847 vsi_stats->tx_bytes += bytes;
6848 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6849 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6850 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6851 }
6852 }
6853
6854 /**
6855 * ice_update_vsi_ring_stats - Update VSI stats counters
6856 * @vsi: the VSI to be updated
6857 */
6858 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6859 {
6860 struct rtnl_link_stats64 *net_stats, *stats_prev;
6861 struct rtnl_link_stats64 *vsi_stats;
6862 struct ice_pf *pf = vsi->back;
6863 u64 pkts, bytes;
6864 int i;
6865
6866 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6867 if (!vsi_stats)
6868 return;
6869
6870 /* reset non-netdev (extended) stats */
6871 vsi->tx_restart = 0;
6872 vsi->tx_busy = 0;
6873 vsi->tx_linearize = 0;
6874 vsi->rx_buf_failed = 0;
6875 vsi->rx_page_failed = 0;
6876
6877 rcu_read_lock();
6878
6879 /* update Tx rings counters */
6880 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6881 vsi->num_txq);
6882
6883 /* update Rx rings counters */
6884 ice_for_each_rxq(vsi, i) {
6885 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6886 struct ice_ring_stats *ring_stats;
6887
6888 ring_stats = ring->ring_stats;
6889 ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6890 ring_stats->stats, &pkts,
6891 &bytes);
6892 vsi_stats->rx_packets += pkts;
6893 vsi_stats->rx_bytes += bytes;
6894 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6895 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6896 }
6897
6898 /* update XDP Tx rings counters */
6899 if (ice_is_xdp_ena_vsi(vsi))
6900 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6901 vsi->num_xdp_txq);
6902
6903 rcu_read_unlock();
6904
6905 net_stats = &vsi->net_stats;
6906 stats_prev = &vsi->net_stats_prev;
6907
6908 	/* Update netdev counters, but keep in mind that values could start at a
6909 	 * random value after PF reset. And as we increase the reported stats by
6910 	 * the diff of Cur - Prev, we need to be sure that Prev is valid. If it's
6911 	 * not, let's skip this round.
6912 */
6913 if (likely(pf->stat_prev_loaded)) {
6914 net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6915 net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6916 net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6917 net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6918 }
6919
6920 stats_prev->tx_packets = vsi_stats->tx_packets;
6921 stats_prev->tx_bytes = vsi_stats->tx_bytes;
6922 stats_prev->rx_packets = vsi_stats->rx_packets;
6923 stats_prev->rx_bytes = vsi_stats->rx_bytes;
6924
6925 kfree(vsi_stats);
6926 }
6927
6928 /**
6929 * ice_update_vsi_stats - Update VSI stats counters
6930 * @vsi: the VSI to be updated
6931 */
6932 void ice_update_vsi_stats(struct ice_vsi *vsi)
6933 {
6934 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6935 struct ice_eth_stats *cur_es = &vsi->eth_stats;
6936 struct ice_pf *pf = vsi->back;
6937
6938 if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6939 test_bit(ICE_CFG_BUSY, pf->state))
6940 return;
6941
6942 /* get stats as recorded by Tx/Rx rings */
6943 ice_update_vsi_ring_stats(vsi);
6944
6945 /* get VSI stats as recorded by the hardware */
6946 ice_update_eth_stats(vsi);
6947
6948 cur_ns->tx_errors = cur_es->tx_errors;
6949 cur_ns->rx_dropped = cur_es->rx_discards;
6950 cur_ns->tx_dropped = cur_es->tx_discards;
6951 cur_ns->multicast = cur_es->rx_multicast;
6952
6953 /* update some more netdev stats if this is main VSI */
6954 if (vsi->type == ICE_VSI_PF) {
6955 cur_ns->rx_crc_errors = pf->stats.crc_errors;
6956 cur_ns->rx_errors = pf->stats.crc_errors +
6957 pf->stats.illegal_bytes +
6958 pf->stats.rx_undersize +
6959 pf->hw_csum_rx_error +
6960 pf->stats.rx_jabber +
6961 pf->stats.rx_fragments +
6962 pf->stats.rx_oversize;
6963 /* record drops from the port level */
6964 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6965 }
6966 }
6967
6968 /**
6969 * ice_update_pf_stats - Update PF port stats counters
6970  * @pf: PF whose stats need to be updated
6971 */
6972 void ice_update_pf_stats(struct ice_pf *pf)
6973 {
6974 struct ice_hw_port_stats *prev_ps, *cur_ps;
6975 struct ice_hw *hw = &pf->hw;
6976 u16 fd_ctr_base;
6977 u8 port;
6978
6979 port = hw->port_info->lport;
6980 prev_ps = &pf->stats_prev;
6981 cur_ps = &pf->stats;
6982
6983 if (ice_is_reset_in_progress(pf->state))
6984 pf->stat_prev_loaded = false;
6985
6986 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6987 &prev_ps->eth.rx_bytes,
6988 &cur_ps->eth.rx_bytes);
6989
6990 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6991 &prev_ps->eth.rx_unicast,
6992 &cur_ps->eth.rx_unicast);
6993
6994 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6995 &prev_ps->eth.rx_multicast,
6996 &cur_ps->eth.rx_multicast);
6997
6998 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6999 &prev_ps->eth.rx_broadcast,
7000 &cur_ps->eth.rx_broadcast);
7001
7002 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
7003 &prev_ps->eth.rx_discards,
7004 &cur_ps->eth.rx_discards);
7005
7006 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
7007 &prev_ps->eth.tx_bytes,
7008 &cur_ps->eth.tx_bytes);
7009
7010 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
7011 &prev_ps->eth.tx_unicast,
7012 &cur_ps->eth.tx_unicast);
7013
7014 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
7015 &prev_ps->eth.tx_multicast,
7016 &cur_ps->eth.tx_multicast);
7017
7018 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
7019 &prev_ps->eth.tx_broadcast,
7020 &cur_ps->eth.tx_broadcast);
7021
7022 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
7023 &prev_ps->tx_dropped_link_down,
7024 &cur_ps->tx_dropped_link_down);
7025
7026 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
7027 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
7028
7029 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
7030 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
7031
7032 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
7033 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
7034
7035 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
7036 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
7037
7038 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
7039 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
7040
7041 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
7042 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
7043
7044 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
7045 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
7046
7047 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
7048 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
7049
7050 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
7051 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
7052
7053 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
7054 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
7055
7056 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
7057 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
7058
7059 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
7060 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
7061
7062 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
7063 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
7064
7065 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
7066 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
7067
7068 fd_ctr_base = hw->fd_ctr_base;
7069
7070 ice_stat_update40(hw,
7071 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
7072 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
7073 &cur_ps->fd_sb_match);
7074 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
7075 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
7076
7077 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
7078 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
7079
7080 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
7081 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
7082
7083 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
7084 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
7085
7086 ice_update_dcb_stats(pf);
7087
7088 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
7089 &prev_ps->crc_errors, &cur_ps->crc_errors);
7090
7091 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
7092 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
7093
7094 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
7095 &prev_ps->mac_local_faults,
7096 &cur_ps->mac_local_faults);
7097
7098 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
7099 &prev_ps->mac_remote_faults,
7100 &cur_ps->mac_remote_faults);
7101
7102 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
7103 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
7104
7105 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
7106 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
7107
7108 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
7109 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
7110
7111 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
7112 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
7113
7114 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
7115
7116 pf->stat_prev_loaded = true;
7117 }
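
/* Illustrative sketch (not the driver's actual ice_stat_update40()
 * implementation; helper name and argument layout are hypothetical) of
 * how a rolling 40-bit hardware counter is folded into a monotonically
 * increasing 64-bit statistic:
 *
 *	static u64 example_update40(u64 hw_val, bool prev_loaded,
 *				    u64 *prev, u64 cur)
 *	{
 *		u64 delta;
 *
 *		if (!prev_loaded)
 *			*prev = hw_val;
 *		if (hw_val >= *prev)
 *			delta = hw_val - *prev;
 *		else
 *			delta = (hw_val + BIT_ULL(40)) - *prev;
 *		*prev = hw_val;
 *		return cur + delta;
 *	}
 *
 * This is why ice_update_pf_stats() clears stat_prev_loaded while a
 * reset is in progress: the first read afterwards only re-baselines
 * the previous value instead of counting a bogus delta.
 */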
7118
7119 /**
7120 * ice_get_stats64 - get statistics for network device structure
7121 * @netdev: network interface device structure
7122 * @stats: main device statistics structure
7123 */
7124 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
7125 {
7126 struct ice_netdev_priv *np = netdev_priv(netdev);
7127 struct rtnl_link_stats64 *vsi_stats;
7128 struct ice_vsi *vsi = np->vsi;
7129
7130 vsi_stats = &vsi->net_stats;
7131
7132 if (!vsi->num_txq || !vsi->num_rxq)
7133 return;
7134
7135 /* netdev packet/byte stats come from the ring counters. These are
7136 * obtained by summing up the ring counters (done by
7137 * ice_update_vsi_ring_stats). But only call the update routine and
7138 * read the registers if the VSI is not down.
7139 */
7140 if (!test_bit(ICE_VSI_DOWN, vsi->state))
7141 ice_update_vsi_ring_stats(vsi);
7142 stats->tx_packets = vsi_stats->tx_packets;
7143 stats->tx_bytes = vsi_stats->tx_bytes;
7144 stats->rx_packets = vsi_stats->rx_packets;
7145 stats->rx_bytes = vsi_stats->rx_bytes;
7146
7147 /* The rest of the stats can be read from the hardware but instead we
7148 * just return values that the watchdog task has already obtained from
7149 * the hardware.
7150 */
7151 stats->multicast = vsi_stats->multicast;
7152 stats->tx_errors = vsi_stats->tx_errors;
7153 stats->tx_dropped = vsi_stats->tx_dropped;
7154 stats->rx_errors = vsi_stats->rx_errors;
7155 stats->rx_dropped = vsi_stats->rx_dropped;
7156 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
7157 stats->rx_length_errors = vsi_stats->rx_length_errors;
7158 }
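
/* ice_get_stats64() is wired up as the ndo_get_stats64 callback in the
 * net_device_ops tables later in this file, so it backs reads such as
 * `ip -s link show` and /proc/net/dev. A sketch of the hookup:
 *
 *	static const struct net_device_ops ice_netdev_ops = {
 *		...
 *		.ndo_get_stats64 = ice_get_stats64,
 *		...
 *	};
 */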
7159
7160 /**
7161 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
7162 * @vsi: VSI having NAPI disabled
7163 */
7164 static void ice_napi_disable_all(struct ice_vsi *vsi)
7165 {
7166 int q_idx;
7167
7168 if (!vsi->netdev)
7169 return;
7170
7171 ice_for_each_q_vector(vsi, q_idx) {
7172 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
7173
7174 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
7175 napi_disable(&q_vector->napi);
7176
7177 cancel_work_sync(&q_vector->tx.dim.work);
7178 cancel_work_sync(&q_vector->rx.dim.work);
7179 }
7180 }
7181
7182 /**
7183 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
7184 * @vsi: the VSI being un-configured
7185 */
7186 static void ice_vsi_dis_irq(struct ice_vsi *vsi)
7187 {
7188 struct ice_pf *pf = vsi->back;
7189 struct ice_hw *hw = &pf->hw;
7190 u32 val;
7191 int i;
7192
7193 /* disable interrupt causation from each Rx queue; Tx queues are
7194 * handled in ice_vsi_stop_tx_ring()
7195 */
7196 if (vsi->rx_rings) {
7197 ice_for_each_rxq(vsi, i) {
7198 if (vsi->rx_rings[i]) {
7199 u16 reg;
7200
7201 reg = vsi->rx_rings[i]->reg_idx;
7202 val = rd32(hw, QINT_RQCTL(reg));
7203 val &= ~QINT_RQCTL_CAUSE_ENA_M;
7204 wr32(hw, QINT_RQCTL(reg), val);
7205 }
7206 }
7207 }
7208
7209 /* disable each interrupt */
7210 ice_for_each_q_vector(vsi, i) {
7211 if (!vsi->q_vectors[i])
7212 continue;
7213 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
7214 }
7215
7216 ice_flush(hw);
7217
7218 /* don't call synchronize_irq() for VFs from the host */
7219 if (vsi->type == ICE_VSI_VF)
7220 return;
7221
7222 ice_for_each_q_vector(vsi, i)
7223 synchronize_irq(vsi->q_vectors[i]->irq.virq);
7224 }
7225
7226 /**
7227 * ice_down - Shutdown the connection
7228 * @vsi: The VSI being stopped
7229 *
7230 * Caller of this function is expected to set the ICE_VSI_DOWN bit in vsi->state
7231 */
7232 int ice_down(struct ice_vsi *vsi)
7233 {
7234 int i, tx_err, rx_err, vlan_err = 0;
7235
7236 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
7237
7238 if (vsi->netdev) {
7239 vlan_err = ice_vsi_del_vlan_zero(vsi);
7240 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
7241 netif_carrier_off(vsi->netdev);
7242 netif_tx_disable(vsi->netdev);
7243 }
7244
7245 ice_vsi_dis_irq(vsi);
7246
7247 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
7248 if (tx_err)
7249 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
7250 vsi->vsi_num, tx_err);
7251 if (!tx_err && vsi->xdp_rings) {
7252 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
7253 if (tx_err)
7254 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
7255 vsi->vsi_num, tx_err);
7256 }
7257
7258 rx_err = ice_vsi_stop_all_rx_rings(vsi);
7259 if (rx_err)
7260 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
7261 vsi->vsi_num, rx_err);
7262
7263 ice_napi_disable_all(vsi);
7264
7265 ice_for_each_txq(vsi, i)
7266 ice_clean_tx_ring(vsi->tx_rings[i]);
7267
7268 if (vsi->xdp_rings)
7269 ice_for_each_xdp_txq(vsi, i)
7270 ice_clean_tx_ring(vsi->xdp_rings[i]);
7271
7272 ice_for_each_rxq(vsi, i)
7273 ice_clean_rx_ring(vsi->rx_rings[i]);
7274
7275 if (tx_err || rx_err || vlan_err) {
7276 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
7277 vsi->vsi_num, vsi->vsw->sw_id);
7278 return -EIO;
7279 }
7280
7281 return 0;
7282 }
7283
7284 /**
7285 * ice_down_up - shutdown the VSI connection and bring it up
7286 * @vsi: the VSI to be reconnected
7287 */
7288 int ice_down_up(struct ice_vsi *vsi)
7289 {
7290 int ret;
7291
7292 /* if DOWN already set, nothing to do */
7293 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
7294 return 0;
7295
7296 ret = ice_down(vsi);
7297 if (ret)
7298 return ret;
7299
7300 ret = ice_up(vsi);
7301 if (ret) {
7302 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
7303 return ret;
7304 }
7305
7306 return 0;
7307 }
7308
7309 /**
7310 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
7311 * @vsi: VSI having resources allocated
7312 *
7313 * Return 0 on success, negative on failure
7314 */
7315 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
7316 {
7317 int i, err = 0;
7318
7319 if (!vsi->num_txq) {
7320 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
7321 vsi->vsi_num);
7322 return -EINVAL;
7323 }
7324
7325 ice_for_each_txq(vsi, i) {
7326 struct ice_tx_ring *ring = vsi->tx_rings[i];
7327
7328 if (!ring)
7329 return -EINVAL;
7330
7331 if (vsi->netdev)
7332 ring->netdev = vsi->netdev;
7333 err = ice_setup_tx_ring(ring);
7334 if (err)
7335 break;
7336 }
7337
7338 return err;
7339 }
7340
7341 /**
7342 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
7343 * @vsi: VSI having resources allocated
7344 *
7345 * Return 0 on success, negative on failure
7346 */
7347 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
7348 {
7349 int i, err = 0;
7350
7351 if (!vsi->num_rxq) {
7352 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
7353 vsi->vsi_num);
7354 return -EINVAL;
7355 }
7356
7357 ice_for_each_rxq(vsi, i) {
7358 struct ice_rx_ring *ring = vsi->rx_rings[i];
7359
7360 if (!ring)
7361 return -EINVAL;
7362
7363 if (vsi->netdev)
7364 ring->netdev = vsi->netdev;
7365 err = ice_setup_rx_ring(ring);
7366 if (err)
7367 break;
7368 }
7369
7370 return err;
7371 }
7372
7373 /**
7374 * ice_vsi_open_ctrl - open control VSI for use
7375 * @vsi: the VSI to open
7376 *
7377 * Initialization of the Control VSI
7378 *
7379 * Returns 0 on success, negative value on error
7380 */
7381 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
7382 {
7383 char int_name[ICE_INT_NAME_STR_LEN];
7384 struct ice_pf *pf = vsi->back;
7385 struct device *dev;
7386 int err;
7387
7388 dev = ice_pf_to_dev(pf);
7389 /* allocate descriptors */
7390 err = ice_vsi_setup_tx_rings(vsi);
7391 if (err)
7392 goto err_setup_tx;
7393
7394 err = ice_vsi_setup_rx_rings(vsi);
7395 if (err)
7396 goto err_setup_rx;
7397
7398 err = ice_vsi_cfg_lan(vsi);
7399 if (err)
7400 goto err_setup_rx;
7401
7402 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
7403 dev_driver_string(dev), dev_name(dev));
7404 err = ice_vsi_req_irq_msix(vsi, int_name);
7405 if (err)
7406 goto err_setup_rx;
7407
7408 ice_vsi_cfg_msix(vsi);
7409
7410 err = ice_vsi_start_all_rx_rings(vsi);
7411 if (err)
7412 goto err_up_complete;
7413
7414 clear_bit(ICE_VSI_DOWN, vsi->state);
7415 ice_vsi_ena_irq(vsi);
7416
7417 return 0;
7418
7419 err_up_complete:
7420 ice_down(vsi);
7421 err_setup_rx:
7422 ice_vsi_free_rx_rings(vsi);
7423 err_setup_tx:
7424 ice_vsi_free_tx_rings(vsi);
7425
7426 return err;
7427 }
7428
7429 /**
7430 * ice_vsi_open - Called when a network interface is made active
7431 * @vsi: the VSI to open
7432 *
7433 * Initialization of the VSI
7434 *
7435 * Returns 0 on success, negative value on error
7436 */
7437 int ice_vsi_open(struct ice_vsi *vsi)
7438 {
7439 char int_name[ICE_INT_NAME_STR_LEN];
7440 struct ice_pf *pf = vsi->back;
7441 int err;
7442
7443 /* allocate descriptors */
7444 err = ice_vsi_setup_tx_rings(vsi);
7445 if (err)
7446 goto err_setup_tx;
7447
7448 err = ice_vsi_setup_rx_rings(vsi);
7449 if (err)
7450 goto err_setup_rx;
7451
7452 err = ice_vsi_cfg_lan(vsi);
7453 if (err)
7454 goto err_setup_rx;
7455
7456 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7457 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7458 err = ice_vsi_req_irq_msix(vsi, int_name);
7459 if (err)
7460 goto err_setup_rx;
7461
7462 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7463
7464 if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) {
7465 /* Notify the stack of the actual queue counts. */
7466 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7467 if (err)
7468 goto err_set_qs;
7469
7470 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7471 if (err)
7472 goto err_set_qs;
7473
7474 ice_vsi_set_napi_queues(vsi);
7475 }
7476
7477 err = ice_up_complete(vsi);
7478 if (err)
7479 goto err_up_complete;
7480
7481 return 0;
7482
7483 err_up_complete:
7484 ice_down(vsi);
7485 err_set_qs:
7486 ice_vsi_free_irq(vsi);
7487 err_setup_rx:
7488 ice_vsi_free_rx_rings(vsi);
7489 err_setup_tx:
7490 ice_vsi_free_tx_rings(vsi);
7491
7492 return err;
7493 }
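
/* The error paths above follow the common kernel goto-unwind idiom:
 * each failure jumps to the label that releases only what was already
 * acquired, in reverse order. A minimal sketch with hypothetical
 * helpers:
 *
 *	err = acquire_a();
 *	if (err)
 *		return err;
 *	err = acquire_b();
 *	if (err)
 *		goto err_release_a;
 *	return 0;
 *
 * err_release_a:
 *	release_a();
 *	return err;
 */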
7494
7495 /**
7496 * ice_vsi_release_all - Delete all VSIs
7497 * @pf: PF from which all VSIs are being removed
7498 */
7499 static void ice_vsi_release_all(struct ice_pf *pf)
7500 {
7501 int err, i;
7502
7503 if (!pf->vsi)
7504 return;
7505
7506 ice_for_each_vsi(pf, i) {
7507 if (!pf->vsi[i])
7508 continue;
7509
7510 if (pf->vsi[i]->type == ICE_VSI_CHNL)
7511 continue;
7512
7513 err = ice_vsi_release(pf->vsi[i]);
7514 if (err)
7515 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7516 i, err, pf->vsi[i]->vsi_num);
7517 }
7518 }
7519
7520 /**
7521 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7522 * @pf: pointer to the PF instance
7523 * @type: VSI type to rebuild
7524 *
7525 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7526 */
7527 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7528 {
7529 struct device *dev = ice_pf_to_dev(pf);
7530 int i, err;
7531
7532 ice_for_each_vsi(pf, i) {
7533 struct ice_vsi *vsi = pf->vsi[i];
7534
7535 if (!vsi || vsi->type != type)
7536 continue;
7537
7538 /* rebuild the VSI */
7539 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7540 if (err) {
7541 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7542 err, vsi->idx, ice_vsi_type_str(type));
7543 return err;
7544 }
7545
7546 /* replay filters for the VSI */
7547 err = ice_replay_vsi(&pf->hw, vsi->idx);
7548 if (err) {
7549 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7550 err, vsi->idx, ice_vsi_type_str(type));
7551 return err;
7552 }
7553
7554 /* Re-map HW VSI number, using VSI handle that has been
7555 * previously validated in ice_replay_vsi() call above
7556 */
7557 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7558
7559 /* enable the VSI */
7560 err = ice_ena_vsi(vsi, false);
7561 if (err) {
7562 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7563 err, vsi->idx, ice_vsi_type_str(type));
7564 return err;
7565 }
7566
7567 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7568 ice_vsi_type_str(type));
7569 }
7570
7571 return 0;
7572 }
7573
7574 /**
7575 * ice_update_pf_netdev_link - Update PF netdev link status
7576 * @pf: pointer to the PF instance
7577 */
7578 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7579 {
7580 bool link_up;
7581 int i;
7582
7583 ice_for_each_vsi(pf, i) {
7584 struct ice_vsi *vsi = pf->vsi[i];
7585
7586 if (!vsi || vsi->type != ICE_VSI_PF)
7587 return;
7588
7589 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7590 if (link_up) {
7591 netif_carrier_on(pf->vsi[i]->netdev);
7592 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7593 } else {
7594 netif_carrier_off(pf->vsi[i]->netdev);
7595 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7596 }
7597 }
7598 }
7599
7600 /**
7601 * ice_rebuild - rebuild after reset
7602 * @pf: PF to rebuild
7603 * @reset_type: type of reset
7604 *
7605 * Do not rebuild VF VSIs in this flow because that is already handled via
7606 * ice_reset_all_vfs(). This is because the requirements for resetting a VF
7607 * after a PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we
7608 * don't want to reset/rebuild all the VF VSIs twice.
7609 */
7610 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7611 {
7612 struct ice_vsi *vsi = ice_get_main_vsi(pf);
7613 struct device *dev = ice_pf_to_dev(pf);
7614 struct ice_hw *hw = &pf->hw;
7615 bool dvm;
7616 int err;
7617
7618 if (test_bit(ICE_DOWN, pf->state))
7619 goto clear_recovery;
7620
7621 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7622
7623 #define ICE_EMP_RESET_SLEEP_MS 5000
7624 if (reset_type == ICE_RESET_EMPR) {
7625 /* If an EMP reset has occurred, any previously pending flash
7626 * update will have completed. We no longer know whether or
7627 * not the NVM update EMP reset is restricted.
7628 */
7629 pf->fw_emp_reset_disabled = false;
7630
7631 msleep(ICE_EMP_RESET_SLEEP_MS);
7632 }
7633
7634 err = ice_init_all_ctrlq(hw);
7635 if (err) {
7636 dev_err(dev, "control queues init failed %d\n", err);
7637 goto err_init_ctrlq;
7638 }
7639
7640 /* if DDP was previously loaded successfully */
7641 if (!ice_is_safe_mode(pf)) {
7642 /* reload the SW DB of filter tables */
7643 if (reset_type == ICE_RESET_PFR)
7644 ice_fill_blk_tbls(hw);
7645 else
7646 /* Reload DDP Package after CORER/GLOBR reset */
7647 ice_load_pkg(NULL, pf);
7648 }
7649
7650 err = ice_clear_pf_cfg(hw);
7651 if (err) {
7652 dev_err(dev, "clear PF configuration failed %d\n", err);
7653 goto err_init_ctrlq;
7654 }
7655
7656 ice_clear_pxe_mode(hw);
7657
7658 err = ice_init_nvm(hw);
7659 if (err) {
7660 dev_err(dev, "ice_init_nvm failed %d\n", err);
7661 goto err_init_ctrlq;
7662 }
7663
7664 err = ice_get_caps(hw);
7665 if (err) {
7666 dev_err(dev, "ice_get_caps failed %d\n", err);
7667 goto err_init_ctrlq;
7668 }
7669
7670 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7671 if (err) {
7672 dev_err(dev, "set_mac_cfg failed %d\n", err);
7673 goto err_init_ctrlq;
7674 }
7675
7676 dvm = ice_is_dvm_ena(hw);
7677
7678 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7679 if (err)
7680 goto err_init_ctrlq;
7681
7682 err = ice_sched_init_port(hw->port_info);
7683 if (err)
7684 goto err_sched_init_port;
7685
7686 /* start misc vector */
7687 err = ice_req_irq_msix_misc(pf);
7688 if (err) {
7689 dev_err(dev, "misc vector setup failed: %d\n", err);
7690 goto err_sched_init_port;
7691 }
7692
7693 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7694 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7695 if (!rd32(hw, PFQF_FD_SIZE)) {
7696 u16 unused, guar, b_effort;
7697
7698 guar = hw->func_caps.fd_fltr_guar;
7699 b_effort = hw->func_caps.fd_fltr_best_effort;
7700
7701 /* force guaranteed filter pool for PF */
7702 ice_alloc_fd_guar_item(hw, &unused, guar);
7703 /* force shared filter pool for PF */
7704 ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7705 }
7706 }
7707
7708 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7709 ice_dcb_rebuild(pf);
7710
7711 /* If the PF previously had PTP enabled, PTP init needs to happen
7712 * before the VSI rebuild; otherwise the PTP link status events
7713 * fail.
7714 */
7715 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7716 ice_ptp_rebuild(pf, reset_type);
7717
7718 if (ice_is_feature_supported(pf, ICE_F_GNSS))
7719 ice_gnss_init(pf);
7720
7721 /* rebuild PF VSI */
7722 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7723 if (err) {
7724 dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7725 goto err_vsi_rebuild;
7726 }
7727
7728 if (reset_type == ICE_RESET_PFR) {
7729 err = ice_rebuild_channels(pf);
7730 if (err) {
7731 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7732 err);
7733 goto err_vsi_rebuild;
7734 }
7735 }
7736
7737 /* If Flow Director is active */
7738 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7739 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7740 if (err) {
7741 dev_err(dev, "control VSI rebuild failed: %d\n", err);
7742 goto err_vsi_rebuild;
7743 }
7744
7745 /* replay HW Flow Director recipes */
7746 if (hw->fdir_prof)
7747 ice_fdir_replay_flows(hw);
7748
7749 /* replay Flow Director filters */
7750 ice_fdir_replay_fltrs(pf);
7751
7752 ice_rebuild_arfs(pf);
7753 }
7754
7755 if (vsi && vsi->netdev)
7756 netif_device_attach(vsi->netdev);
7757
7758 ice_update_pf_netdev_link(pf);
7759
7760 /* tell the firmware we are up */
7761 err = ice_send_version(pf);
7762 if (err) {
7763 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7764 err);
7765 goto err_vsi_rebuild;
7766 }
7767
7768 ice_replay_post(hw);
7769
7770 /* if we get here, reset flow is successful */
7771 clear_bit(ICE_RESET_FAILED, pf->state);
7772
7773 ice_plug_aux_dev(pf);
7774 if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
7775 ice_lag_rebuild(pf);
7776
7777 /* Restore timestamp mode settings after VSI rebuild */
7778 ice_ptp_restore_timestamp_mode(pf);
7779 return;
7780
7781 err_vsi_rebuild:
7782 err_sched_init_port:
7783 ice_sched_cleanup_all(hw);
7784 err_init_ctrlq:
7785 ice_shutdown_all_ctrlq(hw, false);
7786 set_bit(ICE_RESET_FAILED, pf->state);
7787 clear_recovery:
7788 /* set this bit in PF state to control service task scheduling */
7789 set_bit(ICE_NEEDS_RESTART, pf->state);
7790 dev_err(dev, "Rebuild failed, unload and reload driver\n");
7791 }
7792
7793 /**
7794 * ice_change_mtu - NDO callback to change the MTU
7795 * @netdev: network interface device structure
7796 * @new_mtu: new value for maximum frame size
7797 *
7798 * Returns 0 on success, negative on failure
7799 */
7800 int ice_change_mtu(struct net_device *netdev, int new_mtu)
7801 {
7802 struct ice_netdev_priv *np = netdev_priv(netdev);
7803 struct ice_vsi *vsi = np->vsi;
7804 struct ice_pf *pf = vsi->back;
7805 struct bpf_prog *prog;
7806 u8 count = 0;
7807 int err = 0;
7808
7809 if (new_mtu == (int)netdev->mtu) {
7810 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7811 return 0;
7812 }
7813
7814 prog = vsi->xdp_prog;
7815 if (prog && !prog->aux->xdp_has_frags) {
7816 int frame_size = ice_max_xdp_frame_size(vsi);
7817
7818 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7819 netdev_err(netdev, "max MTU for XDP usage is %d\n",
7820 frame_size - ICE_ETH_PKT_HDR_PAD);
7821 return -EINVAL;
7822 }
7823 } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7824 if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
7825 netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
7826 ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
7827 return -EINVAL;
7828 }
7829 }
7830
7831 /* if a reset is in progress, wait for some time for it to complete */
7832 do {
7833 if (ice_is_reset_in_progress(pf->state)) {
7834 count++;
7835 usleep_range(1000, 2000);
7836 } else {
7837 break;
7838 }
7839
7840 } while (count < 100);
7841
7842 if (count == 100) {
7843 netdev_err(netdev, "can't change MTU. Device is busy\n");
7844 return -EBUSY;
7845 }
7846
7847 WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu);
7848 err = ice_down_up(vsi);
7849 if (err)
7850 return err;
7851
7852 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7853 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7854
7855 return err;
7856 }
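
/* From userspace this path is typically exercised via iproute2, e.g.:
 *
 *	ip link set dev <pf-netdev> mtu 3000
 *
 * which lands here through the ndo_change_mtu callback and triggers a
 * full ice_down_up() ring reconfiguration.
 */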
7857
7858 /**
7859 * ice_eth_ioctl - Access the hwtstamp interface
7860 * @netdev: network interface device structure
7861 * @ifr: interface request data
7862 * @cmd: ioctl command
7863 */
7864 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7865 {
7866 struct ice_netdev_priv *np = netdev_priv(netdev);
7867 struct ice_pf *pf = np->vsi->back;
7868
7869 switch (cmd) {
7870 case SIOCGHWTSTAMP:
7871 return ice_ptp_get_ts_config(pf, ifr);
7872 case SIOCSHWTSTAMP:
7873 return ice_ptp_set_ts_config(pf, ifr);
7874 default:
7875 return -EOPNOTSUPP;
7876 }
7877 }
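
/* Illustrative userspace counterpart (standard kernel timestamping
 * UAPI, not part of this driver): the SIOC[GS]HWTSTAMP ioctls carry a
 * struct hwtstamp_config from <linux/net_tstamp.h> through ifr_data:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_OFF,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = {};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock, SIOCSHWTSTAMP, &ifr);
 */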
7878
7879 /**
7880 * ice_aq_str - convert AQ err code to a string
7881 * @aq_err: the AQ error code to convert
7882 */
7883 const char *ice_aq_str(enum ice_aq_err aq_err)
7884 {
7885 switch (aq_err) {
7886 case ICE_AQ_RC_OK:
7887 return "OK";
7888 case ICE_AQ_RC_EPERM:
7889 return "ICE_AQ_RC_EPERM";
7890 case ICE_AQ_RC_ENOENT:
7891 return "ICE_AQ_RC_ENOENT";
7892 case ICE_AQ_RC_ENOMEM:
7893 return "ICE_AQ_RC_ENOMEM";
7894 case ICE_AQ_RC_EBUSY:
7895 return "ICE_AQ_RC_EBUSY";
7896 case ICE_AQ_RC_EEXIST:
7897 return "ICE_AQ_RC_EEXIST";
7898 case ICE_AQ_RC_EINVAL:
7899 return "ICE_AQ_RC_EINVAL";
7900 case ICE_AQ_RC_ENOSPC:
7901 return "ICE_AQ_RC_ENOSPC";
7902 case ICE_AQ_RC_ENOSYS:
7903 return "ICE_AQ_RC_ENOSYS";
7904 case ICE_AQ_RC_EMODE:
7905 return "ICE_AQ_RC_EMODE";
7906 case ICE_AQ_RC_ENOSEC:
7907 return "ICE_AQ_RC_ENOSEC";
7908 case ICE_AQ_RC_EBADSIG:
7909 return "ICE_AQ_RC_EBADSIG";
7910 case ICE_AQ_RC_ESVN:
7911 return "ICE_AQ_RC_ESVN";
7912 case ICE_AQ_RC_EBADMAN:
7913 return "ICE_AQ_RC_EBADMAN";
7914 case ICE_AQ_RC_EBADBUF:
7915 return "ICE_AQ_RC_EBADBUF";
7916 }
7917
7918 return "ICE_AQ_RC_UNKNOWN";
7919 }
7920
7921 /**
7922 * ice_set_rss_lut - Set RSS LUT
7923 * @vsi: Pointer to VSI structure
7924 * @lut: Lookup table
7925 * @lut_size: Lookup table size
7926 *
7927 * Returns 0 on success, negative on failure
7928 */
7929 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7930 {
7931 struct ice_aq_get_set_rss_lut_params params = {};
7932 struct ice_hw *hw = &vsi->back->hw;
7933 int status;
7934
7935 if (!lut)
7936 return -EINVAL;
7937
7938 params.vsi_handle = vsi->idx;
7939 params.lut_size = lut_size;
7940 params.lut_type = vsi->rss_lut_type;
7941 params.lut = lut;
7942
7943 status = ice_aq_set_rss_lut(hw, &params);
7944 if (status)
7945 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7946 status, ice_aq_str(hw->adminq.sq_last_status));
7947
7948 return status;
7949 }
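
/* A hedged caller sketch (mirroring what the driver's RSS LUT helpers
 * do elsewhere; assumes vsi->rss_table_size and vsi->rss_size are
 * populated): spread the Rx queues across the table round-robin before
 * programming it:
 *
 *	u8 *lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
 *	u16 i;
 *
 *	if (!lut)
 *		return -ENOMEM;
 *	for (i = 0; i < vsi->rss_table_size; i++)
 *		lut[i] = i % vsi->rss_size;
 *	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
 *	kfree(lut);
 */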
7950
7951 /**
7952 * ice_set_rss_key - Set RSS key
7953 * @vsi: Pointer to the VSI structure
7954 * @seed: RSS hash seed
7955 *
7956 * Returns 0 on success, negative on failure
7957 */
7958 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7959 {
7960 struct ice_hw *hw = &vsi->back->hw;
7961 int status;
7962
7963 if (!seed)
7964 return -EINVAL;
7965
7966 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7967 if (status)
7968 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7969 status, ice_aq_str(hw->adminq.sq_last_status));
7970
7971 return status;
7972 }
7973
7974 /**
7975 * ice_get_rss_lut - Get RSS LUT
7976 * @vsi: Pointer to VSI structure
7977 * @lut: Buffer to store the lookup table entries
7978 * @lut_size: Size of buffer to store the lookup table entries
7979 *
7980 * Returns 0 on success, negative on failure
7981 */
7982 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7983 {
7984 struct ice_aq_get_set_rss_lut_params params = {};
7985 struct ice_hw *hw = &vsi->back->hw;
7986 int status;
7987
7988 if (!lut)
7989 return -EINVAL;
7990
7991 params.vsi_handle = vsi->idx;
7992 params.lut_size = lut_size;
7993 params.lut_type = vsi->rss_lut_type;
7994 params.lut = lut;
7995
7996 status = ice_aq_get_rss_lut(hw, &params);
7997 if (status)
7998 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7999 status, ice_aq_str(hw->adminq.sq_last_status));
8000
8001 return status;
8002 }
8003
8004 /**
8005 * ice_get_rss_key - Get RSS key
8006 * @vsi: Pointer to VSI structure
8007 * @seed: Buffer to store the key in
8008 *
8009 * Returns 0 on success, negative on failure
8010 */
8011 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
8012 {
8013 struct ice_hw *hw = &vsi->back->hw;
8014 int status;
8015
8016 if (!seed)
8017 return -EINVAL;
8018
8019 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
8020 if (status)
8021 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
8022 status, ice_aq_str(hw->adminq.sq_last_status));
8023
8024 return status;
8025 }
8026
8027 /**
8028 * ice_set_rss_hfunc - Set RSS HASH function
8029 * @vsi: Pointer to VSI structure
8030 * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*)
8031 *
8032 * Returns 0 on success, negative on failure
8033 */
8034 int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc)
8035 {
8036 struct ice_hw *hw = &vsi->back->hw;
8037 struct ice_vsi_ctx *ctx;
8038 bool symm;
8039 int err;
8040
8041 if (hfunc == vsi->rss_hfunc)
8042 return 0;
8043
8044 if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ &&
8045 hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
8046 return -EOPNOTSUPP;
8047
8048 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8049 if (!ctx)
8050 return -ENOMEM;
8051
8052 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
8053 ctx->info.q_opt_rss = vsi->info.q_opt_rss;
8054 ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
8055 ctx->info.q_opt_rss |=
8056 FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc);
8057 ctx->info.q_opt_tc = vsi->info.q_opt_tc;
8058 ctx->info.q_opt_flags = vsi->info.q_opt_rss;
8059
8060 err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
8061 if (err) {
8062 dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n",
8063 vsi->vsi_num, err);
8064 } else {
8065 vsi->info.q_opt_rss = ctx->info.q_opt_rss;
8066 vsi->rss_hfunc = hfunc;
8067 netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n",
8068 hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ?
8069 "Symmetric " : "");
8070 }
8071 kfree(ctx);
8072 if (err)
8073 return err;
8074
8075 /* Fix the symmetry setting for all existing RSS configurations */
8076 symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
8077 return ice_set_rss_cfg_symm(hw, vsi, symm);
8078 }
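
/* From userspace the hash function is selected through ethtool's RSS
 * interface, e.g. (the exact spelling for the symmetric variant depends
 * on the ethtool version in use):
 *
 *	ethtool -X <pf-netdev> hfunc toeplitz
 */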
8079
8080 /**
8081 * ice_bridge_getlink - Get the hardware bridge mode
8082 * @skb: skb buff
8083 * @pid: process ID
8084 * @seq: RTNL message seq
8085 * @dev: the netdev being configured
8086 * @filter_mask: filter mask passed in
8087 * @nlflags: netlink flags passed in
8088 *
8089 * Return the bridge mode (VEB/VEPA)
8090 */
8091 static int
8092 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8093 struct net_device *dev, u32 filter_mask, int nlflags)
8094 {
8095 struct ice_netdev_priv *np = netdev_priv(dev);
8096 struct ice_vsi *vsi = np->vsi;
8097 struct ice_pf *pf = vsi->back;
8098 u16 bmode;
8099
8100 bmode = pf->first_sw->bridge_mode;
8101
8102 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
8103 filter_mask, NULL);
8104 }
8105
8106 /**
8107 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
8108 * @vsi: Pointer to VSI structure
8109 * @bmode: Hardware bridge mode (VEB/VEPA)
8110 *
8111 * Returns 0 on success, negative on failure
8112 */
8113 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
8114 {
8115 struct ice_aqc_vsi_props *vsi_props;
8116 struct ice_hw *hw = &vsi->back->hw;
8117 struct ice_vsi_ctx *ctxt;
8118 int ret;
8119
8120 vsi_props = &vsi->info;
8121
8122 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
8123 if (!ctxt)
8124 return -ENOMEM;
8125
8126 ctxt->info = vsi->info;
8127
8128 if (bmode == BRIDGE_MODE_VEB)
8129 /* change from VEPA to VEB mode */
8130 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
8131 else
8132 /* change from VEB to VEPA mode */
8133 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
8134 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
8135
8136 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
8137 if (ret) {
8138 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
8139 bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
8140 goto out;
8141 }
8142 /* Update sw flags for bookkeeping */
8143 vsi_props->sw_flags = ctxt->info.sw_flags;
8144
8145 out:
8146 kfree(ctxt);
8147 return ret;
8148 }
8149
8150 /**
8151 * ice_bridge_setlink - Set the hardware bridge mode
8152 * @dev: the netdev being configured
8153 * @nlh: RTNL message
8154 * @flags: bridge setlink flags
8155 * @extack: netlink extended ack
8156 *
8157 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
8158 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
8159 * not already set) for all VSIs connected to this switch, and also updates the
8160 * unicast switch filter rules for the corresponding switch of the netdev.
8161 */
8162 static int
8163 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
8164 u16 __always_unused flags,
8165 struct netlink_ext_ack __always_unused *extack)
8166 {
8167 struct ice_netdev_priv *np = netdev_priv(dev);
8168 struct ice_pf *pf = np->vsi->back;
8169 struct nlattr *attr, *br_spec;
8170 struct ice_hw *hw = &pf->hw;
8171 struct ice_sw *pf_sw;
8172 int rem, v, err = 0;
8173
8174 pf_sw = pf->first_sw;
8175 /* find the attribute in the netlink message */
8176 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8177 if (!br_spec)
8178 return -EINVAL;
8179
8180 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
8181 __u16 mode = nla_get_u16(attr);
8182
8183 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
8184 return -EINVAL;
8185 /* Continue if bridge mode is not being flipped */
8186 if (mode == pf_sw->bridge_mode)
8187 continue;
8188 /* Iterates through the PF VSI list and update the loopback
8189 * mode of the VSI
8190 */
8191 ice_for_each_vsi(pf, v) {
8192 if (!pf->vsi[v])
8193 continue;
8194 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
8195 if (err)
8196 return err;
8197 }
8198
8199 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
8200 /* Update the unicast switch filter rules for the corresponding
8201 * switch of the netdev
8202 */
8203 err = ice_update_sw_rule_bridge_mode(hw);
8204 if (err) {
8205 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
8206 mode, err,
8207 ice_aq_str(hw->adminq.sq_last_status));
8208 /* revert hw->evb_veb */
8209 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
8210 return err;
8211 }
8212
8213 pf_sw->bridge_mode = mode;
8214 }
8215
8216 return 0;
8217 }
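
/* Illustrative iproute2 usage (standard bridge(8) tool): flip the
 * embedded switch between VEB and VEPA and read the mode back:
 *
 *	bridge link set dev <pf-netdev> hwmode veb
 *	bridge link show dev <pf-netdev>
 *
 * The set command reaches ice_bridge_setlink() as an RTM_SETLINK
 * message carrying an IFLA_BRIDGE_MODE attribute; the show path goes
 * through ice_bridge_getlink().
 */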
8218
8219 /**
8220 * ice_tx_timeout - Respond to a Tx Hang
8221 * @netdev: network interface device structure
8222 * @txqueue: Tx queue
8223 */
8224 void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
8225 {
8226 struct ice_netdev_priv *np = netdev_priv(netdev);
8227 struct ice_tx_ring *tx_ring = NULL;
8228 struct ice_vsi *vsi = np->vsi;
8229 struct ice_pf *pf = vsi->back;
8230 u32 i;
8231
8232 pf->tx_timeout_count++;
8233
8234 /* Check if PFC is enabled for the TC to which the queue belongs.
8235 * If yes, then the Tx timeout is not caused by a hung queue and
8236 * there is no need to reset and rebuild
8237 */
8238 if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
8239 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
8240 txqueue);
8241 return;
8242 }
8243
8244 /* now that we have an index, find the tx_ring struct */
8245 ice_for_each_txq(vsi, i)
8246 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
8247 if (txqueue == vsi->tx_rings[i]->q_index) {
8248 tx_ring = vsi->tx_rings[i];
8249 break;
8250 }
8251
8252 /* Reset recovery level if enough time has elapsed after last timeout.
8253 * Also ensure no new reset action happens before next timeout period.
8254 */
8255 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
8256 pf->tx_timeout_recovery_level = 1;
8257 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
8258 netdev->watchdog_timeo)))
8259 return;
8260
8261 if (tx_ring) {
8262 struct ice_hw *hw = &pf->hw;
8263 u32 head, val = 0;
8264
8265 head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
8266 rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
8267 /* Read interrupt register */
8268 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
8269
8270 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
8271 vsi->vsi_num, txqueue, tx_ring->next_to_clean,
8272 head, tx_ring->next_to_use, val);
8273 }
8274
8275 pf->tx_timeout_last_recovery = jiffies;
8276 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
8277 pf->tx_timeout_recovery_level, txqueue);
8278
8279 switch (pf->tx_timeout_recovery_level) {
8280 case 1:
8281 set_bit(ICE_PFR_REQ, pf->state);
8282 break;
8283 case 2:
8284 set_bit(ICE_CORER_REQ, pf->state);
8285 break;
8286 case 3:
8287 set_bit(ICE_GLOBR_REQ, pf->state);
8288 break;
8289 default:
8290 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
8291 set_bit(ICE_DOWN, pf->state);
8292 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
8293 set_bit(ICE_SERVICE_DIS, pf->state);
8294 break;
8295 }
8296
8297 ice_service_task_schedule(pf);
8298 pf->tx_timeout_recovery_level++;
8299 }
8300
8301 /**
8302 * ice_setup_tc_cls_flower - flower classifier offloads
8303 * @np: net device to configure
8304 * @filter_dev: device on which filter is added
8305 * @cls_flower: offload data
8306 */
8307 static int
8308 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
8309 struct net_device *filter_dev,
8310 struct flow_cls_offload *cls_flower)
8311 {
8312 struct ice_vsi *vsi = np->vsi;
8313
8314 if (cls_flower->common.chain_index)
8315 return -EOPNOTSUPP;
8316
8317 switch (cls_flower->command) {
8318 case FLOW_CLS_REPLACE:
8319 return ice_add_cls_flower(filter_dev, vsi, cls_flower);
8320 case FLOW_CLS_DESTROY:
8321 return ice_del_cls_flower(vsi, cls_flower);
8322 default:
8323 return -EINVAL;
8324 }
8325 }
8326
8327 /**
8328 * ice_setup_tc_block_cb - callback handler registered for TC block
8329 * @type: TC SETUP type
8330 * @type_data: TC flower offload data that contains user input
8331 * @cb_priv: netdev private data
8332 */
8333 static int
8334 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
8335 {
8336 struct ice_netdev_priv *np = cb_priv;
8337
8338 switch (type) {
8339 case TC_SETUP_CLSFLOWER:
8340 return ice_setup_tc_cls_flower(np, np->vsi->netdev,
8341 type_data);
8342 default:
8343 return -EOPNOTSUPP;
8344 }
8345 }
8346
8347 /**
8348 * ice_validate_mqprio_qopt - Validate TCF input parameters
8349 * @vsi: Pointer to VSI
8350 * @mqprio_qopt: input parameters for mqprio queue configuration
8351 *
8352 * This function validates MQPRIO params, such as qcount (a power of 2 wherever
8353 * needed), and makes sure the user doesn't specify qcounts and BW rate limits
8354 * for more TCs than "num_tc"
8355 */
8356 static int
8357 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
8358 struct tc_mqprio_qopt_offload *mqprio_qopt)
8359 {
8360 int non_power_of_2_qcount = 0;
8361 struct ice_pf *pf = vsi->back;
8362 int max_rss_q_cnt = 0;
8363 u64 sum_min_rate = 0;
8364 struct device *dev;
8365 int i, speed;
8366 u8 num_tc;
8367
8368 if (vsi->type != ICE_VSI_PF)
8369 return -EINVAL;
8370
8371 if (mqprio_qopt->qopt.offset[0] != 0 ||
8372 mqprio_qopt->qopt.num_tc < 1 ||
8373 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
8374 return -EINVAL;
8375
8376 dev = ice_pf_to_dev(pf);
8377 vsi->ch_rss_size = 0;
8378 num_tc = mqprio_qopt->qopt.num_tc;
8379 speed = ice_get_link_speed_kbps(vsi);
8380
8381 for (i = 0; num_tc; i++) {
8382 int qcount = mqprio_qopt->qopt.count[i];
8383 u64 max_rate, min_rate, rem;
8384
8385 if (!qcount)
8386 return -EINVAL;
8387
8388 if (is_power_of_2(qcount)) {
8389 if (non_power_of_2_qcount &&
8390 qcount > non_power_of_2_qcount) {
8391 dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
8392 qcount, non_power_of_2_qcount);
8393 return -EINVAL;
8394 }
8395 if (qcount > max_rss_q_cnt)
8396 max_rss_q_cnt = qcount;
8397 } else {
8398 if (non_power_of_2_qcount &&
8399 qcount != non_power_of_2_qcount) {
8400 dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
8401 qcount, non_power_of_2_qcount);
8402 return -EINVAL;
8403 }
8404 if (qcount < max_rss_q_cnt) {
8405 dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
8406 qcount, max_rss_q_cnt);
8407 return -EINVAL;
8408 }
8409 max_rss_q_cnt = qcount;
8410 non_power_of_2_qcount = qcount;
8411 }
8412
8413 /* The tc command takes input in K/M/Gbps or K/M/Gbit etc. but
8414 * converts the bandwidth rate limit into Bytes/s when
8415 * passing it down to the driver. So convert the input bandwidth
8416 * from Bytes/s to Kbps
8417 */
8418 max_rate = mqprio_qopt->max_rate[i];
8419 max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
8420
8421 /* min_rate is minimum guaranteed rate and it can't be zero */
8422 min_rate = mqprio_qopt->min_rate[i];
8423 min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
8424 sum_min_rate += min_rate;
8425
8426 if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
8427 dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
8428 min_rate, ICE_MIN_BW_LIMIT);
8429 return -EINVAL;
8430 }
8431
8432 if (max_rate && max_rate > speed) {
8433 dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
8434 i, max_rate, speed);
8435 return -EINVAL;
8436 }
8437
8438 iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
8439 if (rem) {
8440 dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
8441 i, ICE_MIN_BW_LIMIT);
8442 return -EINVAL;
8443 }
8444
8445 iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
8446 if (rem) {
8447 dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
8448 i, ICE_MIN_BW_LIMIT);
8449 return -EINVAL;
8450 }
8451
8452 /* min_rate can't be more than max_rate, except when max_rate
8453 * is zero (implies max_rate sought is max line rate). In such
8454 * a case min_rate can be more than max.
8455 */
8456 if (max_rate && min_rate > max_rate) {
8457 dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
8458 min_rate, max_rate);
8459 return -EINVAL;
8460 }
8461
8462 if (i >= mqprio_qopt->qopt.num_tc - 1)
8463 break;
8464 if (mqprio_qopt->qopt.offset[i + 1] !=
8465 (mqprio_qopt->qopt.offset[i] + qcount))
8466 return -EINVAL;
8467 }
8468 if (vsi->num_rxq <
8469 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8470 return -EINVAL;
8471 if (vsi->num_txq <
8472 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8473 return -EINVAL;
8474
8475 if (sum_min_rate && sum_min_rate > (u64)speed) {
8476 dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
8477 sum_min_rate, speed);
8478 return -EINVAL;
8479 }
8480
8481 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8482 vsi->ch_rss_size = max_rss_q_cnt;
8483
8484 return 0;
8485 }
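
/* An illustrative tc(8) invocation producing the mqprio_qopt validated
 * above (device name and rates are placeholders):
 *
 *	tc qdisc add dev <pf-netdev> root mqprio \
 *		num_tc 2 map 0 0 0 0 1 1 1 1 \
 *		queues 4@0 4@4 hw 1 mode channel \
 *		shaper bw_rlimit min_rate 1Gbit 2Gbit max_rate 4Gbit 5Gbit
 *
 * Here offset[] = {0, 4} and count[] = {4, 4}; the per-TC rates arrive
 * in Bytes/s and are converted back to Kbps by this function.
 */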
8486
8487 /**
8488 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8489 * @pf: ptr to PF device
8490 * @vsi: ptr to VSI
8491 */
8492 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8493 {
8494 struct device *dev = ice_pf_to_dev(pf);
8495 bool added = false;
8496 struct ice_hw *hw;
8497 int flow;
8498
8499 if (!(vsi->num_gfltr || vsi->num_bfltr))
8500 return -EINVAL;
8501
8502 hw = &pf->hw;
8503 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8504 struct ice_fd_hw_prof *prof;
8505 int tun, status;
8506 u64 entry_h;
8507
8508 if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8509 hw->fdir_prof[flow]->cnt))
8510 continue;
8511
8512 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8513 enum ice_flow_priority prio;
8514
8515 /* add this VSI to FDir profile for this flow */
8516 prio = ICE_FLOW_PRIO_NORMAL;
8517 prof = hw->fdir_prof[flow];
8518 status = ice_flow_add_entry(hw, ICE_BLK_FD,
8519 prof->prof_id[tun],
8520 prof->vsi_h[0], vsi->idx,
8521 prio, prof->fdir_seg[tun],
8522 &entry_h);
8523 if (status) {
8524 dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8525 vsi->idx, flow);
8526 continue;
8527 }
8528
8529 prof->entry_h[prof->cnt][tun] = entry_h;
8530 }
8531
8532 /* store VSI for filter replay and delete */
8533 prof->vsi_h[prof->cnt] = vsi->idx;
8534 prof->cnt++;
8535
8536 added = true;
8537 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8538 flow);
8539 }
8540
8541 if (!added)
8542 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8543
8544 return 0;
8545 }
8546
8547 /**
8548 * ice_add_channel - add a channel by adding VSI
8549 * @pf: ptr to PF device
8550 * @sw_id: underlying HW switching element ID
8551 * @ch: ptr to channel structure
8552 *
8553 * Add a channel (VSI) using add_vsi and queue_map
8554 */
8555 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8556 {
8557 struct device *dev = ice_pf_to_dev(pf);
8558 struct ice_vsi *vsi;
8559
8560 if (ch->type != ICE_VSI_CHNL) {
8561 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8562 return -EINVAL;
8563 }
8564
8565 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8566 if (!vsi || vsi->type != ICE_VSI_CHNL) {
8567 dev_err(dev, "create chnl VSI failure\n");
8568 return -EINVAL;
8569 }
8570
8571 ice_add_vsi_to_fdir(pf, vsi);
8572
8573 ch->sw_id = sw_id;
8574 ch->vsi_num = vsi->vsi_num;
8575 ch->info.mapping_flags = vsi->info.mapping_flags;
8576 ch->ch_vsi = vsi;
8577 /* set the back pointer of channel for newly created VSI */
8578 vsi->ch = ch;
8579
8580 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8581 sizeof(vsi->info.q_mapping));
8582 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8583 sizeof(vsi->info.tc_mapping));
8584
8585 return 0;
8586 }
8587
8588 /**
8589 * ice_chnl_cfg_res
8590 * @vsi: the VSI being setup
8591 * @ch: ptr to channel structure
8592 *
8593 * Configure channel specific resources such as rings, vector.
8594 */
8595 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8596 {
8597 int i;
8598
8599 for (i = 0; i < ch->num_txq; i++) {
8600 struct ice_q_vector *tx_q_vector, *rx_q_vector;
8601 struct ice_ring_container *rc;
8602 struct ice_tx_ring *tx_ring;
8603 struct ice_rx_ring *rx_ring;
8604
8605 tx_ring = vsi->tx_rings[ch->base_q + i];
8606 rx_ring = vsi->rx_rings[ch->base_q + i];
8607 if (!tx_ring || !rx_ring)
8608 continue;
8609
8610 /* setup ring being channel enabled */
8611 tx_ring->ch = ch;
8612 rx_ring->ch = ch;
8613
8614 /* following code block sets up vector specific attributes */
8615 tx_q_vector = tx_ring->q_vector;
8616 rx_q_vector = rx_ring->q_vector;
8617 if (!tx_q_vector && !rx_q_vector)
8618 continue;
8619
8620 if (tx_q_vector) {
8621 tx_q_vector->ch = ch;
8622 /* setup Tx and Rx ITR setting if DIM is off */
8623 rc = &tx_q_vector->tx;
8624 if (!ITR_IS_DYNAMIC(rc))
8625 ice_write_itr(rc, rc->itr_setting);
8626 }
8627 if (rx_q_vector) {
8628 rx_q_vector->ch = ch;
8629 /* setup Tx and Rx ITR setting if DIM is off */
8630 rc = &rx_q_vector->rx;
8631 if (!ITR_IS_DYNAMIC(rc))
8632 ice_write_itr(rc, rc->itr_setting);
8633 }
8634 }
8635
8636 /* it is safe to assume that, if the channel has a non-zero num_txq or
8637 * num_rxq, the GLINT_ITR register will have been written to perform an
8638 * in-context update, hence perform a flush
8639 */
8640 if (ch->num_txq || ch->num_rxq)
8641 ice_flush(&vsi->back->hw);
8642 }
8643
8644 /**
8645 * ice_cfg_chnl_all_res - configure channel resources
8646 * @vsi: ptr to main VSI
8647 * @ch: ptr to channel structure
8648 *
8649 * This function configures channel specific resources such as flow-director
8650 * counter index, and other resources such as queues, vectors, ITR settings
8651 */
8652 static void
8653 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8654 {
8655 /* configure channel (aka ADQ) resources such as queues, vectors,
8656 * ITR settings for channel specific vectors and anything else
8657 */
8658 ice_chnl_cfg_res(vsi, ch);
8659 }
8660
8661 /**
8662 * ice_setup_hw_channel - setup new channel
8663 * @pf: ptr to PF device
8664 * @vsi: the VSI being setup
8665 * @ch: ptr to channel structure
8666 * @sw_id: underlying HW switching element ID
8667 * @type: type of channel to be created (VMDq2/VF)
8668 *
8669 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8670 * and configures Tx rings accordingly
8671 */
8672 static int
8673 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8674 struct ice_channel *ch, u16 sw_id, u8 type)
8675 {
8676 struct device *dev = ice_pf_to_dev(pf);
8677 int ret;
8678
8679 ch->base_q = vsi->next_base_q;
8680 ch->type = type;
8681
8682 ret = ice_add_channel(pf, sw_id, ch);
8683 if (ret) {
8684 dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8685 return ret;
8686 }
8687
8688 /* configure/setup ADQ specific resources */
8689 ice_cfg_chnl_all_res(vsi, ch);
8690
8691 /* make sure to update next_base_q so that subsequent channels'
8692 * (aka ADQ) VSI queue maps are correct
8693 */
8694 vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8695 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8696 ch->num_rxq);
8697
8698 return 0;
8699 }
8700
8701 /**
8702 * ice_setup_channel - setup new channel using uplink element
8703 * @pf: ptr to PF device
8704 * @vsi: the VSI being setup
8705 * @ch: ptr to channel structure
8706 *
8707 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8708 * and uplink switching element
8709 */
8710 static bool
8711 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8712 struct ice_channel *ch)
8713 {
8714 struct device *dev = ice_pf_to_dev(pf);
8715 u16 sw_id;
8716 int ret;
8717
8718 if (vsi->type != ICE_VSI_PF) {
8719 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8720 return false;
8721 }
8722
8723 sw_id = pf->first_sw->sw_id;
8724
8725 /* create channel (VSI) */
8726 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8727 if (ret) {
8728 dev_err(dev, "failed to setup hw_channel\n");
8729 return false;
8730 }
8731 dev_dbg(dev, "successfully created channel()\n");
8732
8733 return ch->ch_vsi ? true : false;
8734 }
8735
8736 /**
8737 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8738 * @vsi: VSI to be configured
8739 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8740 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8741 */
8742 static int
8743 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8744 {
8745 int err;
8746
8747 err = ice_set_min_bw_limit(vsi, min_tx_rate);
8748 if (err)
8749 return err;
8750
8751 return ice_set_max_bw_limit(vsi, max_tx_rate);
8752 }
8753
8754 /**
8755 * ice_create_q_channel - function to create channel
8756 * @vsi: VSI to be configured
8757 * @ch: ptr to channel (it contains channel specific params)
8758 *
8759 * This function creates channel (VSI) using num_queues specified by user,
8760 * reconfigs RSS if needed.
8761 */
8762 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8763 {
8764 struct ice_pf *pf = vsi->back;
8765 struct device *dev;
8766
8767 if (!ch)
8768 return -EINVAL;
8769
8770 dev = ice_pf_to_dev(pf);
8771 if (!ch->num_txq || !ch->num_rxq) {
8772 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8773 return -EINVAL;
8774 }
8775
8776 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8777 dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8778 vsi->cnt_q_avail, ch->num_txq);
8779 return -EINVAL;
8780 }
8781
8782 if (!ice_setup_channel(pf, vsi, ch)) {
8783 dev_info(dev, "Failed to setup channel\n");
8784 return -EINVAL;
8785 }
8786 /* configure BW rate limit */
8787 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8788 int ret;
8789
8790 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8791 ch->min_tx_rate);
8792 if (ret)
8793 dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8794 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8795 else
8796 dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8797 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8798 }
8799
8800 vsi->cnt_q_avail -= ch->num_txq;
8801
8802 return 0;
8803 }
8804
8805 /**
8806 * ice_rem_all_chnl_fltrs - removes all channel filters
8807 * @pf: ptr to PF, TC-flower based filter are tracked at PF level
8808 *
8809 * Remove all advanced switch filters only if they are channel-specific
8810 * tc-flower based filters
8811 */
8812 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8813 {
8814 struct ice_tc_flower_fltr *fltr;
8815 struct hlist_node *node;
8816
8817 /* to remove all channel filters, iterate an ordered list of filters */
8818 hlist_for_each_entry_safe(fltr, node,
8819 &pf->tc_flower_fltr_list,
8820 tc_flower_node) {
8821 struct ice_rule_query_data rule;
8822 int status;
8823
8824 /* for now process only channel specific filters */
8825 if (!ice_is_chnl_fltr(fltr))
8826 continue;
8827
8828 rule.rid = fltr->rid;
8829 rule.rule_id = fltr->rule_id;
8830 rule.vsi_handle = fltr->dest_vsi_handle;
8831 status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8832 if (status) {
8833 if (status == -ENOENT)
8834 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8835 rule.rule_id);
8836 else
8837 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8838 status);
8839 } else if (fltr->dest_vsi) {
8840 /* update advanced switch filter count */
8841 if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8842 u32 flags = fltr->flags;
8843
8844 fltr->dest_vsi->num_chnl_fltr--;
8845 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8846 ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8847 pf->num_dmac_chnl_fltrs--;
8848 }
8849 }
8850
8851 hlist_del(&fltr->tc_flower_node);
8852 kfree(fltr);
8853 }
8854 }
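
/* The _safe list iterator above is what makes hlist_del() legal
 * mid-walk; a minimal sketch of the same idiom with hypothetical types:
 *
 *	struct foo *f;
 *	struct hlist_node *tmp;
 *
 *	hlist_for_each_entry_safe(f, tmp, &head, node) {
 *		hlist_del(&f->node);
 *		kfree(f);
 *	}
 *
 * The cursor (tmp) caches the next node, so freeing the current entry
 * does not break the traversal.
 */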
8855
8856 /**
8857 * ice_remove_q_channels - Remove queue channels for the TCs
8858 * @vsi: VSI to be configured
8859 * @rem_fltr: delete advanced switch filter or not
8860 *
8861 * Remove queue channels for the TCs
8862 */
8863 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8864 {
8865 struct ice_channel *ch, *ch_tmp;
8866 struct ice_pf *pf = vsi->back;
8867 int i;
8868
8869 /* remove all tc-flower based filters if they are channel filters only */
8870 if (rem_fltr)
8871 ice_rem_all_chnl_fltrs(pf);
8872
8873 /* remove ntuple filters since queue configuration is being changed */
8874 if (vsi->netdev->features & NETIF_F_NTUPLE) {
8875 struct ice_hw *hw = &pf->hw;
8876
8877 mutex_lock(&hw->fdir_fltr_lock);
8878 ice_fdir_del_all_fltrs(vsi);
8879 mutex_unlock(&hw->fdir_fltr_lock);
8880 }
8881
8882 /* perform cleanup for channels if they exist */
8883 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8884 struct ice_vsi *ch_vsi;
8885
8886 list_del(&ch->list);
8887 ch_vsi = ch->ch_vsi;
8888 if (!ch_vsi) {
8889 kfree(ch);
8890 continue;
8891 }
8892
8893 /* Reset queue contexts */
8894 for (i = 0; i < ch->num_rxq; i++) {
8895 struct ice_tx_ring *tx_ring;
8896 struct ice_rx_ring *rx_ring;
8897
8898 tx_ring = vsi->tx_rings[ch->base_q + i];
8899 rx_ring = vsi->rx_rings[ch->base_q + i];
8900 if (tx_ring) {
8901 tx_ring->ch = NULL;
8902 if (tx_ring->q_vector)
8903 tx_ring->q_vector->ch = NULL;
8904 }
8905 if (rx_ring) {
8906 rx_ring->ch = NULL;
8907 if (rx_ring->q_vector)
8908 rx_ring->q_vector->ch = NULL;
8909 }
8910 }
8911
8912 /* Release FD resources for the channel VSI */
8913 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8914
8915 /* clear the VSI from scheduler tree */
8916 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8917
8918 /* Delete VSI from FW, PF and HW VSI arrays */
8919 ice_vsi_delete(ch->ch_vsi);
8920
8921 /* free the channel */
8922 kfree(ch);
8923 }
8924
8925 /* clear the channel VSI map which is stored in main VSI */
8926 ice_for_each_chnl_tc(i)
8927 vsi->tc_map_vsi[i] = NULL;
8928
8929 /* reset all TC information on the main VSI */
8930 vsi->all_enatc = 0;
8931 vsi->all_numtc = 0;
8932 }
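
/* Teardown-order note (informational): channel state is released above in
 * a deliberate sequence -- switch/ntuple filters first, then the per-queue
 * channel back-pointers, then each channel VSI's flow director resources,
 * scheduler nodes, and finally the VSI itself -- so that no switch rule is
 * left referencing an already-deleted VSI handle.
 */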
8933
8934 /**
8935 * ice_rebuild_channels - rebuild channels
8936 * @pf: ptr to PF
8937 *
8938 * Recreate channel VSIs and replay filters
8939 */
8940 static int ice_rebuild_channels(struct ice_pf *pf)
8941 {
8942 struct device *dev = ice_pf_to_dev(pf);
8943 struct ice_vsi *main_vsi;
8944 bool rem_adv_fltr = true;
8945 struct ice_channel *ch;
8946 struct ice_vsi *vsi;
8947 int tc_idx = 1;
8948 int i, err;
8949
8950 main_vsi = ice_get_main_vsi(pf);
8951 if (!main_vsi)
8952 return 0;
8953
8954 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8955 main_vsi->old_numtc == 1)
8956 return 0; /* nothing to be done */
8957
8958 /* reconfigure main VSI based on old value of TC and cached values
8959 * for MQPRIO opts
8960 */
8961 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8962 if (err) {
8963 dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8964 main_vsi->old_ena_tc, main_vsi->vsi_num);
8965 return err;
8966 }
8967
8968 /* rebuild ADQ VSIs */
8969 ice_for_each_vsi(pf, i) {
8970 enum ice_vsi_type type;
8971
8972 vsi = pf->vsi[i];
8973 if (!vsi || vsi->type != ICE_VSI_CHNL)
8974 continue;
8975
8976 type = vsi->type;
8977
8978 /* rebuild ADQ VSI */
8979 err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
8980 if (err) {
8981 dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8982 ice_vsi_type_str(type), vsi->idx, err);
8983 goto cleanup;
8984 }
8985
8986 /* Re-map HW VSI number, using the VSI handle that is
8987 * validated by the ice_replay_vsi() call below
8988 */
8989 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8990
8991 /* replay filters for the VSI */
8992 err = ice_replay_vsi(&pf->hw, vsi->idx);
8993 if (err) {
8994 dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8995 ice_vsi_type_str(type), err, vsi->idx);
8996 rem_adv_fltr = false;
8997 goto cleanup;
8998 }
8999 dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
9000 ice_vsi_type_str(type), vsi->idx);
9001
9002 /* store ADQ VSI at correct TC index in main VSI's
9003 * map of TC to VSI
9004 */
9005 main_vsi->tc_map_vsi[tc_idx++] = vsi;
9006 }
9007
9008 /* ADQ VSI(s) have been rebuilt successfully, so set up
9009 * channels for the main VSI's Tx and Rx rings
9010 */
9011 list_for_each_entry(ch, &main_vsi->ch_list, list) {
9012 struct ice_vsi *ch_vsi;
9013
9014 ch_vsi = ch->ch_vsi;
9015 if (!ch_vsi)
9016 continue;
9017
9018 /* reconfig channel resources */
9019 ice_cfg_chnl_all_res(main_vsi, ch);
9020
9021 /* replay BW rate limit if it is non-zero */
9022 if (!ch->max_tx_rate && !ch->min_tx_rate)
9023 continue;
9024
9025 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
9026 ch->min_tx_rate);
9027 if (err)
9028 dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
9029 err, ch->max_tx_rate, ch->min_tx_rate,
9030 ch_vsi->vsi_num);
9031 else
9032 dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
9033 ch->max_tx_rate, ch->min_tx_rate,
9034 ch_vsi->vsi_num);
9035 }
9036
9037 /* reconfig RSS for main VSI */
9038 if (main_vsi->ch_rss_size)
9039 ice_vsi_cfg_rss_lut_key(main_vsi);
9040
9041 return 0;
9042
9043 cleanup:
9044 ice_remove_q_channels(main_vsi, rem_adv_fltr);
9045 return err;
9046 }
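
/* Rebuild-flow note (informational): this function is intended for the
 * post-reset rebuild path, where the main VSI's TC configuration, each
 * ADQ channel VSI, its replayed filters, and any cached BW limits are
 * restored in that order; on failure, the cleanup label tears the
 * channels back down via ice_remove_q_channels().
 */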
9047
9048 /**
9049 * ice_create_q_channels - Add queue channel for the given TCs
9050 * @vsi: VSI to be configured
9051 *
9052 * Configures queue channel mapping to the given TCs
9053 */
9054 static int ice_create_q_channels(struct ice_vsi *vsi)
9055 {
9056 struct ice_pf *pf = vsi->back;
9057 struct ice_channel *ch;
9058 int ret = 0, i;
9059
9060 ice_for_each_chnl_tc(i) {
9061 if (!(vsi->all_enatc & BIT(i)))
9062 continue;
9063
9064 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
9065 if (!ch) {
9066 ret = -ENOMEM;
9067 goto err_free;
9068 }
9069 INIT_LIST_HEAD(&ch->list);
9070 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
9071 ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
9072 ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
9073 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
9074 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
9075
9076 /* convert to Kbits/s */
9077 if (ch->max_tx_rate)
9078 ch->max_tx_rate = div_u64(ch->max_tx_rate,
9079 ICE_BW_KBPS_DIVISOR);
9080 if (ch->min_tx_rate)
9081 ch->min_tx_rate = div_u64(ch->min_tx_rate,
9082 ICE_BW_KBPS_DIVISOR);
9083
9084 ret = ice_create_q_channel(vsi, ch);
9085 if (ret) {
9086 dev_err(ice_pf_to_dev(pf),
9087 "failed creating channel TC:%d\n", i);
9088 kfree(ch);
9089 goto err_free;
9090 }
9091 list_add_tail(&ch->list, &vsi->ch_list);
9092 vsi->tc_map_vsi[i] = ch->ch_vsi;
9093 dev_dbg(ice_pf_to_dev(pf),
9094 "successfully created channel: VSI %pK\n", ch->ch_vsi);
9095 }
9096 return 0;
9097
9098 err_free:
9099 ice_remove_q_channels(vsi, false);
9100
9101 return ret;
9102 }
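
/* Rate-conversion sketch (assumes ICE_BW_KBPS_DIVISOR == 125): mqprio
 * hands rates to the driver in bytes per second, while the scheduler is
 * programmed in Kbit/s, hence the div_u64() calls above. For example:
 *
 *   max_rate[i] = 125000000 bytes/s            (i.e. 1 Gbit/s)
 *   div_u64(125000000, 125) = 1000000 Kbit/s
 *
 * The divisor is 1000 / 8 = 125 because 1 byte/s is 8 bit/s and
 * 1 Kbit is 1000 bits.
 */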
9103
9104 /**
9105 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
9106 * @netdev: net device to configure
9107 * @type_data: TC offload data
9108 */
9109 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
9110 {
9111 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
9112 struct ice_netdev_priv *np = netdev_priv(netdev);
9113 struct ice_vsi *vsi = np->vsi;
9114 struct ice_pf *pf = vsi->back;
9115 u16 mode, ena_tc_qdisc = 0;
9116 int cur_txq, cur_rxq;
9117 u8 hw = 0, num_tcf;
9118 struct device *dev;
9119 int ret, i;
9120
9121 dev = ice_pf_to_dev(pf);
9122 num_tcf = mqprio_qopt->qopt.num_tc;
9123 hw = mqprio_qopt->qopt.hw;
9124 mode = mqprio_qopt->mode;
9125 if (!hw) {
9126 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
9127 vsi->ch_rss_size = 0;
9128 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
9129 goto config_tcf;
9130 }
9131
9132 /* Generate queue region map for number of TCF requested */
9133 for (i = 0; i < num_tcf; i++)
9134 ena_tc_qdisc |= BIT(i);
9135
9136 switch (mode) {
9137 case TC_MQPRIO_MODE_CHANNEL:
9138
9139 if (pf->hw.port_info->is_custom_tx_enabled) {
9140 dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
9141 return -EBUSY;
9142 }
9143 ice_tear_down_devlink_rate_tree(pf);
9144
9145 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
9146 if (ret) {
9147 netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
9148 ret);
9149 return ret;
9150 }
9151 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
9152 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
9153 /* don't assume the state of hw_tc_offload during driver load;
9154 * set the TC flower filter flag only if hw_tc_offload is
9155 * already ON
9156 */
9157 if (vsi->netdev->features & NETIF_F_HW_TC)
9158 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
9159 break;
9160 default:
9161 return -EINVAL;
9162 }
9163
9164 config_tcf:
9165
9166 /* Requesting same TCF configuration as already enabled */
9167 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
9168 mode != TC_MQPRIO_MODE_CHANNEL)
9169 return 0;
9170
9171 /* Pause VSI queues */
9172 ice_dis_vsi(vsi, true);
9173
9174 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
9175 ice_remove_q_channels(vsi, true);
9176
9177 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9178 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
9179 num_online_cpus());
9180 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
9181 num_online_cpus());
9182 } else {
9183 /* logic to rebuild VSI, same as with ethtool -L */
9184 u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
9185
9186 for (i = 0; i < num_tcf; i++) {
9187 if (!(ena_tc_qdisc & BIT(i)))
9188 continue;
9189
9190 offset = vsi->mqprio_qopt.qopt.offset[i];
9191 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
9192 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
9193 }
9194 vsi->req_txq = offset + qcount_tx;
9195 vsi->req_rxq = offset + qcount_rx;
9196
9197 /* store away the original rss_size info so that it gets reused
9198 * from ice_vsi_rebuild during the tc-qdisc delete stage - to
9199 * determine what the rss_size for the main VSI should be
9200 */
9201 vsi->orig_rss_size = vsi->rss_size;
9202 }
9203
9204 /* save current values of Tx and Rx queues before calling VSI rebuild
9205 * for fallback option
9206 */
9207 cur_txq = vsi->num_txq;
9208 cur_rxq = vsi->num_rxq;
9209
9210 /* proceed with rebuilding the main VSI using the correct number of queues */
9211 ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
9212 if (ret) {
9213 /* fallback to current number of queues */
9214 dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
9215 vsi->req_txq = cur_txq;
9216 vsi->req_rxq = cur_rxq;
9217 clear_bit(ICE_RESET_FAILED, pf->state);
9218 if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
9219 dev_err(dev, "Rebuild of main VSI failed again\n");
9220 return ret;
9221 }
9222 }
9223
9224 vsi->all_numtc = num_tcf;
9225 vsi->all_enatc = ena_tc_qdisc;
9226 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
9227 if (ret) {
9228 netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
9229 vsi->vsi_num);
9230 goto exit;
9231 }
9232
9233 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9234 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
9235 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
9236
9237 /* set TC0 rate limit if specified */
9238 if (max_tx_rate || min_tx_rate) {
9239 /* convert to Kbits/s */
9240 if (max_tx_rate)
9241 max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
9242 if (min_tx_rate)
9243 min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
9244
9245 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
9246 if (!ret) {
9247 dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
9248 max_tx_rate, min_tx_rate, vsi->vsi_num);
9249 } else {
9250 dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
9251 max_tx_rate, min_tx_rate, vsi->vsi_num);
9252 goto exit;
9253 }
9254 }
9255 ret = ice_create_q_channels(vsi);
9256 if (ret) {
9257 netdev_err(netdev, "failed configuring queue channels\n");
9258 goto exit;
9259 } else {
9260 netdev_dbg(netdev, "successfully configured channels\n");
9261 }
9262 }
9263
9264 if (vsi->ch_rss_size)
9265 ice_vsi_cfg_rss_lut_key(vsi);
9266
9267 exit:
9268 /* if error, reset the all_numtc and all_enatc */
9269 if (ret) {
9270 vsi->all_numtc = 0;
9271 vsi->all_enatc = 0;
9272 }
9273 /* resume VSI */
9274 ice_ena_vsi(vsi, true);
9275
9276 return ret;
9277 }
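
/* Usage sketch (illustrative; exact options depend on the iproute2
 * version): the handler above is reached when userspace installs an
 * mqprio qdisc in channel mode, e.g.:
 *
 *   tc qdisc add dev <iface> root mqprio \
 *       num_tc 2 map 0 0 0 0 1 1 1 1 \
 *       queues 4@0 4@4 hw 1 mode channel \
 *       shaper bw_rlimit max_rate 1Gbit 2Gbit
 *
 * "hw 1 mode channel" selects TC_MQPRIO_MODE_CHANNEL, and the per-TC
 * rates arrive in mqprio_qopt->max_rate[]/min_rate[] in bytes/s.
 */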
9278
9279 static LIST_HEAD(ice_block_cb_list);
9280
9281 static int
9282 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
9283 void *type_data)
9284 {
9285 struct ice_netdev_priv *np = netdev_priv(netdev);
9286 struct ice_pf *pf = np->vsi->back;
9287 bool locked = false;
9288 int err;
9289
9290 switch (type) {
9291 case TC_SETUP_BLOCK:
9292 return flow_block_cb_setup_simple(type_data,
9293 &ice_block_cb_list,
9294 ice_setup_tc_block_cb,
9295 np, np, true);
9296 case TC_SETUP_QDISC_MQPRIO:
9297 if (ice_is_eswitch_mode_switchdev(pf)) {
9298 netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
9299 return -EOPNOTSUPP;
9300 }
9301
9302 if (pf->adev) {
9303 mutex_lock(&pf->adev_mutex);
9304 device_lock(&pf->adev->dev);
9305 locked = true;
9306 if (pf->adev->dev.driver) {
9307 netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
9308 err = -EBUSY;
9309 goto adev_unlock;
9310 }
9311 }
9312
9313 /* setup traffic classifier for receive side */
9314 mutex_lock(&pf->tc_mutex);
9315 err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
9316 mutex_unlock(&pf->tc_mutex);
9317
9318 adev_unlock:
9319 if (locked) {
9320 device_unlock(&pf->adev->dev);
9321 mutex_unlock(&pf->adev_mutex);
9322 }
9323 return err;
9324 default:
9325 return -EOPNOTSUPP;
9326 }
9327 return -EOPNOTSUPP;
9328 }
9329
9330 static struct ice_indr_block_priv *
9331 ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
9332 struct net_device *netdev)
9333 {
9334 struct ice_indr_block_priv *cb_priv;
9335
9336 list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
9337 if (!cb_priv->netdev)
9338 return NULL;
9339 if (cb_priv->netdev == netdev)
9340 return cb_priv;
9341 }
9342 return NULL;
9343 }
9344
9345 static int
9346 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
9347 void *indr_priv)
9348 {
9349 struct ice_indr_block_priv *priv = indr_priv;
9350 struct ice_netdev_priv *np = priv->np;
9351
9352 switch (type) {
9353 case TC_SETUP_CLSFLOWER:
9354 return ice_setup_tc_cls_flower(np, priv->netdev,
9355 (struct flow_cls_offload *)
9356 type_data);
9357 default:
9358 return -EOPNOTSUPP;
9359 }
9360 }
9361
9362 static int
9363 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
9364 struct ice_netdev_priv *np,
9365 struct flow_block_offload *f, void *data,
9366 void (*cleanup)(struct flow_block_cb *block_cb))
9367 {
9368 struct ice_indr_block_priv *indr_priv;
9369 struct flow_block_cb *block_cb;
9370
9371 if (!ice_is_tunnel_supported(netdev) &&
9372 !(is_vlan_dev(netdev) &&
9373 vlan_dev_real_dev(netdev) == np->vsi->netdev))
9374 return -EOPNOTSUPP;
9375
9376 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9377 return -EOPNOTSUPP;
9378
9379 switch (f->command) {
9380 case FLOW_BLOCK_BIND:
9381 indr_priv = ice_indr_block_priv_lookup(np, netdev);
9382 if (indr_priv)
9383 return -EEXIST;
9384
9385 indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
9386 if (!indr_priv)
9387 return -ENOMEM;
9388
9389 indr_priv->netdev = netdev;
9390 indr_priv->np = np;
9391 list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
9392
9393 block_cb =
9394 flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
9395 indr_priv, indr_priv,
9396 ice_rep_indr_tc_block_unbind,
9397 f, netdev, sch, data, np,
9398 cleanup);
9399
9400 if (IS_ERR(block_cb)) {
9401 list_del(&indr_priv->list);
9402 kfree(indr_priv);
9403 return PTR_ERR(block_cb);
9404 }
9405 flow_block_cb_add(block_cb, f);
9406 list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
9407 break;
9408 case FLOW_BLOCK_UNBIND:
9409 indr_priv = ice_indr_block_priv_lookup(np, netdev);
9410 if (!indr_priv)
9411 return -ENOENT;
9412
9413 block_cb = flow_block_cb_lookup(f->block,
9414 ice_indr_setup_block_cb,
9415 indr_priv);
9416 if (!block_cb)
9417 return -ENOENT;
9418
9419 flow_indr_block_cb_remove(block_cb, f);
9420
9421 list_del(&block_cb->driver_list);
9422 break;
9423 default:
9424 return -EOPNOTSUPP;
9425 }
9426 return 0;
9427 }
9428
9429 static int
9430 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
9431 void *cb_priv, enum tc_setup_type type, void *type_data,
9432 void *data,
9433 void (*cleanup)(struct flow_block_cb *block_cb))
9434 {
9435 switch (type) {
9436 case TC_SETUP_BLOCK:
9437 return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
9438 data, cleanup);
9439
9440 default:
9441 return -EOPNOTSUPP;
9442 }
9443 }
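
/* Registration note (informational): this callback is the entry point for
 * indirect TC block offload on foreign netdevs (e.g. tunnel or VLAN upper
 * devices); it is expected to be registered with the flow offload core via
 * flow_indr_dev_register() elsewhere in the driver and dispatches
 * FLOW_BLOCK bind/unbind requests to ice_indr_setup_tc_block() above.
 */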
9444
9445 /**
9446 * ice_open - Called when a network interface becomes active
9447 * @netdev: network interface device structure
9448 *
9449 * The open entry point is called when a network interface is made
9450 * active by the system (IFF_UP). At this point all resources needed
9451 * for transmit and receive operations are allocated, the interrupt
9452 * handler is registered with the OS, the netdev watchdog is enabled,
9453 * and the stack is notified that the interface is ready.
9454 *
9455 * Returns 0 on success, negative value on failure
9456 */
9457 int ice_open(struct net_device *netdev)
9458 {
9459 struct ice_netdev_priv *np = netdev_priv(netdev);
9460 struct ice_pf *pf = np->vsi->back;
9461
9462 if (ice_is_reset_in_progress(pf->state)) {
9463 netdev_err(netdev, "can't open net device while reset is in progress");
9464 return -EBUSY;
9465 }
9466
9467 return ice_open_internal(netdev);
9468 }
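
/* Usage sketch (illustrative): ice_open() runs when the interface is
 * brought administratively up, e.g.:
 *
 *   ip link set dev <iface> up
 *
 * The -EBUSY above surfaces to userspace as a failed link-up while a
 * reset is in flight; retrying once the reset completes should succeed.
 */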
9469
9470 /**
9471 * ice_open_internal - Called when a network interface becomes active
9472 * @netdev: network interface device structure
9473 *
9474 * Internal ice_open implementation. Should not be called directly except by ice_open and the
9475 * reset handling routine
9476 *
9477 * Returns 0 on success, negative value on failure
9478 */
9479 int ice_open_internal(struct net_device *netdev)
9480 {
9481 struct ice_netdev_priv *np = netdev_priv(netdev);
9482 struct ice_vsi *vsi = np->vsi;
9483 struct ice_pf *pf = vsi->back;
9484 struct ice_port_info *pi;
9485 int err;
9486
9487 if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
9488 netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
9489 return -EIO;
9490 }
9491
9492 netif_carrier_off(netdev);
9493
9494 pi = vsi->port_info;
9495 err = ice_update_link_info(pi);
9496 if (err) {
9497 netdev_err(netdev, "Failed to get link info, error %d\n", err);
9498 return err;
9499 }
9500
9501 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
9502
9503 /* Set PHY if there is media, otherwise, turn off PHY */
9504 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
9505 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9506 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
9507 err = ice_init_phy_user_cfg(pi);
9508 if (err) {
9509 netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
9510 err);
9511 return err;
9512 }
9513 }
9514
9515 err = ice_configure_phy(vsi);
9516 if (err) {
9517 netdev_err(netdev, "Failed to set physical link up, error %d\n",
9518 err);
9519 return err;
9520 }
9521 } else {
9522 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9523 ice_set_link(vsi, false);
9524 }
9525
9526 err = ice_vsi_open(vsi);
9527 if (err)
9528 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
9529 vsi->vsi_num, vsi->vsw->sw_id);
9530
9531 /* Update existing tunnels information */
9532 udp_tunnel_get_rx_info(netdev);
9533
9534 return err;
9535 }
9536
9537 /**
9538 * ice_stop - Disables a network interface
9539 * @netdev: network interface device structure
9540 *
9541 * The stop entry point is called when an interface is de-activated by the OS,
9542 * and the netdevice enters the DOWN state. The hardware is still under the
9543 * driver's control, but the netdev interface is disabled.
9544 *
9545 * Returns success only - not allowed to fail
9546 */
9547 int ice_stop(struct net_device *netdev)
9548 {
9549 struct ice_netdev_priv *np = netdev_priv(netdev);
9550 struct ice_vsi *vsi = np->vsi;
9551 struct ice_pf *pf = vsi->back;
9552
9553 if (ice_is_reset_in_progress(pf->state)) {
9554 netdev_err(netdev, "can't stop net device while reset is in progress");
9555 return -EBUSY;
9556 }
9557
9558 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
9559 int link_err = ice_force_phys_link_state(vsi, false);
9560
9561 if (link_err) {
9562 if (link_err == -ENOMEDIUM)
9563 netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
9564 vsi->vsi_num);
9565 else
9566 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
9567 vsi->vsi_num, link_err);
9568
9569 ice_vsi_close(vsi);
9570 return -EIO;
9571 }
9572 }
9573
9574 ice_vsi_close(vsi);
9575
9576 return 0;
9577 }
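
/* Usage sketch (illustrative; flag name as exposed by this driver's
 * ethtool private flags): the physical-link teardown above only runs when
 * link-down-on-close is enabled, e.g.:
 *
 *   ethtool --set-priv-flags <iface> link-down-on-close on
 *
 * Without it, ice_stop() closes the VSI but leaves PHY link state alone.
 */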
9578
9579 /**
9580 * ice_features_check - Validate encapsulated packet conforms to limits
9581 * @skb: skb buffer
9582 * @netdev: This port's netdev
9583 * @features: Offload features that the stack believes apply
9584 */
9585 static netdev_features_t
9586 ice_features_check(struct sk_buff *skb,
9587 struct net_device __always_unused *netdev,
9588 netdev_features_t features)
9589 {
9590 bool gso = skb_is_gso(skb);
9591 size_t len;
9592
9593 /* No point in doing any of this if neither checksum nor GSO are
9594 * being requested for this frame. We can rule out both by just
9595 * checking for CHECKSUM_PARTIAL
9596 */
9597 if (skb->ip_summed != CHECKSUM_PARTIAL)
9598 return features;
9599
9600 /* We cannot support GSO if the MSS is going to be less than
9601 * 64 bytes. If it is, then we need to drop support for GSO.
9602 */
9603 if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
9604 features &= ~NETIF_F_GSO_MASK;
9605
9606 len = skb_network_offset(skb);
9607 if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
9608 goto out_rm_features;
9609
9610 len = skb_network_header_len(skb);
9611 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9612 goto out_rm_features;
9613
9614 if (skb->encapsulation) {
9615 /* this must work for VXLAN frames AND IPIP/SIT frames, and in
9616 * the case of IPIP frames, the transport header pointer is
9617 * after the inner header! So check to make sure that this
9618 * is a GRE or UDP_TUNNEL frame before doing that math.
9619 */
9620 if (gso && (skb_shinfo(skb)->gso_type &
9621 (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
9622 len = skb_inner_network_header(skb) -
9623 skb_transport_header(skb);
9624 if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
9625 goto out_rm_features;
9626 }
9627
9628 len = skb_inner_network_header_len(skb);
9629 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9630 goto out_rm_features;
9631 }
9632
9633 return features;
9634 out_rm_features:
9635 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9636 }
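
/* Worked example (informational): for a plain IPv4/TCP frame,
 * skb_network_offset() is 14 (Ethernet header) and
 * skb_network_header_len() is 20 (IPv4 header without options); both are
 * even and well below ICE_TXD_MACLEN_MAX / ICE_TXD_IPLEN_MAX, so offloads
 * stay enabled. An odd header length trips the "len & 0x1" checks and
 * falls through to out_rm_features, stripping checksum and GSO for that
 * skb, since the Tx descriptor format cannot encode odd header lengths.
 */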
9637
9638 static const struct net_device_ops ice_netdev_safe_mode_ops = {
9639 .ndo_open = ice_open,
9640 .ndo_stop = ice_stop,
9641 .ndo_start_xmit = ice_start_xmit,
9642 .ndo_set_mac_address = ice_set_mac_address,
9643 .ndo_validate_addr = eth_validate_addr,
9644 .ndo_change_mtu = ice_change_mtu,
9645 .ndo_get_stats64 = ice_get_stats64,
9646 .ndo_tx_timeout = ice_tx_timeout,
9647 .ndo_bpf = ice_xdp_safe_mode,
9648 };
9649
9650 static const struct net_device_ops ice_netdev_ops = {
9651 .ndo_open = ice_open,
9652 .ndo_stop = ice_stop,
9653 .ndo_start_xmit = ice_start_xmit,
9654 .ndo_select_queue = ice_select_queue,
9655 .ndo_features_check = ice_features_check,
9656 .ndo_fix_features = ice_fix_features,
9657 .ndo_set_rx_mode = ice_set_rx_mode,
9658 .ndo_set_mac_address = ice_set_mac_address,
9659 .ndo_validate_addr = eth_validate_addr,
9660 .ndo_change_mtu = ice_change_mtu,
9661 .ndo_get_stats64 = ice_get_stats64,
9662 .ndo_set_tx_maxrate = ice_set_tx_maxrate,
9663 .ndo_eth_ioctl = ice_eth_ioctl,
9664 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
9665 .ndo_set_vf_mac = ice_set_vf_mac,
9666 .ndo_get_vf_config = ice_get_vf_cfg,
9667 .ndo_set_vf_trust = ice_set_vf_trust,
9668 .ndo_set_vf_vlan = ice_set_vf_port_vlan,
9669 .ndo_set_vf_link_state = ice_set_vf_link_state,
9670 .ndo_get_vf_stats = ice_get_vf_stats,
9671 .ndo_set_vf_rate = ice_set_vf_bw,
9672 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
9673 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
9674 .ndo_setup_tc = ice_setup_tc,
9675 .ndo_set_features = ice_set_features,
9676 .ndo_bridge_getlink = ice_bridge_getlink,
9677 .ndo_bridge_setlink = ice_bridge_setlink,
9678 .ndo_fdb_add = ice_fdb_add,
9679 .ndo_fdb_del = ice_fdb_del,
9680 #ifdef CONFIG_RFS_ACCEL
9681 .ndo_rx_flow_steer = ice_rx_flow_steer,
9682 #endif
9683 .ndo_tx_timeout = ice_tx_timeout,
9684 .ndo_bpf = ice_xdp,
9685 .ndo_xdp_xmit = ice_xdp_xmit,
9686 .ndo_xsk_wakeup = ice_xsk_wakeup,
9687 };
9688