xref: /linux/drivers/net/ethernet/intel/ice/ice_main.c (revision 3339b99ef6fe38dac43b534cba3a8a0e29fb2eff)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018-2023, Intel Corporation. */
3 
4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <generated/utsrelease.h>
9 #include <linux/crash_dump.h>
10 #include "ice.h"
11 #include "ice_base.h"
12 #include "ice_lib.h"
13 #include "ice_fltr.h"
14 #include "ice_dcb_lib.h"
15 #include "ice_dcb_nl.h"
16 #include "devlink/devlink.h"
17 #include "devlink/devlink_port.h"
18 #include "ice_hwmon.h"
19 /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
20  * ice tracepoint functions. This must be done exactly once across the
21  * ice driver.
22  */
23 #define CREATE_TRACE_POINTS
24 #include "ice_trace.h"
25 #include "ice_eswitch.h"
26 #include "ice_tc_lib.h"
27 #include "ice_vsi_vlan_ops.h"
28 #include <net/xdp_sock_drv.h>
29 
30 #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
31 static const char ice_driver_string[] = DRV_SUMMARY;
32 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
33 
34 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
35 #define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
36 #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
37 
38 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
39 MODULE_DESCRIPTION(DRV_SUMMARY);
40 MODULE_IMPORT_NS(LIBIE);
41 MODULE_LICENSE("GPL v2");
42 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
43 
44 static int debug = -1;
45 module_param(debug, int, 0644);
46 #ifndef CONFIG_DYNAMIC_DEBUG
47 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
48 #else
49 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
50 #endif /* !CONFIG_DYNAMIC_DEBUG */
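/* Because the parameter is registered with mode 0644, it is also exposed
 * writable through sysfs, so the stored value can be updated at runtime,
 * e.g. (illustrative; whether a new value takes effect depends on when the
 * driver samples it):
 *
 *	echo 16 > /sys/module/ice/parameters/debug
 */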
51 
52 DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
53 EXPORT_SYMBOL(ice_xdp_locking_key);
54 
55 /**
56  * ice_hw_to_dev - Get device pointer from the hardware structure
57  * @hw: pointer to the device HW structure
58  *
59  * Used to access the device pointer from compilation units which can't easily
60  * include the definition of struct ice_pf without leading to circular header
61  * dependencies.
62  */
63 struct device *ice_hw_to_dev(struct ice_hw *hw)
64 {
65 	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
66 
67 	return &pf->pdev->dev;
68 }
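
/* Usage sketch: a compilation unit that only has access to the struct ice_hw
 * can still log against the underlying struct device, e.g. (message text
 * illustrative):
 *
 *	dev_dbg(ice_hw_to_dev(hw), "DDP package already loaded\n");
 */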
69 
70 static struct workqueue_struct *ice_wq;
71 struct workqueue_struct *ice_lag_wq;
72 static const struct net_device_ops ice_netdev_safe_mode_ops;
73 static const struct net_device_ops ice_netdev_ops;
74 
75 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
76 
77 static void ice_vsi_release_all(struct ice_pf *pf);
78 
79 static int ice_rebuild_channels(struct ice_pf *pf);
80 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
81 
82 static int
83 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
84 		     void *cb_priv, enum tc_setup_type type, void *type_data,
85 		     void *data,
86 		     void (*cleanup)(struct flow_block_cb *block_cb));
87 
88 bool netif_is_ice(const struct net_device *dev)
89 {
90 	return dev && (dev->netdev_ops == &ice_netdev_ops);
91 }
92 
93 /**
94  * ice_get_tx_pending - returns number of Tx descriptors not processed
95  * @ring: the ring of descriptors
96  */
97 static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
98 {
99 	u16 head, tail;
100 
101 	head = ring->next_to_clean;
102 	tail = ring->next_to_use;
103 
104 	if (head != tail)
105 		return (head < tail) ?
106 			tail - head : (tail + ring->count - head);
107 	return 0;
108 }
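
/* Worked example of the wraparound arithmetic above: with ring->count = 512,
 * head (next_to_clean) = 500 and tail (next_to_use) = 10, the tail has
 * wrapped past the end of the ring, so the pending count is
 * tail + count - head = 10 + 512 - 500 = 22 descriptors.
 */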
109 
110 /**
111  * ice_check_for_hang_subtask - check for and recover hung queues
112  * @pf: pointer to PF struct
113  */
114 static void ice_check_for_hang_subtask(struct ice_pf *pf)
115 {
116 	struct ice_vsi *vsi = NULL;
117 	struct ice_hw *hw;
118 	unsigned int i;
119 	int packets;
120 	u32 v;
121 
122 	ice_for_each_vsi(pf, v)
123 		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
124 			vsi = pf->vsi[v];
125 			break;
126 		}
127 
128 	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
129 		return;
130 
131 	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
132 		return;
133 
134 	hw = &vsi->back->hw;
135 
136 	ice_for_each_txq(vsi, i) {
137 		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
138 		struct ice_ring_stats *ring_stats;
139 
140 		if (!tx_ring)
141 			continue;
142 		if (ice_ring_ch_enabled(tx_ring))
143 			continue;
144 
145 		ring_stats = tx_ring->ring_stats;
146 		if (!ring_stats)
147 			continue;
148 
149 		if (tx_ring->desc) {
150 			/* If the packet counter has not changed, the queue
151 			 * is likely stalled, so force an interrupt for this
152 			 * queue.
153 			 *
154 			 * prev_pkt would be negative if there was no
155 			 * pending work.
156 			 */
157 			packets = ring_stats->stats.pkts & INT_MAX;
158 			if (ring_stats->tx_stats.prev_pkt == packets) {
159 				/* Trigger sw interrupt to revive the queue */
160 				ice_trigger_sw_intr(hw, tx_ring->q_vector);
161 				continue;
162 			}
163 
164 			/* Memory barrier between read of packet count and call
165 			 * to ice_get_tx_pending()
166 			 */
167 			smp_rmb();
168 			ring_stats->tx_stats.prev_pkt =
169 			    ice_get_tx_pending(tx_ring) ? packets : -1;
170 		}
171 	}
172 }
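
/* The prev_pkt snapshot above encodes three states: -1 means no Tx work was
 * pending on the last pass; a value equal to the current packet count means
 * no progress was made since that pass (so a software interrupt is fired to
 * revive the queue); any other value means the queue is making progress and
 * the snapshot is simply refreshed.
 */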
173 
174 /**
175  * ice_init_mac_fltr - Set initial MAC filters
176  * @pf: board private structure
177  *
178  * Set the initial set of MAC filters for the PF VSI: configure filters for
179  * the permanent address and the broadcast address. If an error is
180  * encountered, the netdevice will be unregistered.
181  */
182 static int ice_init_mac_fltr(struct ice_pf *pf)
183 {
184 	struct ice_vsi *vsi;
185 	u8 *perm_addr;
186 
187 	vsi = ice_get_main_vsi(pf);
188 	if (!vsi)
189 		return -EINVAL;
190 
191 	perm_addr = vsi->port_info->mac.perm_addr;
192 	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
193 }
194 
195 /**
196  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
197  * @netdev: the net device on which the sync is happening
198  * @addr: MAC address to sync
199  *
200  * This is a callback function which is called by the in-kernel device sync
201  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
202  * populates the tmp_sync_list, which is later used by
203  * ice_fltr_add_mac_list() to add the MAC filters to the hardware.
204  */
205 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
206 {
207 	struct ice_netdev_priv *np = netdev_priv(netdev);
208 	struct ice_vsi *vsi = np->vsi;
209 
210 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
211 				     ICE_FWD_TO_VSI))
212 		return -EINVAL;
213 
214 	return 0;
215 }
216 
217 /**
218  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
219  * @netdev: the net device on which the unsync is happening
220  * @addr: MAC address to unsync
221  *
222  * This is a callback function which is called by the in-kernel device unsync
223  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
224  * populates the tmp_unsync_list, which is later used by
225  * ice_fltr_remove_mac_list() to delete the MAC filters from the hardware.
226  */
227 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
228 {
229 	struct ice_netdev_priv *np = netdev_priv(netdev);
230 	struct ice_vsi *vsi = np->vsi;
231 
232 	/* Under some circumstances, we might receive a request to delete our
233 	 * own device address from our uc list. Because we store the device
234 	 * address in the VSI's MAC filter list, we need to ignore such
235 	 * requests and not delete our device address from this list.
236 	 */
237 	if (ether_addr_equal(addr, netdev->dev_addr))
238 		return 0;
239 
240 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
241 				     ICE_FWD_TO_VSI))
242 		return -EINVAL;
243 
244 	return 0;
245 }
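
/* Both callbacks above are wired into the kernel's address list machinery by
 * ice_vsi_sync_fltr() below via __dev_uc_sync()/__dev_mc_sync(), which invoke
 * them once per address that needs to be added or removed.
 */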
246 
247 /**
248  * ice_vsi_fltr_changed - check if filter state changed
249  * @vsi: VSI to be checked
250  *
251  * Returns true if the filter state has changed, false otherwise.
252  */
253 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
254 {
255 	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
256 	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
257 }
258 
259 /**
260  * ice_set_promisc - Enable promiscuous mode for a given PF VSI
261  * @vsi: the VSI being configured
262  * @promisc_m: mask of promiscuous config bits
263  * Return: 0 on success, negative on failure
264  */
265 static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
266 {
267 	int status;
268 
269 	if (vsi->type != ICE_VSI_PF)
270 		return 0;
271 
272 	if (ice_vsi_has_non_zero_vlans(vsi)) {
273 		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
274 		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
275 						       promisc_m);
276 	} else {
277 		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
278 						  promisc_m, 0);
279 	}
280 	if (status && status != -EEXIST)
281 		return status;
282 
283 	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
284 		   vsi->vsi_num, promisc_m);
285 	return 0;
286 }
287 
288 /**
289  * ice_clear_promisc - Disable promiscuous mode for a given PF VSI
290  * @vsi: the VSI being configured
291  * @promisc_m: mask of promiscuous config bits
292  * Return: 0 on success, negative on failure
293  */
294 static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
295 {
296 	int status;
297 
298 	if (vsi->type != ICE_VSI_PF)
299 		return 0;
300 
301 	if (ice_vsi_has_non_zero_vlans(vsi)) {
302 		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
303 		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
304 							 promisc_m);
305 	} else {
306 		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
307 						    promisc_m, 0);
308 	}
309 
310 	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
311 		   vsi->vsi_num, promisc_m);
312 	return status;
313 }
314 
315 /**
316  * ice_vsi_sync_fltr - Sync the VSI filter list with the HW
317  * @vsi: ptr to the VSI
318  *
319  * Push any outstanding VSI filter changes through the AdminQ.
320  */
321 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
322 {
323 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
324 	struct device *dev = ice_pf_to_dev(vsi->back);
325 	struct net_device *netdev = vsi->netdev;
326 	bool promisc_forced_on = false;
327 	struct ice_pf *pf = vsi->back;
328 	struct ice_hw *hw = &pf->hw;
329 	u32 changed_flags = 0;
330 	int err;
331 
332 	if (!vsi->netdev)
333 		return -EINVAL;
334 
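	/* serialize with any other caller syncing filters on this VSI; the
	 * busy-wait sleeps, so this runs in process context
	 */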
335 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
336 		usleep_range(1000, 2000);
337 
338 	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
339 	vsi->current_netdev_flags = vsi->netdev->flags;
340 
341 	INIT_LIST_HEAD(&vsi->tmp_sync_list);
342 	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
343 
344 	if (ice_vsi_fltr_changed(vsi)) {
345 		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
346 		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
347 
348 		/* grab the netdev's addr_list_lock */
349 		netif_addr_lock_bh(netdev);
350 		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
351 			      ice_add_mac_to_unsync_list);
352 		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
353 			      ice_add_mac_to_unsync_list);
354 		/* our temp lists are populated. release lock */
355 		netif_addr_unlock_bh(netdev);
356 	}
357 
358 	/* Remove MAC addresses in the unsync list */
359 	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
360 	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
361 	if (err) {
362 		netdev_err(netdev, "Failed to delete MAC filters\n");
363 		/* if we failed because of alloc failures, just bail */
364 		if (err == -ENOMEM)
365 			goto out;
366 	}
367 
368 	/* Add MAC addresses in the sync list */
369 	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
370 	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
371 	/* If the filter was added successfully or already exists, don't treat
372 	 * this as an error; continue processing the rest of the function. Only
373 	 * genuine failures (anything other than -EEXIST) are handled below.
374 	 */
375 	if (err && err != -EEXIST) {
376 		netdev_err(netdev, "Failed to add MAC filters\n");
377 		/* If there is no more space for new umac filters, VSI
378 		 * should go into promiscuous mode. There should be some
379 		 * space reserved for promiscuous filters.
380 		 */
381 		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
382 		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
383 				      vsi->state)) {
384 			promisc_forced_on = true;
385 			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
386 				    vsi->vsi_num);
387 		} else {
388 			goto out;
389 		}
390 	}
391 	err = 0;
392 	/* check for changes in promiscuous modes */
393 	if (changed_flags & IFF_ALLMULTI) {
394 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
395 			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
396 			if (err) {
397 				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
398 				goto out_promisc;
399 			}
400 		} else {
401 			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
402 			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
403 			if (err) {
404 				vsi->current_netdev_flags |= IFF_ALLMULTI;
405 				goto out_promisc;
406 			}
407 		}
408 	}
409 
410 	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
411 	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
412 		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
413 		if (vsi->current_netdev_flags & IFF_PROMISC) {
414 			/* Apply Rx filter rule to get traffic from wire */
415 			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
416 				err = ice_set_dflt_vsi(vsi);
417 				if (err && err != -EEXIST) {
418 					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
419 						   err, vsi->vsi_num);
420 					vsi->current_netdev_flags &=
421 						~IFF_PROMISC;
422 					goto out_promisc;
423 				}
424 				err = 0;
425 				vlan_ops->dis_rx_filtering(vsi);
426 
427 				/* promiscuous mode implies allmulticast so
428 				 * that VSIs that are in promiscuous mode are
429 				 * subscribed to multicast packets coming to
430 				 * the port
431 				 */
432 				err = ice_set_promisc(vsi,
433 						      ICE_MCAST_PROMISC_BITS);
434 				if (err)
435 					goto out_promisc;
436 			}
437 		} else {
438 			/* Clear Rx filter to remove traffic from wire */
439 			if (ice_is_vsi_dflt_vsi(vsi)) {
440 				err = ice_clear_dflt_vsi(vsi);
441 				if (err) {
442 					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
443 						   err, vsi->vsi_num);
444 					vsi->current_netdev_flags |=
445 						IFF_PROMISC;
446 					goto out_promisc;
447 				}
448 				if (vsi->netdev->features &
449 				    NETIF_F_HW_VLAN_CTAG_FILTER)
450 					vlan_ops->ena_rx_filtering(vsi);
451 			}
452 
453 			/* disable allmulti here, but only if allmulti is not
454 			 * still enabled for the netdev
455 			 */
456 			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
457 				err = ice_clear_promisc(vsi,
458 							ICE_MCAST_PROMISC_BITS);
459 				if (err) {
460 					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
461 						   err, vsi->vsi_num);
462 				}
463 			}
464 		}
465 	}
466 	goto exit;
467 
468 out_promisc:
469 	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
470 	goto exit;
471 out:
472 	/* if something went wrong then set the changed flag so we try again */
473 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
474 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
475 exit:
476 	clear_bit(ICE_CFG_BUSY, vsi->state);
477 	return err;
478 }
479 
480 /**
481  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
482  * @pf: board private structure
483  */
484 static void ice_sync_fltr_subtask(struct ice_pf *pf)
485 {
486 	int v;
487 
488 	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
489 		return;
490 
491 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
492 
493 	ice_for_each_vsi(pf, v)
494 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
495 		    ice_vsi_sync_fltr(pf->vsi[v])) {
496 			/* come back and try again later */
497 			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
498 			break;
499 		}
500 }
501 
502 /**
503  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
504  * @pf: the PF
505  * @locked: is the rtnl_lock already held
506  */
507 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
508 {
509 	int node;
510 	int v;
511 
512 	ice_for_each_vsi(pf, v)
513 		if (pf->vsi[v])
514 			ice_dis_vsi(pf->vsi[v], locked);
515 
516 	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
517 		pf->pf_agg_node[node].num_vsis = 0;
518 
519 	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
520 		pf->vf_agg_node[node].num_vsis = 0;
521 }
522 
523 /**
524  * ice_clear_sw_switch_recipes - clear switch recipes
525  * @pf: board private structure
526  *
527  * Mark switch recipes as not created in the SW structures. There are cases
528  * where rules (especially advanced rules) need to be restored, either re-read
529  * from hardware or added again, for example after a reset. The 'recp_created'
530  * flag prevents this and needs to be cleared upfront.
531  */
532 static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
533 {
534 	struct ice_sw_recipe *recp;
535 	u8 i;
536 
537 	recp = pf->hw.switch_info->recp_list;
538 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
539 		recp[i].recp_created = false;
540 }
541 
542 /**
543  * ice_prepare_for_reset - prep for reset
544  * @pf: board private structure
545  * @reset_type: reset type requested
546  *
547  * Inform or close all dependent features in prep for reset.
548  */
549 static void
550 ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
551 {
552 	struct ice_hw *hw = &pf->hw;
553 	struct ice_vsi *vsi;
554 	struct ice_vf *vf;
555 	unsigned int bkt;
556 
557 	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
558 
559 	/* already prepared for reset */
560 	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
561 		return;
562 
563 	ice_unplug_aux_dev(pf);
564 
565 	/* Notify VFs of impending reset */
566 	if (ice_check_sq_alive(hw, &hw->mailboxq))
567 		ice_vc_notify_reset(pf);
568 
569 	/* Disable VFs until reset is completed */
570 	mutex_lock(&pf->vfs.table_lock);
571 	ice_for_each_vf(pf, bkt, vf)
572 		ice_set_vf_state_dis(vf);
573 	mutex_unlock(&pf->vfs.table_lock);
574 
575 	if (ice_is_eswitch_mode_switchdev(pf)) {
576 		if (reset_type != ICE_RESET_PFR)
577 			ice_clear_sw_switch_recipes(pf);
578 	}
579 
580 	/* release ADQ specific HW and SW resources */
581 	vsi = ice_get_main_vsi(pf);
582 	if (!vsi)
583 		goto skip;
584 
585 	/* to be on safe side, reset orig_rss_size so that normal flow
586 	/* to be on the safe side, reset orig_rss_size so that the normal flow
587 	 * of deciding rss_size can take precedence
588 	vsi->orig_rss_size = 0;
589 
590 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
591 		if (reset_type == ICE_RESET_PFR) {
592 			vsi->old_ena_tc = vsi->all_enatc;
593 			vsi->old_numtc = vsi->all_numtc;
594 		} else {
595 			ice_remove_q_channels(vsi, true);
596 
597 			/* channel rebuild is not supported for other reset
598 			 * types, so clear the channel-related info
599 			 */
600 			vsi->old_ena_tc = 0;
601 			vsi->all_enatc = 0;
602 			vsi->old_numtc = 0;
603 			vsi->all_numtc = 0;
604 			vsi->req_txq = 0;
605 			vsi->req_rxq = 0;
606 			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
607 			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
608 		}
609 	}
610 skip:
611 
612 	/* clear SW filtering DB */
613 	ice_clear_hw_tbls(hw);
614 	/* disable the VSIs and their queues that are not already DOWN */
615 	ice_pf_dis_all_vsi(pf, false);
616 
617 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
618 		ice_ptp_prepare_for_reset(pf, reset_type);
619 
620 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
621 		ice_gnss_exit(pf);
622 
623 	if (hw->port_info)
624 		ice_sched_clear_port(hw->port_info);
625 
626 	ice_shutdown_all_ctrlq(hw);
627 
628 	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
629 }
630 
631 /**
632  * ice_do_reset - Initiate one of many types of resets
633  * @pf: board private structure
634  * @reset_type: reset type requested before this function was called.
635  */
636 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
637 {
638 	struct device *dev = ice_pf_to_dev(pf);
639 	struct ice_hw *hw = &pf->hw;
640 
641 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
642 
643 	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
644 		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
645 		reset_type = ICE_RESET_CORER;
646 	}
647 
648 	ice_prepare_for_reset(pf, reset_type);
649 
650 	/* trigger the reset */
651 	if (ice_reset(hw, reset_type)) {
652 		dev_err(dev, "reset %d failed\n", reset_type);
653 		set_bit(ICE_RESET_FAILED, pf->state);
654 		clear_bit(ICE_RESET_OICR_RECV, pf->state);
655 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
656 		clear_bit(ICE_PFR_REQ, pf->state);
657 		clear_bit(ICE_CORER_REQ, pf->state);
658 		clear_bit(ICE_GLOBR_REQ, pf->state);
659 		wake_up(&pf->reset_wait_queue);
660 		return;
661 	}
662 
663 	/* PFR is a bit of a special case because it doesn't result in an OICR
664 	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
665 	 * associated state bits.
666 	 */
667 	if (reset_type == ICE_RESET_PFR) {
668 		pf->pfr_count++;
669 		ice_rebuild(pf, reset_type);
670 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
671 		clear_bit(ICE_PFR_REQ, pf->state);
672 		wake_up(&pf->reset_wait_queue);
673 		ice_reset_all_vfs(pf);
674 	}
675 }
676 
677 /**
678  * ice_reset_subtask - Set up for resetting the device and driver
679  * @pf: board private structure
680  */
681 static void ice_reset_subtask(struct ice_pf *pf)
682 {
683 	enum ice_reset_req reset_type = ICE_RESET_INVAL;
684 
685 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
686 	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
687 	 * of reset is pending and sets bits in pf->state indicating the reset
688 	 * type and ICE_RESET_OICR_RECV. If the latter bit is set, prepare for
689 	 * the pending reset unless that has already been done (for PF
690 	 * software-initiated global resets, ICE_PREPARED_FOR_RESET indicates
691 	 * that the software is already prepared; for global resets initiated by
692 	 * firmware or by software on other PFs, that bit is not set, so prepare
693 	 * now). Then poll for reset completion, rebuild, and return.
694 	 */
695 	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
696 		/* Perform the largest reset requested */
697 		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
698 			reset_type = ICE_RESET_CORER;
699 		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
700 			reset_type = ICE_RESET_GLOBR;
701 		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
702 			reset_type = ICE_RESET_EMPR;
703 		/* return if no valid reset type requested */
704 		if (reset_type == ICE_RESET_INVAL)
705 			return;
706 		ice_prepare_for_reset(pf, reset_type);
707 
708 		/* make sure we are ready to rebuild */
709 		if (ice_check_reset(&pf->hw)) {
710 			set_bit(ICE_RESET_FAILED, pf->state);
711 		} else {
712 			/* done with reset. start rebuild */
713 			pf->hw.reset_ongoing = false;
714 			ice_rebuild(pf, reset_type);
715 			/* clear bit to resume normal operations, but
716 			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
717 			 */
718 			clear_bit(ICE_RESET_OICR_RECV, pf->state);
719 			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
720 			clear_bit(ICE_PFR_REQ, pf->state);
721 			clear_bit(ICE_CORER_REQ, pf->state);
722 			clear_bit(ICE_GLOBR_REQ, pf->state);
723 			wake_up(&pf->reset_wait_queue);
724 			ice_reset_all_vfs(pf);
725 		}
726 
727 		return;
728 	}
729 
730 	/* No pending resets to finish processing. Check for new resets */
731 	if (test_bit(ICE_PFR_REQ, pf->state)) {
732 		reset_type = ICE_RESET_PFR;
733 		if (pf->lag && pf->lag->bonded) {
734 			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
735 			reset_type = ICE_RESET_CORER;
736 		}
737 	}
738 	if (test_bit(ICE_CORER_REQ, pf->state))
739 		reset_type = ICE_RESET_CORER;
740 	if (test_bit(ICE_GLOBR_REQ, pf->state))
741 		reset_type = ICE_RESET_GLOBR;
742 	/* If no valid reset type requested just return */
743 	if (reset_type == ICE_RESET_INVAL)
744 		return;
745 
746 	/* reset if not already down or busy */
747 	if (!test_bit(ICE_DOWN, pf->state) &&
748 	    !test_bit(ICE_CFG_BUSY, pf->state)) {
749 		ice_do_reset(pf, reset_type);
750 	}
751 }
752 
753 /**
754  * ice_print_topo_conflict - print topology conflict message
755  * @vsi: the VSI whose topology status is being checked
756  */
757 static void ice_print_topo_conflict(struct ice_vsi *vsi)
758 {
759 	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
760 	case ICE_AQ_LINK_TOPO_CONFLICT:
761 	case ICE_AQ_LINK_MEDIA_CONFLICT:
762 	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
763 	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
764 	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
765 		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
766 		break;
767 	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
768 		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
769 			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
770 		else
771 			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
772 		break;
773 	default:
774 		break;
775 	}
776 }
777 
778 /**
779  * ice_print_link_msg - print link up or down message
780  * @vsi: the VSI whose link status is being queried
781  * @isup: boolean for if the link is now up or down
782  */
783 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
784 {
785 	struct ice_aqc_get_phy_caps_data *caps;
786 	const char *an_advertised;
787 	const char *fec_req;
788 	const char *speed;
789 	const char *fec;
790 	const char *fc;
791 	const char *an;
792 	int status;
793 
794 	if (!vsi)
795 		return;
796 
797 	if (vsi->current_isup == isup)
798 		return;
799 
800 	vsi->current_isup = isup;
801 
802 	if (!isup) {
803 		netdev_info(vsi->netdev, "NIC Link is Down\n");
804 		return;
805 	}
806 
807 	switch (vsi->port_info->phy.link_info.link_speed) {
808 	case ICE_AQ_LINK_SPEED_200GB:
809 		speed = "200 G";
810 		break;
811 	case ICE_AQ_LINK_SPEED_100GB:
812 		speed = "100 G";
813 		break;
814 	case ICE_AQ_LINK_SPEED_50GB:
815 		speed = "50 G";
816 		break;
817 	case ICE_AQ_LINK_SPEED_40GB:
818 		speed = "40 G";
819 		break;
820 	case ICE_AQ_LINK_SPEED_25GB:
821 		speed = "25 G";
822 		break;
823 	case ICE_AQ_LINK_SPEED_20GB:
824 		speed = "20 G";
825 		break;
826 	case ICE_AQ_LINK_SPEED_10GB:
827 		speed = "10 G";
828 		break;
829 	case ICE_AQ_LINK_SPEED_5GB:
830 		speed = "5 G";
831 		break;
832 	case ICE_AQ_LINK_SPEED_2500MB:
833 		speed = "2.5 G";
834 		break;
835 	case ICE_AQ_LINK_SPEED_1000MB:
836 		speed = "1 G";
837 		break;
838 	case ICE_AQ_LINK_SPEED_100MB:
839 		speed = "100 M";
840 		break;
841 	default:
842 		speed = "Unknown ";
843 		break;
844 	}
845 
846 	switch (vsi->port_info->fc.current_mode) {
847 	case ICE_FC_FULL:
848 		fc = "Rx/Tx";
849 		break;
850 	case ICE_FC_TX_PAUSE:
851 		fc = "Tx";
852 		break;
853 	case ICE_FC_RX_PAUSE:
854 		fc = "Rx";
855 		break;
856 	case ICE_FC_NONE:
857 		fc = "None";
858 		break;
859 	default:
860 		fc = "Unknown";
861 		break;
862 	}
863 
864 	/* Get FEC mode based on negotiated link info */
865 	switch (vsi->port_info->phy.link_info.fec_info) {
866 	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
867 	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
868 		fec = "RS-FEC";
869 		break;
870 	case ICE_AQ_LINK_25G_KR_FEC_EN:
871 		fec = "FC-FEC/BASE-R";
872 		break;
873 	default:
874 		fec = "NONE";
875 		break;
876 	}
877 
878 	/* check if autoneg completed, might be false due to not supported */
879 	/* check if autoneg completed; may be false if autoneg is not supported */
880 		an = "True";
881 	else
882 		an = "False";
883 
884 	/* Get the requested FEC mode based on the PHY caps' last SW configuration */
885 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
886 	if (!caps) {
887 		fec_req = "Unknown";
888 		an_advertised = "Unknown";
889 		goto done;
890 	}
891 
892 	status = ice_aq_get_phy_caps(vsi->port_info, false,
893 				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
894 	if (status)
895 		netdev_info(vsi->netdev, "Get phy capability failed.\n");
896 
897 	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
898 
899 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
900 	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
901 		fec_req = "RS-FEC";
902 	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
903 		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
904 		fec_req = "FC-FEC/BASE-R";
905 	else
906 		fec_req = "NONE";
907 
908 	kfree(caps);
909 
910 done:
911 	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
912 		    speed, fec_req, fec, an_advertised, an, fc);
913 	ice_print_topo_conflict(vsi);
914 }
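
/* Example of the resulting log line (device name and values illustrative):
 *
 *	ice 0000:3b:00.0 eth0: NIC Link is up 100 Gbps Full Duplex, Requested
 *	FEC: RS-FEC, Negotiated FEC: RS-FEC, Autoneg Advertised: On, Autoneg
 *	Negotiated: True, Flow Control: Rx/Tx
 */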
915 
916 /**
917  * ice_vsi_link_event - update the VSI's netdev
918  * @vsi: the VSI on which the link event occurred
919  * @link_up: true if the link is up, false otherwise
920  */
921 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
922 {
923 	if (!vsi)
924 		return;
925 
926 	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
927 		return;
928 
929 	if (vsi->type == ICE_VSI_PF) {
930 		if (link_up == netif_carrier_ok(vsi->netdev))
931 			return;
932 
933 		if (link_up) {
934 			netif_carrier_on(vsi->netdev);
935 			netif_tx_wake_all_queues(vsi->netdev);
936 		} else {
937 			netif_carrier_off(vsi->netdev);
938 			netif_tx_stop_all_queues(vsi->netdev);
939 		}
940 	}
941 }
942 
943 /**
944  * ice_set_dflt_mib - send a default config MIB to the FW
945  * @pf: private PF struct
946  *
947  * This function sends a default configuration MIB to the FW.
948  *
949  * If this function errors out at any point, the driver is still able to
950  * function. The main impact is that LFC may not operate as expected.
951  * Therefore an error here should only be logged with a debug message, and
952  * the driver rebuild/re-enable should continue.
953  */
954 static void ice_set_dflt_mib(struct ice_pf *pf)
955 {
956 	struct device *dev = ice_pf_to_dev(pf);
957 	u8 mib_type, *buf, *lldpmib = NULL;
958 	u16 len, typelen, offset = 0;
959 	struct ice_lldp_org_tlv *tlv;
960 	struct ice_hw *hw = &pf->hw;
961 	u32 ouisubtype;
962 
963 	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
964 	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
965 	if (!lldpmib) {
966 		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
967 			__func__);
968 		return;
969 	}
970 
971 	/* Add ETS CFG TLV */
972 	tlv = (struct ice_lldp_org_tlv *)lldpmib;
973 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
974 		   ICE_IEEE_ETS_TLV_LEN);
975 	tlv->typelen = htons(typelen);
976 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
977 		      ICE_IEEE_SUBTYPE_ETS_CFG);
978 	tlv->ouisubtype = htonl(ouisubtype);
979 
980 	buf = tlv->tlvinfo;
981 	buf[0] = 0;
982 
983 	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
984 	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
985 	 * Octets 13 - 20 are TSA values - leave as zeros
986 	 */
987 	buf[5] = 0x64;
988 	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
989 	offset += len + 2;
990 	tlv = (struct ice_lldp_org_tlv *)
991 		((char *)tlv + sizeof(tlv->typelen) + len);
992 
993 	/* Add ETS REC TLV */
994 	buf = tlv->tlvinfo;
995 	tlv->typelen = htons(typelen);
996 
997 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
998 		      ICE_IEEE_SUBTYPE_ETS_REC);
999 	tlv->ouisubtype = htonl(ouisubtype);
1000 
1001 	/* First octet of buf is reserved
1002 	 * Octets 1 - 4 map UP to TC - all UPs map to zero
1003 	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
1004 	 * Octets 13 - 20 are TSA value - leave as zeros
1005 	 */
1006 	buf[5] = 0x64;
1007 	offset += len + 2;
1008 	tlv = (struct ice_lldp_org_tlv *)
1009 		((char *)tlv + sizeof(tlv->typelen) + len);
1010 
1011 	/* Add PFC CFG TLV */
1012 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
1013 		   ICE_IEEE_PFC_TLV_LEN);
1014 	tlv->typelen = htons(typelen);
1015 
1016 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
1017 		      ICE_IEEE_SUBTYPE_PFC_CFG);
1018 	tlv->ouisubtype = htonl(ouisubtype);
1019 
1020 	/* Octet 0: PFC capability = 8 TCs; octet 1 left as all zeros - PFC disabled */
1021 	buf[0] = 0x08;
1022 	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
1023 	offset += len + 2;
1024 
1025 	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
1026 		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
1027 
1028 	kfree(lldpmib);
1029 }
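
/* For reference, the typelen packing above follows the standard LLDP TLV
 * header: a 7-bit type in the upper bits and a 9-bit length in the lower
 * bits. Assuming ICE_TLV_TYPE_ORG = 127 and ICE_IEEE_ETS_TLV_LEN = 25:
 *
 *	typelen = (127 << 9) | 25 = 0xFE19
 *
 * which is then stored big-endian via htons().
 */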
1030 
1031 /**
1032  * ice_check_phy_fw_load - check if PHY FW load failed
1033  * @pf: pointer to PF struct
1034  * @link_cfg_err: bitmap from the link info structure
1035  *
1036  * check if external PHY FW load failed and print an error message if it did
1037  */
1038 static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
1039 {
1040 	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
1041 		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1042 		return;
1043 	}
1044 
1045 	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
1046 		return;
1047 
1048 	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
1049 		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
1050 		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1051 	}
1052 }
1053 
1054 /**
1055  * ice_check_module_power - check module power level
1056  * @pf: pointer to PF struct
1057  * @link_cfg_err: bitmap from the link info structure
1058  *
1059  * check module power level returned by a previous call to aq_get_link_info
1060  * and print error messages if module power level is not supported
1061  */
1062 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
1063 {
1064 	/* if module power level is supported, clear the flag */
1065 	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
1066 			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
1067 		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1068 		return;
1069 	}
1070 
1071 	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
1072 	 * above block didn't clear this bit, there's nothing to do
1073 	 */
1074 	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
1075 		return;
1076 
1077 	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
1078 		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
1079 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1080 	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
1081 		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
1082 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1083 	}
1084 }
1085 
1086 /**
1087  * ice_check_link_cfg_err - check if link configuration failed
1088  * @pf: pointer to the PF struct
1089  * @link_cfg_err: bitmap from the link info structure
1090  *
1091  * Print messages for any link configuration failure indicated by the
1092  * link_cfg_err bitmap from the link info structure
1093  */
1094 static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
1095 {
1096 	ice_check_module_power(pf, link_cfg_err);
1097 	ice_check_phy_fw_load(pf, link_cfg_err);
1098 }
1099 
1100 /**
1101  * ice_link_event - process the link event
1102  * @pf: PF that the link event is associated with
1103  * @pi: port_info for the port that the link event is associated with
1104  * @link_up: true if the physical link is up and false if it is down
1105  * @link_speed: current link speed received from the link event
1106  *
1107  * Returns 0 on success and negative on failure
1108  */
1109 static int
1110 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
1111 	       u16 link_speed)
1112 {
1113 	struct device *dev = ice_pf_to_dev(pf);
1114 	struct ice_phy_info *phy_info;
1115 	struct ice_vsi *vsi;
1116 	u16 old_link_speed;
1117 	bool old_link;
1118 	int status;
1119 
1120 	phy_info = &pi->phy;
1121 	phy_info->link_info_old = phy_info->link_info;
1122 
1123 	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
1124 	old_link_speed = phy_info->link_info_old.link_speed;
1125 
1126 	/* update the link info structures and re-enable link events;
1127 	 * don't bail on failure since other bookkeeping is still needed
1128 	 */
1129 	status = ice_update_link_info(pi);
1130 	if (status)
1131 		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
1132 			pi->lport, status,
1133 			ice_aq_str(pi->hw->adminq.sq_last_status));
1134 
1135 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
1136 
1137 	/* Check if the link state is up after updating link info, and treat
1138 	 * this event as an UP event since the link is actually UP now.
1139 	 */
1140 	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
1141 		link_up = true;
1142 
1143 	vsi = ice_get_main_vsi(pf);
1144 	if (!vsi || !vsi->port_info)
1145 		return -EINVAL;
1146 
1147 	/* turn off PHY if media was removed */
1148 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
1149 	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
1150 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
1151 		ice_set_link(vsi, false);
1152 	}
1153 
1154 	/* if the old link state and speed are the same as the new, we're done */
1155 	if (link_up == old_link && link_speed == old_link_speed)
1156 		return 0;
1157 
1158 	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
1159 
1160 	if (ice_is_dcb_active(pf)) {
1161 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1162 			ice_dcb_rebuild(pf);
1163 	} else {
1164 		if (link_up)
1165 			ice_set_dflt_mib(pf);
1166 	}
1167 	ice_vsi_link_event(vsi, link_up);
1168 	ice_print_link_msg(vsi, link_up);
1169 
1170 	ice_vc_notify_link_state(pf);
1171 
1172 	return 0;
1173 }
1174 
1175 /**
1176  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
1177  * @pf: board private structure
1178  */
1179 static void ice_watchdog_subtask(struct ice_pf *pf)
1180 {
1181 	int i;
1182 
1183 	/* if interface is down do nothing */
1184 	if (test_bit(ICE_DOWN, pf->state) ||
1185 	    test_bit(ICE_CFG_BUSY, pf->state))
1186 		return;
1187 
1188 	/* make sure we don't do these things too often */
1189 	if (time_before(jiffies,
1190 			pf->serv_tmr_prev + pf->serv_tmr_period))
1191 		return;
1192 
1193 	pf->serv_tmr_prev = jiffies;
1194 
1195 	/* Update the stats for active netdevs so the network stack
1196 	 * can look at updated numbers whenever it cares to
1197 	 */
1198 	ice_update_pf_stats(pf);
1199 	ice_for_each_vsi(pf, i)
1200 		if (pf->vsi[i] && pf->vsi[i]->netdev)
1201 			ice_update_vsi_stats(pf->vsi[i]);
1202 }
1203 
1204 /**
1205  * ice_init_link_events - enable/initialize link events
1206  * @pi: pointer to the port_info instance
1207  *
1208  * Returns -EIO on failure, 0 on success
1209  */
1210 static int ice_init_link_events(struct ice_port_info *pi)
1211 {
1212 	u16 mask;
1213 
1214 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1215 		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
1216 		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
1217 
1218 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1219 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1220 			pi->lport);
1221 		return -EIO;
1222 	}
1223 
1224 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1225 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1226 			pi->lport);
1227 		return -EIO;
1228 	}
1229 
1230 	return 0;
1231 }
1232 
1233 /**
1234  * ice_handle_link_event - handle link event via ARQ
1235  * @pf: PF that the link event is associated with
1236  * @event: event structure containing link status info
1237  */
1238 static int
1239 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1240 {
1241 	struct ice_aqc_get_link_status_data *link_data;
1242 	struct ice_port_info *port_info;
1243 	int status;
1244 
1245 	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1246 	port_info = pf->hw.port_info;
1247 	if (!port_info)
1248 		return -EINVAL;
1249 
1250 	status = ice_link_event(pf, port_info,
1251 				!!(link_data->link_info & ICE_AQ_LINK_UP),
1252 				le16_to_cpu(link_data->link_speed));
1253 	if (status)
1254 		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1255 			status);
1256 
1257 	return status;
1258 }
1259 
1260 /**
1261  * ice_get_fwlog_data - copy the FW log data from ARQ event
1262  * @pf: PF that the FW log event is associated with
1263  * @event: event structure containing FW log data
1264  */
1265 static void
1266 ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
1267 {
1268 	struct ice_fwlog_data *fwlog;
1269 	struct ice_hw *hw = &pf->hw;
1270 
1271 	fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];
1272 
1273 	memset(fwlog->data, 0, PAGE_SIZE);
1274 	fwlog->data_size = le16_to_cpu(event->desc.datalen);
1275 
1276 	memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
1277 	ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);
1278 
1279 	if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
1280 		/* the rings are full so bump the head to create room */
1281 		ice_fwlog_ring_increment(&hw->fwlog_ring.head,
1282 					 hw->fwlog_ring.size);
1283 	}
1284 }
1285 
1286 /**
1287  * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
1288  * @pf: pointer to the PF private structure
1289  * @task: intermediate helper storage and identifier for waiting
1290  * @opcode: the opcode to wait for
1291  *
1292  * Prepares to wait for a specific AdminQ completion event on the ARQ for
1293  * a given PF. Actual wait would be done by a call to ice_aq_wait_for_event().
1294  *
1295  * The calls are separated so the caller can register for the event before
1296  * sending the command, which mitigates a race between registering and the
1297  * FW responding.
1298  *
1299  * To obtain only the descriptor contents, pass a task->event with a NULL
1300  * msg_buf. For the complete data buffer, preallocate a large enough msg_buf.
1301  */
1302 void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
1303 			   u16 opcode)
1304 {
1305 	INIT_HLIST_NODE(&task->entry);
1306 	task->opcode = opcode;
1307 	task->state = ICE_AQ_TASK_WAITING;
1308 
1309 	spin_lock_bh(&pf->aq_wait_lock);
1310 	hlist_add_head(&task->entry, &pf->aq_wait_list);
1311 	spin_unlock_bh(&pf->aq_wait_lock);
1312 }
1313 
1314 /**
1315  * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1316  * @pf: pointer to the PF private structure
1317  * @task: ptr prepared by ice_aq_prep_for_event()
1318  * @timeout: how long to wait, in jiffies
1319  *
1320  * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1321  * current thread will be put to sleep until the specified event occurs or
1322  * until the given timeout is reached.
1323  *
1324  * Returns: zero on success, or a negative error code on failure.
1325  */
1326 int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
1327 			  unsigned long timeout)
1328 {
1329 	enum ice_aq_task_state *state = &task->state;
1330 	struct device *dev = ice_pf_to_dev(pf);
1331 	unsigned long start = jiffies;
1332 	long ret;
1333 	int err;
1334 
1335 	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
1336 					       *state != ICE_AQ_TASK_WAITING,
1337 					       timeout);
1338 	switch (*state) {
1339 	case ICE_AQ_TASK_NOT_PREPARED:
1340 		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
1341 		err = -EINVAL;
1342 		break;
1343 	case ICE_AQ_TASK_WAITING:
1344 		err = ret < 0 ? ret : -ETIMEDOUT;
1345 		break;
1346 	case ICE_AQ_TASK_CANCELED:
1347 		err = ret < 0 ? ret : -ECANCELED;
1348 		break;
1349 	case ICE_AQ_TASK_COMPLETE:
1350 		err = ret < 0 ? ret : 0;
1351 		break;
1352 	default:
1353 		WARN(1, "Unexpected AdminQ wait task state %u", *state);
1354 		err = -EINVAL;
1355 		break;
1356 	}
1357 
1358 	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1359 		jiffies_to_msecs(jiffies - start),
1360 		jiffies_to_msecs(timeout),
1361 		task->opcode);
1362 
1363 	spin_lock_bh(&pf->aq_wait_lock);
1364 	hlist_del(&task->entry);
1365 	spin_unlock_bh(&pf->aq_wait_lock);
1366 
1367 	return err;
1368 }
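
/* Minimal sketch of how the two helpers pair up (the opcode and timeout are
 * illustrative and error handling is elided):
 *
 *	struct ice_aq_task task = {};
 *
 *	ice_aq_prep_for_event(pf, &task, ice_aqc_opc_lldp_set_mib_change);
 *	... send the AdminQ command that triggers the awaited event ...
 *	err = ice_aq_wait_for_event(pf, &task, msecs_to_jiffies(500));
 *
 * Registering before sending closes the window in which firmware could
 * respond before the task is on pf->aq_wait_list.
 */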
1369 
1370 /**
1371  * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1372  * @pf: pointer to the PF private structure
1373  * @opcode: the opcode of the event
1374  * @event: the event to check
1375  *
1376  * Loops over the current list of pending threads waiting for an AdminQ event.
1377  * For each matching task, copy the contents of the event into the task
1378  * structure and wake up the thread.
1379  *
1380  * If multiple threads wait for the same opcode, they will all be woken up.
1381  *
1382  * Note that event->msg_buf will only be duplicated if the event has a buffer
1383  * with enough space already allocated. Otherwise, only the descriptor and
1384  * message length will be copied.
1387  */
1388 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1389 				struct ice_rq_event_info *event)
1390 {
1391 	struct ice_rq_event_info *task_ev;
1392 	struct ice_aq_task *task;
1393 	bool found = false;
1394 
1395 	spin_lock_bh(&pf->aq_wait_lock);
1396 	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1397 		if (task->state != ICE_AQ_TASK_WAITING)
1398 			continue;
1399 		if (task->opcode != opcode)
1400 			continue;
1401 
1402 		task_ev = &task->event;
1403 		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
1404 		task_ev->msg_len = event->msg_len;
1405 
1406 		/* Only copy the data buffer if a destination was set */
1407 		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
1408 			memcpy(task_ev->msg_buf, event->msg_buf,
1409 			       event->buf_len);
1410 			task_ev->buf_len = event->buf_len;
1411 		}
1412 
1413 		task->state = ICE_AQ_TASK_COMPLETE;
1414 		found = true;
1415 	}
1416 	spin_unlock_bh(&pf->aq_wait_lock);
1417 
1418 	if (found)
1419 		wake_up(&pf->aq_wait_queue);
1420 }
1421 
1422 /**
1423  * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1424  * @pf: the PF private structure
1425  *
1426  * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1427  * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1428  */
1429 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1430 {
1431 	struct ice_aq_task *task;
1432 
1433 	spin_lock_bh(&pf->aq_wait_lock);
1434 	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1435 		task->state = ICE_AQ_TASK_CANCELED;
1436 	spin_unlock_bh(&pf->aq_wait_lock);
1437 
1438 	wake_up(&pf->aq_wait_queue);
1439 }
1440 
1441 #define ICE_MBX_OVERFLOW_WATERMARK 64
1442 
1443 /**
1444  * __ice_clean_ctrlq - helper function to clean controlq rings
1445  * @pf: ptr to struct ice_pf
1446  * @q_type: specific Control queue type
1447  */
1448 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1449 {
1450 	struct device *dev = ice_pf_to_dev(pf);
1451 	struct ice_rq_event_info event;
1452 	struct ice_hw *hw = &pf->hw;
1453 	struct ice_ctl_q_info *cq;
1454 	u16 pending, i = 0;
1455 	const char *qtype;
1456 	u32 oldval, val;
1457 
1458 	/* Do not clean control queue if/when PF reset fails */
1459 	if (test_bit(ICE_RESET_FAILED, pf->state))
1460 		return 0;
1461 
1462 	switch (q_type) {
1463 	case ICE_CTL_Q_ADMIN:
1464 		cq = &hw->adminq;
1465 		qtype = "Admin";
1466 		break;
1467 	case ICE_CTL_Q_SB:
1468 		cq = &hw->sbq;
1469 		qtype = "Sideband";
1470 		break;
1471 	case ICE_CTL_Q_MAILBOX:
1472 		cq = &hw->mailboxq;
1473 		qtype = "Mailbox";
1474 		/* we are going to try to detect a malicious VF, so set the
1475 		 * state to begin detection
1476 		 */
1477 		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1478 		break;
1479 	default:
1480 		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1481 		return 0;
1482 	}
1483 
1484 	/* check for error indications - PF_xx_AxQLEN register layout for
1485 	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1486 	 */
1487 	val = rd32(hw, cq->rq.len);
1488 	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1489 		   PF_FW_ARQLEN_ARQCRIT_M)) {
1490 		oldval = val;
1491 		if (val & PF_FW_ARQLEN_ARQVFE_M)
1492 			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1493 				qtype);
1494 		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1495 			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1496 				qtype);
1497 		}
1498 		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1499 			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1500 				qtype);
1501 		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1502 			 PF_FW_ARQLEN_ARQCRIT_M);
1503 		if (oldval != val)
1504 			wr32(hw, cq->rq.len, val);
1505 	}
1506 
1507 	val = rd32(hw, cq->sq.len);
1508 	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1509 		   PF_FW_ATQLEN_ATQCRIT_M)) {
1510 		oldval = val;
1511 		if (val & PF_FW_ATQLEN_ATQVFE_M)
1512 			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1513 				qtype);
1514 		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1515 			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1516 				qtype);
1517 		}
1518 		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1519 			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1520 				qtype);
1521 		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1522 			 PF_FW_ATQLEN_ATQCRIT_M);
1523 		if (oldval != val)
1524 			wr32(hw, cq->sq.len, val);
1525 	}
1526 
1527 	event.buf_len = cq->rq_buf_size;
1528 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1529 	if (!event.msg_buf)
1530 		return 0;
1531 
1532 	do {
1533 		struct ice_mbx_data data = {};
1534 		u16 opcode;
1535 		int ret;
1536 
1537 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1538 		if (ret == -EALREADY)
1539 			break;
1540 		if (ret) {
1541 			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
1542 				ret);
1543 			break;
1544 		}
1545 
1546 		opcode = le16_to_cpu(event.desc.opcode);
1547 
1548 		/* Notify any thread that might be waiting for this event */
1549 		ice_aq_check_events(pf, opcode, &event);
1550 
1551 		switch (opcode) {
1552 		case ice_aqc_opc_get_link_status:
1553 			if (ice_handle_link_event(pf, &event))
1554 				dev_err(dev, "Could not handle link event\n");
1555 			break;
1556 		case ice_aqc_opc_event_lan_overflow:
1557 			ice_vf_lan_overflow_event(pf, &event);
1558 			break;
1559 		case ice_mbx_opc_send_msg_to_pf:
1560 			data.num_msg_proc = i;
1561 			data.num_pending_arq = pending;
1562 			data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
1563 			data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
1564 
1565 			ice_vc_process_vf_msg(pf, &event, &data);
1566 			break;
1567 		case ice_aqc_opc_fw_logs_event:
1568 			ice_get_fwlog_data(pf, &event);
1569 			break;
1570 		case ice_aqc_opc_lldp_set_mib_change:
1571 			ice_dcb_process_lldp_set_mib_change(pf, &event);
1572 			break;
1573 		default:
1574 			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1575 				qtype, opcode);
1576 			break;
1577 		}
1578 	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1579 
1580 	kfree(event.msg_buf);
1581 
1582 	return pending && (i == ICE_DFLT_IRQ_WORK);
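	/* a nonzero return means the work budget was exhausted while events
	 * were still pending; the caller leaves its *_EVENT_PENDING bit set
	 * so this queue is revisited on the next pass
	 */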
1583 }
1584 
1585 /**
1586  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1587  * @hw: pointer to hardware info
1588  * @cq: control queue information
1589  *
1590  * returns true if there are pending messages in a queue, false if there aren't
1591  */
1592 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1593 {
1594 	u16 ntu;
1595 
1596 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1597 	return cq->rq.next_to_clean != ntu;
1598 }
1599 
1600 /**
1601  * ice_clean_adminq_subtask - clean the AdminQ rings
1602  * @pf: board private structure
1603  */
1604 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1605 {
1606 	struct ice_hw *hw = &pf->hw;
1607 
1608 	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1609 		return;
1610 
1611 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1612 		return;
1613 
1614 	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1615 
1616 	/* There might be a situation where new messages arrive to a control
1617 	 * queue between processing the last message and clearing the
1618 	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1619 	 * ice_ctrlq_pending) and process new messages if any.
1620 	 */
1621 	if (ice_ctrlq_pending(hw, &hw->adminq))
1622 		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1623 
1624 	ice_flush(hw);
1625 }
1626 
1627 /**
1628  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1629  * @pf: board private structure
1630  */
1631 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1632 {
1633 	struct ice_hw *hw = &pf->hw;
1634 
1635 	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1636 		return;
1637 
1638 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1639 		return;
1640 
1641 	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1642 
1643 	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1644 		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1645 
1646 	ice_flush(hw);
1647 }
1648 
1649 /**
1650  * ice_clean_sbq_subtask - clean the Sideband Queue rings
1651  * @pf: board private structure
1652  */
1653 static void ice_clean_sbq_subtask(struct ice_pf *pf)
1654 {
1655 	struct ice_hw *hw = &pf->hw;
1656 
1657 	/* if mac_type is not generic, sideband is not supported
1658 	 * and there's nothing to do here
1659 	 */
1660 	if (!ice_is_generic_mac(hw)) {
1661 		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1662 		return;
1663 	}
1664 
1665 	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1666 		return;
1667 
1668 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1669 		return;
1670 
1671 	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1672 
1673 	if (ice_ctrlq_pending(hw, &hw->sbq))
1674 		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1675 
1676 	ice_flush(hw);
1677 }
1678 
1679 /**
1680  * ice_service_task_schedule - schedule the service task to wake up
1681  * @pf: board private structure
1682  *
1683  * If not already scheduled, this puts the task into the work queue.
1684  */
1685 void ice_service_task_schedule(struct ice_pf *pf)
1686 {
1687 	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1688 	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1689 	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1690 		queue_work(ice_wq, &pf->serv_task);
1691 }
1692 
1693 /**
1694  * ice_service_task_complete - finish up the service task
1695  * @pf: board private structure
1696  */
1697 static void ice_service_task_complete(struct ice_pf *pf)
1698 {
1699 	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1700 
1701 	/* force memory (pf->state) to sync before next service task */
1702 	smp_mb__before_atomic();
1703 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1704 }
1705 
1706 /**
1707  * ice_service_task_stop - stop service task and cancel works
1708  * @pf: board private structure
1709  *
1710  * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1711  * 1 otherwise.
1712  */
1713 static int ice_service_task_stop(struct ice_pf *pf)
1714 {
1715 	int ret;
1716 
1717 	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1718 
1719 	if (pf->serv_tmr.function)
1720 		del_timer_sync(&pf->serv_tmr);
1721 	if (pf->serv_task.func)
1722 		cancel_work_sync(&pf->serv_task);
1723 
1724 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1725 	return ret;
1726 }
1727 
1728 /**
1729  * ice_service_task_restart - restart service task and schedule works
1730  * @pf: board private structure
1731  *
1732  * This function is needed for suspend and resume flows (e.g. the WoL scenario)
1733  */
1734 static void ice_service_task_restart(struct ice_pf *pf)
1735 {
1736 	clear_bit(ICE_SERVICE_DIS, pf->state);
1737 	ice_service_task_schedule(pf);
1738 }
1739 
1740 /**
1741  * ice_service_timer - timer callback to schedule service task
1742  * @t: pointer to timer_list
1743  */
1744 static void ice_service_timer(struct timer_list *t)
1745 {
1746 	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1747 
1748 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1749 	ice_service_task_schedule(pf);
1750 }
1751 
1752 /**
1753  * ice_mdd_maybe_reset_vf - reset VF after MDD event
1754  * @pf: pointer to the PF structure
1755  * @vf: pointer to the VF structure
1756  * @reset_vf_tx: whether Tx MDD has occurred
1757  * @reset_vf_rx: whether Rx MDD has occurred
1758  *
1759  * Since the queue can get stuck on VF MDD events, the PF can be configured to
1760  * automatically reset the VF by enabling the private ethtool flag
1761  * mdd-auto-reset-vf.
1762  */
1763 static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf,
1764 				   bool reset_vf_tx, bool reset_vf_rx)
1765 {
1766 	struct device *dev = ice_pf_to_dev(pf);
1767 
1768 	if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
1769 		return;
1770 
1771 	/* VF MDD event counters will be cleared by reset, so print the event
1772 	 * prior to reset.
1773 	 */
1774 	if (reset_vf_tx)
1775 		ice_print_vf_tx_mdd_event(vf);
1776 
1777 	if (reset_vf_rx)
1778 		ice_print_vf_rx_mdd_event(vf);
1779 
1780 	dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n",
1781 		 pf->hw.pf_id, vf->vf_id);
1782 	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
1783 }
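
/* Usage note (illustrative): the automatic VF reset above is opt-in and is
 * toggled from userspace via the PF's ethtool private flag, e.g. (shell
 * command shown as an assumption about the deployed ethtool):
 *
 *	ethtool --set-priv-flags <pf-netdev> mdd-auto-reset-vf on
 */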
1784 
1785 /**
1786  * ice_handle_mdd_event - handle malicious driver detect event
1787  * @pf: pointer to the PF structure
1788  *
1789  * Called from service task. OICR interrupt handler indicates MDD event.
1790  * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1791  * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1792  * disable the queue, the PF can be configured to reset the VF using ethtool
1793  * private flag mdd-auto-reset-vf.
1794  */
1795 static void ice_handle_mdd_event(struct ice_pf *pf)
1796 {
1797 	struct device *dev = ice_pf_to_dev(pf);
1798 	struct ice_hw *hw = &pf->hw;
1799 	struct ice_vf *vf;
1800 	unsigned int bkt;
1801 	u32 reg;
1802 
1803 	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1804 		/* Since the VF MDD event logging is rate limited, check if
1805 		 * there are pending MDD events.
1806 		 */
1807 		ice_print_vfs_mdd_events(pf);
1808 		return;
1809 	}
1810 
1811 	/* find what triggered an MDD event */
1812 	reg = rd32(hw, GL_MDET_TX_PQM);
1813 	if (reg & GL_MDET_TX_PQM_VALID_M) {
1814 		u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
1815 		u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
1816 		u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
1817 		u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);
1818 
1819 		if (netif_msg_tx_err(pf))
1820 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1821 				 event, queue, pf_num, vf_num);
1822 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1823 	}
1824 
1825 	reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
1826 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1827 		u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg);
1828 		u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg);
1829 		u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg);
1830 		u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg);
1831 
1832 		if (netif_msg_tx_err(pf))
1833 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1834 				 event, queue, pf_num, vf_num);
1835 		wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
1836 	}
1837 
1838 	reg = rd32(hw, GL_MDET_RX);
1839 	if (reg & GL_MDET_RX_VALID_M) {
1840 		u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg);
1841 		u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg);
1842 		u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg);
1843 		u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg);
1844 
1845 		if (netif_msg_rx_err(pf))
1846 			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1847 				 event, queue, pf_num, vf_num);
1848 		wr32(hw, GL_MDET_RX, 0xffffffff);
1849 	}
1850 
1851 	/* check to see if this PF caused an MDD event */
1852 	reg = rd32(hw, PF_MDET_TX_PQM);
1853 	if (reg & PF_MDET_TX_PQM_VALID_M) {
1854 		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1855 		if (netif_msg_tx_err(pf))
1856 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1857 	}
1858 
1859 	reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw));
1860 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1861 		wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff);
1862 		if (netif_msg_tx_err(pf))
1863 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1864 	}
1865 
1866 	reg = rd32(hw, PF_MDET_RX);
1867 	if (reg & PF_MDET_RX_VALID_M) {
1868 		wr32(hw, PF_MDET_RX, 0xFFFF);
1869 		if (netif_msg_rx_err(pf))
1870 			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1871 	}
1872 
1873 	/* Check to see if one of the VFs caused an MDD event, and then
1874 	 * increment counters and set print pending
1875 	 */
1876 	mutex_lock(&pf->vfs.table_lock);
1877 	ice_for_each_vf(pf, bkt, vf) {
1878 		bool reset_vf_tx = false, reset_vf_rx = false;
1879 
1880 		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
1881 		if (reg & VP_MDET_TX_PQM_VALID_M) {
1882 			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
1883 			vf->mdd_tx_events.count++;
1884 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1885 			if (netif_msg_tx_err(pf))
1886 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1887 					 vf->vf_id);
1888 
1889 			reset_vf_tx = true;
1890 		}
1891 
1892 		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
1893 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1894 			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
1895 			vf->mdd_tx_events.count++;
1896 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1897 			if (netif_msg_tx_err(pf))
1898 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1899 					 vf->vf_id);
1900 
1901 			reset_vf_tx = true;
1902 		}
1903 
1904 		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
1905 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1906 			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
1907 			vf->mdd_tx_events.count++;
1908 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1909 			if (netif_msg_tx_err(pf))
1910 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1911 					 vf->vf_id);
1912 
1913 			reset_vf_tx = true;
1914 		}
1915 
1916 		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1917 		if (reg & VP_MDET_RX_VALID_M) {
1918 			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1919 			vf->mdd_rx_events.count++;
1920 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1921 			if (netif_msg_rx_err(pf))
1922 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1923 					 vf->vf_id);
1924 
1925 			reset_vf_rx = true;
1926 		}
1927 
1928 		if (reset_vf_tx || reset_vf_rx)
1929 			ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx,
1930 					       reset_vf_rx);
1931 	}
1932 	mutex_unlock(&pf->vfs.table_lock);
1933 
1934 	ice_print_vfs_mdd_events(pf);
1935 }
1936 
1937 /**
1938  * ice_force_phys_link_state - Force the physical link state
1939  * @vsi: VSI to force the physical link state to up/down
1940  * @link_up: true/false indicates to set the physical link to up/down
1941  *
1942  * Force the physical link state by getting the current PHY capabilities from
1943  * hardware and setting the PHY config based on the determined capabilities. If
1944  * link changes a link event will be triggered because both the Enable Automatic
1945  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1946  *
1947  * Returns 0 on success, negative on failure
1948  */
1949 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1950 {
1951 	struct ice_aqc_get_phy_caps_data *pcaps;
1952 	struct ice_aqc_set_phy_cfg_data *cfg;
1953 	struct ice_port_info *pi;
1954 	struct device *dev;
1955 	int retcode;
1956 
1957 	if (!vsi || !vsi->port_info || !vsi->back)
1958 		return -EINVAL;
1959 	if (vsi->type != ICE_VSI_PF)
1960 		return 0;
1961 
1962 	dev = ice_pf_to_dev(vsi->back);
1963 
1964 	pi = vsi->port_info;
1965 
1966 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1967 	if (!pcaps)
1968 		return -ENOMEM;
1969 
1970 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1971 				      NULL);
1972 	if (retcode) {
1973 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1974 			vsi->vsi_num, retcode);
1975 		retcode = -EIO;
1976 		goto out;
1977 	}
1978 
1979 	/* No change in link */
1980 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1981 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1982 		goto out;
1983 
1984 	/* Use the current user PHY configuration. The current user PHY
1985 	 * configuration is initialized during probe from PHY capabilities
1986 	 * software mode, and updated on set PHY configuration.
1987 	 */
1988 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1989 	if (!cfg) {
1990 		retcode = -ENOMEM;
1991 		goto out;
1992 	}
1993 
1994 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1995 	if (link_up)
1996 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1997 	else
1998 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1999 
2000 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
2001 	if (retcode) {
2002 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2003 			vsi->vsi_num, retcode);
2004 		retcode = -EIO;
2005 	}
2006 
2007 	kfree(cfg);
2008 out:
2009 	kfree(pcaps);
2010 	return retcode;
2011 }
2012 
2013 /**
2014  * ice_init_nvm_phy_type - Initialize the NVM PHY type
2015  * @pi: port info structure
2016  *
2017  * Initialize nvm_phy_type_[low|high] for link lenient mode support
2018  */
2019 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
2020 {
2021 	struct ice_aqc_get_phy_caps_data *pcaps;
2022 	struct ice_pf *pf = pi->hw->back;
2023 	int err;
2024 
2025 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2026 	if (!pcaps)
2027 		return -ENOMEM;
2028 
2029 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
2030 				  pcaps, NULL);
2031 
2032 	if (err) {
2033 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2034 		goto out;
2035 	}
2036 
2037 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
2038 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
2039 
2040 out:
2041 	kfree(pcaps);
2042 	return err;
2043 }
2044 
2045 /**
2046  * ice_init_link_dflt_override - Initialize link default override
2047  * @pi: port info structure
2048  *
2049  * Initialize link default override and PHY total port shutdown during probe
2050  */
2051 static void ice_init_link_dflt_override(struct ice_port_info *pi)
2052 {
2053 	struct ice_link_default_override_tlv *ldo;
2054 	struct ice_pf *pf = pi->hw->back;
2055 
2056 	ldo = &pf->link_dflt_override;
2057 	if (ice_get_link_default_override(ldo, pi))
2058 		return;
2059 
2060 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
2061 		return;
2062 
2063 	/* Enable Total Port Shutdown (override/replace link-down-on-close
2064 	 * ethtool private flag) for ports with Port Disable bit set.
2065 	 */
2066 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
2067 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
2068 }
2069 
2070 /**
2071  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2072  * @pi: port info structure
2073  *
2074  * If default override is enabled, initialize the user PHY cfg speed and FEC
2075  * settings using the default override mask from the NVM.
2076  *
2077  * The PHY should only be configured with the default override settings the
2078  * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2079  * is used to indicate that the user PHY cfg default override is initialized
2080  * and the PHY has not been configured with the default override settings. The
2081  * state is set here, and cleared in ice_configure_phy the first time the PHY is
2082  * configured.
2083  *
2084  * This function should be called only if the FW doesn't support default
2085  * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2086  */
2087 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2088 {
2089 	struct ice_link_default_override_tlv *ldo;
2090 	struct ice_aqc_set_phy_cfg_data *cfg;
2091 	struct ice_phy_info *phy = &pi->phy;
2092 	struct ice_pf *pf = pi->hw->back;
2093 
2094 	ldo = &pf->link_dflt_override;
2095 
2096 	/* If link default override is enabled, use it to mask the NVM PHY capabilities
2097 	 * for speed and FEC default configuration.
2098 	 */
2099 	cfg = &phy->curr_user_phy_cfg;
2100 
2101 	if (ldo->phy_type_low || ldo->phy_type_high) {
2102 		cfg->phy_type_low = pf->nvm_phy_type_lo &
2103 				    cpu_to_le64(ldo->phy_type_low);
2104 		cfg->phy_type_high = pf->nvm_phy_type_hi &
2105 				     cpu_to_le64(ldo->phy_type_high);
2106 	}
2107 	cfg->link_fec_opt = ldo->fec_options;
2108 	phy->curr_user_fec_req = ICE_FEC_AUTO;
2109 
2110 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2111 }
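
/* Worked example (illustrative values, not from any real NVM): with
 * nvm_phy_type_lo = 0xF0 and an override TLV carrying phy_type_low = 0x30,
 * the user config keeps only the overlap of the two masks:
 *
 *	cfg->phy_type_low = cpu_to_le64(0xF0 & 0x30);	/* == 0x30 */
 *
 * i.e. the override can only narrow, never extend, the NVM capabilities.
 */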
2112 
2113 /**
2114  * ice_init_phy_user_cfg - Initialize the PHY user configuration
2115  * @pi: port info structure
2116  *
2117  * Initialize the current user PHY configuration, speed, FEC, and FC requested
2118  * mode to default. The PHY defaults are taken from the get PHY capabilities
2119  * (topology with media) response, so call this only when media is first
2120  * available. An error is returned if this is called while media is not
2121  * available. The PHY initialization completed state is set here.
2122  *
2123  * These values are used whenever the PHY configuration is set. The user PHY
2124  * configuration is updated on each set PHY configuration call.
2125  * Returns 0 on success, negative on failure
2126  */
2127 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2128 {
2129 	struct ice_aqc_get_phy_caps_data *pcaps;
2130 	struct ice_phy_info *phy = &pi->phy;
2131 	struct ice_pf *pf = pi->hw->back;
2132 	int err;
2133 
2134 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2135 		return -EIO;
2136 
2137 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2138 	if (!pcaps)
2139 		return -ENOMEM;
2140 
2141 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2142 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2143 					  pcaps, NULL);
2144 	else
2145 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2146 					  pcaps, NULL);
2147 	if (err) {
2148 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2149 		goto err_out;
2150 	}
2151 
2152 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2153 
2154 	/* check if lenient mode is supported and enabled */
2155 	if (ice_fw_supports_link_override(pi->hw) &&
2156 	    !(pcaps->module_compliance_enforcement &
2157 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2158 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2159 
2160 		/* if the FW supports default PHY configuration mode, then the driver
2161 		 * does not have to apply link override settings. If not,
2162 		 * initialize user PHY configuration with link override values
2163 		 */
2164 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2165 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2166 			ice_init_phy_cfg_dflt_override(pi);
2167 			goto out;
2168 		}
2169 	}
2170 
2171 	/* if link default override is not enabled, set user flow control and
2172 	 * FEC settings based on what get_phy_caps returned
2173 	 */
2174 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2175 						      pcaps->link_fec_options);
2176 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2177 
2178 out:
2179 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2180 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2181 err_out:
2182 	kfree(pcaps);
2183 	return err;
2184 }
2185 
2186 /**
2187  * ice_configure_phy - configure PHY
2188  * @vsi: VSI of PHY
2189  *
2190  * Set the PHY configuration. If the current PHY configuration is the same as
2191  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
2192  * configure the PHY based on the get PHY capabilities for topology with media.
2193  */
2194 static int ice_configure_phy(struct ice_vsi *vsi)
2195 {
2196 	struct device *dev = ice_pf_to_dev(vsi->back);
2197 	struct ice_port_info *pi = vsi->port_info;
2198 	struct ice_aqc_get_phy_caps_data *pcaps;
2199 	struct ice_aqc_set_phy_cfg_data *cfg;
2200 	struct ice_phy_info *phy = &pi->phy;
2201 	struct ice_pf *pf = vsi->back;
2202 	int err;
2203 
2204 	/* Ensure we have media as we cannot configure a medialess port */
2205 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2206 		return -ENOMEDIUM;
2207 
2208 	ice_print_topo_conflict(vsi);
2209 
2210 	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2211 	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2212 		return -EPERM;
2213 
2214 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2215 		return ice_force_phys_link_state(vsi, true);
2216 
2217 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2218 	if (!pcaps)
2219 		return -ENOMEM;
2220 
2221 	/* Get current PHY config */
2222 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2223 				  NULL);
2224 	if (err) {
2225 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2226 			vsi->vsi_num, err);
2227 		goto done;
2228 	}
2229 
2230 	/* If PHY enable link is configured and configuration has not changed,
2231 	 * there's nothing to do
2232 	 */
2233 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2234 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2235 		goto done;
2236 
2237 	/* Use PHY topology as baseline for configuration */
2238 	memset(pcaps, 0, sizeof(*pcaps));
2239 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2240 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2241 					  pcaps, NULL);
2242 	else
2243 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2244 					  pcaps, NULL);
2245 	if (err) {
2246 		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2247 			vsi->vsi_num, err);
2248 		goto done;
2249 	}
2250 
2251 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2252 	if (!cfg) {
2253 		err = -ENOMEM;
2254 		goto done;
2255 	}
2256 
2257 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2258 
2259 	/* Speed - If default override pending, use curr_user_phy_cfg set in
2260 	 * ice_init_phy_cfg_dflt_override().
2261 	 */
2262 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2263 			       vsi->back->state)) {
2264 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2265 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2266 	} else {
2267 		u64 phy_low = 0, phy_high = 0;
2268 
2269 		ice_update_phy_type(&phy_low, &phy_high,
2270 				    pi->phy.curr_user_speed_req);
2271 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2272 		cfg->phy_type_high = pcaps->phy_type_high &
2273 				     cpu_to_le64(phy_high);
2274 	}
2275 
2276 	/* Can't provide what was requested; use PHY capabilities */
2277 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2278 		cfg->phy_type_low = pcaps->phy_type_low;
2279 		cfg->phy_type_high = pcaps->phy_type_high;
2280 	}
2281 
2282 	/* FEC */
2283 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2284 
2285 	/* Can't provide what was requested; use PHY capabilities */
2286 	if (cfg->link_fec_opt !=
2287 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2288 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2289 		cfg->link_fec_opt = pcaps->link_fec_options;
2290 	}
2291 
2292 	/* Flow Control - always supported; no need to check against
2293 	 * capabilities
2294 	 */
2295 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2296 
2297 	/* Enable link and link update */
2298 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2299 
2300 	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2301 	if (err)
2302 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2303 			vsi->vsi_num, err);
2304 
2305 	kfree(cfg);
2306 done:
2307 	kfree(pcaps);
2308 	return err;
2309 }
2310 
2311 /**
2312  * ice_check_media_subtask - Check for media
2313  * @pf: pointer to PF struct
2314  *
2315  * If media is available, initialize the PHY user configuration if it has not
2316  * been done yet, and configure the PHY if the interface is up.
2317  */
2318 static void ice_check_media_subtask(struct ice_pf *pf)
2319 {
2320 	struct ice_port_info *pi;
2321 	struct ice_vsi *vsi;
2322 	int err;
2323 
2324 	/* No need to check for media if it's already present */
2325 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2326 		return;
2327 
2328 	vsi = ice_get_main_vsi(pf);
2329 	if (!vsi)
2330 		return;
2331 
2332 	/* Refresh link info and check if media is present */
2333 	pi = vsi->port_info;
2334 	err = ice_update_link_info(pi);
2335 	if (err)
2336 		return;
2337 
2338 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2339 
2340 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2341 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2342 			ice_init_phy_user_cfg(pi);
2343 
2344 		/* PHY settings are reset on media insertion, reconfigure
2345 		 * PHY to preserve settings.
2346 		 */
2347 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2348 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2349 			return;
2350 
2351 		err = ice_configure_phy(vsi);
2352 		if (!err)
2353 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2354 
2355 		/* A Link Status Event will be generated; the event handler
2356 		 * will complete bringing the interface up
2357 		 */
2358 	}
2359 }
2360 
2361 /**
2362  * ice_service_task - manage and run subtasks
2363  * @work: pointer to work_struct contained by the PF struct
2364  */
2365 static void ice_service_task(struct work_struct *work)
2366 {
2367 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2368 	unsigned long start_time = jiffies;
2369 
2370 	/* subtasks */
2371 
2372 	/* process reset requests first */
2373 	ice_reset_subtask(pf);
2374 
2375 	/* bail if a reset/recovery cycle is pending or rebuild failed */
2376 	if (ice_is_reset_in_progress(pf->state) ||
2377 	    test_bit(ICE_SUSPENDED, pf->state) ||
2378 	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2379 		ice_service_task_complete(pf);
2380 		return;
2381 	}
2382 
2383 	if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2384 		struct iidc_event *event;
2385 
2386 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2387 		if (event) {
2388 			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2389 			/* report the entire OICR value to AUX driver */
2390 			swap(event->reg, pf->oicr_err_reg);
2391 			ice_send_event_to_aux(pf, event);
2392 			kfree(event);
2393 		}
2394 	}
2395 
2396 	/* Unplug the aux dev per request. If an unplug request came in
2397 	 * while processing a plug request, this will handle it.
2398 	 */
2399 	if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2400 		ice_unplug_aux_dev(pf);
2401 
2402 	/* Plug aux device per request */
2403 	if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2404 		ice_plug_aux_dev(pf);
2405 
2406 	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2407 		struct iidc_event *event;
2408 
2409 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2410 		if (event) {
2411 			set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2412 			ice_send_event_to_aux(pf, event);
2413 			kfree(event);
2414 		}
2415 	}
2416 
2417 	ice_clean_adminq_subtask(pf);
2418 	ice_check_media_subtask(pf);
2419 	ice_check_for_hang_subtask(pf);
2420 	ice_sync_fltr_subtask(pf);
2421 	ice_handle_mdd_event(pf);
2422 	ice_watchdog_subtask(pf);
2423 
2424 	if (ice_is_safe_mode(pf)) {
2425 		ice_service_task_complete(pf);
2426 		return;
2427 	}
2428 
2429 	ice_process_vflr_event(pf);
2430 	ice_clean_mailboxq_subtask(pf);
2431 	ice_clean_sbq_subtask(pf);
2432 	ice_sync_arfs_fltrs(pf);
2433 	ice_flush_fdir_ctx(pf);
2434 
2435 	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2436 	ice_service_task_complete(pf);
2437 
2438 	/* If the tasks have taken longer than one service timer period
2439 	 * or there is more work to be done, reset the service timer to
2440 	 * schedule the service task now.
2441 	 */
2442 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2443 	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2444 	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2445 	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2446 	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2447 	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2448 	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2449 		mod_timer(&pf->serv_tmr, jiffies);
2450 }
2451 
2452 /**
2453  * ice_set_ctrlq_len - helper function to set controlq length
2454  * @hw: pointer to the HW instance
2455  */
2456 static void ice_set_ctrlq_len(struct ice_hw *hw)
2457 {
2458 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2459 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2460 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2461 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2462 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2463 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2464 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2465 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2466 	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2467 	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2468 	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2469 	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2470 }
2471 
2472 /**
2473  * ice_schedule_reset - schedule a reset
2474  * @pf: board private structure
2475  * @reset: reset being requested
2476  */
2477 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2478 {
2479 	struct device *dev = ice_pf_to_dev(pf);
2480 
2481 	/* bail out if earlier reset has failed */
2482 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2483 		dev_dbg(dev, "earlier reset has failed\n");
2484 		return -EIO;
2485 	}
2486 	/* bail if reset/recovery already in progress */
2487 	if (ice_is_reset_in_progress(pf->state)) {
2488 		dev_dbg(dev, "Reset already in progress\n");
2489 		return -EBUSY;
2490 	}
2491 
2492 	switch (reset) {
2493 	case ICE_RESET_PFR:
2494 		set_bit(ICE_PFR_REQ, pf->state);
2495 		break;
2496 	case ICE_RESET_CORER:
2497 		set_bit(ICE_CORER_REQ, pf->state);
2498 		break;
2499 	case ICE_RESET_GLOBR:
2500 		set_bit(ICE_GLOBR_REQ, pf->state);
2501 		break;
2502 	default:
2503 		return -EINVAL;
2504 	}
2505 
2506 	ice_service_task_schedule(pf);
2507 	return 0;
2508 }
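
/* Example (illustrative sketch): an error path that wants a PF-level reset
 * just requests one and lets the service task drive it:
 *
 *	if (ice_schedule_reset(pf, ICE_RESET_PFR))
 *		dev_dbg(ice_pf_to_dev(pf), "PFR request rejected\n");
 *
 * The -EIO/-EBUSY returns mirror the failed/in-progress checks above.
 */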
2509 
2510 /**
2511  * ice_irq_affinity_notify - Callback for affinity changes
2512  * @notify: context as to what irq was changed
2513  * @mask: the new affinity mask
2514  *
2515  * This is a callback function used by the irq_set_affinity_notifier function
2516  * so that we may register to receive changes to the irq affinity masks.
2517  */
2518 static void
2519 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2520 			const cpumask_t *mask)
2521 {
2522 	struct ice_q_vector *q_vector =
2523 		container_of(notify, struct ice_q_vector, affinity_notify);
2524 
2525 	cpumask_copy(&q_vector->affinity_mask, mask);
2526 }
2527 
2528 /**
2529  * ice_irq_affinity_release - Callback for affinity notifier release
2530  * @ref: internal core kernel usage
2531  *
2532  * This is a callback function used by the irq_set_affinity_notifier function
2533  * to inform the current notification subscriber that they will no longer
2534  * receive notifications.
2535  */
2536 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2537 
2538 /**
2539  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2540  * @vsi: the VSI being configured
2541  */
2542 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2543 {
2544 	struct ice_hw *hw = &vsi->back->hw;
2545 	int i;
2546 
2547 	ice_for_each_q_vector(vsi, i)
2548 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2549 
2550 	ice_flush(hw);
2551 	return 0;
2552 }
2553 
2554 /**
2555  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2556  * @vsi: the VSI being configured
2557  * @basename: name for the vector
2558  */
2559 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2560 {
2561 	int q_vectors = vsi->num_q_vectors;
2562 	struct ice_pf *pf = vsi->back;
2563 	struct device *dev;
2564 	int rx_int_idx = 0;
2565 	int tx_int_idx = 0;
2566 	int vector, err;
2567 	int irq_num;
2568 
2569 	dev = ice_pf_to_dev(pf);
2570 	for (vector = 0; vector < q_vectors; vector++) {
2571 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2572 
2573 		irq_num = q_vector->irq.virq;
2574 
2575 		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2576 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2577 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2578 			tx_int_idx++;
2579 		} else if (q_vector->rx.rx_ring) {
2580 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2581 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2582 		} else if (q_vector->tx.tx_ring) {
2583 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2584 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2585 		} else {
2586 			/* skip this unused q_vector */
2587 			continue;
2588 		}
2589 		if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2590 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2591 					       IRQF_SHARED, q_vector->name,
2592 					       q_vector);
2593 		else
2594 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2595 					       0, q_vector->name, q_vector);
2596 		if (err) {
2597 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2598 				   err);
2599 			goto free_q_irqs;
2600 		}
2601 
2602 		/* register for affinity change notifications */
2603 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2604 			struct irq_affinity_notify *affinity_notify;
2605 
2606 			affinity_notify = &q_vector->affinity_notify;
2607 			affinity_notify->notify = ice_irq_affinity_notify;
2608 			affinity_notify->release = ice_irq_affinity_release;
2609 			irq_set_affinity_notifier(irq_num, affinity_notify);
2610 		}
2611 
2612 		/* assign the mask for this irq */
2613 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2614 	}
2615 
2616 	err = ice_set_cpu_rx_rmap(vsi);
2617 	if (err) {
2618 		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2619 			   vsi->vsi_num, ERR_PTR(err));
2620 		goto free_q_irqs;
2621 	}
2622 
2623 	vsi->irqs_ready = true;
2624 	return 0;
2625 
2626 free_q_irqs:
2627 	while (vector--) {
2628 		irq_num = vsi->q_vectors[vector]->irq.virq;
2629 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2630 			irq_set_affinity_notifier(irq_num, NULL);
2631 		irq_set_affinity_hint(irq_num, NULL);
2632 		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2633 	}
2634 	return err;
2635 }
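
/* Example (illustrative): with a basename such as "ice-eth0" (an assumed
 * value; the basename is supplied by the caller), a VSI whose vectors carry
 * both Tx and Rx rings ends up with IRQs named "ice-eth0-TxRx-0",
 * "ice-eth0-TxRx-1", ..., which is what shows up in /proc/interrupts.
 */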
2636 
2637 /**
2638  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2639  * @vsi: VSI to setup Tx rings used by XDP
2640  *
2641  * Return 0 on success and negative value on error
2642  */
2643 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2644 {
2645 	struct device *dev = ice_pf_to_dev(vsi->back);
2646 	struct ice_tx_desc *tx_desc;
2647 	int i, j;
2648 
2649 	ice_for_each_xdp_txq(vsi, i) {
2650 		u16 xdp_q_idx = vsi->alloc_txq + i;
2651 		struct ice_ring_stats *ring_stats;
2652 		struct ice_tx_ring *xdp_ring;
2653 
2654 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2655 		if (!xdp_ring)
2656 			goto free_xdp_rings;
2657 
2658 		ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2659 		if (!ring_stats) {
2660 			ice_free_tx_ring(xdp_ring);
2661 			goto free_xdp_rings;
2662 		}
2663 
2664 		xdp_ring->ring_stats = ring_stats;
2665 		xdp_ring->q_index = xdp_q_idx;
2666 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2667 		xdp_ring->vsi = vsi;
2668 		xdp_ring->netdev = NULL;
2669 		xdp_ring->dev = dev;
2670 		xdp_ring->count = vsi->num_tx_desc;
2671 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2672 		if (ice_setup_tx_ring(xdp_ring))
2673 			goto free_xdp_rings;
2674 		ice_set_ring_xdp(xdp_ring);
2675 		spin_lock_init(&xdp_ring->tx_lock);
2676 		for (j = 0; j < xdp_ring->count; j++) {
2677 			tx_desc = ICE_TX_DESC(xdp_ring, j);
2678 			tx_desc->cmd_type_offset_bsz = 0;
2679 		}
2680 	}
2681 
2682 	return 0;
2683 
2684 free_xdp_rings:
2685 	for (; i >= 0; i--) {
2686 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2687 			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2688 			vsi->xdp_rings[i]->ring_stats = NULL;
2689 			ice_free_tx_ring(vsi->xdp_rings[i]);
2690 		}
2691 	}
2692 	return -ENOMEM;
2693 }
2694 
2695 /**
2696  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2697  * @vsi: VSI to set the bpf prog on
2698  * @prog: the bpf prog pointer
2699  */
2700 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2701 {
2702 	struct bpf_prog *old_prog;
2703 	int i;
2704 
2705 	old_prog = xchg(&vsi->xdp_prog, prog);
2706 	ice_for_each_rxq(vsi, i)
2707 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2708 
2709 	if (old_prog)
2710 		bpf_prog_put(old_prog);
2711 }
2712 
2713 static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
2714 {
2715 	struct ice_q_vector *q_vector;
2716 	struct ice_tx_ring *ring;
2717 
2718 	if (static_key_enabled(&ice_xdp_locking_key))
2719 		return vsi->xdp_rings[qid % vsi->num_xdp_txq];
2720 
2721 	q_vector = vsi->rx_rings[qid]->q_vector;
2722 	ice_for_each_tx_ring(ring, q_vector->tx)
2723 		if (ice_ring_is_xdp(ring))
2724 			return ring;
2725 
2726 	return NULL;
2727 }
2728 
2729 /**
2730  * ice_map_xdp_rings - Map XDP rings to interrupt vectors
2731  * @vsi: the VSI with XDP rings being configured
2732  *
2733  * Map XDP rings to interrupt vectors and perform the configuration steps
2734  * dependent on the mapping.
2735  */
2736 void ice_map_xdp_rings(struct ice_vsi *vsi)
2737 {
2738 	int xdp_rings_rem = vsi->num_xdp_txq;
2739 	int v_idx, q_idx;
2740 
2741 	/* follow the logic from ice_vsi_map_rings_to_vectors */
2742 	ice_for_each_q_vector(vsi, v_idx) {
2743 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2744 		int xdp_rings_per_v, q_id, q_base;
2745 
2746 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2747 					       vsi->num_q_vectors - v_idx);
2748 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2749 
2750 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2751 			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2752 
2753 			xdp_ring->q_vector = q_vector;
2754 			xdp_ring->next = q_vector->tx.tx_ring;
2755 			q_vector->tx.tx_ring = xdp_ring;
2756 		}
2757 		xdp_rings_rem -= xdp_rings_per_v;
2758 	}
2759 
2760 	ice_for_each_rxq(vsi, q_idx) {
2761 		vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
2762 								       q_idx);
2763 		ice_tx_xsk_pool(vsi, q_idx);
2764 	}
2765 }
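
/* Worked example (illustrative): with num_xdp_txq = 10 and
 * num_q_vectors = 4, the DIV_ROUND_UP() loop above spreads the rings
 * as 3, 3, 2, 2:
 *
 *	v_idx 0: DIV_ROUND_UP(10, 4) = 3, rem = 7
 *	v_idx 1: DIV_ROUND_UP(7, 3)  = 3, rem = 4
 *	v_idx 2: DIV_ROUND_UP(4, 2)  = 2, rem = 2
 *	v_idx 3: DIV_ROUND_UP(2, 1)  = 2, rem = 0
 */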
2766 
2767 /**
2768  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2769  * @vsi: VSI to bring up Tx rings used by XDP
2770  * @prog: bpf program that will be assigned to VSI
2771  * @cfg_type: create from scratch or restore the existing configuration
2772  *
2773  * Return 0 on success and negative value on error
2774  */
2775 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
2776 			  enum ice_xdp_cfg cfg_type)
2777 {
2778 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2779 	struct ice_pf *pf = vsi->back;
2780 	struct ice_qs_cfg xdp_qs_cfg = {
2781 		.qs_mutex = &pf->avail_q_mutex,
2782 		.pf_map = pf->avail_txqs,
2783 		.pf_map_size = pf->max_pf_txqs,
2784 		.q_count = vsi->num_xdp_txq,
2785 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2786 		.vsi_map = vsi->txq_map,
2787 		.vsi_map_offset = vsi->alloc_txq,
2788 		.mapping_mode = ICE_VSI_MAP_CONTIG
2789 	};
2790 	struct device *dev;
2791 	int status, i;
2792 
2793 	dev = ice_pf_to_dev(pf);
2794 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2795 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2796 	if (!vsi->xdp_rings)
2797 		return -ENOMEM;
2798 
2799 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2800 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2801 		goto err_map_xdp;
2802 
2803 	if (static_key_enabled(&ice_xdp_locking_key))
2804 		netdev_warn(vsi->netdev,
2805 			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2806 
2807 	if (ice_xdp_alloc_setup_rings(vsi))
2808 		goto clear_xdp_rings;
2809 
2810 	/* omit the scheduler update if in reset path; XDP queues will be
2811 	 * taken into account at the end of ice_vsi_rebuild, where
2812 	 * ice_cfg_vsi_lan is being called
2813 	 */
2814 	if (cfg_type == ICE_XDP_CFG_PART)
2815 		return 0;
2816 
2817 	ice_map_xdp_rings(vsi);
2818 
2819 	/* tell the Tx scheduler that right now we have
2820 	 * additional queues
2821 	 */
2822 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2823 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2824 
2825 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2826 				 max_txqs);
2827 	if (status) {
2828 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2829 			status);
2830 		goto clear_xdp_rings;
2831 	}
2832 
2833 	/* Assign the prog only when it's not already present on the VSI.
2834 	 * This flow is reached from both the ethtool -L and ndo_bpf paths.
2835 	 * A VSI rebuild that happens under ethtool -L can expose us to
2836 	 * bpf_prog refcount issues: we would swap in the same bpf_prog
2837 	 * pointer for vsi->xdp_prog and call bpf_prog_put on it, since it
2838 	 * would be treated as an 'old_prog'. For ndo_bpf this is not
2839 	 * harmful, as dev_xdp_install bumps the refcount before calling
2840 	 * the op exposed by the driver.
2841 	 */
2842 	if (!ice_is_xdp_ena_vsi(vsi))
2843 		ice_vsi_assign_bpf_prog(vsi, prog);
2844 
2845 	return 0;
2846 clear_xdp_rings:
2847 	ice_for_each_xdp_txq(vsi, i)
2848 		if (vsi->xdp_rings[i]) {
2849 			kfree_rcu(vsi->xdp_rings[i], rcu);
2850 			vsi->xdp_rings[i] = NULL;
2851 		}
2852 
2853 err_map_xdp:
2854 	mutex_lock(&pf->avail_q_mutex);
2855 	ice_for_each_xdp_txq(vsi, i) {
2856 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2857 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2858 	}
2859 	mutex_unlock(&pf->avail_q_mutex);
2860 
2861 	devm_kfree(dev, vsi->xdp_rings);
2862 	return -ENOMEM;
2863 }
2864 
2865 /**
2866  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2867  * @vsi: VSI to remove XDP rings
2868  * @cfg_type: disable XDP permanently or allow it to be restored later
2869  *
2870  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2871  * resources
2872  */
2873 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
2874 {
2875 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2876 	struct ice_pf *pf = vsi->back;
2877 	int i, v_idx;
2878 
2879 	/* q_vectors are freed in reset path so there's no point in detaching
2880 	 * rings
2881 	 */
2882 	if (cfg_type == ICE_XDP_CFG_PART)
2883 		goto free_qmap;
2884 
2885 	ice_for_each_q_vector(vsi, v_idx) {
2886 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2887 		struct ice_tx_ring *ring;
2888 
2889 		ice_for_each_tx_ring(ring, q_vector->tx)
2890 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2891 				break;
2892 
2893 		/* restore the value of the last node prior to XDP setup */
2894 		q_vector->tx.tx_ring = ring;
2895 	}
2896 
2897 free_qmap:
2898 	mutex_lock(&pf->avail_q_mutex);
2899 	ice_for_each_xdp_txq(vsi, i) {
2900 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2901 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2902 	}
2903 	mutex_unlock(&pf->avail_q_mutex);
2904 
2905 	ice_for_each_xdp_txq(vsi, i)
2906 		if (vsi->xdp_rings[i]) {
2907 			if (vsi->xdp_rings[i]->desc) {
2908 				synchronize_rcu();
2909 				ice_free_tx_ring(vsi->xdp_rings[i]);
2910 			}
2911 			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2912 			vsi->xdp_rings[i]->ring_stats = NULL;
2913 			kfree_rcu(vsi->xdp_rings[i], rcu);
2914 			vsi->xdp_rings[i] = NULL;
2915 		}
2916 
2917 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2918 	vsi->xdp_rings = NULL;
2919 
2920 	if (static_key_enabled(&ice_xdp_locking_key))
2921 		static_branch_dec(&ice_xdp_locking_key);
2922 
2923 	if (cfg_type == ICE_XDP_CFG_PART)
2924 		return 0;
2925 
2926 	ice_vsi_assign_bpf_prog(vsi, NULL);
2927 
2928 	/* notify Tx scheduler that we destroyed XDP queues and bring
2929 	 * back the old number of child nodes
2930 	 */
2931 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2932 		max_txqs[i] = vsi->num_txq;
2933 
2934 	/* change number of XDP Tx queues to 0 */
2935 	vsi->num_xdp_txq = 0;
2936 
2937 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2938 			       max_txqs);
2939 }
2940 
2941 /**
2942  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2943  * @vsi: VSI to schedule napi on
2944  */
2945 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2946 {
2947 	int i;
2948 
2949 	ice_for_each_rxq(vsi, i) {
2950 		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2951 
2952 		if (rx_ring->xsk_pool)
2953 			napi_schedule(&rx_ring->q_vector->napi);
2954 	}
2955 }
2956 
2957 /**
2958  * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
2959  * @vsi: VSI to determine the count of XDP Tx qs
2960  *
2961  * Returns 0 if the available Tx queue count is at least half the CPU count,
2962  * -ENOMEM otherwise
2963  */
2964 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2965 {
2966 	u16 avail = ice_get_avail_txq_count(vsi->back);
2967 	u16 cpus = num_possible_cpus();
2968 
2969 	if (avail < cpus / 2)
2970 		return -ENOMEM;
2971 
2972 	vsi->num_xdp_txq = min_t(u16, avail, cpus);
2973 
2974 	if (vsi->num_xdp_txq < cpus)
2975 		static_branch_inc(&ice_xdp_locking_key);
2976 
2977 	return 0;
2978 }
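
/* Worked example (illustrative): on a 16-CPU system with 12 available Tx
 * queues, 12 >= 16 / 2 so the check passes and num_xdp_txq becomes
 * min(12, 16) = 12. Because 12 < 16, the ice_xdp_locking_key static branch
 * is enabled and CPUs share XDP rings under xdp_ring->tx_lock (see
 * ice_xdp_alloc_setup_rings()).
 */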
2979 
2980 /**
2981  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2982  * @vsi: Pointer to VSI structure
2983  */
2984 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
2985 {
2986 	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
2987 		return ICE_RXBUF_1664;
2988 	else
2989 		return ICE_RXBUF_3072;
2990 }
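
/* Example (illustrative): in ice_xdp_setup_prog() below, frame_size is
 * netdev->mtu + ICE_ETH_PKT_HDR_PAD. A default 1500-byte MTU fits either
 * limit, while a 9000-byte MTU exceeds ICE_RXBUF_3072, so a program
 * without xdp_has_frags support is rejected with -EOPNOTSUPP.
 */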
2991 
2992 /**
2993  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2994  * @vsi: VSI to setup XDP for
2995  * @prog: XDP program
2996  * @extack: netlink extended ack
2997  */
2998 static int
2999 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
3000 		   struct netlink_ext_ack *extack)
3001 {
3002 	unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
3003 	bool if_running = netif_running(vsi->netdev);
3004 	int ret = 0, xdp_ring_err = 0;
3005 
3006 	if (prog && !prog->aux->xdp_has_frags) {
3007 		if (frame_size > ice_max_xdp_frame_size(vsi)) {
3008 			NL_SET_ERR_MSG_MOD(extack,
3009 					   "MTU is too large for linear frames and XDP prog does not support frags");
3010 			return -EOPNOTSUPP;
3011 		}
3012 	}
3013 
3014 	/* hot swap progs and avoid toggling link */
3015 	if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
3016 		ice_vsi_assign_bpf_prog(vsi, prog);
3017 		return 0;
3018 	}
3019 
3020 	/* need to stop netdev while setting up the program for Rx rings */
3021 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
3022 		ret = ice_down(vsi);
3023 		if (ret) {
3024 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
3025 			return ret;
3026 		}
3027 	}
3028 
3029 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
3030 		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
3031 		if (xdp_ring_err) {
3032 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
3033 		} else {
3034 			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
3035 							     ICE_XDP_CFG_FULL);
3036 			if (xdp_ring_err)
3037 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
3038 		}
3039 		xdp_features_set_redirect_target(vsi->netdev, true);
3040 		/* reallocate Rx queues that are used for zero-copy */
3041 		xdp_ring_err = ice_realloc_zc_buf(vsi, true);
3042 		if (xdp_ring_err)
3043 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
3044 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
3045 		xdp_features_clear_redirect_target(vsi->netdev);
3046 		xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
3047 		if (xdp_ring_err)
3048 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
3049 		/* reallocate Rx queues that were used for zero-copy */
3050 		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
3051 		if (xdp_ring_err)
3052 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
3053 	}
3054 
3055 	if (if_running)
3056 		ret = ice_up(vsi);
3057 
3058 	if (!ret && prog)
3059 		ice_vsi_rx_napi_schedule(vsi);
3060 
3061 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
3062 }
3063 
3064 /**
3065  * ice_xdp_safe_mode - XDP handler for safe mode
3066  * @dev: netdevice
3067  * @xdp: XDP command
3068  */
3069 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
3070 			     struct netdev_bpf *xdp)
3071 {
3072 	NL_SET_ERR_MSG_MOD(xdp->extack,
3073 			   "Please provide working DDP firmware package in order to use XDP\n"
3074 			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
3075 	return -EOPNOTSUPP;
3076 }
3077 
3078 /**
3079  * ice_xdp - implements XDP handler
3080  * @dev: netdevice
3081  * @xdp: XDP command
3082  */
3083 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3084 {
3085 	struct ice_netdev_priv *np = netdev_priv(dev);
3086 	struct ice_vsi *vsi = np->vsi;
3087 
3088 	if (vsi->type != ICE_VSI_PF) {
3089 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
3090 		return -EINVAL;
3091 	}
3092 
3093 	switch (xdp->command) {
3094 	case XDP_SETUP_PROG:
3095 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3096 	case XDP_SETUP_XSK_POOL:
3097 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
3098 					  xdp->xsk.queue_id);
3099 	default:
3100 		return -EINVAL;
3101 	}
3102 }
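
/* Usage note (illustrative): XDP_SETUP_PROG above is reached when
 * userspace attaches a program in driver mode, e.g. (command shown as an
 * assumption about the deployed iproute2):
 *
 *	ip link set dev <pf-netdev> xdpdrv obj xdp_prog.o sec xdp
 *
 * The core's dev_xdp_install() holds a reference on the prog before this
 * callback runs (see the refcount comment in ice_prepare_xdp_rings()).
 */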
3103 
3104 /**
3105  * ice_ena_misc_vector - enable the non-queue interrupts
3106  * @pf: board private structure
3107  */
3108 static void ice_ena_misc_vector(struct ice_pf *pf)
3109 {
3110 	struct ice_hw *hw = &pf->hw;
3111 	u32 pf_intr_start_offset;
3112 	u32 val;
3113 
3114 	/* Disable anti-spoof detection interrupt to prevent spurious event
3115 	 * interrupts during a function reset. Anti-spoof functionality is
3116 	 * still supported.
3117 	 */
3118 	val = rd32(hw, GL_MDCK_TX_TDPU);
3119 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3120 	wr32(hw, GL_MDCK_TX_TDPU, val);
3121 
3122 	/* clear things first */
3123 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
3124 	rd32(hw, PFINT_OICR);		/* read to clear */
3125 
3126 	val = (PFINT_OICR_ECC_ERR_M |
3127 	       PFINT_OICR_MAL_DETECT_M |
3128 	       PFINT_OICR_GRST_M |
3129 	       PFINT_OICR_PCI_EXCEPTION_M |
3130 	       PFINT_OICR_VFLR_M |
3131 	       PFINT_OICR_HMC_ERR_M |
3132 	       PFINT_OICR_PE_PUSH_M |
3133 	       PFINT_OICR_PE_CRITERR_M);
3134 
3135 	wr32(hw, PFINT_OICR_ENA, val);
3136 
3137 	/* SW_ITR_IDX = 0, but don't change INTENA */
3138 	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3139 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3140 
3141 	if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3142 		return;
3143 	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3144 	wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3145 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3146 }
3147 
3148 /**
3149  * ice_ll_ts_intr - ll_ts interrupt handler
3150  * @irq: interrupt number
3151  * @data: pointer to the PF structure
3152  */
3153 static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
3154 {
3155 	struct ice_pf *pf = data;
3156 	u32 pf_intr_start_offset;
3157 	struct ice_ptp_tx *tx;
3158 	unsigned long flags;
3159 	struct ice_hw *hw;
3160 	u32 val;
3161 	u8 idx;
3162 
3163 	hw = &pf->hw;
3164 	tx = &pf->ptp.port.tx;
3165 	spin_lock_irqsave(&tx->lock, flags);
3166 	ice_ptp_complete_tx_single_tstamp(tx);
3167 
3168 	idx = find_next_bit_wrap(tx->in_use, tx->len,
3169 				 tx->last_ll_ts_idx_read + 1);
3170 	if (idx != tx->len)
3171 		ice_ptp_req_tx_single_tstamp(tx, idx);
3172 	spin_unlock_irqrestore(&tx->lock, flags);
3173 
3174 	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
3175 	      (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
3176 	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3177 	wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3178 	     val);
3179 
3180 	return IRQ_HANDLED;
3181 }
3182 
3183 /**
3184  * ice_misc_intr - misc interrupt handler
3185  * @irq: interrupt number
3186  * @data: pointer to the PF structure
3187  */
3188 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3189 {
3190 	struct ice_pf *pf = (struct ice_pf *)data;
3191 	irqreturn_t ret = IRQ_HANDLED;
3192 	struct ice_hw *hw = &pf->hw;
3193 	struct device *dev;
3194 	u32 oicr, ena_mask;
3195 
3196 	dev = ice_pf_to_dev(pf);
3197 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3198 	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3199 	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3200 
3201 	oicr = rd32(hw, PFINT_OICR);
3202 	ena_mask = rd32(hw, PFINT_OICR_ENA);
3203 
3204 	if (oicr & PFINT_OICR_SWINT_M) {
3205 		ena_mask &= ~PFINT_OICR_SWINT_M;
3206 		pf->sw_int_count++;
3207 	}
3208 
3209 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
3210 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3211 		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3212 	}
3213 	if (oicr & PFINT_OICR_VFLR_M) {
3214 		/* disable any further VFLR event notifications */
3215 		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3216 			u32 reg = rd32(hw, PFINT_OICR_ENA);
3217 
3218 			reg &= ~PFINT_OICR_VFLR_M;
3219 			wr32(hw, PFINT_OICR_ENA, reg);
3220 		} else {
3221 			ena_mask &= ~PFINT_OICR_VFLR_M;
3222 			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3223 		}
3224 	}
3225 
3226 	if (oicr & PFINT_OICR_GRST_M) {
3227 		u32 reset;
3228 
3229 		/* we have a reset warning */
3230 		ena_mask &= ~PFINT_OICR_GRST_M;
3231 		reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M,
3232 				  rd32(hw, GLGEN_RSTAT));
3233 
3234 		if (reset == ICE_RESET_CORER)
3235 			pf->corer_count++;
3236 		else if (reset == ICE_RESET_GLOBR)
3237 			pf->globr_count++;
3238 		else if (reset == ICE_RESET_EMPR)
3239 			pf->empr_count++;
3240 		else
3241 			dev_dbg(dev, "Invalid reset type %d\n", reset);
3242 
3243 		/* If a reset cycle isn't already in progress, we set a bit in
3244 		 * pf->state so that the service task can start a reset/rebuild.
3245 		 */
3246 		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3247 			if (reset == ICE_RESET_CORER)
3248 				set_bit(ICE_CORER_RECV, pf->state);
3249 			else if (reset == ICE_RESET_GLOBR)
3250 				set_bit(ICE_GLOBR_RECV, pf->state);
3251 			else
3252 				set_bit(ICE_EMPR_RECV, pf->state);
3253 
3254 			/* There are a couple of different bits at play here.
3255 			 * hw->reset_ongoing indicates whether the hardware is
3256 			 * in reset. This is set to true when a reset interrupt
3257 			 * is received and set back to false after the driver
3258 			 * has determined that the hardware is out of reset.
3259 			 *
3260 			 * ICE_RESET_OICR_RECV in pf->state indicates
3261 			 * that a post reset rebuild is required before the
3262 			 * driver is operational again. This is set above.
3263 			 *
3264 			 * As this is the start of the reset/rebuild cycle, set
3265 			 * both to indicate that.
3266 			 */
3267 			hw->reset_ongoing = true;
3268 		}
3269 	}
3270 
3271 	if (oicr & PFINT_OICR_TSYN_TX_M) {
3272 		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3273 		if (ice_pf_state_is_nominal(pf) &&
3274 		    pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
3275 			struct ice_ptp_tx *tx = &pf->ptp.port.tx;
3276 			unsigned long flags;
3277 			u8 idx;
3278 
3279 			spin_lock_irqsave(&tx->lock, flags);
3280 			idx = find_next_bit_wrap(tx->in_use, tx->len,
3281 						 tx->last_ll_ts_idx_read + 1);
3282 			if (idx != tx->len)
3283 				ice_ptp_req_tx_single_tstamp(tx, idx);
3284 			spin_unlock_irqrestore(&tx->lock, flags);
3285 		} else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
3286 			set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
3287 			ret = IRQ_WAKE_THREAD;
3288 		}
3289 	}
3290 
3291 	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3292 		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3293 		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3294 
3295 		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3296 
3297 		if (ice_pf_src_tmr_owned(pf)) {
3298 			/* Save EVENTs from GLTSYN register */
3299 			pf->ptp.ext_ts_irq |= gltsyn_stat &
3300 					      (GLTSYN_STAT_EVENT0_M |
3301 					       GLTSYN_STAT_EVENT1_M |
3302 					       GLTSYN_STAT_EVENT2_M);
3303 
3304 			ice_ptp_extts_event(pf);
3305 		}
3306 	}
3307 
3308 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3309 	if (oicr & ICE_AUX_CRIT_ERR) {
3310 		pf->oicr_err_reg |= oicr;
3311 		set_bit(ICE_AUX_ERR_PENDING, pf->state);
3312 		ena_mask &= ~ICE_AUX_CRIT_ERR;
3313 	}
3314 
3315 	/* Report any remaining unexpected interrupts */
3316 	oicr &= ena_mask;
3317 	if (oicr) {
3318 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3319 		/* If a critical error is pending there is no choice but to
3320 		 * reset the device.
3321 		 */
3322 		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3323 			    PFINT_OICR_ECC_ERR_M)) {
3324 			set_bit(ICE_PFR_REQ, pf->state);
3325 		}
3326 	}
3327 	ice_service_task_schedule(pf);
3328 	if (ret == IRQ_HANDLED)
3329 		ice_irq_dynamic_ena(hw, NULL, NULL);
3330 
3331 	return ret;
3332 }
3333 
3334 /**
3335  * ice_misc_intr_thread_fn - misc interrupt thread function
3336  * @irq: interrupt number
3337  * @data: pointer to the PF structure
3338  */
3339 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3340 {
3341 	struct ice_pf *pf = data;
3342 	struct ice_hw *hw;
3343 
3344 	hw = &pf->hw;
3345 
3346 	if (ice_is_reset_in_progress(pf->state))
3347 		goto skip_irq;
3348 
3349 	if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
3350 		/* Process outstanding Tx timestamps. If there is more work,
3351 		 * re-arm the interrupt to trigger again.
3352 		 */
3353 		if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
3354 			wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
3355 			ice_flush(hw);
3356 		}
3357 	}
3358 
3359 skip_irq:
3360 	ice_irq_dynamic_ena(hw, NULL, NULL);
3361 
3362 	return IRQ_HANDLED;
3363 }
3364 
3365 /**
3366  * ice_dis_ctrlq_interrupts - disable control queue interrupts
3367  * @hw: pointer to HW structure
3368  */
3369 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3370 {
3371 	/* disable Admin queue Interrupt causes */
3372 	wr32(hw, PFINT_FW_CTL,
3373 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3374 
3375 	/* disable Mailbox queue Interrupt causes */
3376 	wr32(hw, PFINT_MBX_CTL,
3377 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3378 
3379 	wr32(hw, PFINT_SB_CTL,
3380 	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3381 
3382 	/* disable Control queue Interrupt causes */
3383 	wr32(hw, PFINT_OICR_CTL,
3384 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3385 
3386 	ice_flush(hw);
3387 }
3388 
3389 /**
3390  * ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup
3391  * @pf: board private structure
3392  */
3393 static void ice_free_irq_msix_ll_ts(struct ice_pf *pf)
3394 {
3395 	int irq_num = pf->ll_ts_irq.virq;
3396 
3397 	synchronize_irq(irq_num);
3398 	devm_free_irq(ice_pf_to_dev(pf), irq_num, pf);
3399 
3400 	ice_free_irq(pf, pf->ll_ts_irq);
3401 }
3402 
3403 /**
3404  * ice_free_irq_msix_misc - Unroll misc vector setup
3405  * @pf: board private structure
3406  */
3407 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3408 {
3409 	int misc_irq_num = pf->oicr_irq.virq;
3410 	struct ice_hw *hw = &pf->hw;
3411 
3412 	ice_dis_ctrlq_interrupts(hw);
3413 
3414 	/* disable OICR interrupt */
3415 	wr32(hw, PFINT_OICR_ENA, 0);
3416 	ice_flush(hw);
3417 
3418 	synchronize_irq(misc_irq_num);
3419 	devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3420 
3421 	ice_free_irq(pf, pf->oicr_irq);
3422 	if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3423 		ice_free_irq_msix_ll_ts(pf);
3424 }
3425 
3426 /**
3427  * ice_ena_ctrlq_interrupts - enable control queue interrupts
3428  * @hw: pointer to HW structure
3429  * @reg_idx: HW vector index to associate the control queue interrupts with
3430  */
3431 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3432 {
3433 	u32 val;
3434 
3435 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3436 	       PFINT_OICR_CTL_CAUSE_ENA_M);
3437 	wr32(hw, PFINT_OICR_CTL, val);
3438 
3439 	/* enable Admin queue Interrupt causes */
3440 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3441 	       PFINT_FW_CTL_CAUSE_ENA_M);
3442 	wr32(hw, PFINT_FW_CTL, val);
3443 
3444 	/* enable Mailbox queue Interrupt causes */
3445 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3446 	       PFINT_MBX_CTL_CAUSE_ENA_M);
3447 	wr32(hw, PFINT_MBX_CTL, val);
3448 
3449 	if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) {
3450 		/* enable Sideband queue Interrupt causes */
3451 		val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3452 		       PFINT_SB_CTL_CAUSE_ENA_M);
3453 		wr32(hw, PFINT_SB_CTL, val);
3454 	}
3455 
3456 	ice_flush(hw);
3457 }
3458 
3459 /**
3460  * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3461  * @pf: board private structure
3462  *
3463  * This sets up the handler for MSIX 0, which is used to manage the
3464  * non-queue interrupts, e.g. AdminQ and errors. This is not used
3465  * when in MSI or Legacy interrupt mode.
3466  */
3467 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3468 {
3469 	struct device *dev = ice_pf_to_dev(pf);
3470 	struct ice_hw *hw = &pf->hw;
3471 	u32 pf_intr_start_offset;
3472 	struct msi_map irq;
3473 	int err = 0;
3474 
3475 	if (!pf->int_name[0])
3476 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3477 			 dev_driver_string(dev), dev_name(dev));
3478 
3479 	if (!pf->int_name_ll_ts[0])
3480 		snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1,
3481 			 "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev));
3482 	/* Do not request IRQ but do enable OICR interrupt since settings are
3483 	 * lost during reset. Note that this function is called only during the
3484 	 * rebuild path and not while reset is in progress.
3485 	 */
3486 	if (ice_is_reset_in_progress(pf->state))
3487 		goto skip_req_irq;
3488 
3489 	/* reserve one vector in irq_tracker for misc interrupts */
3490 	irq = ice_alloc_irq(pf, false);
3491 	if (irq.index < 0)
3492 		return irq.index;
3493 
3494 	pf->oicr_irq = irq;
3495 	err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3496 					ice_misc_intr_thread_fn, 0,
3497 					pf->int_name, pf);
3498 	if (err) {
3499 		dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3500 			pf->int_name, err);
3501 		ice_free_irq(pf, pf->oicr_irq);
3502 		return err;
3503 	}
3504 
3505 	/* reserve one vector in irq_tracker for ll_ts interrupt */
3506 	if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3507 		goto skip_req_irq;
3508 
3509 	irq = ice_alloc_irq(pf, false);
3510 	if (irq.index < 0)
3511 		return irq.index;
3512 
3513 	pf->ll_ts_irq = irq;
3514 	err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0,
3515 			       pf->int_name_ll_ts, pf);
3516 	if (err) {
3517 		dev_err(dev, "devm_request_irq for %s failed: %d\n",
3518 			pf->int_name_ll_ts, err);
3519 		ice_free_irq(pf, pf->ll_ts_irq);
3520 		return err;
3521 	}
3522 
3523 skip_req_irq:
3524 	ice_ena_misc_vector(pf);
3525 
3526 	ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3527 	/* This enables the LL TS interrupt. PFINT_ALLOC_FIRST is the first
	 * MSI-X index assigned to this PF, and the ll_ts vector index is
	 * relative to it.
	 */
3528 	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3529 	if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3530 		wr32(hw, PFINT_SB_CTL,
3531 		     ((pf->ll_ts_irq.index + pf_intr_start_offset) &
3532 		      PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M);
3533 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3534 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3535 
3536 	ice_flush(hw);
3537 	ice_irq_dynamic_ena(hw, NULL, NULL);
3538 
3539 	return 0;
3540 }
3541 
3542 /**
3543  * ice_napi_add - register NAPI handler for the VSI
3544  * @vsi: VSI for which NAPI handler is to be registered
3545  *
3546  * This function is only called in the driver's load path. Registering the NAPI
3547  * handler is done in ice_vsi_alloc_q_vector() for all other cases (e.g.
3548  * resume, reset/rebuild).
3549  */
3550 static void ice_napi_add(struct ice_vsi *vsi)
3551 {
3552 	int v_idx;
3553 
3554 	if (!vsi->netdev)
3555 		return;
3556 
3557 	ice_for_each_q_vector(vsi, v_idx) {
3558 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3559 			       ice_napi_poll);
3560 		__ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
3561 	}
3562 }
3563 
3564 /**
3565  * ice_set_ops - set netdev and ethtool ops for the given netdev
3566  * @vsi: the VSI associated with the new netdev
3567  */
3568 static void ice_set_ops(struct ice_vsi *vsi)
3569 {
3570 	struct net_device *netdev = vsi->netdev;
3571 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3572 
3573 	if (ice_is_safe_mode(pf)) {
3574 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3575 		ice_set_ethtool_safe_mode_ops(netdev);
3576 		return;
3577 	}
3578 
3579 	netdev->netdev_ops = &ice_netdev_ops;
3580 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3581 	netdev->xdp_metadata_ops = &ice_xdp_md_ops;
3582 	ice_set_ethtool_ops(netdev);
3583 
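	/* XDP features are advertised only for the PF VSI */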
3584 	if (vsi->type != ICE_VSI_PF)
3585 		return;
3586 
3587 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
3588 			       NETDEV_XDP_ACT_XSK_ZEROCOPY |
3589 			       NETDEV_XDP_ACT_RX_SG;
3590 	netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
3591 }
3592 
3593 /**
3594  * ice_set_netdev_features - set features for the given netdev
3595  * @netdev: netdev instance
3596  */
3597 static void ice_set_netdev_features(struct net_device *netdev)
3598 {
3599 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3600 	bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3601 	netdev_features_t csumo_features;
3602 	netdev_features_t vlano_features;
3603 	netdev_features_t dflt_features;
3604 	netdev_features_t tso_features;
3605 
3606 	if (ice_is_safe_mode(pf)) {
3607 		/* safe mode */
3608 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3609 		netdev->hw_features = netdev->features;
3610 		return;
3611 	}
3612 
3613 	dflt_features = NETIF_F_SG	|
3614 			NETIF_F_HIGHDMA	|
3615 			NETIF_F_NTUPLE	|
3616 			NETIF_F_RXHASH;
3617 
3618 	csumo_features = NETIF_F_RXCSUM	  |
3619 			 NETIF_F_IP_CSUM  |
3620 			 NETIF_F_SCTP_CRC |
3621 			 NETIF_F_IPV6_CSUM;
3622 
3623 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3624 			 NETIF_F_HW_VLAN_CTAG_TX     |
3625 			 NETIF_F_HW_VLAN_CTAG_RX;
3626 
3627 	/* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3628 	if (is_dvm_ena)
3629 		vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3630 
3631 	tso_features = NETIF_F_TSO			|
3632 		       NETIF_F_TSO_ECN			|
3633 		       NETIF_F_TSO6			|
3634 		       NETIF_F_GSO_GRE			|
3635 		       NETIF_F_GSO_UDP_TUNNEL		|
3636 		       NETIF_F_GSO_GRE_CSUM		|
3637 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3638 		       NETIF_F_GSO_PARTIAL		|
3639 		       NETIF_F_GSO_IPXIP4		|
3640 		       NETIF_F_GSO_IPXIP6		|
3641 		       NETIF_F_GSO_UDP_L4;
3642 
3643 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3644 					NETIF_F_GSO_GRE_CSUM;
3645 	/* set features that user can change */
3646 	netdev->hw_features = dflt_features | csumo_features |
3647 			      vlano_features | tso_features;
3648 
3649 	/* add support for HW_CSUM on packets with MPLS header */
3650 	netdev->mpls_features =  NETIF_F_HW_CSUM |
3651 				 NETIF_F_TSO     |
3652 				 NETIF_F_TSO6;
3653 
3654 	/* enable features */
3655 	netdev->features |= netdev->hw_features;
3656 
3657 	netdev->hw_features |= NETIF_F_HW_TC;
3658 	netdev->hw_features |= NETIF_F_LOOPBACK;
3659 
3660 	/* encap and VLAN devices inherit default, csumo and tso features */
3661 	netdev->hw_enc_features |= dflt_features | csumo_features |
3662 				   tso_features;
3663 	netdev->vlan_features |= dflt_features | csumo_features |
3664 				 tso_features;
3665 
3666 	/* advertise support but don't enable by default since only one type of
3667 	 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3668 	 * type turns on the other has to be turned off. This is enforced by the
3669 	 * ice_fix_features() ndo callback.
3670 	 */
3671 	if (is_dvm_ena)
3672 		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3673 			NETIF_F_HW_VLAN_STAG_TX;
3674 
3675 	/* Leave CRC / FCS stripping enabled by default, but allow the value to
3676 	 * be changed at runtime
3677 	 */
3678 	netdev->hw_features |= NETIF_F_RXFCS;
3679 
3680 	netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
3681 }
3682 
3683 /**
3684  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3685  * @lut: Lookup table
3686  * @rss_table_size: Lookup table size
3687  * @rss_size: Number of queues to distribute the hash across
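 *
 * Entries are filled round-robin: e.g. rss_table_size = 8 with
 * rss_size = 3 yields the pattern 0 1 2 0 1 2 0 1.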
3688  */
3689 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3690 {
3691 	u16 i;
3692 
3693 	for (i = 0; i < rss_table_size; i++)
3694 		lut[i] = i % rss_size;
3695 }
3696 
3697 /**
3698  * ice_pf_vsi_setup - Set up a PF VSI
3699  * @pf: board private structure
3700  * @pi: pointer to the port_info instance
3701  *
3702  * Returns pointer to the successfully allocated VSI software struct
3703  * on success, otherwise returns NULL on failure.
3704  */
3705 static struct ice_vsi *
3706 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3707 {
3708 	struct ice_vsi_cfg_params params = {};
3709 
3710 	params.type = ICE_VSI_PF;
3711 	params.port_info = pi;
3712 	params.flags = ICE_VSI_FLAG_INIT;
3713 
3714 	return ice_vsi_setup(pf, &params);
3715 }
3716 
3717 static struct ice_vsi *
3718 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3719 		   struct ice_channel *ch)
3720 {
3721 	struct ice_vsi_cfg_params params = {};
3722 
3723 	params.type = ICE_VSI_CHNL;
3724 	params.port_info = pi;
3725 	params.ch = ch;
3726 	params.flags = ICE_VSI_FLAG_INIT;
3727 
3728 	return ice_vsi_setup(pf, &params);
3729 }
3730 
3731 /**
3732  * ice_ctrl_vsi_setup - Set up a control VSI
3733  * @pf: board private structure
3734  * @pi: pointer to the port_info instance
3735  *
3736  * Returns pointer to the successfully allocated VSI software struct
3737  * on success, otherwise returns NULL on failure.
3738  */
3739 static struct ice_vsi *
3740 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3741 {
3742 	struct ice_vsi_cfg_params params = {};
3743 
3744 	params.type = ICE_VSI_CTRL;
3745 	params.port_info = pi;
3746 	params.flags = ICE_VSI_FLAG_INIT;
3747 
3748 	return ice_vsi_setup(pf, &params);
3749 }
3750 
3751 /**
3752  * ice_lb_vsi_setup - Set up a loopback VSI
3753  * @pf: board private structure
3754  * @pi: pointer to the port_info instance
3755  *
3756  * Returns pointer to the successfully allocated VSI software struct
3757  * on success, otherwise returns NULL on failure.
3758  */
3759 struct ice_vsi *
3760 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3761 {
3762 	struct ice_vsi_cfg_params params = {};
3763 
3764 	params.type = ICE_VSI_LB;
3765 	params.port_info = pi;
3766 	params.flags = ICE_VSI_FLAG_INIT;
3767 
3768 	return ice_vsi_setup(pf, &params);
3769 }
3770 
3771 /**
3772  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3773  * @netdev: network interface to be adjusted
3774  * @proto: VLAN TPID
3775  * @vid: VLAN ID to be added
3776  *
3777  * net_device_ops implementation for adding VLAN IDs
3778  */
3779 static int
3780 ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3781 {
3782 	struct ice_netdev_priv *np = netdev_priv(netdev);
3783 	struct ice_vsi_vlan_ops *vlan_ops;
3784 	struct ice_vsi *vsi = np->vsi;
3785 	struct ice_vlan vlan;
3786 	int ret;
3787 
3788 	/* VLAN 0 is added by default during load/reset */
3789 	if (!vid)
3790 		return 0;
3791 
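	/* serialize against other flows modifying this VSI's configuration */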
3792 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3793 		usleep_range(1000, 2000);
3794 
3795 	/* Add multicast promisc rule for the VLAN ID to be added if
3796 	 * all-multicast is currently enabled.
3797 	 */
3798 	if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3799 		ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3800 					       ICE_MCAST_VLAN_PROMISC_BITS,
3801 					       vid);
3802 		if (ret)
3803 			goto finish;
3804 	}
3805 
3806 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3807 
3808 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3809 	 * packets aren't pruned by the device's internal switch on Rx
3810 	 */
3811 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3812 	ret = vlan_ops->add_vlan(vsi, &vlan);
3813 	if (ret)
3814 		goto finish;
3815 
3816 	/* If all-multicast is currently enabled and this VLAN ID is the only
3817 	 * one besides VLAN 0, update the multicast promisc rule for VLAN 0
3818 	 * from look-up type ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3819 	 */
3820 	if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3821 	    ice_vsi_num_non_zero_vlans(vsi) == 1) {
3822 		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3823 					   ICE_MCAST_PROMISC_BITS, 0);
3824 		ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3825 					 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3826 	}
3827 
3828 finish:
3829 	clear_bit(ICE_CFG_BUSY, vsi->state);
3830 
3831 	return ret;
3832 }
3833 
3834 /**
3835  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3836  * @netdev: network interface to be adjusted
3837  * @proto: VLAN TPID
3838  * @vid: VLAN ID to be removed
3839  *
3840  * net_device_ops implementation for removing VLAN IDs
3841  */
3842 static int
3843 ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3844 {
3845 	struct ice_netdev_priv *np = netdev_priv(netdev);
3846 	struct ice_vsi_vlan_ops *vlan_ops;
3847 	struct ice_vsi *vsi = np->vsi;
3848 	struct ice_vlan vlan;
3849 	int ret;
3850 
3851 	/* don't allow removal of VLAN 0 */
3852 	if (!vid)
3853 		return 0;
3854 
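	/* serialize against other flows modifying this VSI's configuration */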
3855 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3856 		usleep_range(1000, 2000);
3857 
3858 	ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3859 				    ICE_MCAST_VLAN_PROMISC_BITS, vid);
3860 	if (ret) {
3861 		netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3862 			   vsi->vsi_num);
3863 		vsi->current_netdev_flags |= IFF_ALLMULTI;
3864 	}
3865 
3866 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3867 
3868 	/* Make sure VLAN delete is successful before updating VLAN
3869 	 * information
3870 	 */
3871 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3872 	ret = vlan_ops->del_vlan(vsi, &vlan);
3873 	if (ret)
3874 		goto finish;
3875 
3876 	/* Remove multicast promisc rule for the removed VLAN ID if
3877 	 * all-multicast is enabled.
3878 	 */
3879 	if (vsi->current_netdev_flags & IFF_ALLMULTI)
3880 		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3881 					   ICE_MCAST_VLAN_PROMISC_BITS, vid);
3882 
3883 	if (!ice_vsi_has_non_zero_vlans(vsi)) {
3884 		/* Update look-up type of multicast promisc rule for VLAN 0
3885 		 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3886 		 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3887 		 */
3888 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3889 			ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3890 						   ICE_MCAST_VLAN_PROMISC_BITS,
3891 						   0);
3892 			ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3893 						 ICE_MCAST_PROMISC_BITS, 0);
3894 		}
3895 	}
3896 
3897 finish:
3898 	clear_bit(ICE_CFG_BUSY, vsi->state);
3899 
3900 	return ret;
3901 }
3902 
3903 /**
3904  * ice_rep_indr_tc_block_unbind - unbind indirect TC block callback
3905  * @cb_priv: indirection block private data
3906  */
3907 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3908 {
3909 	struct ice_indr_block_priv *indr_priv = cb_priv;
3910 
3911 	list_del(&indr_priv->list);
3912 	kfree(indr_priv);
3913 }
3914 
3915 /**
3916  * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3917  * @vsi: VSI struct which has the netdev
3918  */
3919 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3920 {
3921 	struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3922 
3923 	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3924 				 ice_rep_indr_tc_block_unbind);
3925 }
3926 
3927 /**
3928  * ice_tc_indir_block_register - Register TC indirect block notifications
3929  * @vsi: VSI struct which has the netdev
3930  *
3931  * Returns 0 on success, negative value on failure
3932  */
3933 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3934 {
3935 	struct ice_netdev_priv *np;
3936 
3937 	if (!vsi || !vsi->netdev)
3938 		return -EINVAL;
3939 
3940 	np = netdev_priv(vsi->netdev);
3941 
3942 	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3943 	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3944 }
3945 
3946 /**
3947  * ice_get_avail_q_count - Get count of queues available for use
3948  * @pf_qmap: bitmap to count available (clear) queue bits from
3949  * @lock: pointer to a mutex that protects access to pf_qmap
3950  * @size: size of the bitmap
3951  */
3952 static u16
3953 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3954 {
3955 	unsigned long bit;
3956 	u16 count = 0;
3957 
3958 	mutex_lock(lock);
3959 	for_each_clear_bit(bit, pf_qmap, size)
3960 		count++;
3961 	mutex_unlock(lock);
3962 
3963 	return count;
3964 }
3965 
3966 /**
3967  * ice_get_avail_txq_count - Get count of available Tx queues
3968  * @pf: pointer to an ice_pf instance
3969  */
3970 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3971 {
3972 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3973 				     pf->max_pf_txqs);
3974 }
3975 
3976 /**
3977  * ice_get_avail_rxq_count - Get count of available Rx queues
3978  * @pf: pointer to an ice_pf instance
3979  */
3980 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3981 {
3982 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3983 				     pf->max_pf_rxqs);
3984 }
3985 
3986 /**
3987  * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3988  * @pf: board private structure to initialize
3989  */
3990 static void ice_deinit_pf(struct ice_pf *pf)
3991 {
3992 	ice_service_task_stop(pf);
3993 	mutex_destroy(&pf->lag_mutex);
3994 	mutex_destroy(&pf->adev_mutex);
3995 	mutex_destroy(&pf->sw_mutex);
3996 	mutex_destroy(&pf->tc_mutex);
3997 	mutex_destroy(&pf->avail_q_mutex);
3998 	mutex_destroy(&pf->vfs.table_lock);
3999 
4000 	if (pf->avail_txqs) {
4001 		bitmap_free(pf->avail_txqs);
4002 		pf->avail_txqs = NULL;
4003 	}
4004 
4005 	if (pf->avail_rxqs) {
4006 		bitmap_free(pf->avail_rxqs);
4007 		pf->avail_rxqs = NULL;
4008 	}
4009 
4010 	if (pf->ptp.clock)
4011 		ptp_clock_unregister(pf->ptp.clock);
4012 }
4013 
4014 /**
4015  * ice_set_pf_caps - set PF's capability flags
4016  * @pf: pointer to the PF instance
4017  */
4018 static void ice_set_pf_caps(struct ice_pf *pf)
4019 {
4020 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
4021 
4022 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
4023 	if (func_caps->common_cap.rdma)
4024 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
4025 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4026 	if (func_caps->common_cap.dcb)
4027 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4028 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
4029 	if (func_caps->common_cap.sr_iov_1_1) {
4030 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
4031 		pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
4032 					      ICE_MAX_SRIOV_VFS);
4033 	}
4034 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
4035 	if (func_caps->common_cap.rss_table_size)
4036 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
4037 
4038 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
4039 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
4040 		u16 unused;
4041 
4042 		/* ctrl_vsi_idx will be set to a valid value when flow director
4043 		 * is setup by ice_init_fdir
4044 		 * is set up by ice_init_fdir
4045 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4046 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
4047 		/* force guaranteed filter pool for PF */
4048 		ice_alloc_fd_guar_item(&pf->hw, &unused,
4049 				       func_caps->fd_fltr_guar);
4050 		/* force shared filter pool for PF */
4051 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
4052 				       func_caps->fd_fltr_best_effort);
4053 	}
4054 
4055 	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
4056 	if (func_caps->common_cap.ieee_1588 &&
4057 	    !(pf->hw.mac_type == ICE_MAC_E830))
4058 		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
4059 
4060 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
4061 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
4062 }
4063 
4064 /**
4065  * ice_init_pf - Initialize general software structures (struct ice_pf)
4066  * @pf: board private structure to initialize
4067  */
4068 static int ice_init_pf(struct ice_pf *pf)
4069 {
4070 	ice_set_pf_caps(pf);
4071 
4072 	mutex_init(&pf->sw_mutex);
4073 	mutex_init(&pf->tc_mutex);
4074 	mutex_init(&pf->adev_mutex);
4075 	mutex_init(&pf->lag_mutex);
4076 
4077 	INIT_HLIST_HEAD(&pf->aq_wait_list);
4078 	spin_lock_init(&pf->aq_wait_lock);
4079 	init_waitqueue_head(&pf->aq_wait_queue);
4080 
4081 	init_waitqueue_head(&pf->reset_wait_queue);
4082 
4083 	/* setup service timer and periodic service task */
4084 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
4085 	pf->serv_tmr_period = HZ;
4086 	INIT_WORK(&pf->serv_task, ice_service_task);
4087 	clear_bit(ICE_SERVICE_SCHED, pf->state);
4088 
4089 	mutex_init(&pf->avail_q_mutex);
4090 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
4091 	if (!pf->avail_txqs)
4092 		return -ENOMEM;
4093 
4094 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
4095 	if (!pf->avail_rxqs) {
4096 		bitmap_free(pf->avail_txqs);
4097 		pf->avail_txqs = NULL;
4098 		return -ENOMEM;
4099 	}
4100 
4101 	mutex_init(&pf->vfs.table_lock);
4102 	hash_init(pf->vfs.table);
4103 	ice_mbx_init_snapshot(&pf->hw);
4104 
4105 	return 0;
4106 }
4107 
4108 /**
4109  * ice_is_wol_supported - check if WoL is supported
4110  * @hw: pointer to hardware info
4111  *
4112  * Check if WoL is supported based on the HW configuration.
4113  * Returns true if NVM supports and enables WoL for this port, false otherwise
4114  */
4115 bool ice_is_wol_supported(struct ice_hw *hw)
4116 {
4117 	u16 wol_ctrl;
4118 
4119 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
4120 	 * word) indicates WoL is not supported on the corresponding PF ID.
4121 	 */
4122 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
4123 		return false;
4124 
4125 	return !(BIT(hw->port_info->lport) & wol_ctrl);
4126 }
4127 
4128 /**
4129  * ice_vsi_recfg_qs - Change the number of queues on a VSI
4130  * @vsi: VSI being changed
4131  * @new_rx: new number of Rx queues
4132  * @new_tx: new number of Tx queues
4133  * @locked: is adev device_lock held
4134  *
4135  * Only change the number of queues if new_tx or new_rx is non-zero.
4136  *
4137  * Returns 0 on success, -EINVAL if both counts are zero, -EBUSY on timeout.
4138  */
4139 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
4140 {
4141 	struct ice_pf *pf = vsi->back;
4142 	int err = 0, timeout = 50;
4143 
4144 	if (!new_rx && !new_tx)
4145 		return -EINVAL;
4146 
4147 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
4148 		timeout--;
4149 		if (!timeout)
4150 			return -EBUSY;
4151 		usleep_range(1000, 2000);
4152 	}
4153 
4154 	if (new_tx)
4155 		vsi->req_txq = (u16)new_tx;
4156 	if (new_rx)
4157 		vsi->req_rxq = (u16)new_rx;
4158 
4159 	/* set for the next time the netdev is started */
4160 	if (!netif_running(vsi->netdev)) {
4161 		ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4162 		dev_dbg(ice_pf_to_dev(pf), "Interface is down, queue count change happens when it is brought up\n");
4163 		goto done;
4164 	}
4165 
4166 	ice_vsi_close(vsi);
4167 	ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4168 	ice_pf_dcb_recfg(pf, locked);
4169 	ice_vsi_open(vsi);
4170 done:
4171 	clear_bit(ICE_CFG_BUSY, pf->state);
4172 	return err;
4173 }
4174 
4175 /**
4176  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4177  * @pf: PF to configure
4178  *
4179  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4180  * VSI can still Tx/Rx VLAN tagged packets.
4181  */
4182 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4183 {
4184 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4185 	struct ice_vsi_ctx *ctxt;
4186 	struct ice_hw *hw;
4187 	int status;
4188 
4189 	if (!vsi)
4190 		return;
4191 
4192 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4193 	if (!ctxt)
4194 		return;
4195 
4196 	hw = &pf->hw;
4197 	ctxt->info = vsi->info;
4198 
4199 	ctxt->info.valid_sections =
4200 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4201 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
4202 			    ICE_AQ_VSI_PROP_SW_VALID);
4203 
4204 	/* disable VLAN anti-spoof */
4205 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4206 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4207 
4208 	/* disable VLAN pruning and keep all other settings */
4209 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4210 
4211 	/* allow all VLANs on Tx and don't strip on Rx */
4212 	ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4213 		ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4214 
4215 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4216 	if (status) {
4217 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4218 			status, ice_aq_str(hw->adminq.sq_last_status));
4219 	} else {
4220 		vsi->info.sec_flags = ctxt->info.sec_flags;
4221 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4222 		vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4223 	}
4224 
4225 	kfree(ctxt);
4226 }
4227 
4228 /**
4229  * ice_log_pkg_init - log result of DDP package load
4230  * @hw: pointer to hardware info
4231  * @state: state of package load
4232  */
4233 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4234 {
4235 	struct ice_pf *pf = hw->back;
4236 	struct device *dev;
4237 
4238 	dev = ice_pf_to_dev(pf);
4239 
4240 	switch (state) {
4241 	case ICE_DDP_PKG_SUCCESS:
4242 		dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4243 			 hw->active_pkg_name,
4244 			 hw->active_pkg_ver.major,
4245 			 hw->active_pkg_ver.minor,
4246 			 hw->active_pkg_ver.update,
4247 			 hw->active_pkg_ver.draft);
4248 		break;
4249 	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4250 		dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4251 			 hw->active_pkg_name,
4252 			 hw->active_pkg_ver.major,
4253 			 hw->active_pkg_ver.minor,
4254 			 hw->active_pkg_ver.update,
4255 			 hw->active_pkg_ver.draft);
4256 		break;
4257 	case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4258 		dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
4259 			hw->active_pkg_name,
4260 			hw->active_pkg_ver.major,
4261 			hw->active_pkg_ver.minor,
4262 			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4263 		break;
4264 	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4265 		dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4266 			 hw->active_pkg_name,
4267 			 hw->active_pkg_ver.major,
4268 			 hw->active_pkg_ver.minor,
4269 			 hw->active_pkg_ver.update,
4270 			 hw->active_pkg_ver.draft,
4271 			 hw->pkg_name,
4272 			 hw->pkg_ver.major,
4273 			 hw->pkg_ver.minor,
4274 			 hw->pkg_ver.update,
4275 			 hw->pkg_ver.draft);
4276 		break;
4277 	case ICE_DDP_PKG_FW_MISMATCH:
4278 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
4279 		break;
4280 	case ICE_DDP_PKG_INVALID_FILE:
4281 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4282 		break;
4283 	case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4284 		dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
4285 		break;
4286 	case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4287 		dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
4288 			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4289 		break;
4290 	case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4291 		dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
4292 		break;
4293 	case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4294 		dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
4295 		break;
4296 	case ICE_DDP_PKG_LOAD_ERROR:
4297 		dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
4298 		/* poll for reset to complete */
4299 		if (ice_check_reset(hw))
4300 			dev_err(dev, "Error resetting device. Please reload the driver\n");
4301 		break;
4302 	case ICE_DDP_PKG_ERR:
4303 	default:
4304 		dev_err(dev, "An unknown error occurred when loading the DDP package.  Entering Safe Mode.\n");
4305 		break;
4306 	}
4307 }
4308 
4309 /**
4310  * ice_load_pkg - load/reload the DDP Package file
4311  * @firmware: firmware structure when firmware requested or NULL for reload
4312  * @pf: pointer to the PF instance
4313  *
4314  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4315  * initialize HW tables.
4316  */
4317 static void
4318 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4319 {
4320 	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4321 	struct device *dev = ice_pf_to_dev(pf);
4322 	struct ice_hw *hw = &pf->hw;
4323 
4324 	/* Load DDP Package */
4325 	if (firmware && !hw->pkg_copy) {
4326 		state = ice_copy_and_init_pkg(hw, firmware->data,
4327 					      firmware->size);
4328 		ice_log_pkg_init(hw, state);
4329 	} else if (!firmware && hw->pkg_copy) {
4330 		/* Reload package during rebuild after CORER/GLOBR reset */
4331 		state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4332 		ice_log_pkg_init(hw, state);
4333 	} else {
4334 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4335 	}
4336 
4337 	if (!ice_is_init_pkg_successful(state)) {
4338 		/* Safe Mode */
4339 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4340 		return;
4341 	}
4342 
4343 	/* Successful download package is the precondition for advanced
4344 	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4345 	 */
4346 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4347 }
4348 
4349 /**
4350  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4351  * @pf: pointer to the PF structure
4352  *
4353  * There is no error returned here because the driver should be able to handle
4354  * 128 Byte cache lines, so we only print a warning in case issues are seen,
4355  * specifically with Tx.
4356  */
4357 static void ice_verify_cacheline_size(struct ice_pf *pf)
4358 {
4359 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4360 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4361 			 ICE_CACHE_LINE_BYTES);
4362 }
4363 
4364 /**
4365  * ice_send_version - update firmware with driver version
4366  * @pf: PF struct
4367  *
4368  * Returns 0 on success, else error code
4369  */
4370 static int ice_send_version(struct ice_pf *pf)
4371 {
4372 	struct ice_driver_ver dv;
4373 
4374 	dv.major_ver = 0xff;
4375 	dv.minor_ver = 0xff;
4376 	dv.build_ver = 0xff;
4377 	dv.subbuild_ver = 0;
4378 	strscpy((char *)dv.driver_string, UTS_RELEASE,
4379 		sizeof(dv.driver_string));
4380 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4381 }
4382 
4383 /**
4384  * ice_init_fdir - Initialize flow director VSI and configuration
4385  * @pf: pointer to the PF instance
4386  *
4387  * returns 0 on success, negative on error
4388  */
4389 static int ice_init_fdir(struct ice_pf *pf)
4390 {
4391 	struct device *dev = ice_pf_to_dev(pf);
4392 	struct ice_vsi *ctrl_vsi;
4393 	int err;
4394 
4395 	/* Side Band Flow Director needs to have a control VSI.
4396 	 * Allocate it and store it in the PF.
4397 	 */
4398 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4399 	if (!ctrl_vsi) {
4400 		dev_dbg(dev, "could not create control VSI\n");
4401 		return -ENOMEM;
4402 	}
4403 
4404 	err = ice_vsi_open_ctrl(ctrl_vsi);
4405 	if (err) {
4406 		dev_dbg(dev, "could not open control VSI\n");
4407 		goto err_vsi_open;
4408 	}
4409 
4410 	mutex_init(&pf->hw.fdir_fltr_lock);
4411 
4412 	err = ice_fdir_create_dflt_rules(pf);
4413 	if (err)
4414 		goto err_fdir_rule;
4415 
4416 	return 0;
4417 
4418 err_fdir_rule:
4419 	ice_fdir_release_flows(&pf->hw);
4420 	ice_vsi_close(ctrl_vsi);
4421 err_vsi_open:
4422 	ice_vsi_release(ctrl_vsi);
4423 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4424 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4425 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4426 	}
4427 	return err;
4428 }
4429 
4430 static void ice_deinit_fdir(struct ice_pf *pf)
4431 {
4432 	struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4433 
4434 	if (!vsi)
4435 		return;
4436 
4437 	ice_vsi_manage_fdir(vsi, false);
4438 	ice_vsi_release(vsi);
4439 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4440 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4441 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4442 	}
4443 
4444 	mutex_destroy(&pf->hw.fdir_fltr_lock);
4445 }
4446 
4447 /**
4448  * ice_get_opt_fw_name - return optional firmware file name or NULL
4449  * @pf: pointer to the PF instance
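 *
 * Return: kzalloc'd file name of the form
 * "intel/ice/ddp/ice-<DSN as 16 hex digits>.pkg", which the caller must
 * kfree(), or NULL if the device has no DSN or the allocation fails.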
4450  */
4451 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4452 {
4453 	/* Optional firmware name is the same as the default, with an added
4454 	 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
4455 	 */
4456 	struct pci_dev *pdev = pf->pdev;
4457 	char *opt_fw_filename;
4458 	u64 dsn;
4459 
4460 	/* Determine the name of the optional file using the DSN (two
4461 	 * dwords following the start of the DSN Capability).
4462 	 */
4463 	dsn = pci_get_dsn(pdev);
4464 	if (!dsn)
4465 		return NULL;
4466 
4467 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4468 	if (!opt_fw_filename)
4469 		return NULL;
4470 
4471 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4472 		 ICE_DDP_PKG_PATH, dsn);
4473 
4474 	return opt_fw_filename;
4475 }
4476 
4477 /**
4478  * ice_request_fw - request the DDP package file from the filesystem
4479  * @pf: pointer to the PF instance
4480  * @firmware: double pointer to firmware struct
4481  *
4482  * Return: zero when successful, negative values otherwise.
4483  */
4484 static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware)
4485 {
4486 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4487 	struct device *dev = ice_pf_to_dev(pf);
4488 	int err = 0;
4489 
4490 	/* optional device-specific DDP (if present) overrides the default DDP
4491 	 * package file. The kernel logs a debug message if the file doesn't
4492 	 * exist, and warning messages for other errors.
4493 	 */
4494 	if (opt_fw_filename) {
4495 		err = firmware_request_nowarn(firmware, opt_fw_filename, dev);
4496 		kfree(opt_fw_filename);
4497 		if (!err)
4498 			return err;
4499 	}
4500 	err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev);
4501 	if (err)
4502 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4503 
4504 	return err;
4505 }
4506 
4507 /**
4508  * ice_init_tx_topology - performs Tx topology initialization
4509  * @hw: pointer to the hardware structure
4510  * @firmware: pointer to firmware structure
4511  *
4512  * Return: zero when init was successful, negative values otherwise.
4513  */
4514 static int
4515 ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
4516 {
4517 	u8 num_tx_sched_layers = hw->num_tx_sched_layers;
4518 	struct ice_pf *pf = hw->back;
4519 	struct device *dev;
4520 	u8 *buf_copy;
4521 	int err;
4522 
4523 	dev = ice_pf_to_dev(pf);
4524 	/* ice_cfg_tx_topo buf argument is not a constant,
4525 	 * so we have to make a copy
4526 	 */
4527 	buf_copy = kmemdup(firmware->data, firmware->size, GFP_KERNEL);
	if (!buf_copy)
		return -ENOMEM;
4528 
4529 	err = ice_cfg_tx_topo(hw, buf_copy, firmware->size);
4530 	if (!err) {
4531 		if (hw->num_tx_sched_layers > num_tx_sched_layers)
4532 			dev_info(dev, "Tx scheduling layers switching feature disabled\n");
4533 		else
4534 			dev_info(dev, "Tx scheduling layers switching feature enabled\n");
4535 		/* if there was a change in topology, ice_cfg_tx_topo triggered
4536 		 * a CORER and we need to re-init hw
4537 		 */
4538 		ice_deinit_hw(hw);
4539 		err = ice_init_hw(hw);
4540 
4541 		return err;
4542 	} else if (err == -EIO) {
4543 		dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
4544 	}
4545 
4546 	return 0;
4547 }
4548 
4549 /**
4550  * ice_init_ddp_config - DDP related configuration
4551  * @hw: pointer to the hardware structure
4552  * @pf: pointer to the PF structure
4553  *
4554  * This function loads the DDP file from disk, then initializes the Tx
4555  * topology. Finally, the DDP package is downloaded to the device.
4556  *
4557  * Return: zero when init was successful, negative values otherwise.
4558  */
4559 static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
4560 {
4561 	struct device *dev = ice_pf_to_dev(pf);
4562 	const struct firmware *firmware = NULL;
4563 	int err;
4564 
4565 	err = ice_request_fw(pf, &firmware);
4566 	if (err) {
4567 		dev_err(dev, "Failed to request FW: %d\n", err);
4568 		return err;
4569 	}
4570 
4571 	err = ice_init_tx_topology(hw, firmware);
4572 	if (err) {
4573 		dev_err(dev, "Failed to initialize Tx topology: %d\n",
4574 			err);
4575 		release_firmware(firmware);
4576 		return err;
4577 	}
4578 
4579 	/* Download firmware to device */
4580 	ice_load_pkg(firmware, pf);
4581 	release_firmware(firmware);
4582 
4583 	return 0;
4584 }
4585 
4586 /**
4587  * ice_print_wake_reason - show the wake up cause in the log
4588  * @pf: pointer to the PF struct
4589  */
4590 static void ice_print_wake_reason(struct ice_pf *pf)
4591 {
4592 	u32 wus = pf->wakeup_reason;
4593 	const char *wake_str;
4594 
4595 	/* if no wake event, nothing to print */
4596 	if (!wus)
4597 		return;
4598 
4599 	if (wus & PFPM_WUS_LNKC_M)
4600 		wake_str = "Link\n";
4601 	else if (wus & PFPM_WUS_MAG_M)
4602 		wake_str = "Magic Packet\n";
4603 	else if (wus & PFPM_WUS_MNG_M)
4604 		wake_str = "Management\n";
4605 	else if (wus & PFPM_WUS_FW_RST_WK_M)
4606 		wake_str = "Firmware Reset\n";
4607 	else
4608 		wake_str = "Unknown\n";
4609 
4610 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4611 }
4612 
4613 /**
4614  * ice_pf_fwlog_update_module - update the log level of one module
4615  * @pf: pointer to the PF struct
4616  * @log_level: log_level to use for the @module
4617  * @module: module to update
4618  */
4619 void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
4620 {
4621 	struct ice_hw *hw = &pf->hw;
4622 
4623 	hw->fwlog_cfg.module_entries[module].log_level = log_level;
4624 }
4625 
4626 /**
4627  * ice_register_netdev - register netdev
4628  * @vsi: pointer to the VSI struct
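 *
 * Return: 0 on success, -EIO if the VSI or its netdev is missing, or the
 * error returned by register_netdev().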
4629  */
4630 static int ice_register_netdev(struct ice_vsi *vsi)
4631 {
4632 	int err;
4633 
4634 	if (!vsi || !vsi->netdev)
4635 		return -EIO;
4636 
4637 	err = register_netdev(vsi->netdev);
4638 	if (err)
4639 		return err;
4640 
4641 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4642 	netif_carrier_off(vsi->netdev);
4643 	netif_tx_stop_all_queues(vsi->netdev);
4644 
4645 	return 0;
4646 }
4647 
4648 static void ice_unregister_netdev(struct ice_vsi *vsi)
4649 {
4650 	if (!vsi || !vsi->netdev)
4651 		return;
4652 
4653 	unregister_netdev(vsi->netdev);
4654 	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4655 }
4656 
4657 /**
4658  * ice_cfg_netdev - Allocate, configure and register a netdev
4659  * @vsi: the VSI associated with the new netdev
4660  *
4661  * Returns 0 on success, negative value on failure
4662  */
4663 static int ice_cfg_netdev(struct ice_vsi *vsi)
4664 {
4665 	struct ice_netdev_priv *np;
4666 	struct net_device *netdev;
4667 	u8 mac_addr[ETH_ALEN];
4668 
4669 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4670 				    vsi->alloc_rxq);
4671 	if (!netdev)
4672 		return -ENOMEM;
4673 
4674 	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4675 	vsi->netdev = netdev;
4676 	np = netdev_priv(netdev);
4677 	np->vsi = vsi;
4678 
4679 	ice_set_netdev_features(netdev);
4680 	ice_set_ops(vsi);
4681 
4682 	if (vsi->type == ICE_VSI_PF) {
4683 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4684 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4685 		eth_hw_addr_set(netdev, mac_addr);
4686 	}
4687 
4688 	netdev->priv_flags |= IFF_UNICAST_FLT;
4689 
4690 	/* Setup netdev TC information */
4691 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4692 
4693 	netdev->max_mtu = ICE_MAX_MTU;
4694 
4695 	return 0;
4696 }
4697 
4698 static void ice_decfg_netdev(struct ice_vsi *vsi)
4699 {
4700 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4701 	free_netdev(vsi->netdev);
4702 	vsi->netdev = NULL;
4703 }
4704 
4705 /**
4706  * ice_wait_for_fw - wait for full FW readiness
4707  * @hw: pointer to the hardware structure
4708  * @timeout: milliseconds that can elapse before timing out
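 *
 * Return: 0 once the GL_MNG_FWSM load bit is clear, -ETIMEDOUT if firmware
 * is still loading when @timeout expires.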
4709  */
4710 static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
4711 {
4712 	int fw_loading;
4713 	u32 elapsed = 0;
4714 
4715 	while (elapsed <= timeout) {
4716 		fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
4717 
4718 		/* firmware was not yet loaded, we have to wait longer */
4719 		if (fw_loading) {
4720 			elapsed += 100;
4721 			msleep(100);
4722 			continue;
4723 		}
4724 		return 0;
4725 	}
4726 
4727 	return -ETIMEDOUT;
4728 }
4729 
4730 int ice_init_dev(struct ice_pf *pf)
4731 {
4732 	struct device *dev = ice_pf_to_dev(pf);
4733 	struct ice_hw *hw = &pf->hw;
4734 	int err;
4735 
4736 	err = ice_init_hw(hw);
4737 	if (err) {
4738 		dev_err(dev, "ice_init_hw failed: %d\n", err);
4739 		return err;
4740 	}
4741 
4742 	/* Some cards require longer initialization times
4743 	 * due to the need to load FW from an external source.
4744 	 * This can take up to half a minute.
4745 	 */
4746 	if (ice_is_pf_c827(hw)) {
4747 		err = ice_wait_for_fw(hw, 30000);
4748 		if (err) {
4749 			dev_err(dev, "ice_wait_for_fw timed out\n");
4750 			return err;
4751 		}
4752 	}
4753 
4754 	ice_init_feature_support(pf);
4755 
4756 	err = ice_init_ddp_config(hw, pf);
4757 	if (err)
4758 		return err;
4759 
4760 	/* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be
4761 	 * set in pf->flags, which will cause ice_is_safe_mode to return
4762 	 * true
4763 	 */
4764 	if (ice_is_safe_mode(pf)) {
4765 		/* we already got function/device capabilities but these don't
4766 		 * reflect what the driver needs to do in safe mode. Instead of
4767 		 * adding conditional logic everywhere to ignore these
4768 		 * device/function capabilities, override them.
4769 		 */
4770 		ice_set_safe_mode_caps(hw);
4771 	}
4772 
4773 	err = ice_init_pf(pf);
4774 	if (err) {
4775 		dev_err(dev, "ice_init_pf failed: %d\n", err);
4776 		goto err_init_pf;
4777 	}
4778 
4779 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4780 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4781 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4782 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4783 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4784 		pf->hw.udp_tunnel_nic.tables[0].n_entries =
4785 			pf->hw.tnl.valid_count[TNL_VXLAN];
4786 		pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
4787 			UDP_TUNNEL_TYPE_VXLAN;
4788 	}
4789 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4790 		pf->hw.udp_tunnel_nic.tables[1].n_entries =
4791 			pf->hw.tnl.valid_count[TNL_GENEVE];
4792 		pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
4793 			UDP_TUNNEL_TYPE_GENEVE;
4794 	}
4795 
4796 	err = ice_init_interrupt_scheme(pf);
4797 	if (err) {
4798 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4799 		err = -EIO;
4800 		goto err_init_interrupt_scheme;
4801 	}
4802 
4803 	/* In case of MSIX we are going to setup the misc vector right here
4804 	 * to handle admin queue events etc. In case of legacy and MSI
4805 	 * the misc functionality and queue processing is combined in
4806 	 * the same vector and that gets setup at open.
4807 	 */
4808 	err = ice_req_irq_msix_misc(pf);
4809 	if (err) {
4810 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4811 		goto err_req_irq_msix_misc;
4812 	}
4813 
4814 	return 0;
4815 
4816 err_req_irq_msix_misc:
4817 	ice_clear_interrupt_scheme(pf);
4818 err_init_interrupt_scheme:
4819 	ice_deinit_pf(pf);
4820 err_init_pf:
4821 	ice_deinit_hw(hw);
4822 	return err;
4823 }
4824 
4825 void ice_deinit_dev(struct ice_pf *pf)
4826 {
4827 	ice_free_irq_msix_misc(pf);
4828 	ice_deinit_pf(pf);
4829 	ice_deinit_hw(&pf->hw);
4830 
4831 	/* Service task is already stopped, so call reset directly. */
4832 	ice_reset(&pf->hw, ICE_RESET_PFR);
4833 	pci_wait_for_pending_transaction(pf->pdev);
4834 	ice_clear_interrupt_scheme(pf);
4835 }
4836 
4837 static void ice_init_features(struct ice_pf *pf)
4838 {
4839 	struct device *dev = ice_pf_to_dev(pf);
4840 
4841 	if (ice_is_safe_mode(pf))
4842 		return;
4843 
4844 	/* initialize DDP driven features */
4845 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4846 		ice_ptp_init(pf);
4847 
4848 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4849 		ice_gnss_init(pf);
4850 
4851 	if (ice_is_feature_supported(pf, ICE_F_CGU) ||
4852 	    ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
4853 		ice_dpll_init(pf);
4854 
4855 	/* Note: Flow director init failure is non-fatal to load */
4856 	if (ice_init_fdir(pf))
4857 		dev_err(dev, "could not initialize flow director\n");
4858 
4859 	/* Note: DCB init failure is non-fatal to load */
4860 	if (ice_init_pf_dcb(pf, false)) {
4861 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4862 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4863 	} else {
4864 		ice_cfg_lldp_mib_change(&pf->hw, true);
4865 	}
4866 
4867 	if (ice_init_lag(pf))
4868 		dev_warn(dev, "Failed to init link aggregation support\n");
4869 
4870 	ice_hwmon_init(pf);
4871 }
4872 
4873 static void ice_deinit_features(struct ice_pf *pf)
4874 {
4875 	if (ice_is_safe_mode(pf))
4876 		return;
4877 
4878 	ice_deinit_lag(pf);
4879 	if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4880 		ice_cfg_lldp_mib_change(&pf->hw, false);
4881 	ice_deinit_fdir(pf);
4882 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4883 		ice_gnss_exit(pf);
4884 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4885 		ice_ptp_release(pf);
4886 	if (test_bit(ICE_FLAG_DPLL, pf->flags))
4887 		ice_dpll_deinit(pf);
4888 	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
4889 		xa_destroy(&pf->eswitch.reprs);
4890 }
4891 
4892 static void ice_init_wakeup(struct ice_pf *pf)
4893 {
4894 	/* Save wakeup reason register for later use */
4895 	pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4896 
4897 	/* check for a power management event */
4898 	ice_print_wake_reason(pf);
4899 
4900 	/* clear wake status, all bits */
4901 	wr32(&pf->hw, PFPM_WUS, U32_MAX);
4902 
4903 	/* Disable WoL at init, wait for user to enable */
4904 	device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4905 }
4906 
4907 static int ice_init_link(struct ice_pf *pf)
4908 {
4909 	struct device *dev = ice_pf_to_dev(pf);
4910 	int err;
4911 
4912 	err = ice_init_link_events(pf->hw.port_info);
4913 	if (err) {
4914 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4915 		return err;
4916 	}
4917 
4918 	/* not a fatal error if this fails */
4919 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4920 	if (err)
4921 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4922 
4923 	/* not a fatal error if this fails */
4924 	err = ice_update_link_info(pf->hw.port_info);
4925 	if (err)
4926 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4927 
4928 	ice_init_link_dflt_override(pf->hw.port_info);
4929 
4930 	ice_check_link_cfg_err(pf,
4931 			       pf->hw.port_info->phy.link_info.link_cfg_err);
4932 
4933 	/* if media available, initialize PHY settings */
4934 	if (pf->hw.port_info->phy.link_info.link_info &
4935 	    ICE_AQ_MEDIA_AVAILABLE) {
4936 		/* not a fatal error if this fails */
4937 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4938 		if (err)
4939 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4940 
4941 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4942 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4943 
4944 			if (vsi)
4945 				ice_configure_phy(vsi);
4946 		}
4947 	} else {
4948 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4949 	}
4950 
4951 	return err;
4952 }
4953 
4954 static int ice_init_pf_sw(struct ice_pf *pf)
4955 {
4956 	bool dvm = ice_is_dvm_ena(&pf->hw);
4957 	struct ice_vsi *vsi;
4958 	int err;
4959 
4960 	/* create switch struct for the switch element created by FW on boot */
4961 	pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
4962 	if (!pf->first_sw)
4963 		return -ENOMEM;
4964 
4965 	if (pf->hw.evb_veb)
4966 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4967 	else
4968 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4969 
4970 	pf->first_sw->pf = pf;
4971 
4972 	/* record the sw_id available for later use */
4973 	pf->first_sw->sw_id = pf->hw.port_info->sw_id;
4974 
4975 	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
4976 	if (err)
4977 		goto err_aq_set_port_params;
4978 
4979 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
4980 	if (!vsi) {
4981 		err = -ENOMEM;
4982 		goto err_pf_vsi_setup;
4983 	}
4984 
4985 	return 0;
4986 
4987 err_pf_vsi_setup:
4988 err_aq_set_port_params:
4989 	kfree(pf->first_sw);
4990 	return err;
4991 }
4992 
4993 static void ice_deinit_pf_sw(struct ice_pf *pf)
4994 {
4995 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4996 
4997 	if (!vsi)
4998 		return;
4999 
5000 	ice_vsi_release(vsi);
5001 	kfree(pf->first_sw);
5002 }
5003 
5004 static int ice_alloc_vsis(struct ice_pf *pf)
5005 {
5006 	struct device *dev = ice_pf_to_dev(pf);
5007 
5008 	pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
5009 	if (!pf->num_alloc_vsi)
5010 		return -EIO;
5011 
5012 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
5013 		dev_warn(dev,
5014 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
5015 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
5016 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
5017 	}
5018 
5019 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
5020 			       GFP_KERNEL);
5021 	if (!pf->vsi)
5022 		return -ENOMEM;
5023 
5024 	pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
5025 				     sizeof(*pf->vsi_stats), GFP_KERNEL);
5026 	if (!pf->vsi_stats) {
5027 		devm_kfree(dev, pf->vsi);
5028 		return -ENOMEM;
5029 	}
5030 
5031 	return 0;
5032 }
5033 
5034 static void ice_dealloc_vsis(struct ice_pf *pf)
5035 {
5036 	devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
5037 	pf->vsi_stats = NULL;
5038 
5039 	pf->num_alloc_vsi = 0;
5040 	devm_kfree(ice_pf_to_dev(pf), pf->vsi);
5041 	pf->vsi = NULL;
5042 }
5043 
5044 static int ice_init_devlink(struct ice_pf *pf)
5045 {
5046 	int err;
5047 
5048 	err = ice_devlink_register_params(pf);
5049 	if (err)
5050 		return err;
5051 
5052 	ice_devlink_init_regions(pf);
5053 	ice_devlink_register(pf);
5054 
5055 	return 0;
5056 }
5057 
5058 static void ice_deinit_devlink(struct ice_pf *pf)
5059 {
5060 	ice_devlink_unregister(pf);
5061 	ice_devlink_destroy_regions(pf);
5062 	ice_devlink_unregister_params(pf);
5063 }
5064 
5065 static int ice_init(struct ice_pf *pf)
5066 {
5067 	int err;
5068 
5069 	err = ice_init_dev(pf);
5070 	if (err)
5071 		return err;
5072 
5073 	err = ice_alloc_vsis(pf);
5074 	if (err)
5075 		goto err_alloc_vsis;
5076 
5077 	err = ice_init_pf_sw(pf);
5078 	if (err)
5079 		goto err_init_pf_sw;
5080 
5081 	ice_init_wakeup(pf);
5082 
5083 	err = ice_init_link(pf);
5084 	if (err)
5085 		goto err_init_link;
5086 
5087 	err = ice_send_version(pf);
5088 	if (err)
5089 		goto err_init_link;
5090 
5091 	ice_verify_cacheline_size(pf);
5092 
5093 	if (ice_is_safe_mode(pf))
5094 		ice_set_safe_mode_vlan_cfg(pf);
5095 	else
5096 		/* print PCI link speed and width */
5097 		pcie_print_link_status(pf->pdev);
5098 
5099 	/* ready to go, so clear down state bit */
5100 	clear_bit(ICE_DOWN, pf->state);
5101 	clear_bit(ICE_SERVICE_DIS, pf->state);
5102 
5103 	/* since everything is good, start the service timer */
5104 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5105 
5106 	return 0;
5107 
5108 err_init_link:
5109 	ice_deinit_pf_sw(pf);
5110 err_init_pf_sw:
5111 	ice_dealloc_vsis(pf);
5112 err_alloc_vsis:
5113 	ice_deinit_dev(pf);
5114 	return err;
5115 }
5116 
5117 static void ice_deinit(struct ice_pf *pf)
5118 {
5119 	set_bit(ICE_SERVICE_DIS, pf->state);
5120 	set_bit(ICE_DOWN, pf->state);
5121 
5122 	ice_deinit_pf_sw(pf);
5123 	ice_dealloc_vsis(pf);
5124 	ice_deinit_dev(pf);
5125 }
5126 
5127 /**
5128  * ice_load - load the PF by initializing HW and starting the VSI
5129  * @pf: pointer to the pf instance
5130  *
5131  * This function has to be called under devl_lock.
5132  */
5133 int ice_load(struct ice_pf *pf)
5134 {
5135 	struct ice_vsi *vsi;
5136 	int err;
5137 
5138 	devl_assert_locked(priv_to_devlink(pf));
5139 
5140 	vsi = ice_get_main_vsi(pf);
5141 
5142 	/* init channel list */
5143 	INIT_LIST_HEAD(&vsi->ch_list);
5144 
5145 	err = ice_cfg_netdev(vsi);
5146 	if (err)
5147 		return err;
5148 
5149 	/* Setup DCB netlink interface */
5150 	ice_dcbnl_setup(vsi);
5151 
5152 	err = ice_init_mac_fltr(pf);
5153 	if (err)
5154 		goto err_init_mac_fltr;
5155 
5156 	err = ice_devlink_create_pf_port(pf);
5157 	if (err)
5158 		goto err_devlink_create_pf_port;
5159 
5160 	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
5161 
5162 	err = ice_register_netdev(vsi);
5163 	if (err)
5164 		goto err_register_netdev;
5165 
5166 	err = ice_tc_indir_block_register(vsi);
5167 	if (err)
5168 		goto err_tc_indir_block_register;
5169 
5170 	ice_napi_add(vsi);
5171 
5172 	err = ice_init_rdma(pf);
5173 	if (err)
5174 		goto err_init_rdma;
5175 
5176 	ice_init_features(pf);
5177 	ice_service_task_restart(pf);
5178 
5179 	clear_bit(ICE_DOWN, pf->state);
5180 
5181 	return 0;
5182 
5183 err_init_rdma:
5184 	ice_tc_indir_block_unregister(vsi);
5185 err_tc_indir_block_register:
5186 	ice_unregister_netdev(vsi);
5187 err_register_netdev:
5188 	ice_devlink_destroy_pf_port(pf);
5189 err_devlink_create_pf_port:
5190 err_init_mac_fltr:
5191 	ice_decfg_netdev(vsi);
5192 	return err;
5193 }
5194 
5195 /**
5196  * ice_unload - unload the PF by stopping the VSI and deinitializing HW
5197  * @pf: pointer to the pf instance
5198  *
5199  * This function has to be called under devl_lock.
5200  */
5201 void ice_unload(struct ice_pf *pf)
5202 {
5203 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
5204 
5205 	devl_assert_locked(priv_to_devlink(pf));
5206 
5207 	ice_deinit_features(pf);
5208 	ice_deinit_rdma(pf);
5209 	ice_tc_indir_block_unregister(vsi);
5210 	ice_unregister_netdev(vsi);
5211 	ice_devlink_destroy_pf_port(pf);
5212 	ice_decfg_netdev(vsi);
5213 }
5214 
5215 /**
5216  * ice_probe - Device initialization routine
5217  * @pdev: PCI device information struct
5218  * @ent: entry in ice_pci_tbl
5219  *
5220  * Returns 0 on success, negative on failure
5221  */
5222 static int
5223 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
5224 {
5225 	struct device *dev = &pdev->dev;
5226 	struct ice_adapter *adapter;
5227 	struct ice_pf *pf;
5228 	struct ice_hw *hw;
5229 	int err;
5230 
5231 	if (pdev->is_virtfn) {
5232 		dev_err(dev, "can't probe a virtual function\n");
5233 		return -EINVAL;
5234 	}
5235 
5236 	/* When running under a kdump kernel, initiate a reset before enabling the
5237 	 * device in order to clear out any pending DMA transactions. These
5238 	 * transactions can cause some systems to machine check when doing
5239 	 * the pcim_enable_device() below.
5240 	 */
5241 	if (is_kdump_kernel()) {
5242 		pci_save_state(pdev);
5243 		pci_clear_master(pdev);
5244 		err = pcie_flr(pdev);
5245 		if (err)
5246 			return err;
5247 		pci_restore_state(pdev);
5248 	}
5249 
5250 	/* this driver uses devres, see
5251 	 * Documentation/driver-api/driver-model/devres.rst
5252 	 */
5253 	err = pcim_enable_device(pdev);
5254 	if (err)
5255 		return err;
5256 
5257 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
5258 	if (err) {
5259 		dev_err(dev, "BAR0 I/O map error %d\n", err);
5260 		return err;
5261 	}
5262 
5263 	pf = ice_allocate_pf(dev);
5264 	if (!pf)
5265 		return -ENOMEM;
5266 
5267 	/* initialize Auxiliary index to invalid value */
5268 	pf->aux_idx = -1;
5269 
5270 	/* set up for high or low DMA */
5271 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
5272 	if (err) {
5273 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
5274 		return err;
5275 	}
5276 
5277 	pci_set_master(pdev);
5278 
5279 	adapter = ice_adapter_get(pdev);
5280 	if (IS_ERR(adapter))
5281 		return PTR_ERR(adapter);
5282 
5283 	pf->pdev = pdev;
5284 	pf->adapter = adapter;
5285 	pci_set_drvdata(pdev, pf);
5286 	set_bit(ICE_DOWN, pf->state);
5287 	/* Disable service task until DOWN bit is cleared */
5288 	set_bit(ICE_SERVICE_DIS, pf->state);
5289 
5290 	hw = &pf->hw;
5291 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
5292 	pci_save_state(pdev);
5293 
5294 	hw->back = pf;
5295 	hw->port_info = NULL;
5296 	hw->vendor_id = pdev->vendor;
5297 	hw->device_id = pdev->device;
5298 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
5299 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
5300 	hw->subsystem_device_id = pdev->subsystem_device;
5301 	hw->bus.device = PCI_SLOT(pdev->devfn);
5302 	hw->bus.func = PCI_FUNC(pdev->devfn);
5303 	ice_set_ctrlq_len(hw);
5304 
5305 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
5306 
5307 #ifndef CONFIG_DYNAMIC_DEBUG
5308 	if (debug < -1)
5309 		hw->debug_mask = debug;
5310 #endif
5311 
5312 	err = ice_init(pf);
5313 	if (err)
5314 		goto err_init;
5315 
5316 	devl_lock(priv_to_devlink(pf));
5317 	err = ice_load(pf);
5318 	if (err)
5319 		goto err_load;
5320 
5321 	err = ice_init_devlink(pf);
5322 	if (err)
5323 		goto err_init_devlink;
5324 	devl_unlock(priv_to_devlink(pf));
5325 
5326 	return 0;
5327 
5328 err_init_devlink:
5329 	ice_unload(pf);
5330 err_load:
5331 	devl_unlock(priv_to_devlink(pf));
5332 	ice_deinit(pf);
5333 err_init:
5334 	ice_adapter_put(pdev);
5335 	pci_disable_device(pdev);
5336 	return err;
5337 }
5338 
5339 /**
5340  * ice_set_wake - enable or disable Wake on LAN
5341  * @pf: pointer to the PF struct
5342  *
5343  * Simple helper for WoL control
5344  */
5345 static void ice_set_wake(struct ice_pf *pf)
5346 {
5347 	struct ice_hw *hw = &pf->hw;
5348 	bool wol = pf->wol_ena;
5349 
5350 	/* clear wake state, otherwise new wake events won't fire */
5351 	wr32(hw, PFPM_WUS, U32_MAX);
5352 
5353 	/* enable / disable APM wake up, no RMW needed */
5354 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5355 
5356 	/* set magic packet filter enabled */
5357 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5358 }
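
/* For reference (not driver code): pf->wol_ena is normally controlled from
 * userspace through the ethtool WoL interface, e.g.:
 *
 *	ethtool -s <iface> wol g	# enable magic-packet wake
 *	ethtool -s <iface> wol d	# disable wake
 *
 * The modes actually supported are reported by "ethtool <iface>" and depend
 * on device and firmware capabilities.
 */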
5359 
5360 /**
5361  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5362  * @pf: pointer to the PF struct
5363  *
5364  * Issue firmware command to enable multicast magic wake, making
5365  * sure that any locally administered address (LAA) is used for
5366  * wake, and that PF reset doesn't undo the LAA.
5367  */
5368 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5369 {
5370 	struct device *dev = ice_pf_to_dev(pf);
5371 	struct ice_hw *hw = &pf->hw;
5372 	u8 mac_addr[ETH_ALEN];
5373 	struct ice_vsi *vsi;
5374 	int status;
5375 	u8 flags;
5376 
5377 	if (!pf->wol_ena)
5378 		return;
5379 
5380 	vsi = ice_get_main_vsi(pf);
5381 	if (!vsi)
5382 		return;
5383 
5384 	/* Get current MAC address in case it's an LAA */
5385 	if (vsi->netdev)
5386 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5387 	else
5388 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5389 
5390 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5391 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5392 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5393 
5394 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5395 	if (status)
5396 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5397 			status, ice_aq_str(hw->adminq.sq_last_status));
5398 }
5399 
5400 /**
5401  * ice_remove - Device removal routine
5402  * @pdev: PCI device information struct
5403  */
5404 static void ice_remove(struct pci_dev *pdev)
5405 {
5406 	struct ice_pf *pf = pci_get_drvdata(pdev);
5407 	int i;
5408 
5409 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5410 		if (!ice_is_reset_in_progress(pf->state))
5411 			break;
5412 		msleep(100);
5413 	}
5414 
5415 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5416 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5417 		ice_free_vfs(pf);
5418 	}
5419 
5420 	ice_hwmon_exit(pf);
5421 
5422 	ice_service_task_stop(pf);
5423 	ice_aq_cancel_waiting_tasks(pf);
5424 	set_bit(ICE_DOWN, pf->state);
5425 
5426 	if (!ice_is_safe_mode(pf))
5427 		ice_remove_arfs(pf);
5428 
5429 	devl_lock(priv_to_devlink(pf));
5430 	ice_deinit_devlink(pf);
5431 
5432 	ice_unload(pf);
5433 	devl_unlock(priv_to_devlink(pf));
5434 
5435 	ice_deinit(pf);
5436 	ice_vsi_release_all(pf);
5437 
5438 	ice_setup_mc_magic_wake(pf);
5439 	ice_set_wake(pf);
5440 
5441 	ice_adapter_put(pdev);
5442 	pci_disable_device(pdev);
5443 }
5444 
5445 /**
5446  * ice_shutdown - PCI callback for shutting down device
5447  * @pdev: PCI device information struct
5448  */
5449 static void ice_shutdown(struct pci_dev *pdev)
5450 {
5451 	struct ice_pf *pf = pci_get_drvdata(pdev);
5452 
5453 	ice_remove(pdev);
5454 
5455 	if (system_state == SYSTEM_POWER_OFF) {
5456 		pci_wake_from_d3(pdev, pf->wol_ena);
5457 		pci_set_power_state(pdev, PCI_D3hot);
5458 	}
5459 }
5460 
5461 /**
5462  * ice_prepare_for_shutdown - prep for PCI shutdown
5463  * @pf: board private structure
5464  *
5465  * Inform or close all dependent features in prep for PCI device shutdown
5466  */
5467 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5468 {
5469 	struct ice_hw *hw = &pf->hw;
5470 	u32 v;
5471 
5472 	/* Notify VFs of impending reset */
5473 	if (ice_check_sq_alive(hw, &hw->mailboxq))
5474 		ice_vc_notify_reset(pf);
5475 
5476 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5477 
5478 	/* disable the VSIs and their queues that are not already DOWN */
5479 	ice_pf_dis_all_vsi(pf, false);
5480 
5481 	ice_for_each_vsi(pf, v)
5482 		if (pf->vsi[v])
5483 			pf->vsi[v]->vsi_num = 0;
5484 
5485 	ice_shutdown_all_ctrlq(hw);
5486 }
5487 
5488 /**
5489  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5490  * @pf: board private structure to reinitialize
5491  *
5492  * This routine reinitializes the interrupt scheme that was cleared during
5493  * the power management suspend callback.
5494  *
5495  * This should be called during resume routine to re-allocate the q_vectors
5496  * and reacquire interrupts.
5497  */
5498 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5499 {
5500 	struct device *dev = ice_pf_to_dev(pf);
5501 	int ret, v;
5502 
5503 	/* Since we clear MSIX flag during suspend, we need to
5504 	 * set it back during resume...
5505 	 */
5506 
5507 	ret = ice_init_interrupt_scheme(pf);
5508 	if (ret) {
5509 		dev_err(dev, "Failed to re-initialize interrupt scheme: %d\n", ret);
5510 		return ret;
5511 	}
5512 
5513 	/* Remap vectors and rings, after successful re-init interrupts */
5514 	ice_for_each_vsi(pf, v) {
5515 		if (!pf->vsi[v])
5516 			continue;
5517 
5518 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5519 		if (ret)
5520 			goto err_reinit;
5521 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5522 		ice_vsi_set_napi_queues(pf->vsi[v]);
5523 	}
5524 
5525 	ret = ice_req_irq_msix_misc(pf);
5526 	if (ret) {
5527 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5528 			ret);
5529 		goto err_reinit;
5530 	}
5531 
5532 	return 0;
5533 
5534 err_reinit:
5535 	while (v--)
5536 		if (pf->vsi[v])
5537 			ice_vsi_free_q_vectors(pf->vsi[v]);
5538 
5539 	return ret;
5540 }
5541 
5542 /**
5543  * ice_suspend - PM callback for suspending the device
5544  * @dev: generic device information structure
5545  *
5546  * Power Management callback to quiesce the device and prepare
5547  * for D3 transition.
5548  */
5549 static int ice_suspend(struct device *dev)
5550 {
5551 	struct pci_dev *pdev = to_pci_dev(dev);
5552 	struct ice_pf *pf;
5553 	int disabled, v;
5554 
5555 	pf = pci_get_drvdata(pdev);
5556 
5557 	if (!ice_pf_state_is_nominal(pf)) {
5558 		dev_err(dev, "Device is not ready, no need to suspend it\n");
5559 		return -EBUSY;
5560 	}
5561 
5562 	/* Stop watchdog tasks until resume completion.
5563 	 * Even though it is most likely that the service task is
5564 	 * disabled if the device is suspended or down, the service task's
5565 	 * state is controlled by a different state bit, and we should
5566 	 * store and honor whatever state that bit is in at this point.
5567 	 */
5568 	disabled = ice_service_task_stop(pf);
5569 
5570 	ice_deinit_rdma(pf);
5571 
5572 	/* Already suspended? Then there is nothing to do */
5573 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5574 		if (!disabled)
5575 			ice_service_task_restart(pf);
5576 		return 0;
5577 	}
5578 
5579 	if (test_bit(ICE_DOWN, pf->state) ||
5580 	    ice_is_reset_in_progress(pf->state)) {
5581 		dev_err(dev, "can't suspend device in reset or already down\n");
5582 		if (!disabled)
5583 			ice_service_task_restart(pf);
5584 		return 0;
5585 	}
5586 
5587 	ice_setup_mc_magic_wake(pf);
5588 
5589 	ice_prepare_for_shutdown(pf);
5590 
5591 	ice_set_wake(pf);
5592 
5593 	/* Free vectors, clear the interrupt scheme and release IRQs
5594 	 * for proper hibernation, especially with large number of CPUs.
5595 	 * Otherwise hibernation might fail when mapping all the vectors back
5596 	 * to CPU0.
5597 	 */
5598 	ice_free_irq_msix_misc(pf);
5599 	ice_for_each_vsi(pf, v) {
5600 		if (!pf->vsi[v])
5601 			continue;
5602 		ice_vsi_free_q_vectors(pf->vsi[v]);
5603 	}
5604 	ice_clear_interrupt_scheme(pf);
5605 
5606 	pci_save_state(pdev);
5607 	pci_wake_from_d3(pdev, pf->wol_ena);
5608 	pci_set_power_state(pdev, PCI_D3hot);
5609 	return 0;
5610 }
5611 
5612 /**
5613  * ice_resume - PM callback for waking up from D3
5614  * @dev: generic device information structure
5615  */
5616 static int ice_resume(struct device *dev)
5617 {
5618 	struct pci_dev *pdev = to_pci_dev(dev);
5619 	enum ice_reset_req reset_type;
5620 	struct ice_pf *pf;
5621 	struct ice_hw *hw;
5622 	int ret;
5623 
5624 	pci_set_power_state(pdev, PCI_D0);
5625 	pci_restore_state(pdev);
5626 	pci_save_state(pdev);
5627 
5628 	if (!pci_device_is_present(pdev))
5629 		return -ENODEV;
5630 
5631 	ret = pci_enable_device_mem(pdev);
5632 	if (ret) {
5633 		dev_err(dev, "Cannot enable device after suspend\n");
5634 		return ret;
5635 	}
5636 
5637 	pf = pci_get_drvdata(pdev);
5638 	hw = &pf->hw;
5639 
5640 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
5641 	ice_print_wake_reason(pf);
5642 
5643 	/* We cleared the interrupt scheme when we suspended, so we need to
5644 	 * restore it now to resume device functionality.
5645 	 */
5646 	ret = ice_reinit_interrupt_scheme(pf);
5647 	if (ret)
5648 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5649 
5650 	ret = ice_init_rdma(pf);
5651 	if (ret)
5652 		dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
5653 			ret);
5654 
5655 	clear_bit(ICE_DOWN, pf->state);
5656 	/* Now perform PF reset and rebuild */
5657 	reset_type = ICE_RESET_PFR;
5658 	/* re-enable service task for reset, but allow reset to schedule it */
5659 	clear_bit(ICE_SERVICE_DIS, pf->state);
5660 
5661 	if (ice_schedule_reset(pf, reset_type))
5662 		dev_err(dev, "Reset during resume failed.\n");
5663 
5664 	clear_bit(ICE_SUSPENDED, pf->state);
5665 	ice_service_task_restart(pf);
5666 
5667 	/* Restart the service task */
5668 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5669 
5670 	return 0;
5671 }
5672 
5673 /**
5674  * ice_pci_err_detected - warning that PCI error has been detected
5675  * @pdev: PCI device information struct
5676  * @err: the type of PCI error
5677  *
5678  * Called to warn that something happened on the PCI bus and the error handling
5679  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
5680  */
5681 static pci_ers_result_t
5682 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5683 {
5684 	struct ice_pf *pf = pci_get_drvdata(pdev);
5685 
5686 	if (!pf) {
5687 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5688 			__func__, err);
5689 		return PCI_ERS_RESULT_DISCONNECT;
5690 	}
5691 
5692 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5693 		ice_service_task_stop(pf);
5694 
5695 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5696 			set_bit(ICE_PFR_REQ, pf->state);
5697 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5698 		}
5699 	}
5700 
5701 	return PCI_ERS_RESULT_NEED_RESET;
5702 }
5703 
5704 /**
5705  * ice_pci_err_slot_reset - a PCI slot reset has just happened
5706  * @pdev: PCI device information struct
5707  *
5708  * Called to determine if the driver can recover from the PCI slot reset by
5709  * using a register read to check whether the device is responsive.
5710  */
5711 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5712 {
5713 	struct ice_pf *pf = pci_get_drvdata(pdev);
5714 	pci_ers_result_t result;
5715 	int err;
5716 	u32 reg;
5717 
5718 	err = pci_enable_device_mem(pdev);
5719 	if (err) {
5720 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5721 			err);
5722 		result = PCI_ERS_RESULT_DISCONNECT;
5723 	} else {
5724 		pci_set_master(pdev);
5725 		pci_restore_state(pdev);
5726 		pci_save_state(pdev);
5727 		pci_wake_from_d3(pdev, false);
5728 
5729 		/* Check for life */
5730 		reg = rd32(&pf->hw, GLGEN_RTRIG);
5731 		if (!reg)
5732 			result = PCI_ERS_RESULT_RECOVERED;
5733 		else
5734 			result = PCI_ERS_RESULT_DISCONNECT;
5735 	}
5736 
5737 	return result;
5738 }
5739 
5740 /**
5741  * ice_pci_err_resume - restart operations after PCI error recovery
5742  * @pdev: PCI device information struct
5743  *
5744  * Called to allow the driver to bring things back up after PCI error and/or
5745  * reset recovery have finished
5746  */
5747 static void ice_pci_err_resume(struct pci_dev *pdev)
5748 {
5749 	struct ice_pf *pf = pci_get_drvdata(pdev);
5750 
5751 	if (!pf) {
5752 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5753 			__func__);
5754 		return;
5755 	}
5756 
5757 	if (test_bit(ICE_SUSPENDED, pf->state)) {
5758 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5759 			__func__);
5760 		return;
5761 	}
5762 
5763 	ice_restore_all_vfs_msi_state(pf);
5764 
5765 	ice_do_reset(pf, ICE_RESET_PFR);
5766 	ice_service_task_restart(pf);
5767 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5768 }
5769 
5770 /**
5771  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5772  * @pdev: PCI device information struct
5773  */
5774 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5775 {
5776 	struct ice_pf *pf = pci_get_drvdata(pdev);
5777 
5778 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5779 		ice_service_task_stop(pf);
5780 
5781 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5782 			set_bit(ICE_PFR_REQ, pf->state);
5783 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5784 		}
5785 	}
5786 }
5787 
5788 /**
5789  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5790  * @pdev: PCI device information struct
5791  */
5792 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5793 {
5794 	ice_pci_err_resume(pdev);
5795 }
5796 
5797 /* ice_pci_tbl - PCI Device ID Table
5798  *
5799  * Wildcard entries (PCI_ANY_ID) should come last
5800  * Last entry must be all 0s
5801  *
5802  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5803  *   Class, Class Mask, private data (not used) }
5804  */
5805 static const struct pci_device_id ice_pci_tbl[] = {
5806 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) },
5807 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) },
5808 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) },
5809 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) },
5810 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) },
5811 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) },
5812 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) },
5813 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) },
5814 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) },
5815 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) },
5816 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) },
5817 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) },
5818 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) },
5819 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) },
5820 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) },
5821 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) },
5822 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) },
5823 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) },
5824 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) },
5825 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) },
5826 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) },
5827 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) },
5828 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) },
5829 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
5830 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
5831 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
5832 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), },
5833 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), },
5834 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), },
5835 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), },
5836 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) },
5837 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) },
5838 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) },
5839 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) },
5840 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), },
5841 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), },
5842 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), },
5843 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), },
5844 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), },
5845 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), },
5846 	/* required last entry */
5847 	{}
5848 };
5849 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5850 
5851 static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5852 
5853 static const struct pci_error_handlers ice_pci_err_handler = {
5854 	.error_detected = ice_pci_err_detected,
5855 	.slot_reset = ice_pci_err_slot_reset,
5856 	.reset_prepare = ice_pci_err_reset_prepare,
5857 	.reset_done = ice_pci_err_reset_done,
5858 	.resume = ice_pci_err_resume
5859 };
5860 
5861 static struct pci_driver ice_driver = {
5862 	.name = KBUILD_MODNAME,
5863 	.id_table = ice_pci_tbl,
5864 	.probe = ice_probe,
5865 	.remove = ice_remove,
5866 	.driver.pm = pm_sleep_ptr(&ice_pm_ops),
5867 	.shutdown = ice_shutdown,
5868 	.sriov_configure = ice_sriov_configure,
5869 	.sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
5870 	.sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count,
5871 	.err_handler = &ice_pci_err_handler
5872 };
5873 
5874 /**
5875  * ice_module_init - Driver registration routine
5876  *
5877  * ice_module_init is the first routine called when the driver is
5878  * loaded. All it does is register with the PCI subsystem.
5879  */
5880 static int __init ice_module_init(void)
5881 {
5882 	int status = -ENOMEM;
5883 
5884 	pr_info("%s\n", ice_driver_string);
5885 	pr_info("%s\n", ice_copyright);
5886 
5887 	ice_adv_lnk_speed_maps_init();
5888 
5889 	ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
5890 	if (!ice_wq) {
5891 		pr_err("Failed to create workqueue\n");
5892 		return status;
5893 	}
5894 
5895 	ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
5896 	if (!ice_lag_wq) {
5897 		pr_err("Failed to create LAG workqueue\n");
5898 		goto err_dest_wq;
5899 	}
5900 
5901 	ice_debugfs_init();
5902 
5903 	status = pci_register_driver(&ice_driver);
5904 	if (status) {
5905 		pr_err("failed to register PCI driver, err %d\n", status);
5906 		goto err_dest_lag_wq;
5907 	}
5908 
5909 	return 0;
5910 
5911 err_dest_lag_wq:
5912 	destroy_workqueue(ice_lag_wq);
5913 	ice_debugfs_exit();
5914 err_dest_wq:
5915 	destroy_workqueue(ice_wq);
5916 	return status;
5917 }
5918 module_init(ice_module_init);
5919 
5920 /**
5921  * ice_module_exit - Driver exit cleanup routine
5922  *
5923  * ice_module_exit is called just before the driver is removed
5924  * from memory.
5925  */
5926 static void __exit ice_module_exit(void)
5927 {
5928 	pci_unregister_driver(&ice_driver);
5929 	ice_debugfs_exit();
5930 	destroy_workqueue(ice_wq);
5931 	destroy_workqueue(ice_lag_wq);
5932 	pr_info("module unloaded\n");
5933 }
5934 module_exit(ice_module_exit);
5935 
5936 /**
5937  * ice_set_mac_address - NDO callback to set MAC address
5938  * @netdev: network interface device structure
5939  * @pi: pointer to an address structure
5940  *
5941  * Returns 0 on success, negative on failure
5942  */
5943 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5944 {
5945 	struct ice_netdev_priv *np = netdev_priv(netdev);
5946 	struct ice_vsi *vsi = np->vsi;
5947 	struct ice_pf *pf = vsi->back;
5948 	struct ice_hw *hw = &pf->hw;
5949 	struct sockaddr *addr = pi;
5950 	u8 old_mac[ETH_ALEN];
5951 	u8 flags = 0;
5952 	u8 *mac;
5953 	int err;
5954 
5955 	mac = (u8 *)addr->sa_data;
5956 
5957 	if (!is_valid_ether_addr(mac))
5958 		return -EADDRNOTAVAIL;
5959 
5960 	if (test_bit(ICE_DOWN, pf->state) ||
5961 	    ice_is_reset_in_progress(pf->state)) {
5962 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
5963 			   mac);
5964 		return -EBUSY;
5965 	}
5966 
5967 	if (ice_chnl_dmac_fltr_cnt(pf)) {
5968 		netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5969 			   mac);
5970 		return -EAGAIN;
5971 	}
5972 
5973 	netif_addr_lock_bh(netdev);
5974 	ether_addr_copy(old_mac, netdev->dev_addr);
5975 	/* change the netdev's MAC address */
5976 	eth_hw_addr_set(netdev, mac);
5977 	netif_addr_unlock_bh(netdev);
5978 
5979 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
5980 	err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5981 	if (err && err != -ENOENT) {
5982 		err = -EADDRNOTAVAIL;
5983 		goto err_update_filters;
5984 	}
5985 
5986 	/* Add filter for new MAC. If filter exists, return success */
5987 	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5988 	if (err == -EEXIST) {
5989 		/* Although this MAC filter is already present in hardware it's
5990 		 * possible in some cases (e.g. bonding) that dev_addr was
5991 		 * modified outside of the driver and needs to be restored back
5992 		 * to this value.
5993 		 */
5994 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5995 
5996 		return 0;
5997 	} else if (err) {
5998 		/* error if the new filter addition failed */
5999 		err = -EADDRNOTAVAIL;
6000 	}
6001 
6002 err_update_filters:
6003 	if (err) {
6004 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
6005 			   mac);
6006 		netif_addr_lock_bh(netdev);
6007 		eth_hw_addr_set(netdev, old_mac);
6008 		netif_addr_unlock_bh(netdev);
6009 		return err;
6010 	}
6011 
6012 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
6013 		   netdev->dev_addr);
6014 
6015 	/* write new MAC address to the firmware */
6016 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
6017 	err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
6018 	if (err) {
6019 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
6020 			   mac, err);
6021 	}
6022 	return 0;
6023 }
6024 
6025 /**
6026  * ice_set_rx_mode - NDO callback to set the netdev filters
6027  * @netdev: network interface device structure
6028  */
6029 static void ice_set_rx_mode(struct net_device *netdev)
6030 {
6031 	struct ice_netdev_priv *np = netdev_priv(netdev);
6032 	struct ice_vsi *vsi = np->vsi;
6033 
6034 	if (!vsi || ice_is_switchdev_running(vsi->back))
6035 		return;
6036 
6037 	/* Set the flags to synchronize filters.
6038 	 * ndo_set_rx_mode may be triggered even without a change in netdev
6039 	 * flags.
6040 	 */
6041 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
6042 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
6043 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
6044 
6045 	/* schedule our worker thread which will take care of
6046 	 * applying the new filter changes
6047 	 */
6048 	ice_service_task_schedule(vsi->back);
6049 }
6050 
6051 /**
6052  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
6053  * @netdev: network interface device structure
6054  * @queue_index: Queue ID
6055  * @maxrate: maximum bandwidth in Mbps
6056  */
6057 static int
6058 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
6059 {
6060 	struct ice_netdev_priv *np = netdev_priv(netdev);
6061 	struct ice_vsi *vsi = np->vsi;
6062 	u16 q_handle;
6063 	int status;
6064 	u8 tc;
6065 
6066 	/* Validate maxrate requested is within permitted range */
6067 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
6068 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
6069 			   maxrate, queue_index);
6070 		return -EINVAL;
6071 	}
6072 
6073 	q_handle = vsi->tx_rings[queue_index]->q_handle;
6074 	tc = ice_dcb_get_tc(vsi, queue_index);
6075 
6076 	vsi = ice_locate_vsi_using_queue(vsi, queue_index);
6077 	if (!vsi) {
6078 		netdev_err(netdev, "Invalid VSI for given queue %d\n",
6079 			   queue_index);
6080 		return -EINVAL;
6081 	}
6082 
6083 	/* Set BW back to default when the user sets maxrate to 0 */
6084 	if (!maxrate)
6085 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
6086 					       q_handle, ICE_MAX_BW);
6087 	else
6088 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
6089 					  q_handle, ICE_MAX_BW, maxrate * 1000);
6090 	if (status)
6091 		netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
6092 			   status);
6093 
6094 	return status;
6095 }
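
/* For reference (not driver code): ndo_set_tx_maxrate is invoked by the
 * networking core's sysfs code when userspace writes a per-queue rate, e.g.:
 *
 *	echo 500 > /sys/class/net/<iface>/queues/tx-0/tx_maxrate
 *
 * The value is in Mbps; writing 0 restores the default (unlimited) rate via
 * the ice_cfg_q_bw_dflt_lmt() branch above.
 */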
6096 
6097 /**
6098  * ice_fdb_add - add an entry to the hardware database
6099  * @ndm: the input from the stack
6100  * @tb: pointer to array of nladdr (unused)
6101  * @dev: the net device pointer
6102  * @addr: the MAC address entry being added
6103  * @vid: VLAN ID
6104  * @flags: instructions from stack about fdb operation
6105  * @extack: netlink extended ack
6106  */
6107 static int
6108 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
6109 	    struct net_device *dev, const unsigned char *addr, u16 vid,
6110 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
6111 {
6112 	int err;
6113 
6114 	if (vid) {
6115 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
6116 		return -EINVAL;
6117 	}
6118 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
6119 		netdev_err(dev, "FDB only supports static addresses\n");
6120 		return -EINVAL;
6121 	}
6122 
6123 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
6124 		err = dev_uc_add_excl(dev, addr);
6125 	else if (is_multicast_ether_addr(addr))
6126 		err = dev_mc_add_excl(dev, addr);
6127 	else
6128 		err = -EINVAL;
6129 
6130 	/* Only return duplicate errors if NLM_F_EXCL is set */
6131 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
6132 		err = 0;
6133 
6134 	return err;
6135 }
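
/* For reference (not driver code): this NDO is typically exercised with the
 * iproute2 "bridge" tool, e.g. (illustrative invocation):
 *
 *	bridge fdb add 01:00:5e:00:00:01 dev <iface> self permanent
 *
 * VLAN-tagged entries and non-static (non-NUD_PERMANENT) entries are
 * rejected above.
 */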
6136 
6137 /**
6138  * ice_fdb_del - delete an entry from the hardware database
6139  * @ndm: the input from the stack
6140  * @tb: pointer to array of nladdr (unused)
6141  * @dev: the net device pointer
6142  * @addr: the MAC address entry being removed
6143  * @vid: VLAN ID
6144  * @extack: netlink extended ack
6145  */
6146 static int
6147 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
6148 	    struct net_device *dev, const unsigned char *addr,
6149 	    __always_unused u16 vid, struct netlink_ext_ack *extack)
6150 {
6151 	int err;
6152 
6153 	if (ndm->ndm_state & NUD_PERMANENT) {
6154 		netdev_err(dev, "FDB only supports static addresses\n");
6155 		return -EINVAL;
6156 	}
6157 
6158 	if (is_unicast_ether_addr(addr))
6159 		err = dev_uc_del(dev, addr);
6160 	else if (is_multicast_ether_addr(addr))
6161 		err = dev_mc_del(dev, addr);
6162 	else
6163 		err = -EINVAL;
6164 
6165 	return err;
6166 }
6167 
6168 #define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
6169 					 NETIF_F_HW_VLAN_CTAG_TX | \
6170 					 NETIF_F_HW_VLAN_STAG_RX | \
6171 					 NETIF_F_HW_VLAN_STAG_TX)
6172 
6173 #define NETIF_VLAN_STRIPPING_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
6174 					 NETIF_F_HW_VLAN_STAG_RX)
6175 
6176 #define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | \
6177 					 NETIF_F_HW_VLAN_STAG_FILTER)
6178 
6179 /**
6180  * ice_fix_features - fix the netdev features flags based on device limitations
6181  * @netdev: ptr to the netdev that flags are being fixed on
6182  * @features: features that need to be checked and possibly fixed
6183  *
6184  * Make sure any fixups are made to features in this callback. This enables the
6185  * driver to avoid checking for unsupported configurations everywhere else,
6186  * because that is the responsibility of this callback.
6187  *
6188  * Single VLAN Mode (SVM) Supported Features:
6189  *	NETIF_F_HW_VLAN_CTAG_FILTER
6190  *	NETIF_F_HW_VLAN_CTAG_RX
6191  *	NETIF_F_HW_VLAN_CTAG_TX
6192  *
6193  * Double VLAN Mode (DVM) Supported Features:
6194  *	NETIF_F_HW_VLAN_CTAG_FILTER
6195  *	NETIF_F_HW_VLAN_CTAG_RX
6196  *	NETIF_F_HW_VLAN_CTAG_TX
6197  *
6198  *	NETIF_F_HW_VLAN_STAG_FILTER
6199  *	NETIF_F_HW_VLAN_STAG_RX
6200  *	NETIF_F_HW_VLAN_STAG_TX
6201  *
6202  * Features that need fixing:
6203  *	Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
6204  *	These are mutually exclusive as the VSI context cannot support multiple
6205  *	VLAN ethertypes simultaneously for stripping and/or insertion. If this
6206  *	is not done, then default to clearing the requested STAG offload
6207  *	settings.
6208  *
6209  *	All supported filtering has to be enabled or disabled together. For
6210  *	example, in DVM, CTAG and STAG filtering have to be enabled and disabled
6211  *	together. If this is not done, then default to VLAN filtering disabled.
6212  *	These are mutually exclusive as there is currently no way to
6213  *	enable/disable VLAN filtering based on VLAN ethertype when using VLAN
6214  *	prune rules.
6215  */
6216 static netdev_features_t
6217 ice_fix_features(struct net_device *netdev, netdev_features_t features)
6218 {
6219 	struct ice_netdev_priv *np = netdev_priv(netdev);
6220 	netdev_features_t req_vlan_fltr, cur_vlan_fltr;
6221 	bool cur_ctag, cur_stag, req_ctag, req_stag;
6222 
6223 	cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
6224 	cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6225 	cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6226 
6227 	req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
6228 	req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6229 	req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6230 
6231 	if (req_vlan_fltr != cur_vlan_fltr) {
6232 		if (ice_is_dvm_ena(&np->vsi->back->hw)) {
6233 			if (req_ctag && req_stag) {
6234 				features |= NETIF_VLAN_FILTERING_FEATURES;
6235 			} else if (!req_ctag && !req_stag) {
6236 				features &= ~NETIF_VLAN_FILTERING_FEATURES;
6237 			} else if ((!cur_ctag && req_ctag && !cur_stag) ||
6238 				   (!cur_stag && req_stag && !cur_ctag)) {
6239 				features |= NETIF_VLAN_FILTERING_FEATURES;
6240 				netdev_warn(netdev,  "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
6241 			} else if ((cur_ctag && !req_ctag && cur_stag) ||
6242 				   (cur_stag && !req_stag && cur_ctag)) {
6243 				features &= ~NETIF_VLAN_FILTERING_FEATURES;
6244 				netdev_warn(netdev,  "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
6245 			}
6246 		} else {
6247 			if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
6248 				netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
6249 
6250 			if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
6251 				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6252 		}
6253 	}
6254 
6255 	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
6256 	    (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
6257 		netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
6258 		features &= ~(NETIF_F_HW_VLAN_STAG_RX |
6259 			      NETIF_F_HW_VLAN_STAG_TX);
6260 	}
6261 
6262 	if (!(netdev->features & NETIF_F_RXFCS) &&
6263 	    (features & NETIF_F_RXFCS) &&
6264 	    (features & NETIF_VLAN_STRIPPING_FEATURES) &&
6265 	    !ice_vsi_has_non_zero_vlans(np->vsi)) {
6266 		netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
6267 		features &= ~NETIF_VLAN_STRIPPING_FEATURES;
6268 	}
6269 
6270 	return features;
6271 }
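
/* Quick reference for the DVM filtering fixup above (illustrative summary,
 * not driver code):
 *
 *	requested change		resulting filtering features
 *	----------------		----------------------------
 *	both CTAG and STAG on		both on
 *	both CTAG and STAG off		both off
 *	only one of the two on		both forced on, with a warning
 *	only one of the two off		both forced off, with a warning
 */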
6272 
6273 /**
6274  * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
6275  * @vsi: PF's VSI
6276  * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order
6277  *
6278  * Store current stripped VLAN proto in ring packet context,
6279  * so it can be accessed more efficiently by packet processing code.
6280  */
6281 static void
6282 ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype)
6283 {
6284 	u16 i;
6285 
6286 	ice_for_each_alloc_rxq(vsi, i)
6287 		vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype;
6288 }
6289 
6290 /**
6291  * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6292  * @vsi: PF's VSI
6293  * @features: features used to determine VLAN offload settings
6294  *
6295  * First, determine the vlan_ethertype based on the VLAN offload bits in
6296  * features. Then determine if stripping and insertion should be enabled or
6297  * disabled. Finally enable or disable VLAN stripping and insertion.
6298  */
6299 static int
6300 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6301 {
6302 	bool enable_stripping = true, enable_insertion = true;
6303 	struct ice_vsi_vlan_ops *vlan_ops;
6304 	int strip_err = 0, insert_err = 0;
6305 	u16 vlan_ethertype = 0;
6306 
6307 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6308 
6309 	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
6310 		vlan_ethertype = ETH_P_8021AD;
6311 	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
6312 		vlan_ethertype = ETH_P_8021Q;
6313 
6314 	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
6315 		enable_stripping = false;
6316 	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
6317 		enable_insertion = false;
6318 
6319 	if (enable_stripping)
6320 		strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6321 	else
6322 		strip_err = vlan_ops->dis_stripping(vsi);
6323 
6324 	if (enable_insertion)
6325 		insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6326 	else
6327 		insert_err = vlan_ops->dis_insertion(vsi);
6328 
6329 	if (strip_err || insert_err)
6330 		return -EIO;
6331 
6332 	ice_set_rx_rings_vlan_proto(vsi, enable_stripping ?
6333 				    htons(vlan_ethertype) : 0);
6334 
6335 	return 0;
6336 }
6337 
6338 /**
6339  * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6340  * @vsi: PF's VSI
6341  * @features: features used to determine VLAN filtering settings
6342  *
6343  * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
6344  * features.
6345  */
6346 static int
6347 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6348 {
6349 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6350 	int err = 0;
6351 
6352 	/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
6353 	 * if either bit is set
6354 	 */
6355 	if (features &
6356 	    (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
6357 		err = vlan_ops->ena_rx_filtering(vsi);
6358 	else
6359 		err = vlan_ops->dis_rx_filtering(vsi);
6360 
6361 	return err;
6362 }
6363 
6364 /**
6365  * ice_set_vlan_features - set VLAN settings based on suggested feature set
6366  * @netdev: ptr to the netdev being adjusted
6367  * @features: the feature set that the stack is suggesting
6368  *
6369  * Only update VLAN settings if the requested_vlan_features are different than
6370  * the current_vlan_features.
6371  */
6372 static int
6373 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6374 {
6375 	netdev_features_t current_vlan_features, requested_vlan_features;
6376 	struct ice_netdev_priv *np = netdev_priv(netdev);
6377 	struct ice_vsi *vsi = np->vsi;
6378 	int err;
6379 
6380 	current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6381 	requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6382 	if (current_vlan_features ^ requested_vlan_features) {
6383 		if ((features & NETIF_F_RXFCS) &&
6384 		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6385 			dev_err(ice_pf_to_dev(vsi->back),
6386 				"To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6387 			return -EIO;
6388 		}
6389 
6390 		err = ice_set_vlan_offload_features(vsi, features);
6391 		if (err)
6392 			return err;
6393 	}
6394 
6395 	current_vlan_features = netdev->features &
6396 		NETIF_VLAN_FILTERING_FEATURES;
6397 	requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6398 	if (current_vlan_features ^ requested_vlan_features) {
6399 		err = ice_set_vlan_filtering_features(vsi, features);
6400 		if (err)
6401 			return err;
6402 	}
6403 
6404 	return 0;
6405 }
6406 
6407 /**
6408  * ice_set_loopback - turn on/off loopback mode on underlying PF
6409  * @vsi: ptr to VSI
6410  * @ena: flag to indicate the on/off setting
6411  */
6412 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6413 {
6414 	bool if_running = netif_running(vsi->netdev);
6415 	int ret;
6416 
6417 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6418 		ret = ice_down(vsi);
6419 		if (ret) {
6420 			netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6421 			return ret;
6422 		}
6423 	}
6424 	ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6425 	if (ret)
6426 		netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6427 	if (if_running)
6428 		ret = ice_up(vsi);
6429 
6430 	return ret;
6431 }
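
/* For reference (not driver code): NETIF_F_LOOPBACK is exposed to userspace
 * as the "loopback" netdev feature, so this helper is typically reached via:
 *
 *	ethtool -K <iface> loopback on
 *
 * which lands in ice_set_features() below.
 */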
6432 
6433 /**
6434  * ice_set_features - set the netdev feature flags
6435  * @netdev: ptr to the netdev being adjusted
6436  * @features: the feature set that the stack is suggesting
6437  */
6438 static int
6439 ice_set_features(struct net_device *netdev, netdev_features_t features)
6440 {
6441 	netdev_features_t changed = netdev->features ^ features;
6442 	struct ice_netdev_priv *np = netdev_priv(netdev);
6443 	struct ice_vsi *vsi = np->vsi;
6444 	struct ice_pf *pf = vsi->back;
6445 	int ret = 0;
6446 
6447 	/* Don't set any netdev advanced features with device in Safe Mode */
6448 	if (ice_is_safe_mode(pf)) {
6449 		dev_err(ice_pf_to_dev(pf),
6450 			"Device is in Safe Mode - not enabling advanced netdev features\n");
6451 		return ret;
6452 	}
6453 
6454 	/* Do not change setting during reset */
6455 	if (ice_is_reset_in_progress(pf->state)) {
6456 		dev_err(ice_pf_to_dev(pf),
6457 			"Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6458 		return -EBUSY;
6459 	}
6460 
6461 	/* Multiple features can be changed in one call so keep features in
6462 	 * separate if/else statements to guarantee each feature is checked
6463 	 */
6464 	if (changed & NETIF_F_RXHASH)
6465 		ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6466 
6467 	ret = ice_set_vlan_features(netdev, features);
6468 	if (ret)
6469 		return ret;
6470 
6471 	/* Turn on receive of FCS aka CRC, and after setting this
6472 	 * flag the packet data will have the 4 byte CRC appended
6473 	 */
6474 	if (changed & NETIF_F_RXFCS) {
6475 		if ((features & NETIF_F_RXFCS) &&
6476 		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6477 			dev_err(ice_pf_to_dev(vsi->back),
6478 				"To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6479 			return -EIO;
6480 		}
6481 
6482 		ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6483 		ret = ice_down_up(vsi);
6484 		if (ret)
6485 			return ret;
6486 	}
6487 
6488 	if (changed & NETIF_F_NTUPLE) {
6489 		bool ena = !!(features & NETIF_F_NTUPLE);
6490 
6491 		ice_vsi_manage_fdir(vsi, ena);
6492 		ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6493 	}
6494 
6495 	/* don't turn off hw_tc_offload when ADQ is already enabled */
6496 	if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6497 		dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6498 		return -EACCES;
6499 	}
6500 
6501 	if (changed & NETIF_F_HW_TC) {
6502 		bool ena = !!(features & NETIF_F_HW_TC);
6503 
6504 		ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6505 		      clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6506 	}
6507 
6508 	if (changed & NETIF_F_LOOPBACK)
6509 		ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6510 
6511 	return ret;
6512 }
6513 
6514 /**
6515  * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6516  * @vsi: VSI to setup VLAN properties for
6517  */
6518 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6519 {
6520 	int err;
6521 
6522 	err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6523 	if (err)
6524 		return err;
6525 
6526 	err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6527 	if (err)
6528 		return err;
6529 
6530 	return ice_vsi_add_vlan_zero(vsi);
6531 }
6532 
6533 /**
6534  * ice_vsi_cfg_lan - Setup the VSI lan related config
6535  * @vsi: the VSI being configured
6536  *
6537  * Return 0 on success and negative value on error
6538  */
6539 int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6540 {
6541 	int err;
6542 
6543 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6544 		ice_set_rx_mode(vsi->netdev);
6545 
6546 		err = ice_vsi_vlan_setup(vsi);
6547 		if (err)
6548 			return err;
6549 	}
6550 	ice_vsi_cfg_dcb_rings(vsi);
6551 
6552 	err = ice_vsi_cfg_lan_txqs(vsi);
6553 	if (!err && ice_is_xdp_ena_vsi(vsi))
6554 		err = ice_vsi_cfg_xdp_txqs(vsi);
6555 	if (!err)
6556 		err = ice_vsi_cfg_rxqs(vsi);
6557 
6558 	return err;
6559 }
6560 
6561 /* THEORY OF MODERATION:
6562  * The ice driver hardware works differently from the hardware that DIMLIB was
6563  * originally made for. ice hardware doesn't have packet count limits that
6564  * can trigger an interrupt, but it *does* have interrupt rate limit support,
6565  * which is hard-coded to a limit of 250,000 ints/second.
6566  * If not using dynamic moderation, the INTRL value can be modified
6567  * by ethtool rx-usecs-high.
6568  */
6569 struct ice_dim {
6570 	/* the throttle rate for interrupts, basically the worst case delay
6571 	 * before an initial interrupt fires; the value is stored in microseconds.
6572 	 */
6573 	u16 itr;
6574 };
6575 
6576 /* Make a different profile for Rx that doesn't allow quite so aggressive
6577  * moderation at the high end (it maxes out at 126us or about 8k interrupts a
6578  * second).
6579  */
6580 static const struct ice_dim rx_profile[] = {
6581 	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6582 	{8},    /* 125,000 ints/s */
6583 	{16},   /*  62,500 ints/s */
6584 	{62},   /*  16,129 ints/s */
6585 	{126}   /*   7,936 ints/s */
6586 };
6587 
6588 /* The transmit profile has the same sorts of values
6589  * as the Rx profile above
6590  */
6591 static const struct ice_dim tx_profile[] = {
6592 	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6593 	{8},    /* 125,000 ints/s */
6594 	{40},   /*  25,000 ints/s */
6595 	{128},  /*   7,812 ints/s */
6596 	{256}   /*   3,906 ints/s */
6597 };
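
/* Illustrative only (not part of the driver): the interrupt rates in the
 * profile comments above follow directly from the ITR values in
 * microseconds, still subject to the hard-coded 250,000 ints/s INTRL cap.
 * A hypothetical helper would be:
 *
 *	static inline u32 itr_to_ints_per_sec(u16 itr_us)
 *	{
 *		return itr_us ? USEC_PER_SEC / itr_us : 0;
 *	}
 */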
6598 
6599 static void ice_tx_dim_work(struct work_struct *work)
6600 {
6601 	struct ice_ring_container *rc;
6602 	struct dim *dim;
6603 	u16 itr;
6604 
6605 	dim = container_of(work, struct dim, work);
6606 	rc = dim->priv;
6607 
6608 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6609 
6610 	/* look up the values in our local table */
6611 	itr = tx_profile[dim->profile_ix].itr;
6612 
6613 	ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6614 	ice_write_itr(rc, itr);
6615 
6616 	dim->state = DIM_START_MEASURE;
6617 }
6618 
6619 static void ice_rx_dim_work(struct work_struct *work)
6620 {
6621 	struct ice_ring_container *rc;
6622 	struct dim *dim;
6623 	u16 itr;
6624 
6625 	dim = container_of(work, struct dim, work);
6626 	rc = dim->priv;
6627 
6628 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6629 
6630 	/* look up the values in our local table */
6631 	itr = rx_profile[dim->profile_ix].itr;
6632 
6633 	ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6634 	ice_write_itr(rc, itr);
6635 
6636 	dim->state = DIM_START_MEASURE;
6637 }
6638 
6639 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6640 
6641 /**
6642  * ice_init_moderation - set up interrupt moderation
6643  * @q_vector: the vector containing rings to be configured
6644  *
6645  * Set up interrupt moderation registers, with the intent to do the right thing
6646  * when called from reset or from probe, whether or not dynamic moderation is
6647  * enabled. Take special care to write all the registers in both cases, with
6648  * and without dynamic moderation, in order to make sure hardware is in a
6649  * known state.
6650  */
6651 static void ice_init_moderation(struct ice_q_vector *q_vector)
6652 {
6653 	struct ice_ring_container *rc;
6654 	bool tx_dynamic, rx_dynamic;
6655 
6656 	rc = &q_vector->tx;
6657 	INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6658 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6659 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6660 	rc->dim.priv = rc;
6661 	tx_dynamic = ITR_IS_DYNAMIC(rc);
6662 
6663 	/* set the initial TX ITR to match the above */
6664 	ice_write_itr(rc, tx_dynamic ?
6665 		      tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6666 
6667 	rc = &q_vector->rx;
6668 	INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6669 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6670 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6671 	rc->dim.priv = rc;
6672 	rx_dynamic = ITR_IS_DYNAMIC(rc);
6673 
6674 	/* set the initial RX ITR to match the above */
6675 	ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6676 				       rc->itr_setting);
6677 
6678 	ice_set_q_vector_intrl(q_vector);
6679 }
6680 
6681 /**
6682  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6683  * @vsi: the VSI being configured
6684  */
6685 static void ice_napi_enable_all(struct ice_vsi *vsi)
6686 {
6687 	int q_idx;
6688 
6689 	if (!vsi->netdev)
6690 		return;
6691 
6692 	ice_for_each_q_vector(vsi, q_idx) {
6693 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6694 
6695 		ice_init_moderation(q_vector);
6696 
6697 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6698 			napi_enable(&q_vector->napi);
6699 	}
6700 }
6701 
6702 /**
6703  * ice_up_complete - Finish the last steps of bringing up a connection
6704  * @vsi: The VSI being configured
6705  *
6706  * Return 0 on success and negative value on error
6707  */
6708 static int ice_up_complete(struct ice_vsi *vsi)
6709 {
6710 	struct ice_pf *pf = vsi->back;
6711 	int err;
6712 
6713 	ice_vsi_cfg_msix(vsi);
6714 
6715 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
6716 	 * Tx queue group list was configured and the context bits were
6717 	 * programmed using ice_vsi_cfg_txqs
6718 	 */
6719 	err = ice_vsi_start_all_rx_rings(vsi);
6720 	if (err)
6721 		return err;
6722 
6723 	clear_bit(ICE_VSI_DOWN, vsi->state);
6724 	ice_napi_enable_all(vsi);
6725 	ice_vsi_ena_irq(vsi);
6726 
6727 	if (vsi->port_info &&
6728 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6729 	    vsi->netdev && vsi->type == ICE_VSI_PF) {
6730 		ice_print_link_msg(vsi, true);
6731 		netif_tx_start_all_queues(vsi->netdev);
6732 		netif_carrier_on(vsi->netdev);
6733 		ice_ptp_link_change(pf, pf->hw.pf_id, true);
6734 	}
6735 
6736 	/* Perform an initial read of the statistics registers now to
6737 	 * set the baseline so counters are ready when interface is up
6738 	 */
6739 	ice_update_eth_stats(vsi);
6740 
6741 	if (vsi->type == ICE_VSI_PF)
6742 		ice_service_task_schedule(pf);
6743 
6744 	return 0;
6745 }
6746 
6747 /**
6748  * ice_up - Bring the connection back up after being down
6749  * @vsi: VSI being configured
6750  */
6751 int ice_up(struct ice_vsi *vsi)
6752 {
6753 	int err;
6754 
6755 	err = ice_vsi_cfg_lan(vsi);
6756 	if (!err)
6757 		err = ice_up_complete(vsi);
6758 
6759 	return err;
6760 }
6761 
6762 /**
6763  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6764  * @syncp: pointer to u64_stats_sync
6765  * @stats: stats that pkts and bytes count will be taken from
6766  * @pkts: packets stats counter
6767  * @bytes: bytes stats counter
6768  *
6769  * This function fetches stats from the ring, taking into account the atomic
6770  * operations needed to read u64 values on 32-bit machines.
6771  */
6772 void
6773 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6774 			     struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6775 {
6776 	unsigned int start;
6777 
6778 	do {
6779 		start = u64_stats_fetch_begin(syncp);
6780 		*pkts = stats.pkts;
6781 		*bytes = stats.bytes;
6782 	} while (u64_stats_fetch_retry(syncp, start));
6783 }
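
/* Illustrative sketch (not driver code): the fetch/retry loop above pairs
 * with the writer side in the ring hot paths, which update counters roughly
 * like:
 *
 *	u64_stats_update_begin(&ring_stats->syncp);
 *	ring_stats->stats.pkts += pkts;
 *	ring_stats->stats.bytes += bytes;
 *	u64_stats_update_end(&ring_stats->syncp);
 *
 * On 64-bit kernels the begin/end calls compile to (almost) nothing; on
 * 32-bit kernels they bump a sequence counter so a reader can detect and
 * retry a torn 64-bit read.
 */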
6784 
6785 /**
6786  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6787  * @vsi: the VSI to be updated
6788  * @vsi_stats: the stats struct to be updated
6789  * @rings: rings to work on
6790  * @count: number of rings
6791  */
6792 static void
6793 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6794 			     struct rtnl_link_stats64 *vsi_stats,
6795 			     struct ice_tx_ring **rings, u16 count)
6796 {
6797 	u16 i;
6798 
6799 	for (i = 0; i < count; i++) {
6800 		struct ice_tx_ring *ring;
6801 		u64 pkts = 0, bytes = 0;
6802 
6803 		ring = READ_ONCE(rings[i]);
6804 		if (!ring || !ring->ring_stats)
6805 			continue;
6806 		ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6807 					     ring->ring_stats->stats, &pkts,
6808 					     &bytes);
6809 		vsi_stats->tx_packets += pkts;
6810 		vsi_stats->tx_bytes += bytes;
6811 		vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6812 		vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6813 		vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6814 	}
6815 }
6816 
6817 /**
6818  * ice_update_vsi_ring_stats - Update VSI stats counters
6819  * @vsi: the VSI to be updated
6820  */
6821 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6822 {
6823 	struct rtnl_link_stats64 *net_stats, *stats_prev;
6824 	struct rtnl_link_stats64 *vsi_stats;
6825 	struct ice_pf *pf = vsi->back;
6826 	u64 pkts, bytes;
6827 	int i;
6828 
6829 	vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6830 	if (!vsi_stats)
6831 		return;
6832 
6833 	/* reset non-netdev (extended) stats */
6834 	vsi->tx_restart = 0;
6835 	vsi->tx_busy = 0;
6836 	vsi->tx_linearize = 0;
6837 	vsi->rx_buf_failed = 0;
6838 	vsi->rx_page_failed = 0;
6839 
6840 	rcu_read_lock();
6841 
6842 	/* update Tx rings counters */
6843 	ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6844 				     vsi->num_txq);
6845 
6846 	/* update Rx rings counters */
6847 	ice_for_each_rxq(vsi, i) {
6848 		struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6849 		struct ice_ring_stats *ring_stats;
6850 
6851 		ring_stats = ring->ring_stats;
6852 		ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6853 					     ring_stats->stats, &pkts,
6854 					     &bytes);
6855 		vsi_stats->rx_packets += pkts;
6856 		vsi_stats->rx_bytes += bytes;
6857 		vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6858 		vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6859 	}
6860 
6861 	/* update XDP Tx rings counters */
6862 	if (ice_is_xdp_ena_vsi(vsi))
6863 		ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6864 					     vsi->num_xdp_txq);
6865 
6866 	rcu_read_unlock();
6867 
6868 	net_stats = &vsi->net_stats;
6869 	stats_prev = &vsi->net_stats_prev;
6870 
6871 	/* Update netdev counters, but keep in mind that values could start at
6872 	 * a random value after PF reset. And as we increase the reported stat by
6873 	 * the diff of Cur - Prev, we need to be sure that Prev is valid. If it's
6874 	 * not, skip this round.
6875 	 */
6876 	if (likely(pf->stat_prev_loaded)) {
6877 		net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6878 		net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6879 		net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6880 		net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6881 	}
6882 
6883 	stats_prev->tx_packets = vsi_stats->tx_packets;
6884 	stats_prev->tx_bytes = vsi_stats->tx_bytes;
6885 	stats_prev->rx_packets = vsi_stats->rx_packets;
6886 	stats_prev->rx_bytes = vsi_stats->rx_bytes;
6887 
6888 	kfree(vsi_stats);
6889 }
6890 
6891 /**
6892  * ice_update_vsi_stats - Update VSI stats counters
6893  * @vsi: the VSI to be updated
6894  */
6895 void ice_update_vsi_stats(struct ice_vsi *vsi)
6896 {
6897 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6898 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
6899 	struct ice_pf *pf = vsi->back;
6900 
6901 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6902 	    test_bit(ICE_CFG_BUSY, pf->state))
6903 		return;
6904 
6905 	/* get stats as recorded by Tx/Rx rings */
6906 	ice_update_vsi_ring_stats(vsi);
6907 
6908 	/* get VSI stats as recorded by the hardware */
6909 	ice_update_eth_stats(vsi);
6910 
6911 	cur_ns->tx_errors = cur_es->tx_errors;
6912 	cur_ns->rx_dropped = cur_es->rx_discards;
6913 	cur_ns->tx_dropped = cur_es->tx_discards;
6914 	cur_ns->multicast = cur_es->rx_multicast;
6915 
6916 	/* update some more netdev stats if this is main VSI */
6917 	if (vsi->type == ICE_VSI_PF) {
6918 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
6919 		cur_ns->rx_errors = pf->stats.crc_errors +
6920 				    pf->stats.illegal_bytes +
6921 				    pf->stats.rx_undersize +
6922 				    pf->hw_csum_rx_error +
6923 				    pf->stats.rx_jabber +
6924 				    pf->stats.rx_fragments +
6925 				    pf->stats.rx_oversize;
6926 		/* record drops from the port level */
6927 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6928 	}
6929 }
6930 
6931 /**
6932  * ice_update_pf_stats - Update PF port stats counters
6933  * @pf: PF whose stats need to be updated
6934  */
6935 void ice_update_pf_stats(struct ice_pf *pf)
6936 {
6937 	struct ice_hw_port_stats *prev_ps, *cur_ps;
6938 	struct ice_hw *hw = &pf->hw;
6939 	u16 fd_ctr_base;
6940 	u8 port;
6941 
6942 	port = hw->port_info->lport;
6943 	prev_ps = &pf->stats_prev;
6944 	cur_ps = &pf->stats;
6945 
6946 	if (ice_is_reset_in_progress(pf->state))
6947 		pf->stat_prev_loaded = false;
6948 
6949 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6950 			  &prev_ps->eth.rx_bytes,
6951 			  &cur_ps->eth.rx_bytes);
6952 
6953 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6954 			  &prev_ps->eth.rx_unicast,
6955 			  &cur_ps->eth.rx_unicast);
6956 
6957 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6958 			  &prev_ps->eth.rx_multicast,
6959 			  &cur_ps->eth.rx_multicast);
6960 
6961 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6962 			  &prev_ps->eth.rx_broadcast,
6963 			  &cur_ps->eth.rx_broadcast);
6964 
6965 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6966 			  &prev_ps->eth.rx_discards,
6967 			  &cur_ps->eth.rx_discards);
6968 
6969 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6970 			  &prev_ps->eth.tx_bytes,
6971 			  &cur_ps->eth.tx_bytes);
6972 
6973 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6974 			  &prev_ps->eth.tx_unicast,
6975 			  &cur_ps->eth.tx_unicast);
6976 
6977 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6978 			  &prev_ps->eth.tx_multicast,
6979 			  &cur_ps->eth.tx_multicast);
6980 
6981 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6982 			  &prev_ps->eth.tx_broadcast,
6983 			  &cur_ps->eth.tx_broadcast);
6984 
6985 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6986 			  &prev_ps->tx_dropped_link_down,
6987 			  &cur_ps->tx_dropped_link_down);
6988 
6989 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6990 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
6991 
6992 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6993 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
6994 
6995 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6996 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
6997 
6998 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6999 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
7000 
7001 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
7002 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
7003 
7004 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
7005 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
7006 
7007 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
7008 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
7009 
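	/* Tx packet-size histogram counters, 64-byte to 9522-byte buckets */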
7010 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
7011 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
7012 
7013 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
7014 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
7015 
7016 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
7017 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
7018 
7019 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
7020 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
7021 
7022 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
7023 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
7024 
7025 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
7026 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
7027 
7028 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
7029 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
7030 
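	/* the Flow Director sideband match counter lives in a global counter
	 * block, indexed relative to this function's counter base
	 */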
7031 	fd_ctr_base = hw->fd_ctr_base;
7032 
7033 	ice_stat_update40(hw,
7034 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
7035 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
7036 			  &cur_ps->fd_sb_match);
7037 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
7038 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
7039 
7040 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
7041 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
7042 
7043 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
7044 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
7045 
7046 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
7047 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
7048 
7049 	ice_update_dcb_stats(pf);
7050 
7051 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
7052 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
7053 
7054 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
7055 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
7056 
7057 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
7058 			  &prev_ps->mac_local_faults,
7059 			  &cur_ps->mac_local_faults);
7060 
7061 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
7062 			  &prev_ps->mac_remote_faults,
7063 			  &cur_ps->mac_remote_faults);
7064 
7065 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
7066 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
7067 
7068 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
7069 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
7070 
7071 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
7072 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
7073 
7074 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
7075 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
7076 
7077 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
7078 
7079 	pf->stat_prev_loaded = true;
7080 }
7081 
7082 /**
7083  * ice_get_stats64 - get statistics for network device structure
7084  * @netdev: network interface device structure
7085  * @stats: main device statistics structure
7086  */
7087 static
7088 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
7089 {
7090 	struct ice_netdev_priv *np = netdev_priv(netdev);
7091 	struct rtnl_link_stats64 *vsi_stats;
7092 	struct ice_vsi *vsi = np->vsi;
7093 
7094 	vsi_stats = &vsi->net_stats;
7095 
7096 	if (!vsi->num_txq || !vsi->num_rxq)
7097 		return;
7098 
7099 	/* netdev packet/byte stats come from the ring counters. These are
7100 	 * obtained by summing up the per-ring counters (done by
7101 	 * ice_update_vsi_ring_stats). Only call the update routine and read
7102 	 * the registers if the VSI is not down.
7103 	 */
7104 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
7105 		ice_update_vsi_ring_stats(vsi);
7106 	stats->tx_packets = vsi_stats->tx_packets;
7107 	stats->tx_bytes = vsi_stats->tx_bytes;
7108 	stats->rx_packets = vsi_stats->rx_packets;
7109 	stats->rx_bytes = vsi_stats->rx_bytes;
7110 
7111 	/* The rest of the stats can be read from the hardware but instead we
7112 	 * just return values that the watchdog task has already obtained from
7113 	 * the hardware.
7114 	 */
7115 	stats->multicast = vsi_stats->multicast;
7116 	stats->tx_errors = vsi_stats->tx_errors;
7117 	stats->tx_dropped = vsi_stats->tx_dropped;
7118 	stats->rx_errors = vsi_stats->rx_errors;
7119 	stats->rx_dropped = vsi_stats->rx_dropped;
7120 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
7121 	stats->rx_length_errors = vsi_stats->rx_length_errors;
7122 }
7123 
7124 /**
7125  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
7126  * @vsi: VSI having NAPI disabled
7127  */
7128 static void ice_napi_disable_all(struct ice_vsi *vsi)
7129 {
7130 	int q_idx;
7131 
7132 	if (!vsi->netdev)
7133 		return;
7134 
7135 	ice_for_each_q_vector(vsi, q_idx) {
7136 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
7137 
7138 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
7139 			napi_disable(&q_vector->napi);
7140 
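		/* cancel any pending DIM (dynamic ITR) work for this vector */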
7141 		cancel_work_sync(&q_vector->tx.dim.work);
7142 		cancel_work_sync(&q_vector->rx.dim.work);
7143 	}
7144 }
7145 
7146 /**
7147  * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
7148  * @vsi: the VSI being un-configured
7149  */
7150 static void ice_vsi_dis_irq(struct ice_vsi *vsi)
7151 {
7152 	struct ice_pf *pf = vsi->back;
7153 	struct ice_hw *hw = &pf->hw;
7154 	u32 val;
7155 	int i;
7156 
7157 	/* disable interrupt causation from each Rx queue; Tx queues are
7158 	 * handled in ice_vsi_stop_tx_ring()
7159 	 */
7160 	if (vsi->rx_rings) {
7161 		ice_for_each_rxq(vsi, i) {
7162 			if (vsi->rx_rings[i]) {
7163 				u16 reg;
7164 
7165 				reg = vsi->rx_rings[i]->reg_idx;
7166 				val = rd32(hw, QINT_RQCTL(reg));
7167 				val &= ~QINT_RQCTL_CAUSE_ENA_M;
7168 				wr32(hw, QINT_RQCTL(reg), val);
7169 			}
7170 		}
7171 	}
7172 
7173 	/* disable each interrupt */
7174 	ice_for_each_q_vector(vsi, i) {
7175 		if (!vsi->q_vectors[i])
7176 			continue;
7177 		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
7178 	}
7179 
7180 	ice_flush(hw);
7181 
7182 	/* don't call synchronize_irq() for VFs from the host */
7183 	if (vsi->type == ICE_VSI_VF)
7184 		return;
7185 
7186 	ice_for_each_q_vector(vsi, i)
7187 		synchronize_irq(vsi->q_vectors[i]->irq.virq);
7188 }
7189 
7190 /**
7191  * ice_down - Shutdown the connection
7192  * @vsi: The VSI being stopped
7193  *
7194  * Caller of this function is expected to set the ICE_VSI_DOWN bit in vsi->state
7195  */
7196 int ice_down(struct ice_vsi *vsi)
7197 {
7198 	int i, tx_err, rx_err, vlan_err = 0;
7199 
7200 	WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
7201 
7202 	if (vsi->netdev) {
7203 		vlan_err = ice_vsi_del_vlan_zero(vsi);
7204 		ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
7205 		netif_carrier_off(vsi->netdev);
7206 		netif_tx_disable(vsi->netdev);
7207 	}
7208 
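	/* mask queue interrupts; for non-VF VSIs this also waits for any
	 * in-flight interrupt handlers to finish
	 */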
7209 	ice_vsi_dis_irq(vsi);
7210 
7211 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
7212 	if (tx_err)
7213 		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
7214 			   vsi->vsi_num, tx_err);
7215 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
7216 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
7217 		if (tx_err)
7218 			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
7219 				   vsi->vsi_num, tx_err);
7220 	}
7221 
7222 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
7223 	if (rx_err)
7224 		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
7225 			   vsi->vsi_num, rx_err);
7226 
7227 	ice_napi_disable_all(vsi);
7228 
7229 	ice_for_each_txq(vsi, i)
7230 		ice_clean_tx_ring(vsi->tx_rings[i]);
7231 
7232 	if (ice_is_xdp_ena_vsi(vsi))
7233 		ice_for_each_xdp_txq(vsi, i)
7234 			ice_clean_tx_ring(vsi->xdp_rings[i]);
7235 
7236 	ice_for_each_rxq(vsi, i)
7237 		ice_clean_rx_ring(vsi->rx_rings[i]);
7238 
7239 	if (tx_err || rx_err || vlan_err) {
7240 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
7241 			   vsi->vsi_num, vsi->vsw->sw_id);
7242 		return -EIO;
7243 	}
7244 
7245 	return 0;
7246 }
7247 
7248 /**
7249  * ice_down_up - shutdown the VSI connection and bring it up
7250  * @vsi: the VSI to be reconnected
7251  */
7252 int ice_down_up(struct ice_vsi *vsi)
7253 {
7254 	int ret;
7255 
7256 	/* if DOWN already set, nothing to do */
7257 	if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
7258 		return 0;
7259 
7260 	ret = ice_down(vsi);
7261 	if (ret)
7262 		return ret;
7263 
7264 	ret = ice_up(vsi);
7265 	if (ret) {
7266 		netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
7267 		return ret;
7268 	}
7269 
7270 	return 0;
7271 }
7272 
7273 /**
7274  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
7275  * @vsi: VSI having resources allocated
7276  *
7277  * Return 0 on success, negative on failure
7278  */
7279 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
7280 {
7281 	int i, err = 0;
7282 
7283 	if (!vsi->num_txq) {
7284 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
7285 			vsi->vsi_num);
7286 		return -EINVAL;
7287 	}
7288 
7289 	ice_for_each_txq(vsi, i) {
7290 		struct ice_tx_ring *ring = vsi->tx_rings[i];
7291 
7292 		if (!ring)
7293 			return -EINVAL;
7294 
7295 		if (vsi->netdev)
7296 			ring->netdev = vsi->netdev;
7297 		err = ice_setup_tx_ring(ring);
7298 		if (err)
7299 			break;
7300 	}
7301 
7302 	return err;
7303 }
7304 
7305 /**
7306  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
7307  * @vsi: VSI having resources allocated
7308  *
7309  * Return 0 on success, negative on failure
7310  */
7311 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
7312 {
7313 	int i, err = 0;
7314 
7315 	if (!vsi->num_rxq) {
7316 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
7317 			vsi->vsi_num);
7318 		return -EINVAL;
7319 	}
7320 
7321 	ice_for_each_rxq(vsi, i) {
7322 		struct ice_rx_ring *ring = vsi->rx_rings[i];
7323 
7324 		if (!ring)
7325 			return -EINVAL;
7326 
7327 		if (vsi->netdev)
7328 			ring->netdev = vsi->netdev;
7329 		err = ice_setup_rx_ring(ring);
7330 		if (err)
7331 			break;
7332 	}
7333 
7334 	return err;
7335 }
7336 
7337 /**
7338  * ice_vsi_open_ctrl - open control VSI for use
7339  * @vsi: the VSI to open
7340  *
7341  * Initialization of the Control VSI
7342  *
7343  * Returns 0 on success, negative value on error
7344  */
7345 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
7346 {
7347 	char int_name[ICE_INT_NAME_STR_LEN];
7348 	struct ice_pf *pf = vsi->back;
7349 	struct device *dev;
7350 	int err;
7351 
7352 	dev = ice_pf_to_dev(pf);
7353 	/* allocate descriptors */
7354 	err = ice_vsi_setup_tx_rings(vsi);
7355 	if (err)
7356 		goto err_setup_tx;
7357 
7358 	err = ice_vsi_setup_rx_rings(vsi);
7359 	if (err)
7360 		goto err_setup_rx;
7361 
7362 	err = ice_vsi_cfg_lan(vsi);
7363 	if (err)
7364 		goto err_setup_rx;
7365 
7366 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
7367 		 dev_driver_string(dev), dev_name(dev));
7368 	err = ice_vsi_req_irq_msix(vsi, int_name);
7369 	if (err)
7370 		goto err_setup_rx;
7371 
7372 	ice_vsi_cfg_msix(vsi);
7373 
7374 	err = ice_vsi_start_all_rx_rings(vsi);
7375 	if (err)
7376 		goto err_up_complete;
7377 
7378 	clear_bit(ICE_VSI_DOWN, vsi->state);
7379 	ice_vsi_ena_irq(vsi);
7380 
7381 	return 0;
7382 
7383 err_up_complete:
7384 	ice_down(vsi);
7385 err_setup_rx:
7386 	ice_vsi_free_rx_rings(vsi);
7387 err_setup_tx:
7388 	ice_vsi_free_tx_rings(vsi);
7389 
7390 	return err;
7391 }
7392 
7393 /**
7394  * ice_vsi_open - Called when a network interface is made active
7395  * @vsi: the VSI to open
7396  *
7397  * Initialization of the VSI
7398  *
7399  * Returns 0 on success, negative value on error
7400  */
7401 int ice_vsi_open(struct ice_vsi *vsi)
7402 {
7403 	char int_name[ICE_INT_NAME_STR_LEN];
7404 	struct ice_pf *pf = vsi->back;
7405 	int err;
7406 
7407 	/* allocate descriptors */
7408 	err = ice_vsi_setup_tx_rings(vsi);
7409 	if (err)
7410 		goto err_setup_tx;
7411 
7412 	err = ice_vsi_setup_rx_rings(vsi);
7413 	if (err)
7414 		goto err_setup_rx;
7415 
7416 	err = ice_vsi_cfg_lan(vsi);
7417 	if (err)
7418 		goto err_setup_rx;
7419 
7420 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7421 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7422 	err = ice_vsi_req_irq_msix(vsi, int_name);
7423 	if (err)
7424 		goto err_setup_rx;
7425 
7426 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7427 
7428 	if (vsi->type == ICE_VSI_PF) {
7429 		/* Notify the stack of the actual queue counts. */
7430 		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7431 		if (err)
7432 			goto err_set_qs;
7433 
7434 		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7435 		if (err)
7436 			goto err_set_qs;
7437 	}
7438 
7439 	err = ice_up_complete(vsi);
7440 	if (err)
7441 		goto err_up_complete;
7442 
7443 	return 0;
7444 
7445 err_up_complete:
7446 	ice_down(vsi);
7447 err_set_qs:
7448 	ice_vsi_free_irq(vsi);
7449 err_setup_rx:
7450 	ice_vsi_free_rx_rings(vsi);
7451 err_setup_tx:
7452 	ice_vsi_free_tx_rings(vsi);
7453 
7454 	return err;
7455 }
7456 
7457 /**
7458  * ice_vsi_release_all - Delete all VSIs
7459  * @pf: PF from which all VSIs are being removed
7460  */
7461 static void ice_vsi_release_all(struct ice_pf *pf)
7462 {
7463 	int err, i;
7464 
7465 	if (!pf->vsi)
7466 		return;
7467 
7468 	ice_for_each_vsi(pf, i) {
7469 		if (!pf->vsi[i])
7470 			continue;
7471 
7472 		if (pf->vsi[i]->type == ICE_VSI_CHNL)
7473 			continue;
7474 
7475 		err = ice_vsi_release(pf->vsi[i]);
7476 		if (err)
7477 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7478 				i, err, pf->vsi[i]->vsi_num);
7479 	}
7480 }
7481 
7482 /**
7483  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7484  * @pf: pointer to the PF instance
7485  * @type: VSI type to rebuild
7486  *
7487  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7488  */
7489 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7490 {
7491 	struct device *dev = ice_pf_to_dev(pf);
7492 	int i, err;
7493 
7494 	ice_for_each_vsi(pf, i) {
7495 		struct ice_vsi *vsi = pf->vsi[i];
7496 
7497 		if (!vsi || vsi->type != type)
7498 			continue;
7499 
7500 		/* rebuild the VSI */
7501 		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7502 		if (err) {
7503 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7504 				err, vsi->idx, ice_vsi_type_str(type));
7505 			return err;
7506 		}
7507 
7508 		/* replay filters for the VSI */
7509 		err = ice_replay_vsi(&pf->hw, vsi->idx);
7510 		if (err) {
7511 			dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7512 				err, vsi->idx, ice_vsi_type_str(type));
7513 			return err;
7514 		}
7515 
7516 		/* Re-map HW VSI number, using VSI handle that has been
7517 		 * previously validated in ice_replay_vsi() call above
7518 		 */
7519 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7520 
7521 		/* enable the VSI */
7522 		err = ice_ena_vsi(vsi, false);
7523 		if (err) {
7524 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7525 				err, vsi->idx, ice_vsi_type_str(type));
7526 			return err;
7527 		}
7528 
7529 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7530 			 ice_vsi_type_str(type));
7531 	}
7532 
7533 	return 0;
7534 }
7535 
7536 /**
7537  * ice_update_pf_netdev_link - Update PF netdev link status
7538  * @pf: pointer to the PF instance
7539  */
7540 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7541 {
7542 	bool link_up;
7543 	int i;
7544 
7545 	ice_for_each_vsi(pf, i) {
7546 		struct ice_vsi *vsi = pf->vsi[i];
7547 
7548 		if (!vsi || vsi->type != ICE_VSI_PF)
7549 			return;
7550 
7551 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7552 		if (link_up) {
7553 			netif_carrier_on(pf->vsi[i]->netdev);
7554 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7555 		} else {
7556 			netif_carrier_off(pf->vsi[i]->netdev);
7557 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7558 		}
7559 	}
7560 }
7561 
7562 /**
7563  * ice_rebuild - rebuild after reset
7564  * @pf: PF to rebuild
7565  * @reset_type: type of reset
7566  *
7567  * Do not rebuild VF VSI in this flow because that is already handled via
7568  * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
7569  * PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we don't want
7570  * to reset/rebuild all the VF VSIs twice.
7571  */
7572 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7573 {
7574 	struct device *dev = ice_pf_to_dev(pf);
7575 	struct ice_hw *hw = &pf->hw;
7576 	bool dvm;
7577 	int err;
7578 
7579 	if (test_bit(ICE_DOWN, pf->state))
7580 		goto clear_recovery;
7581 
7582 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7583 
7584 #define ICE_EMP_RESET_SLEEP_MS 5000
7585 	if (reset_type == ICE_RESET_EMPR) {
7586 		/* If an EMP reset has occurred, any previously pending flash
7587 		 * update will have completed. We no longer know whether or
7588 		 * not the NVM update EMP reset is restricted.
7589 		 */
7590 		pf->fw_emp_reset_disabled = false;
7591 
7592 		msleep(ICE_EMP_RESET_SLEEP_MS);
7593 	}
7594 
7595 	err = ice_init_all_ctrlq(hw);
7596 	if (err) {
7597 		dev_err(dev, "control queues init failed %d\n", err);
7598 		goto err_init_ctrlq;
7599 	}
7600 
7601 	/* if DDP was previously loaded successfully */
7602 	if (!ice_is_safe_mode(pf)) {
7603 		/* reload the SW DB of filter tables */
7604 		if (reset_type == ICE_RESET_PFR)
7605 			ice_fill_blk_tbls(hw);
7606 		else
7607 			/* Reload DDP Package after CORER/GLOBR reset */
7608 			ice_load_pkg(NULL, pf);
7609 	}
7610 
7611 	err = ice_clear_pf_cfg(hw);
7612 	if (err) {
7613 		dev_err(dev, "clear PF configuration failed %d\n", err);
7614 		goto err_init_ctrlq;
7615 	}
7616 
7617 	ice_clear_pxe_mode(hw);
7618 
7619 	err = ice_init_nvm(hw);
7620 	if (err) {
7621 		dev_err(dev, "ice_init_nvm failed %d\n", err);
7622 		goto err_init_ctrlq;
7623 	}
7624 
7625 	err = ice_get_caps(hw);
7626 	if (err) {
7627 		dev_err(dev, "ice_get_caps failed %d\n", err);
7628 		goto err_init_ctrlq;
7629 	}
7630 
7631 	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7632 	if (err) {
7633 		dev_err(dev, "set_mac_cfg failed %d\n", err);
7634 		goto err_init_ctrlq;
7635 	}
7636 
7637 	dvm = ice_is_dvm_ena(hw);
7638 
7639 	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7640 	if (err)
7641 		goto err_init_ctrlq;
7642 
7643 	err = ice_sched_init_port(hw->port_info);
7644 	if (err)
7645 		goto err_sched_init_port;
7646 
7647 	/* start misc vector */
7648 	err = ice_req_irq_msix_misc(pf);
7649 	if (err) {
7650 		dev_err(dev, "misc vector setup failed: %d\n", err);
7651 		goto err_sched_init_port;
7652 	}
7653 
7654 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7655 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7656 		if (!rd32(hw, PFQF_FD_SIZE)) {
7657 			u16 unused, guar, b_effort;
7658 
7659 			guar = hw->func_caps.fd_fltr_guar;
7660 			b_effort = hw->func_caps.fd_fltr_best_effort;
7661 
7662 			/* force guaranteed filter pool for PF */
7663 			ice_alloc_fd_guar_item(hw, &unused, guar);
7664 			/* force shared filter pool for PF */
7665 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7666 		}
7667 	}
7668 
7669 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7670 		ice_dcb_rebuild(pf);
7671 
7672 	/* If the PF previously had enabled PTP, PTP init needs to happen before
7673 	 * the VSI rebuild. If not, this causes the PTP link status events to
7674 	 * fail.
7675 	 */
7676 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7677 		ice_ptp_rebuild(pf, reset_type);
7678 
7679 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
7680 		ice_gnss_init(pf);
7681 
7682 	/* rebuild PF VSI */
7683 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7684 	if (err) {
7685 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7686 		goto err_vsi_rebuild;
7687 	}
7688 
7689 	ice_eswitch_rebuild(pf);
7690 
7691 	if (reset_type == ICE_RESET_PFR) {
7692 		err = ice_rebuild_channels(pf);
7693 		if (err) {
7694 			dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7695 				err);
7696 			goto err_vsi_rebuild;
7697 		}
7698 	}
7699 
7700 	/* If Flow Director is active */
7701 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7702 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7703 		if (err) {
7704 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
7705 			goto err_vsi_rebuild;
7706 		}
7707 
7708 		/* replay HW Flow Director recipes */
7709 		if (hw->fdir_prof)
7710 			ice_fdir_replay_flows(hw);
7711 
7712 		/* replay Flow Director filters */
7713 		ice_fdir_replay_fltrs(pf);
7714 
7715 		ice_rebuild_arfs(pf);
7716 	}
7717 
7718 	ice_update_pf_netdev_link(pf);
7719 
7720 	/* tell the firmware we are up */
7721 	err = ice_send_version(pf);
7722 	if (err) {
7723 		dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7724 			err);
7725 		goto err_vsi_rebuild;
7726 	}
7727 
7728 	ice_replay_post(hw);
7729 
7730 	/* if we get here, reset flow is successful */
7731 	clear_bit(ICE_RESET_FAILED, pf->state);
7732 
7733 	ice_plug_aux_dev(pf);
7734 	if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
7735 		ice_lag_rebuild(pf);
7736 
7737 	/* Restore timestamp mode settings after VSI rebuild */
7738 	ice_ptp_restore_timestamp_mode(pf);
7739 	return;
7740 
7741 err_vsi_rebuild:
7742 err_sched_init_port:
7743 	ice_sched_cleanup_all(hw);
7744 err_init_ctrlq:
7745 	ice_shutdown_all_ctrlq(hw);
7746 	set_bit(ICE_RESET_FAILED, pf->state);
7747 clear_recovery:
7748 	/* set this bit in PF state to control service task scheduling */
7749 	set_bit(ICE_NEEDS_RESTART, pf->state);
7750 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
7751 }
7752 
7753 /**
7754  * ice_change_mtu - NDO callback to change the MTU
7755  * @netdev: network interface device structure
7756  * @new_mtu: new value for maximum frame size
7757  *
7758  * Returns 0 on success, negative on failure
7759  */
7760 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7761 {
7762 	struct ice_netdev_priv *np = netdev_priv(netdev);
7763 	struct ice_vsi *vsi = np->vsi;
7764 	struct ice_pf *pf = vsi->back;
7765 	struct bpf_prog *prog;
7766 	u8 count = 0;
7767 	int err = 0;
7768 
7769 	if (new_mtu == (int)netdev->mtu) {
7770 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7771 		return 0;
7772 	}
7773 
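	/* a single-buffer XDP program caps the MTU at what fits into one Rx
	 * buffer; frags-capable (multi-buffer) programs are not limited here
	 */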
7774 	prog = vsi->xdp_prog;
7775 	if (prog && !prog->aux->xdp_has_frags) {
7776 		int frame_size = ice_max_xdp_frame_size(vsi);
7777 
7778 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7779 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
7780 				   frame_size - ICE_ETH_PKT_HDR_PAD);
7781 			return -EINVAL;
7782 		}
7783 	} else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7784 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
7785 			netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
7786 				   ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
7787 			return -EINVAL;
7788 		}
7789 	}
7790 
7791 	/* if a reset is in progress, wait for some time for it to complete */
7792 	do {
7793 		if (ice_is_reset_in_progress(pf->state)) {
7794 			count++;
7795 			usleep_range(1000, 2000);
7796 		} else {
7797 			break;
7798 		}
7799 
7800 	} while (count < 100);
7801 
7802 	if (count == 100) {
7803 		netdev_err(netdev, "can't change MTU. Device is busy\n");
7804 		return -EBUSY;
7805 	}
7806 
7807 	WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu);
7808 	err = ice_down_up(vsi);
7809 	if (err)
7810 		return err;
7811 
7812 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7813 	set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7814 
7815 	return err;
7816 }
7817 
7818 /**
7819  * ice_eth_ioctl - Access the hwtstamp interface
7820  * @netdev: network interface device structure
7821  * @ifr: interface request data
7822  * @cmd: ioctl command
7823  */
7824 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7825 {
7826 	struct ice_netdev_priv *np = netdev_priv(netdev);
7827 	struct ice_pf *pf = np->vsi->back;
7828 
7829 	switch (cmd) {
7830 	case SIOCGHWTSTAMP:
7831 		return ice_ptp_get_ts_config(pf, ifr);
7832 	case SIOCSHWTSTAMP:
7833 		return ice_ptp_set_ts_config(pf, ifr);
7834 	default:
7835 		return -EOPNOTSUPP;
7836 	}
7837 }
7838 
7839 /**
7840  * ice_aq_str - convert AQ err code to a string
7841  * @aq_err: the AQ error code to convert
7842  */
7843 const char *ice_aq_str(enum ice_aq_err aq_err)
7844 {
7845 	switch (aq_err) {
7846 	case ICE_AQ_RC_OK:
7847 		return "OK";
7848 	case ICE_AQ_RC_EPERM:
7849 		return "ICE_AQ_RC_EPERM";
7850 	case ICE_AQ_RC_ENOENT:
7851 		return "ICE_AQ_RC_ENOENT";
7852 	case ICE_AQ_RC_ENOMEM:
7853 		return "ICE_AQ_RC_ENOMEM";
7854 	case ICE_AQ_RC_EBUSY:
7855 		return "ICE_AQ_RC_EBUSY";
7856 	case ICE_AQ_RC_EEXIST:
7857 		return "ICE_AQ_RC_EEXIST";
7858 	case ICE_AQ_RC_EINVAL:
7859 		return "ICE_AQ_RC_EINVAL";
7860 	case ICE_AQ_RC_ENOSPC:
7861 		return "ICE_AQ_RC_ENOSPC";
7862 	case ICE_AQ_RC_ENOSYS:
7863 		return "ICE_AQ_RC_ENOSYS";
7864 	case ICE_AQ_RC_EMODE:
7865 		return "ICE_AQ_RC_EMODE";
7866 	case ICE_AQ_RC_ENOSEC:
7867 		return "ICE_AQ_RC_ENOSEC";
7868 	case ICE_AQ_RC_EBADSIG:
7869 		return "ICE_AQ_RC_EBADSIG";
7870 	case ICE_AQ_RC_ESVN:
7871 		return "ICE_AQ_RC_ESVN";
7872 	case ICE_AQ_RC_EBADMAN:
7873 		return "ICE_AQ_RC_EBADMAN";
7874 	case ICE_AQ_RC_EBADBUF:
7875 		return "ICE_AQ_RC_EBADBUF";
7876 	}
7877 
7878 	return "ICE_AQ_RC_UNKNOWN";
7879 }
7880 
7881 /**
7882  * ice_set_rss_lut - Set RSS LUT
7883  * @vsi: Pointer to VSI structure
7884  * @lut: Lookup table
7885  * @lut_size: Lookup table size
7886  *
7887  * Returns 0 on success, negative on failure
7888  */
7889 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7890 {
7891 	struct ice_aq_get_set_rss_lut_params params = {};
7892 	struct ice_hw *hw = &vsi->back->hw;
7893 	int status;
7894 
7895 	if (!lut)
7896 		return -EINVAL;
7897 
7898 	params.vsi_handle = vsi->idx;
7899 	params.lut_size = lut_size;
7900 	params.lut_type = vsi->rss_lut_type;
7901 	params.lut = lut;
7902 
7903 	status = ice_aq_set_rss_lut(hw, &params);
7904 	if (status)
7905 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7906 			status, ice_aq_str(hw->adminq.sq_last_status));
7907 
7908 	return status;
7909 }
7910 
7911 /**
7912  * ice_set_rss_key - Set RSS key
7913  * @vsi: Pointer to the VSI structure
7914  * @seed: RSS hash seed
7915  *
7916  * Returns 0 on success, negative on failure
7917  */
7918 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7919 {
7920 	struct ice_hw *hw = &vsi->back->hw;
7921 	int status;
7922 
7923 	if (!seed)
7924 		return -EINVAL;
7925 
7926 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7927 	if (status)
7928 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7929 			status, ice_aq_str(hw->adminq.sq_last_status));
7930 
7931 	return status;
7932 }
7933 
7934 /**
7935  * ice_get_rss_lut - Get RSS LUT
7936  * @vsi: Pointer to VSI structure
7937  * @lut: Buffer to store the lookup table entries
7938  * @lut_size: Size of buffer to store the lookup table entries
7939  *
7940  * Returns 0 on success, negative on failure
7941  */
7942 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7943 {
7944 	struct ice_aq_get_set_rss_lut_params params = {};
7945 	struct ice_hw *hw = &vsi->back->hw;
7946 	int status;
7947 
7948 	if (!lut)
7949 		return -EINVAL;
7950 
7951 	params.vsi_handle = vsi->idx;
7952 	params.lut_size = lut_size;
7953 	params.lut_type = vsi->rss_lut_type;
7954 	params.lut = lut;
7955 
7956 	status = ice_aq_get_rss_lut(hw, &params);
7957 	if (status)
7958 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7959 			status, ice_aq_str(hw->adminq.sq_last_status));
7960 
7961 	return status;
7962 }
7963 
7964 /**
7965  * ice_get_rss_key - Get RSS key
7966  * @vsi: Pointer to VSI structure
7967  * @seed: Buffer to store the key in
7968  *
7969  * Returns 0 on success, negative on failure
7970  */
7971 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7972 {
7973 	struct ice_hw *hw = &vsi->back->hw;
7974 	int status;
7975 
7976 	if (!seed)
7977 		return -EINVAL;
7978 
7979 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7980 	if (status)
7981 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7982 			status, ice_aq_str(hw->adminq.sq_last_status));
7983 
7984 	return status;
7985 }
7986 
7987 /**
7988  * ice_set_rss_hfunc - Set RSS HASH function
7989  * @vsi: Pointer to VSI structure
7990  * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*)
7991  *
7992  * Returns 0 on success, negative on failure
7993  */
7994 int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc)
7995 {
7996 	struct ice_hw *hw = &vsi->back->hw;
7997 	struct ice_vsi_ctx *ctx;
7998 	bool symm;
7999 	int err;
8000 
8001 	if (hfunc == vsi->rss_hfunc)
8002 		return 0;
8003 
8004 	if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ &&
8005 	    hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
8006 		return -EOPNOTSUPP;
8007 
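	/* the hash function is selected in the VSI's queueing options
	 * section, so it is changed via an update-VSI context command
	 */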
8008 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8009 	if (!ctx)
8010 		return -ENOMEM;
8011 
8012 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
8013 	ctx->info.q_opt_rss = vsi->info.q_opt_rss;
8014 	ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
8015 	ctx->info.q_opt_rss |=
8016 		FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc);
8017 	ctx->info.q_opt_tc = vsi->info.q_opt_tc;
8018 	ctx->info.q_opt_flags = vsi->info.q_opt_flags;
8019 
8020 	err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
8021 	if (err) {
8022 		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n",
8023 			vsi->vsi_num, err);
8024 	} else {
8025 		vsi->info.q_opt_rss = ctx->info.q_opt_rss;
8026 		vsi->rss_hfunc = hfunc;
8027 		netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n",
8028 			    hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ?
8029 			    "Symmetric " : "");
8030 	}
8031 	kfree(ctx);
8032 	if (err)
8033 		return err;
8034 
8035 	/* Fix the symmetry setting for all existing RSS configurations */
8036 	symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
8037 	return ice_set_rss_cfg_symm(hw, vsi, symm);
8038 }
8039 
8040 /**
8041  * ice_bridge_getlink - Get the hardware bridge mode
8042  * @skb: skb buff
8043  * @pid: process ID
8044  * @seq: RTNL message seq
8045  * @dev: the netdev being configured
8046  * @filter_mask: filter mask passed in
8047  * @nlflags: netlink flags passed in
8048  *
8049  * Return the bridge mode (VEB/VEPA)
8050  */
8051 static int
8052 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8053 		   struct net_device *dev, u32 filter_mask, int nlflags)
8054 {
8055 	struct ice_netdev_priv *np = netdev_priv(dev);
8056 	struct ice_vsi *vsi = np->vsi;
8057 	struct ice_pf *pf = vsi->back;
8058 	u16 bmode;
8059 
8060 	bmode = pf->first_sw->bridge_mode;
8061 
8062 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
8063 				       filter_mask, NULL);
8064 }
8065 
8066 /**
8067  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
8068  * @vsi: Pointer to VSI structure
8069  * @bmode: Hardware bridge mode (VEB/VEPA)
8070  *
8071  * Returns 0 on success, negative on failure
8072  */
8073 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
8074 {
8075 	struct ice_aqc_vsi_props *vsi_props;
8076 	struct ice_hw *hw = &vsi->back->hw;
8077 	struct ice_vsi_ctx *ctxt;
8078 	int ret;
8079 
8080 	vsi_props = &vsi->info;
8081 
8082 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
8083 	if (!ctxt)
8084 		return -ENOMEM;
8085 
8086 	ctxt->info = vsi->info;
8087 
8088 	if (bmode == BRIDGE_MODE_VEB)
8089 		/* change from VEPA to VEB mode */
8090 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
8091 	else
8092 		/* change from VEB to VEPA mode */
8093 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
8094 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
8095 
8096 	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
8097 	if (ret) {
8098 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
8099 			bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
8100 		goto out;
8101 	}
8102 	/* Update sw flags for bookkeeping */
8103 	vsi_props->sw_flags = ctxt->info.sw_flags;
8104 
8105 out:
8106 	kfree(ctxt);
8107 	return ret;
8108 }
8109 
8110 /**
8111  * ice_bridge_setlink - Set the hardware bridge mode
8112  * @dev: the netdev being configured
8113  * @nlh: RTNL message
8114  * @flags: bridge setlink flags
8115  * @extack: netlink extended ack
8116  *
8117  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
8118  * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
8119  * not already set) for all VSIs connected to this switch, and also updates the
8120  * unicast switch filter rules for the corresponding switch of the netdev.
8121  */
8122 static int
8123 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
8124 		   u16 __always_unused flags,
8125 		   struct netlink_ext_ack __always_unused *extack)
8126 {
8127 	struct ice_netdev_priv *np = netdev_priv(dev);
8128 	struct ice_pf *pf = np->vsi->back;
8129 	struct nlattr *attr, *br_spec;
8130 	struct ice_hw *hw = &pf->hw;
8131 	struct ice_sw *pf_sw;
8132 	int rem, v, err = 0;
8133 
8134 	pf_sw = pf->first_sw;
8135 	/* find the attribute in the netlink message */
8136 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8137 	if (!br_spec)
8138 		return -EINVAL;
8139 
8140 	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
8141 		__u16 mode = nla_get_u16(attr);
8142 
8143 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
8144 			return -EINVAL;
8145 		/* Continue if bridge mode is not being flipped */
8146 		if (mode == pf_sw->bridge_mode)
8147 			continue;
8148 		/* Iterate through the PF VSI list and update the loopback
8149 		 * mode of each VSI
8150 		 */
8151 		ice_for_each_vsi(pf, v) {
8152 			if (!pf->vsi[v])
8153 				continue;
8154 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
8155 			if (err)
8156 				return err;
8157 		}
8158 
8159 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
8160 		/* Update the unicast switch filter rules for the corresponding
8161 		 * switch of the netdev
8162 		 */
8163 		err = ice_update_sw_rule_bridge_mode(hw);
8164 		if (err) {
8165 			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
8166 				   mode, err,
8167 				   ice_aq_str(hw->adminq.sq_last_status));
8168 			/* revert hw->evb_veb */
8169 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
8170 			return err;
8171 		}
8172 
8173 		pf_sw->bridge_mode = mode;
8174 	}
8175 
8176 	return 0;
8177 }
8178 
8179 /**
8180  * ice_tx_timeout - Respond to a Tx Hang
8181  * @netdev: network interface device structure
8182  * @txqueue: Tx queue
8183  */
8184 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
8185 {
8186 	struct ice_netdev_priv *np = netdev_priv(netdev);
8187 	struct ice_tx_ring *tx_ring = NULL;
8188 	struct ice_vsi *vsi = np->vsi;
8189 	struct ice_pf *pf = vsi->back;
8190 	u32 i;
8191 
8192 	pf->tx_timeout_count++;
8193 
8194 	/* Check if PFC is enabled for the TC to which the queue belongs.
8195 	 * If yes, then the Tx timeout is not caused by a hung queue and
8196 	 * there is no need to reset and rebuild
8197 	 */
8198 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
8199 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
8200 			 txqueue);
8201 		return;
8202 	}
8203 
8204 	/* now that we have an index, find the tx_ring struct */
8205 	ice_for_each_txq(vsi, i)
8206 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
8207 			if (txqueue == vsi->tx_rings[i]->q_index) {
8208 				tx_ring = vsi->tx_rings[i];
8209 				break;
8210 			}
8211 
8212 	/* Reset recovery level if enough time has elapsed after last timeout.
8213 	 * Also ensure no new reset action happens before next timeout period.
8214 	 */
8215 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
8216 		pf->tx_timeout_recovery_level = 1;
8217 	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
8218 				       netdev->watchdog_timeo)))
8219 		return;
8220 
8221 	if (tx_ring) {
8222 		struct ice_hw *hw = &pf->hw;
8223 		u32 head, val = 0;
8224 
8225 		head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
8226 				 rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
8227 		/* Read interrupt register */
8228 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
8229 
8230 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
8231 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
8232 			    head, tx_ring->next_to_use, val);
8233 	}
8234 
8235 	pf->tx_timeout_last_recovery = jiffies;
8236 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
8237 		    pf->tx_timeout_recovery_level, txqueue);
8238 
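	/* escalate through progressively heavier resets: PF reset first,
	 * then core reset, then global reset
	 */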
8239 	switch (pf->tx_timeout_recovery_level) {
8240 	case 1:
8241 		set_bit(ICE_PFR_REQ, pf->state);
8242 		break;
8243 	case 2:
8244 		set_bit(ICE_CORER_REQ, pf->state);
8245 		break;
8246 	case 3:
8247 		set_bit(ICE_GLOBR_REQ, pf->state);
8248 		break;
8249 	default:
8250 		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
8251 		set_bit(ICE_DOWN, pf->state);
8252 		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
8253 		set_bit(ICE_SERVICE_DIS, pf->state);
8254 		break;
8255 	}
8256 
8257 	ice_service_task_schedule(pf);
8258 	pf->tx_timeout_recovery_level++;
8259 }
8260 
8261 /**
8262  * ice_setup_tc_cls_flower - flower classifier offloads
8263  * @np: net device to configure
8264  * @filter_dev: device on which filter is added
8265  * @cls_flower: offload data
8266  */
8267 static int
8268 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
8269 			struct net_device *filter_dev,
8270 			struct flow_cls_offload *cls_flower)
8271 {
8272 	struct ice_vsi *vsi = np->vsi;
8273 
8274 	if (cls_flower->common.chain_index)
8275 		return -EOPNOTSUPP;
8276 
8277 	switch (cls_flower->command) {
8278 	case FLOW_CLS_REPLACE:
8279 		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
8280 	case FLOW_CLS_DESTROY:
8281 		return ice_del_cls_flower(vsi, cls_flower);
8282 	default:
8283 		return -EINVAL;
8284 	}
8285 }
8286 
8287 /**
8288  * ice_setup_tc_block_cb - callback handler registered for TC block
8289  * @type: TC SETUP type
8290  * @type_data: TC flower offload data that contains user input
8291  * @cb_priv: netdev private data
8292  */
8293 static int
8294 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
8295 {
8296 	struct ice_netdev_priv *np = cb_priv;
8297 
8298 	switch (type) {
8299 	case TC_SETUP_CLSFLOWER:
8300 		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
8301 					       type_data);
8302 	default:
8303 		return -EOPNOTSUPP;
8304 	}
8305 }
8306 
8307 /**
8308  * ice_validate_mqprio_qopt - Validate MQPRIO input parameters
8309  * @vsi: Pointer to VSI
8310  * @mqprio_qopt: input parameters for mqprio queue configuration
8311  *
8312  * This function validates MQPRIO params, such as qcount (power of 2 wherever
8313  * needed), and makes sure the user doesn't specify a qcount or BW rate limit
8314  * for more TCs than "num_tc"
8315  */
8316 static int
8317 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
8318 			 struct tc_mqprio_qopt_offload *mqprio_qopt)
8319 {
8320 	int non_power_of_2_qcount = 0;
8321 	struct ice_pf *pf = vsi->back;
8322 	int max_rss_q_cnt = 0;
8323 	u64 sum_min_rate = 0;
8324 	struct device *dev;
8325 	int i, speed;
8326 	u8 num_tc;
8327 
8328 	if (vsi->type != ICE_VSI_PF)
8329 		return -EINVAL;
8330 
8331 	if (mqprio_qopt->qopt.offset[0] != 0 ||
8332 	    mqprio_qopt->qopt.num_tc < 1 ||
8333 	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
8334 		return -EINVAL;
8335 
8336 	dev = ice_pf_to_dev(pf);
8337 	vsi->ch_rss_size = 0;
8338 	num_tc = mqprio_qopt->qopt.num_tc;
8339 	speed = ice_get_link_speed_kbps(vsi);
8340 
8341 	for (i = 0; num_tc; i++) {
8342 		int qcount = mqprio_qopt->qopt.count[i];
8343 		u64 max_rate, min_rate, rem;
8344 
8345 		if (!qcount)
8346 			return -EINVAL;
8347 
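		/* RSS spreading works on power-of-2 queue counts: at most one
		 * TC may use a non power-of-2 qcount, and that qcount must be
		 * at least as large as every other TC's qcount
		 */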
8348 		if (is_power_of_2(qcount)) {
8349 			if (non_power_of_2_qcount &&
8350 			    qcount > non_power_of_2_qcount) {
8351 				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
8352 					qcount, non_power_of_2_qcount);
8353 				return -EINVAL;
8354 			}
8355 			if (qcount > max_rss_q_cnt)
8356 				max_rss_q_cnt = qcount;
8357 		} else {
8358 			if (non_power_of_2_qcount &&
8359 			    qcount != non_power_of_2_qcount) {
8360 				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
8361 					qcount, non_power_of_2_qcount);
8362 				return -EINVAL;
8363 			}
8364 			if (qcount < max_rss_q_cnt) {
8365 				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
8366 					qcount, max_rss_q_cnt);
8367 				return -EINVAL;
8368 			}
8369 			max_rss_q_cnt = qcount;
8370 			non_power_of_2_qcount = qcount;
8371 		}
8372 
8373 		/* The TC command takes input in K/M/Gbps or K/M/Gbit etc. but
8374 		 * converts the bandwidth rate limit into Bytes/s when
8375 		 * passing it down to the driver. So convert input bandwidth
8376 		 * from Bytes/s to Kbps
8377 		 */
8378 		max_rate = mqprio_qopt->max_rate[i];
8379 		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
8380 
8381 		/* min_rate is minimum guaranteed rate and it can't be zero */
8382 		min_rate = mqprio_qopt->min_rate[i];
8383 		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
8384 		sum_min_rate += min_rate;
8385 
8386 		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
8387 			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
8388 				min_rate, ICE_MIN_BW_LIMIT);
8389 			return -EINVAL;
8390 		}
8391 
8392 		if (max_rate && max_rate > speed) {
8393 			dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
8394 				i, max_rate, speed);
8395 			return -EINVAL;
8396 		}
8397 
8398 		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
8399 		if (rem) {
8400 			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
8401 				i, ICE_MIN_BW_LIMIT);
8402 			return -EINVAL;
8403 		}
8404 
8405 		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
8406 		if (rem) {
8407 			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
8408 				i, ICE_MIN_BW_LIMIT);
8409 			return -EINVAL;
8410 		}
8411 
8412 		/* min_rate can't be more than max_rate, except when max_rate
8413 		 * is zero (implies max_rate sought is max line rate). In such
8414 		 * a case min_rate can be more than max.
8415 		 */
8416 		if (max_rate && min_rate > max_rate) {
8417 			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
8418 				min_rate, max_rate);
8419 			return -EINVAL;
8420 		}
8421 
8422 		if (i >= mqprio_qopt->qopt.num_tc - 1)
8423 			break;
8424 		if (mqprio_qopt->qopt.offset[i + 1] !=
8425 		    (mqprio_qopt->qopt.offset[i] + qcount))
8426 			return -EINVAL;
8427 	}
8428 	if (vsi->num_rxq <
8429 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8430 		return -EINVAL;
8431 	if (vsi->num_txq <
8432 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8433 		return -EINVAL;
8434 
8435 	if (sum_min_rate && sum_min_rate > (u64)speed) {
8436 		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
8437 			sum_min_rate, speed);
8438 		return -EINVAL;
8439 	}
8440 
8441 	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8442 	vsi->ch_rss_size = max_rss_q_cnt;
8443 
8444 	return 0;
8445 }
8446 
8447 /**
8448  * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8449  * @pf: ptr to PF device
8450  * @vsi: ptr to VSI
8451  */
8452 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8453 {
8454 	struct device *dev = ice_pf_to_dev(pf);
8455 	bool added = false;
8456 	struct ice_hw *hw;
8457 	int flow;
8458 
8459 	if (!(vsi->num_gfltr || vsi->num_bfltr))
8460 		return -EINVAL;
8461 
8462 	hw = &pf->hw;
8463 	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8464 		struct ice_fd_hw_prof *prof;
8465 		int tun, status;
8466 		u64 entry_h;
8467 
8468 		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8469 		      hw->fdir_prof[flow]->cnt))
8470 			continue;
8471 
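		/* add an entry for both the non-tunneled and the tunneled
		 * segment of this flow type
		 */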
8472 		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8473 			enum ice_flow_priority prio;
8474 
8475 			/* add this VSI to FDir profile for this flow */
8476 			prio = ICE_FLOW_PRIO_NORMAL;
8477 			prof = hw->fdir_prof[flow];
8478 			status = ice_flow_add_entry(hw, ICE_BLK_FD,
8479 						    prof->prof_id[tun],
8480 						    prof->vsi_h[0], vsi->idx,
8481 						    prio, prof->fdir_seg[tun],
8482 						    &entry_h);
8483 			if (status) {
8484 				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8485 					vsi->idx, flow);
8486 				continue;
8487 			}
8488 
8489 			prof->entry_h[prof->cnt][tun] = entry_h;
8490 		}
8491 
8492 		/* store VSI for filter replay and delete */
8493 		prof->vsi_h[prof->cnt] = vsi->idx;
8494 		prof->cnt++;
8495 
8496 		added = true;
8497 		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8498 			flow);
8499 	}
8500 
8501 	if (!added)
8502 		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8503 
8504 	return 0;
8505 }
8506 
8507 /**
8508  * ice_add_channel - add a channel by adding VSI
8509  * @pf: ptr to PF device
8510  * @sw_id: underlying HW switching element ID
8511  * @ch: ptr to channel structure
8512  *
8513  * Add a channel (VSI) using add_vsi and queue_map
8514  */
8515 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8516 {
8517 	struct device *dev = ice_pf_to_dev(pf);
8518 	struct ice_vsi *vsi;
8519 
8520 	if (ch->type != ICE_VSI_CHNL) {
8521 		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8522 		return -EINVAL;
8523 	}
8524 
8525 	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8526 	if (!vsi || vsi->type != ICE_VSI_CHNL) {
8527 		dev_err(dev, "create chnl VSI failure\n");
8528 		return -EINVAL;
8529 	}
8530 
8531 	ice_add_vsi_to_fdir(pf, vsi);
8532 
8533 	ch->sw_id = sw_id;
8534 	ch->vsi_num = vsi->vsi_num;
8535 	ch->info.mapping_flags = vsi->info.mapping_flags;
8536 	ch->ch_vsi = vsi;
8537 	/* set the back pointer of channel for newly created VSI */
8538 	vsi->ch = ch;
8539 
8540 	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8541 	       sizeof(vsi->info.q_mapping));
8542 	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8543 	       sizeof(vsi->info.tc_mapping));
8544 
8545 	return 0;
8546 }
8547 
8548 /**
8549  * ice_chnl_cfg_res - configure channel resources
8550  * @vsi: the VSI being setup
8551  * @ch: ptr to channel structure
8552  *
8553  * Configure channel specific resources such as rings and vectors.
8554  */
8555 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8556 {
8557 	int i;
8558 
8559 	for (i = 0; i < ch->num_txq; i++) {
8560 		struct ice_q_vector *tx_q_vector, *rx_q_vector;
8561 		struct ice_ring_container *rc;
8562 		struct ice_tx_ring *tx_ring;
8563 		struct ice_rx_ring *rx_ring;
8564 
8565 		tx_ring = vsi->tx_rings[ch->base_q + i];
8566 		rx_ring = vsi->rx_rings[ch->base_q + i];
8567 		if (!tx_ring || !rx_ring)
8568 			continue;
8569 
8570 		/* set up the rings as channel enabled */
8571 		tx_ring->ch = ch;
8572 		rx_ring->ch = ch;
8573 
8574 		/* following code block sets up vector specific attributes */
8575 		tx_q_vector = tx_ring->q_vector;
8576 		rx_q_vector = rx_ring->q_vector;
8577 		if (!tx_q_vector && !rx_q_vector)
8578 			continue;
8579 
8580 		if (tx_q_vector) {
8581 			tx_q_vector->ch = ch;
8582 			/* set up the Tx ITR setting if DIM is off */
8583 			rc = &tx_q_vector->tx;
8584 			if (!ITR_IS_DYNAMIC(rc))
8585 				ice_write_itr(rc, rc->itr_setting);
8586 		}
8587 		if (rx_q_vector) {
8588 			rx_q_vector->ch = ch;
8589 			/* set up the Rx ITR setting if DIM is off */
8590 			rc = &rx_q_vector->rx;
8591 			if (!ITR_IS_DYNAMIC(rc))
8592 				ice_write_itr(rc, rc->itr_setting);
8593 		}
8594 	}
8595 
8596 	/* it is safe to assume that, if the channel has a non-zero num_txq or
8597 	 * num_rxq, then the GLINT_ITR register would have been written to
8598 	 * perform the in-context update, hence perform the flush
8599 	 */
8600 	if (ch->num_txq || ch->num_rxq)
8601 		ice_flush(&vsi->back->hw);
8602 }
8603 
8604 /**
8605  * ice_cfg_chnl_all_res - configure channel resources
8606  * @vsi: ptr to main VSI
8607  * @ch: ptr to channel structure
8608  *
8609  * This function configures channel specific resources such as flow-director
8610  * counter index, and other resources such as queues, vectors, ITR settings
8611  */
8612 static void
8613 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8614 {
8615 	/* configure channel (aka ADQ) resources such as queues, vectors,
8616 	 * ITR settings for channel specific vectors and anything else
8617 	 */
8618 	ice_chnl_cfg_res(vsi, ch);
8619 }
8620 
8621 /**
8622  * ice_setup_hw_channel - setup new channel
8623  * @pf: ptr to PF device
8624  * @vsi: the VSI being setup
8625  * @ch: ptr to channel structure
8626  * @sw_id: underlying HW switching element ID
8627  * @type: type of channel to be created (VMDq2/VF)
8628  *
8629  * Setup new channel (VSI) based on specified type (VMDq2/VF)
8630  * and configures Tx rings accordingly
8631  */
8632 static int
8633 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8634 		     struct ice_channel *ch, u16 sw_id, u8 type)
8635 {
8636 	struct device *dev = ice_pf_to_dev(pf);
8637 	int ret;
8638 
8639 	ch->base_q = vsi->next_base_q;
8640 	ch->type = type;
8641 
8642 	ret = ice_add_channel(pf, sw_id, ch);
8643 	if (ret) {
8644 		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8645 		return ret;
8646 	}
8647 
8648 	/* configure/setup ADQ specific resources */
8649 	ice_cfg_chnl_all_res(vsi, ch);
8650 
8651 	/* make sure to update next_base_q so that subsequent channels'
8652 	 * (aka ADQ) VSI queue maps are correct
8653 	 */
8654 	vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8655 	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8656 		ch->num_rxq);
8657 
8658 	return 0;
8659 }
8660 
8661 /**
8662  * ice_setup_channel - setup new channel using uplink element
8663  * @pf: ptr to PF device
8664  * @vsi: the VSI being setup
8665  * @ch: ptr to channel structure
8666  *
8667  * Setup new channel (VSI) based on specified type (VMDq2/VF)
8668  * and uplink switching element
8669  */
8670 static bool
8671 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8672 		  struct ice_channel *ch)
8673 {
8674 	struct device *dev = ice_pf_to_dev(pf);
8675 	u16 sw_id;
8676 	int ret;
8677 
8678 	if (vsi->type != ICE_VSI_PF) {
8679 		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8680 		return false;
8681 	}
8682 
8683 	sw_id = pf->first_sw->sw_id;
8684 
8685 	/* create channel (VSI) */
8686 	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8687 	if (ret) {
8688 		dev_err(dev, "failed to setup hw_channel\n");
8689 		return false;
8690 	}
8691 	dev_dbg(dev, "successfully created channel()\n");
8692 
8693 	return ch->ch_vsi ? true : false;
8694 }
8695 
8696 /**
8697  * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8698  * @vsi: VSI to be configured
8699  * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8700  * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8701  */
8702 static int
8703 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8704 {
8705 	int err;
8706 
8707 	err = ice_set_min_bw_limit(vsi, min_tx_rate);
8708 	if (err)
8709 		return err;
8710 
8711 	return ice_set_max_bw_limit(vsi, max_tx_rate);
8712 }
8713 
8714 /**
8715  * ice_create_q_channel - function to create channel
8716  * @vsi: VSI to be configured
8717  * @ch: ptr to channel (it contains channel specific params)
8718  *
8719  * This function creates channel (VSI) using num_queues specified by user,
8720  * reconfigs RSS if needed.
8721  */
8722 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8723 {
8724 	struct ice_pf *pf = vsi->back;
8725 	struct device *dev;
8726 
8727 	if (!ch)
8728 		return -EINVAL;
8729 
8730 	dev = ice_pf_to_dev(pf);
8731 	if (!ch->num_txq || !ch->num_rxq) {
8732 		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8733 		return -EINVAL;
8734 	}
8735 
8736 	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8737 		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8738 			vsi->cnt_q_avail, ch->num_txq);
8739 		return -EINVAL;
8740 	}
8741 
8742 	if (!ice_setup_channel(pf, vsi, ch)) {
8743 		dev_info(dev, "Failed to setup channel\n");
8744 		return -EINVAL;
8745 	}
8746 	/* configure BW rate limit */
8747 	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8748 		int ret;
8749 
8750 		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8751 				       ch->min_tx_rate);
8752 		if (ret)
8753 			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8754 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8755 		else
8756 			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8757 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8758 	}
8759 
8760 	vsi->cnt_q_avail -= ch->num_txq;
8761 
8762 	return 0;
8763 }
8764 
8765 /**
8766  * ice_rem_all_chnl_fltrs - removes all channel filters
8767  * @pf: ptr to PF, TC-flower based filters are tracked at PF level
8768  *
8769  * Remove all advanced switch filters only if they are channel specific
8770  * tc-flower based filters
8771  */
8772 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8773 {
8774 	struct ice_tc_flower_fltr *fltr;
8775 	struct hlist_node *node;
8776 
8777 	/* to remove all channel filters, iterate an ordered list of filters */
8778 	hlist_for_each_entry_safe(fltr, node,
8779 				  &pf->tc_flower_fltr_list,
8780 				  tc_flower_node) {
8781 		struct ice_rule_query_data rule;
8782 		int status;
8783 
8784 		/* for now process only channel specific filters */
8785 		if (!ice_is_chnl_fltr(fltr))
8786 			continue;
8787 
8788 		rule.rid = fltr->rid;
8789 		rule.rule_id = fltr->rule_id;
8790 		rule.vsi_handle = fltr->dest_vsi_handle;
8791 		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8792 		if (status) {
8793 			if (status == -ENOENT)
8794 				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8795 					rule.rule_id);
8796 			else
8797 				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8798 					status);
8799 		} else if (fltr->dest_vsi) {
8800 			/* update advanced switch filter count */
8801 			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8802 				u32 flags = fltr->flags;
8803 
8804 				fltr->dest_vsi->num_chnl_fltr--;
8805 				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8806 					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8807 					pf->num_dmac_chnl_fltrs--;
8808 			}
8809 		}
8810 
8811 		hlist_del(&fltr->tc_flower_node);
8812 		kfree(fltr);
8813 	}
8814 }
8815 
8816 /**
8817  * ice_remove_q_channels - Remove queue channels for the TCs
8818  * @vsi: VSI to be configured
8819  * @rem_fltr: delete advanced switch filter or not
8820  *
8821  * Remove queue channels for the TCs
8822  */
8823 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8824 {
8825 	struct ice_channel *ch, *ch_tmp;
8826 	struct ice_pf *pf = vsi->back;
8827 	int i;
8828 
8829 	/* remove all tc-flower based filters if they are channel filters only */
8830 	if (rem_fltr)
8831 		ice_rem_all_chnl_fltrs(pf);
8832 
8833 	/* remove ntuple filters since queue configuration is being changed */
8834 	if (vsi->netdev->features & NETIF_F_NTUPLE) {
8835 		struct ice_hw *hw = &pf->hw;
8836 
8837 		mutex_lock(&hw->fdir_fltr_lock);
8838 		ice_fdir_del_all_fltrs(vsi);
8839 		mutex_unlock(&hw->fdir_fltr_lock);
8840 	}
8841 
8842 	/* perform cleanup for channels if they exist */
8843 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8844 		struct ice_vsi *ch_vsi;
8845 
8846 		list_del(&ch->list);
8847 		ch_vsi = ch->ch_vsi;
8848 		if (!ch_vsi) {
8849 			kfree(ch);
8850 			continue;
8851 		}
8852 
8853 		/* Reset queue contexts */
8854 		for (i = 0; i < ch->num_rxq; i++) {
8855 			struct ice_tx_ring *tx_ring;
8856 			struct ice_rx_ring *rx_ring;
8857 
8858 			tx_ring = vsi->tx_rings[ch->base_q + i];
8859 			rx_ring = vsi->rx_rings[ch->base_q + i];
8860 			if (tx_ring) {
8861 				tx_ring->ch = NULL;
8862 				if (tx_ring->q_vector)
8863 					tx_ring->q_vector->ch = NULL;
8864 			}
8865 			if (rx_ring) {
8866 				rx_ring->ch = NULL;
8867 				if (rx_ring->q_vector)
8868 					rx_ring->q_vector->ch = NULL;
8869 			}
8870 		}
8871 
8872 		/* Release FD resources for the channel VSI */
8873 		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8874 
8875 		/* clear the VSI from scheduler tree */
8876 		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8877 
8878 		/* Delete VSI from FW, PF and HW VSI arrays */
8879 		ice_vsi_delete(ch->ch_vsi);
8880 
8881 		/* free the channel */
8882 		kfree(ch);
8883 	}
8884 
8885 	/* clear the channel VSI map which is stored in main VSI */
8886 	ice_for_each_chnl_tc(i)
8887 		vsi->tc_map_vsi[i] = NULL;
8888 
8889 	/* reset all of the main VSI's TC information */
8890 	vsi->all_enatc = 0;
8891 	vsi->all_numtc = 0;
8892 }
8893 
8894 /**
8895  * ice_rebuild_channels - rebuild channels
8896  * @pf: ptr to PF
8897  *
8898  * Recreate channel VSIs and replay filters
8899  */
8900 static int ice_rebuild_channels(struct ice_pf *pf)
8901 {
8902 	struct device *dev = ice_pf_to_dev(pf);
8903 	struct ice_vsi *main_vsi;
8904 	bool rem_adv_fltr = true;
8905 	struct ice_channel *ch;
8906 	struct ice_vsi *vsi;
8907 	int tc_idx = 1;
8908 	int i, err;
8909 
8910 	main_vsi = ice_get_main_vsi(pf);
8911 	if (!main_vsi)
8912 		return 0;
8913 
8914 	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8915 	    main_vsi->old_numtc == 1)
8916 		return 0; /* nothing to be done */
8917 
8918 	/* reconfigure the main VSI based on the old TC value and the
8919 	 * cached MQPRIO options
8920 	 */
8921 	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8922 	if (err) {
8923 		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8924 			main_vsi->old_ena_tc, main_vsi->vsi_num);
8925 		return err;
8926 	}
8927 
8928 	/* rebuild ADQ VSIs */
8929 	ice_for_each_vsi(pf, i) {
8930 		enum ice_vsi_type type;
8931 
8932 		vsi = pf->vsi[i];
8933 		if (!vsi || vsi->type != ICE_VSI_CHNL)
8934 			continue;
8935 
8936 		type = vsi->type;
8937 
8938 		/* rebuild ADQ VSI */
8939 		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
8940 		if (err) {
8941 			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8942 				ice_vsi_type_str(type), vsi->idx, err);
8943 			goto cleanup;
8944 		}
8945 
8946 		/* Re-map the HW VSI number using the VSI handle; the same
8947 		 * handle is passed to the ice_replay_vsi() call below
8948 		 */
8949 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8950 
8951 		/* replay filters for the VSI */
8952 		err = ice_replay_vsi(&pf->hw, vsi->idx);
8953 		if (err) {
8954 			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8955 				ice_vsi_type_str(type), err, vsi->idx);
8956 			rem_adv_fltr = false;
8957 			goto cleanup;
8958 		}
8959 		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8960 			 ice_vsi_type_str(type), vsi->idx);
8961 
8962 		/* store ADQ VSI at correct TC index in main VSI's
8963 		 * map of TC to VSI
8964 		 */
8965 		main_vsi->tc_map_vsi[tc_idx++] = vsi;
8966 	}
8967 
8968 	/* ADQ VSI(s) have been rebuilt successfully, so set up
8969 	 * channels for the main VSI's Tx and Rx rings
8970 	 */
8971 	list_for_each_entry(ch, &main_vsi->ch_list, list) {
8972 		struct ice_vsi *ch_vsi;
8973 
8974 		ch_vsi = ch->ch_vsi;
8975 		if (!ch_vsi)
8976 			continue;
8977 
8978 		/* reconfig channel resources */
8979 		ice_cfg_chnl_all_res(main_vsi, ch);
8980 
8981 		/* replay BW rate limit if it is non-zero */
8982 		if (!ch->max_tx_rate && !ch->min_tx_rate)
8983 			continue;
8984 
8985 		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8986 				       ch->min_tx_rate);
8987 		if (err)
8988 			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8989 				err, ch->max_tx_rate, ch->min_tx_rate,
8990 				ch_vsi->vsi_num);
8991 		else
8992 			dev_dbg(dev, "successfully rebuilt BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8993 				ch->max_tx_rate, ch->min_tx_rate,
8994 				ch_vsi->vsi_num);
8995 	}
8996 
8997 	/* reconfig RSS for main VSI */
8998 	if (main_vsi->ch_rss_size)
8999 		ice_vsi_cfg_rss_lut_key(main_vsi);
9000 
9001 	return 0;
9002 
9003 cleanup:
9004 	ice_remove_q_channels(main_vsi, rem_adv_fltr);
9005 	return err;
9006 }
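
/* Called from the reset rebuild flow: the main VSI is restored to its
 * pre-reset TC layout first, then each ICE_VSI_CHNL VSI is rebuilt, its
 * filters are replayed, and any BW limits are reapplied. If filter
 * replay fails, rem_adv_fltr is cleared so that the cleanup path does
 * not also attempt to remove those filters from hardware.
 */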
9007 
9008 /**
9009  * ice_create_q_channels - Add queue channel for the given TCs
9010  * @vsi: VSI to be configured
9011  *
9012  * Configures queue channel mapping to the given TCs
9013  */
9014 static int ice_create_q_channels(struct ice_vsi *vsi)
9015 {
9016 	struct ice_pf *pf = vsi->back;
9017 	struct ice_channel *ch;
9018 	int ret = 0, i;
9019 
9020 	ice_for_each_chnl_tc(i) {
9021 		if (!(vsi->all_enatc & BIT(i)))
9022 			continue;
9023 
9024 		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
9025 		if (!ch) {
9026 			ret = -ENOMEM;
9027 			goto err_free;
9028 		}
9029 		INIT_LIST_HEAD(&ch->list);
9030 		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
9031 		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
9032 		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
9033 		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
9034 		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
9035 
9036 		/* convert to Kbits/s */
9037 		if (ch->max_tx_rate)
9038 			ch->max_tx_rate = div_u64(ch->max_tx_rate,
9039 						  ICE_BW_KBPS_DIVISOR);
9040 		if (ch->min_tx_rate)
9041 			ch->min_tx_rate = div_u64(ch->min_tx_rate,
9042 						  ICE_BW_KBPS_DIVISOR);
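		/* e.g. a tc max_rate of 1250000000 bytes/s (10 Gbit/s)
		 * becomes 1250000000 / ICE_BW_KBPS_DIVISOR = 10000000
		 * Kbit/s here (assuming the divisor's value of 125)
		 */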
9043 
9044 		ret = ice_create_q_channel(vsi, ch);
9045 		if (ret) {
9046 			dev_err(ice_pf_to_dev(pf),
9047 				"failed creating channel TC:%d\n", i);
9048 			kfree(ch);
9049 			goto err_free;
9050 		}
9051 		list_add_tail(&ch->list, &vsi->ch_list);
9052 		vsi->tc_map_vsi[i] = ch->ch_vsi;
9053 		dev_dbg(ice_pf_to_dev(pf),
9054 			"successfully created channel: VSI %pK\n", ch->ch_vsi);
9055 	}
9056 	return 0;
9057 
9058 err_free:
9059 	ice_remove_q_channels(vsi, false);
9060 
9061 	return ret;
9062 }
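
/* Worked example (illustrative): for an mqprio config of
 * "num_tc 3 map 0 1 2 queues 2@0 4@2 8@6", TC0 stays on the main VSI
 * while the loop above creates channels for TC1 (base_q 2, 4 queues)
 * and TC2 (base_q 6, 8 queues), each backed by its own channel VSI.
 */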
9063 
9064 /**
9065  * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
9066  * @netdev: net device to configure
9067  * @type_data: TC offload data
9068  */
9069 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
9070 {
9071 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
9072 	struct ice_netdev_priv *np = netdev_priv(netdev);
9073 	struct ice_vsi *vsi = np->vsi;
9074 	struct ice_pf *pf = vsi->back;
9075 	u16 mode, ena_tc_qdisc = 0;
9076 	int cur_txq, cur_rxq;
9077 	u8 hw = 0, num_tcf;
9078 	struct device *dev;
9079 	int ret, i;
9080 
9081 	dev = ice_pf_to_dev(pf);
9082 	num_tcf = mqprio_qopt->qopt.num_tc;
9083 	hw = mqprio_qopt->qopt.hw;
9084 	mode = mqprio_qopt->mode;
9085 	if (!hw) {
9086 		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
9087 		vsi->ch_rss_size = 0;
9088 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
9089 		goto config_tcf;
9090 	}
9091 
9092 	/* Generate queue region map for the number of TCFs requested */
9093 	for (i = 0; i < num_tcf; i++)
9094 		ena_tc_qdisc |= BIT(i);
9095 
9096 	switch (mode) {
9097 	case TC_MQPRIO_MODE_CHANNEL:
9099 		if (pf->hw.port_info->is_custom_tx_enabled) {
9100 			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
9101 			return -EBUSY;
9102 		}
9103 		ice_tear_down_devlink_rate_tree(pf);
9104 
9105 		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
9106 		if (ret) {
9107 			netdev_err(netdev, "ice_validate_mqprio_qopt() failed, ret %d\n",
9108 				   ret);
9109 			return ret;
9110 		}
9111 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
9112 		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
9113 		/* don't assume the state of hw_tc_offload during driver load;
9114 		 * set the TC flower filter flag only if hw_tc_offload is
9115 		 * already on
9116 		 */
9117 		if (vsi->netdev->features & NETIF_F_HW_TC)
9118 			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
9119 		break;
9120 	default:
9121 		return -EINVAL;
9122 	}
9123 
9124 config_tcf:
9125 
9126 	/* Requesting same TCF configuration as already enabled */
9127 	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
9128 	    mode != TC_MQPRIO_MODE_CHANNEL)
9129 		return 0;
9130 
9131 	/* Pause VSI queues */
9132 	ice_dis_vsi(vsi, true);
9133 
9134 	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9135 		ice_remove_q_channels(vsi, true);
9138 		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
9139 				     num_online_cpus());
9140 		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
9141 				     num_online_cpus());
9142 	} else {
9143 		/* logic to rebuild the VSI, same as for ethtool -L */
9144 		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
9145 
9146 		for (i = 0; i < num_tcf; i++) {
9147 			if (!(ena_tc_qdisc & BIT(i)))
9148 				continue;
9149 
9150 			offset = vsi->mqprio_qopt.qopt.offset[i];
9151 			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
9152 			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
9153 		}
9154 		vsi->req_txq = offset + qcount_tx;
9155 		vsi->req_rxq = offset + qcount_rx;
9156 
9157 		/* store away the original rss_size so that it gets reused
9158 		 * by ice_vsi_rebuild during the tc-qdisc delete stage to
9159 		 * determine the rss_size for the main VSI
9160 		 */
9161 		vsi->orig_rss_size = vsi->rss_size;
9162 	}
9163 
9164 	/* save current values of Tx and Rx queues before calling VSI rebuild
9165 	 * for fallback option
9166 	 */
9167 	cur_txq = vsi->num_txq;
9168 	cur_rxq = vsi->num_rxq;
9169 
9170 	/* proceed with rebuilding the main VSI using the correct number of queues */
9171 	ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
9172 	if (ret) {
9173 		/* fall back to the current number of queues */
9174 		dev_info(dev, "Rebuild failed with new queues, retrying with current number of queues\n");
9175 		vsi->req_txq = cur_txq;
9176 		vsi->req_rxq = cur_rxq;
9177 		clear_bit(ICE_RESET_FAILED, pf->state);
9178 		if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
9179 			dev_err(dev, "Rebuild of main VSI failed again\n");
9180 			return ret;
9181 		}
9182 	}
9183 
9184 	vsi->all_numtc = num_tcf;
9185 	vsi->all_enatc = ena_tc_qdisc;
9186 	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
9187 	if (ret) {
9188 		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
9189 			   vsi->vsi_num);
9190 		goto exit;
9191 	}
9192 
9193 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9194 		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
9195 		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
9196 
9197 		/* set TC0 rate limit if specified */
9198 		if (max_tx_rate || min_tx_rate) {
9199 			/* convert to Kbits/s */
9200 			if (max_tx_rate)
9201 				max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
9202 			if (min_tx_rate)
9203 				min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
9204 
9205 			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
9206 			if (!ret) {
9207 				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
9208 					max_tx_rate, min_tx_rate, vsi->vsi_num);
9209 			} else {
9210 				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
9211 					max_tx_rate, min_tx_rate, vsi->vsi_num);
9212 				goto exit;
9213 			}
9214 		}
9215 		ret = ice_create_q_channels(vsi);
9216 		if (ret) {
9217 			netdev_err(netdev, "failed configuring queue channels\n");
9218 			goto exit;
9219 		} else {
9220 			netdev_dbg(netdev, "successfully configured channels\n");
9221 		}
9222 	}
9223 
9224 	if (vsi->ch_rss_size)
9225 		ice_vsi_cfg_rss_lut_key(vsi);
9226 
9227 exit:
9228 	/* on error, reset all_numtc and all_enatc */
9229 	if (ret) {
9230 		vsi->all_numtc = 0;
9231 		vsi->all_enatc = 0;
9232 	}
9233 	/* resume VSI */
9234 	ice_ena_vsi(vsi, true);
9235 
9236 	return ret;
9237 }
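
/* A typical invocation that lands here (illustrative commands; the
 * interface name is an assumption):
 *
 *	ethtool -K eth0 hw-tc-offload on
 *	tc qdisc add dev eth0 root mqprio num_tc 3 \
 *		map 0 1 2 queues 2@0 4@2 8@6 hw 1 mode channel
 *
 * and the matching teardown, which takes the !hw path above:
 *
 *	tc qdisc del dev eth0 root
 */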
9238 
9239 static LIST_HEAD(ice_block_cb_list);
9240 
9241 static int
9242 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
9243 	     void *type_data)
9244 {
9245 	struct ice_netdev_priv *np = netdev_priv(netdev);
9246 	struct ice_pf *pf = np->vsi->back;
9247 	bool locked = false;
9248 	int err;
9249 
9250 	switch (type) {
9251 	case TC_SETUP_BLOCK:
9252 		return flow_block_cb_setup_simple(type_data,
9253 						  &ice_block_cb_list,
9254 						  ice_setup_tc_block_cb,
9255 						  np, np, true);
9256 	case TC_SETUP_QDISC_MQPRIO:
9257 		if (ice_is_eswitch_mode_switchdev(pf)) {
9258 			netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
9259 			return -EOPNOTSUPP;
9260 		}
9261 
9262 		if (pf->adev) {
9263 			mutex_lock(&pf->adev_mutex);
9264 			device_lock(&pf->adev->dev);
9265 			locked = true;
9266 			if (pf->adev->dev.driver) {
9267 				netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
9268 				err = -EBUSY;
9269 				goto adev_unlock;
9270 			}
9271 		}
9272 
9273 		/* set up the traffic classifier for the receive side */
9274 		mutex_lock(&pf->tc_mutex);
9275 		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
9276 		mutex_unlock(&pf->tc_mutex);
9277 
9278 adev_unlock:
9279 		if (locked) {
9280 			device_unlock(&pf->adev->dev);
9281 			mutex_unlock(&pf->adev_mutex);
9282 		}
9283 		return err;
9284 	default:
9285 		return -EOPNOTSUPP;
9286 	}
9287 	return -EOPNOTSUPP;
9288 }
9289 
9290 static struct ice_indr_block_priv *
9291 ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
9292 			   struct net_device *netdev)
9293 {
9294 	struct ice_indr_block_priv *cb_priv;
9295 
9296 	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
9297 		if (!cb_priv->netdev)
9298 			return NULL;
9299 		if (cb_priv->netdev == netdev)
9300 			return cb_priv;
9301 	}
9302 	return NULL;
9303 }
9304 
9305 static int
9306 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
9307 			void *indr_priv)
9308 {
9309 	struct ice_indr_block_priv *priv = indr_priv;
9310 	struct ice_netdev_priv *np = priv->np;
9311 
9312 	switch (type) {
9313 	case TC_SETUP_CLSFLOWER:
9314 		return ice_setup_tc_cls_flower(np, priv->netdev,
9315 					       (struct flow_cls_offload *)
9316 					       type_data);
9317 	default:
9318 		return -EOPNOTSUPP;
9319 	}
9320 }
9321 
9322 static int
9323 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
9324 			struct ice_netdev_priv *np,
9325 			struct flow_block_offload *f, void *data,
9326 			void (*cleanup)(struct flow_block_cb *block_cb))
9327 {
9328 	struct ice_indr_block_priv *indr_priv;
9329 	struct flow_block_cb *block_cb;
9330 
9331 	if (!ice_is_tunnel_supported(netdev) &&
9332 	    !(is_vlan_dev(netdev) &&
9333 	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
9334 		return -EOPNOTSUPP;
9335 
9336 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9337 		return -EOPNOTSUPP;
9338 
9339 	switch (f->command) {
9340 	case FLOW_BLOCK_BIND:
9341 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
9342 		if (indr_priv)
9343 			return -EEXIST;
9344 
9345 		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
9346 		if (!indr_priv)
9347 			return -ENOMEM;
9348 
9349 		indr_priv->netdev = netdev;
9350 		indr_priv->np = np;
9351 		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
9352 
9353 		block_cb =
9354 			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
9355 						 indr_priv, indr_priv,
9356 						 ice_rep_indr_tc_block_unbind,
9357 						 f, netdev, sch, data, np,
9358 						 cleanup);
9359 
9360 		if (IS_ERR(block_cb)) {
9361 			list_del(&indr_priv->list);
9362 			kfree(indr_priv);
9363 			return PTR_ERR(block_cb);
9364 		}
9365 		flow_block_cb_add(block_cb, f);
9366 		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
9367 		break;
9368 	case FLOW_BLOCK_UNBIND:
9369 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
9370 		if (!indr_priv)
9371 			return -ENOENT;
9372 
9373 		block_cb = flow_block_cb_lookup(f->block,
9374 						ice_indr_setup_block_cb,
9375 						indr_priv);
9376 		if (!block_cb)
9377 			return -ENOENT;
9378 
9379 		flow_indr_block_cb_remove(block_cb, f);
9380 
9381 		list_del(&block_cb->driver_list);
9382 		break;
9383 	default:
9384 		return -EOPNOTSUPP;
9385 	}
9386 	return 0;
9387 }
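
/* This indirect block machinery is what lets filters installed on an
 * upper device (a tunnel netdev, or a VLAN on top of this PF) be
 * offloaded here, e.g. (illustrative command; device name and match
 * values are assumptions):
 *
 *	tc filter add dev vxlan0 protocol ip ingress flower \
 *		enc_key_id 42 enc_dst_port 4789 action drop
 */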
9388 
9389 static int
9390 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
9391 		     void *cb_priv, enum tc_setup_type type, void *type_data,
9392 		     void *data,
9393 		     void (*cleanup)(struct flow_block_cb *block_cb))
9394 {
9395 	switch (type) {
9396 	case TC_SETUP_BLOCK:
9397 		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
9398 					       data, cleanup);
9399 
9400 	default:
9401 		return -EOPNOTSUPP;
9402 	}
9403 }
9404 
9405 /**
9406  * ice_open - Called when a network interface becomes active
9407  * @netdev: network interface device structure
9408  *
9409  * The open entry point is called when a network interface is made
9410  * active by the system (IFF_UP). At this point all resources needed
9411  * for transmit and receive operations are allocated, the interrupt
9412  * handler is registered with the OS, the netdev watchdog is enabled,
9413  * and the stack is notified that the interface is ready.
9414  *
9415  * Returns 0 on success, negative value on failure
9416  */
9417 int ice_open(struct net_device *netdev)
9418 {
9419 	struct ice_netdev_priv *np = netdev_priv(netdev);
9420 	struct ice_pf *pf = np->vsi->back;
9421 
9422 	if (ice_is_reset_in_progress(pf->state)) {
9423 		netdev_err(netdev, "can't open net device while reset is in progress\n");
9424 		return -EBUSY;
9425 	}
9426 
9427 	return ice_open_internal(netdev);
9428 }
9429 
9430 /**
9431  * ice_open_internal - Called when a network interface becomes active
9432  * @netdev: network interface device structure
9433  *
9434  * Internal ice_open implementation. Should not be used directly,
9435  * except by ice_open and the reset handling routine.
9436  *
9437  * Returns 0 on success, negative value on failure
9438  */
9439 int ice_open_internal(struct net_device *netdev)
9440 {
9441 	struct ice_netdev_priv *np = netdev_priv(netdev);
9442 	struct ice_vsi *vsi = np->vsi;
9443 	struct ice_pf *pf = vsi->back;
9444 	struct ice_port_info *pi;
9445 	int err;
9446 
9447 	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
9448 		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
9449 		return -EIO;
9450 	}
9451 
9452 	netif_carrier_off(netdev);
9453 
9454 	pi = vsi->port_info;
9455 	err = ice_update_link_info(pi);
9456 	if (err) {
9457 		netdev_err(netdev, "Failed to get link info, error %d\n", err);
9458 		return err;
9459 	}
9460 
9461 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
9462 
9463 	/* Set PHY if there is media; otherwise, turn off the PHY */
9464 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
9465 		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9466 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
9467 			err = ice_init_phy_user_cfg(pi);
9468 			if (err) {
9469 				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
9470 					   err);
9471 				return err;
9472 			}
9473 		}
9474 
9475 		err = ice_configure_phy(vsi);
9476 		if (err) {
9477 			netdev_err(netdev, "Failed to set physical link up, error %d\n",
9478 				   err);
9479 			return err;
9480 		}
9481 	} else {
9482 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9483 		ice_set_link(vsi, false);
9484 	}
9485 
9486 	err = ice_vsi_open(vsi);
9487 	if (err)
9488 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
9489 			   vsi->vsi_num, vsi->vsw->sw_id);
9490 
9491 	/* Update existing tunnel information */
9492 	udp_tunnel_get_rx_info(netdev);
9493 
9494 	return err;
9495 }
9496 
9497 /**
9498  * ice_stop - Disables a network interface
9499  * @netdev: network interface device structure
9500  *
9501  * The stop entry point is called when an interface is de-activated by the OS,
9502  * and the netdevice enters the DOWN state. The hardware is still under the
9503  * driver's control, but the netdev interface is disabled.
9504  *
9505  * Returns success only - not allowed to fail
9506  */
9507 int ice_stop(struct net_device *netdev)
9508 {
9509 	struct ice_netdev_priv *np = netdev_priv(netdev);
9510 	struct ice_vsi *vsi = np->vsi;
9511 	struct ice_pf *pf = vsi->back;
9512 
9513 	if (ice_is_reset_in_progress(pf->state)) {
9514 		netdev_err(netdev, "can't stop net device while reset is in progress\n");
9515 		return -EBUSY;
9516 	}
9517 
9518 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
9519 		int link_err = ice_force_phys_link_state(vsi, false);
9520 
9521 		if (link_err) {
9522 			if (link_err == -ENOMEDIUM)
9523 				netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
9524 					    vsi->vsi_num);
9525 			else
9526 				netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
9527 					   vsi->vsi_num, link_err);
9528 
9529 			ice_vsi_close(vsi);
9530 			return -EIO;
9531 		}
9532 	}
9533 
9534 	ice_vsi_close(vsi);
9535 
9536 	return 0;
9537 }
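
/* Note: the forced PHY link-down above only runs when the
 * link-down-on-close private flag is set, e.g. (illustrative command;
 * flag name as exposed via ethtool private flags):
 *
 *	ethtool --set-priv-flags eth0 link-down-on-close on
 */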
9538 
9539 /**
9540  * ice_features_check - Validate encapsulated packet conforms to limits
9541  * @skb: skb buffer
9542  * @netdev: This port's netdev
9543  * @features: Offload features that the stack believes apply
9544  */
9545 static netdev_features_t
9546 ice_features_check(struct sk_buff *skb,
9547 		   struct net_device __always_unused *netdev,
9548 		   netdev_features_t features)
9549 {
9550 	bool gso = skb_is_gso(skb);
9551 	size_t len;
9552 
9553 	/* No point in doing any of this if neither checksum nor GSO are
9554 	 * being requested for this frame. We can rule out both by just
9555 	 * checking for CHECKSUM_PARTIAL
9556 	 */
9557 	if (skb->ip_summed != CHECKSUM_PARTIAL)
9558 		return features;
9559 
9560 	/* We cannot support GSO if the MSS is going to be less than
9561 	 * 64 bytes; if it is, drop GSO support for this frame.
9562 	 */
9563 	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
9564 		features &= ~NETIF_F_GSO_MASK;
9565 
9566 	len = skb_network_offset(skb);
9567 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
9568 		goto out_rm_features;
9569 
9570 	len = skb_network_header_len(skb);
9571 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9572 		goto out_rm_features;
9573 
9574 	if (skb->encapsulation) {
9575 		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
9576 		 * the case of IPIP frames, the transport header pointer is
9577 		 * after the inner header! So check to make sure that this
9578 		 * is a GRE or UDP_TUNNEL frame before doing that math.
9579 		 */
9580 		if (gso && (skb_shinfo(skb)->gso_type &
9581 			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
9582 			len = skb_inner_network_header(skb) -
9583 			      skb_transport_header(skb);
9584 			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
9585 				goto out_rm_features;
9586 		}
9587 
9588 		len = skb_inner_network_header_len(skb);
9589 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9590 			goto out_rm_features;
9591 	}
9592 
9593 	return features;
9594 out_rm_features:
9595 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9596 }
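
/* Worked example (illustrative): a plain IPv4/TCP frame has a 14-byte
 * MAC header and a 20-byte IP header - both even and within the
 * ICE_TXD_MACLEN_MAX/ICE_TXD_IPLEN_MAX limits - so it keeps its
 * offloads; a frame whose network header started at an odd offset would
 * instead lose checksum and GSO support via out_rm_features.
 */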
9597 
9598 static const struct net_device_ops ice_netdev_safe_mode_ops = {
9599 	.ndo_open = ice_open,
9600 	.ndo_stop = ice_stop,
9601 	.ndo_start_xmit = ice_start_xmit,
9602 	.ndo_set_mac_address = ice_set_mac_address,
9603 	.ndo_validate_addr = eth_validate_addr,
9604 	.ndo_change_mtu = ice_change_mtu,
9605 	.ndo_get_stats64 = ice_get_stats64,
9606 	.ndo_tx_timeout = ice_tx_timeout,
9607 	.ndo_bpf = ice_xdp_safe_mode,
9608 };
9609 
9610 static const struct net_device_ops ice_netdev_ops = {
9611 	.ndo_open = ice_open,
9612 	.ndo_stop = ice_stop,
9613 	.ndo_start_xmit = ice_start_xmit,
9614 	.ndo_select_queue = ice_select_queue,
9615 	.ndo_features_check = ice_features_check,
9616 	.ndo_fix_features = ice_fix_features,
9617 	.ndo_set_rx_mode = ice_set_rx_mode,
9618 	.ndo_set_mac_address = ice_set_mac_address,
9619 	.ndo_validate_addr = eth_validate_addr,
9620 	.ndo_change_mtu = ice_change_mtu,
9621 	.ndo_get_stats64 = ice_get_stats64,
9622 	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
9623 	.ndo_eth_ioctl = ice_eth_ioctl,
9624 	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
9625 	.ndo_set_vf_mac = ice_set_vf_mac,
9626 	.ndo_get_vf_config = ice_get_vf_cfg,
9627 	.ndo_set_vf_trust = ice_set_vf_trust,
9628 	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
9629 	.ndo_set_vf_link_state = ice_set_vf_link_state,
9630 	.ndo_get_vf_stats = ice_get_vf_stats,
9631 	.ndo_set_vf_rate = ice_set_vf_bw,
9632 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
9633 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
9634 	.ndo_setup_tc = ice_setup_tc,
9635 	.ndo_set_features = ice_set_features,
9636 	.ndo_bridge_getlink = ice_bridge_getlink,
9637 	.ndo_bridge_setlink = ice_bridge_setlink,
9638 	.ndo_fdb_add = ice_fdb_add,
9639 	.ndo_fdb_del = ice_fdb_del,
9640 #ifdef CONFIG_RFS_ACCEL
9641 	.ndo_rx_flow_steer = ice_rx_flow_steer,
9642 #endif
9643 	.ndo_tx_timeout = ice_tx_timeout,
9644 	.ndo_bpf = ice_xdp,
9645 	.ndo_xdp_xmit = ice_xdp_xmit,
9646 	.ndo_xsk_wakeup = ice_xsk_wakeup,
9647 };
9648