xref: /linux/drivers/net/ethernet/intel/ice/ice_main.c (revision efb339a83368ab25de1a18c0fdff85e01c13a1ea)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <generated/utsrelease.h>
9 #include "ice.h"
10 #include "ice_base.h"
11 #include "ice_lib.h"
12 #include "ice_fltr.h"
13 #include "ice_dcb_lib.h"
14 #include "ice_dcb_nl.h"
15 #include "ice_devlink.h"
16 /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
17  * ice tracepoint functions. This must be done exactly once across the
18  * ice driver.
19  */
20 #define CREATE_TRACE_POINTS
21 #include "ice_trace.h"
22 #include "ice_eswitch.h"
23 #include "ice_tc_lib.h"
24 #include "ice_vsi_vlan_ops.h"
25 #include <net/xdp_sock_drv.h>
26 
27 #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
28 static const char ice_driver_string[] = DRV_SUMMARY;
29 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
30 
31 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
32 #define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
33 #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
34 
35 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
36 MODULE_DESCRIPTION(DRV_SUMMARY);
37 MODULE_LICENSE("GPL v2");
38 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
39 
40 static int debug = -1;
41 module_param(debug, int, 0644);
42 #ifndef CONFIG_DYNAMIC_DEBUG
43 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
44 #else
45 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
46 #endif /* !CONFIG_DYNAMIC_DEBUG */
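
/* The parameter can be set at module load time, e.g. (illustrative value,
 * not taken from this file):
 *
 *	# modprobe ice debug=0x3
 *
 * A plain value seeds the netif message level; per the description above, a
 * value of the form 0x8XXXXXXX selects the hw debug_mask when the kernel is
 * built without CONFIG_DYNAMIC_DEBUG.
 */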
47 
48 DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
49 EXPORT_SYMBOL(ice_xdp_locking_key);
50 
51 /**
52  * ice_hw_to_dev - Get device pointer from the hardware structure
53  * @hw: pointer to the device HW structure
54  *
55  * Used to access the device pointer from compilation units which can't easily
56  * include the definition of struct ice_pf without leading to circular header
57  * dependencies.
58  */
59 struct device *ice_hw_to_dev(struct ice_hw *hw)
60 {
61 	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
62 
63 	return &pf->pdev->dev;
64 }
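
/* ice_hw_to_dev() is the classic container_of() pattern: given a pointer to
 * an embedded member, recover the enclosing structure by subtracting the
 * member's offset. A minimal standalone sketch of the same idea (the types
 * here are illustrative, not the driver's):
 *
 *	#include <stddef.h>
 *
 *	struct parent {
 *		int a;
 *		int member;
 *	};
 *
 *	#define my_container_of(ptr, type, field) \
 *		((type *)((char *)(ptr) - offsetof(type, field)))
 *
 *	static struct parent *parent_of(int *m)
 *	{
 *		return my_container_of(m, struct parent, member);
 *	}
 */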
65 
66 static struct workqueue_struct *ice_wq;
67 static const struct net_device_ops ice_netdev_safe_mode_ops;
68 static const struct net_device_ops ice_netdev_ops;
69 
70 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
71 
72 static void ice_vsi_release_all(struct ice_pf *pf);
73 
74 static int ice_rebuild_channels(struct ice_pf *pf);
75 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
76 
77 static int
78 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
79 		     void *cb_priv, enum tc_setup_type type, void *type_data,
80 		     void *data,
81 		     void (*cleanup)(struct flow_block_cb *block_cb));
82 
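/**
 * netif_is_ice - Check if the given netdev belongs to the ice driver
 * @dev: the net_device to check
 */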
83 bool netif_is_ice(struct net_device *dev)
84 {
85 	return dev && (dev->netdev_ops == &ice_netdev_ops);
86 }
87 
88 /**
89  * ice_get_tx_pending - returns number of Tx descriptors not processed
90  * @ring: the ring of descriptors
91  */
92 static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
93 {
94 	u16 head, tail;
95 
96 	head = ring->next_to_clean;
97 	tail = ring->next_to_use;
98 
99 	if (head != tail)
100 		return (head < tail) ?
101 			tail - head : (tail + ring->count - head);
102 	return 0;
103 }
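
/* The pending count must handle ring wrap-around: head (next_to_clean) and
 * tail (next_to_use) are indexes modulo ring->count, so the distance from
 * head to tail is tail - head while tail is ahead, and tail + count - head
 * once tail has wrapped. A standalone sketch with worked values:
 *
 *	static unsigned int pending(unsigned int head, unsigned int tail,
 *				    unsigned int count)
 *	{
 *		if (head == tail)
 *			return 0;
 *		return head < tail ? tail - head : tail + count - head;
 *	}
 *
 * pending(2, 7, 16) == 5 (no wrap); pending(14, 3, 16) == 5 (tail wrapped).
 */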
104 
105 /**
106  * ice_check_for_hang_subtask - check for and recover hung queues
107  * @pf: pointer to PF struct
108  */
109 static void ice_check_for_hang_subtask(struct ice_pf *pf)
110 {
111 	struct ice_vsi *vsi = NULL;
112 	struct ice_hw *hw;
113 	unsigned int i;
114 	int packets;
115 	u32 v;
116 
117 	ice_for_each_vsi(pf, v)
118 		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
119 			vsi = pf->vsi[v];
120 			break;
121 		}
122 
123 	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
124 		return;
125 
126 	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
127 		return;
128 
129 	hw = &vsi->back->hw;
130 
131 	ice_for_each_txq(vsi, i) {
132 		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
133 		struct ice_ring_stats *ring_stats;
134 
135 		if (!tx_ring)
136 			continue;
137 		if (ice_ring_ch_enabled(tx_ring))
138 			continue;
139 
140 		ring_stats = tx_ring->ring_stats;
141 		if (!ring_stats)
142 			continue;
143 
144 		if (tx_ring->desc) {
145 			/* If packet counter has not changed the queue is
146 			 * likely stalled, so force an interrupt for this
147 			 * queue.
148 			 *
149 			 * prev_pkt would be negative if there was no
150 			 * pending work.
151 			 */
152 			packets = ring_stats->stats.pkts & INT_MAX;
153 			if (ring_stats->tx_stats.prev_pkt == packets) {
154 				/* Trigger sw interrupt to revive the queue */
155 				ice_trigger_sw_intr(hw, tx_ring->q_vector);
156 				continue;
157 			}
158 
159 			/* Memory barrier between read of packet count and call
160 			 * to ice_get_tx_pending()
161 			 */
162 			smp_rmb();
163 			ring_stats->tx_stats.prev_pkt =
164 			    ice_get_tx_pending(tx_ring) ? packets : -1;
165 		}
166 	}
167 }
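
/* The stall detection above in a nutshell: prev_pkt remembers the packet
 * count from the previous pass, or -1 when the queue had no pending work.
 * If a pass sees the same packet count while descriptors are still pending,
 * the queue made no progress and a software interrupt revives it:
 *
 *	if (prev_pkt == pkts)
 *		kick_queue();                    // no progress was made
 *	else
 *		prev_pkt = pending ? pkts : -1;  // arm only if work remains
 */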
168 
169 /**
170  * ice_init_mac_fltr - Set initial MAC filters
171  * @pf: board private structure
172  *
173  * Set initial set of MAC filters for PF VSI; configure filters for permanent
174  * address and broadcast address. If an error is encountered, the netdevice
175  * will be unregistered.
176  */
177 static int ice_init_mac_fltr(struct ice_pf *pf)
178 {
179 	struct ice_vsi *vsi;
180 	u8 *perm_addr;
181 
182 	vsi = ice_get_main_vsi(pf);
183 	if (!vsi)
184 		return -EINVAL;
185 
186 	perm_addr = vsi->port_info->mac.perm_addr;
187 	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
188 }
189 
190 /**
191  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
192  * @netdev: the net device on which the sync is happening
193  * @addr: MAC address to sync
194  *
195  * This is a callback function which is called by the in-kernel device sync
196  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
197  * populates the tmp_sync_list, which is later used by ice_fltr_add_mac_list
198  * to add the MAC filters to the hardware.
199  */
200 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
201 {
202 	struct ice_netdev_priv *np = netdev_priv(netdev);
203 	struct ice_vsi *vsi = np->vsi;
204 
205 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
206 				     ICE_FWD_TO_VSI))
207 		return -EINVAL;
208 
209 	return 0;
210 }
211 
212 /**
213  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
214  * @netdev: the net device on which the unsync is happening
215  * @addr: MAC address to unsync
216  *
217  * This is a callback function which is called by the in-kernel device unsync
218  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
219  * populates the tmp_unsync_list, which is later used by
220  * ice_fltr_remove_mac_list to delete the MAC filters from the hardware.
221  */
222 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
223 {
224 	struct ice_netdev_priv *np = netdev_priv(netdev);
225 	struct ice_vsi *vsi = np->vsi;
226 
227 	/* Under some circumstances, we might receive a request to delete our
228 	 * own device address from our uc list. Because we store the device
229 	 * address in the VSI's MAC filter list, we need to ignore such
230 	 * requests and not delete our device address from this list.
231 	 */
232 	if (ether_addr_equal(addr, netdev->dev_addr))
233 		return 0;
234 
235 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
236 				     ICE_FWD_TO_VSI))
237 		return -EINVAL;
238 
239 	return 0;
240 }
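
/* __dev_uc_sync()/__dev_mc_sync() walk the netdev's address lists and call
 * the driver's sync callback for addresses that appeared and the unsync
 * callback for addresses that went away; the two callbacks above only queue
 * addresses, and ice_vsi_sync_fltr() below pushes the queued changes to
 * hardware. A simplified standalone model of that contract (plain arrays
 * stand in for the kernel's address lists; the kernel's actual
 * implementation tracks per-address sync state instead of diffing):
 *
 *	#include <string.h>
 *
 *	static void sync_lists(const char **want, int nwant,
 *			       const char **have, int nhave,
 *			       void (*sync)(const char *),
 *			       void (*unsync)(const char *))
 *	{
 *		int i, j;
 *
 *		for (i = 0; i < nwant; i++) {	// newly added addresses
 *			for (j = 0; j < nhave; j++)
 *				if (!strcmp(want[i], have[j]))
 *					break;
 *			if (j == nhave)
 *				sync(want[i]);
 *		}
 *		for (j = 0; j < nhave; j++) {	// removed addresses
 *			for (i = 0; i < nwant; i++)
 *				if (!strcmp(want[i], have[j]))
 *					break;
 *			if (i == nwant)
 *				unsync(have[j]);
 *		}
 *	}
 */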
241 
242 /**
243  * ice_vsi_fltr_changed - check if filter state changed
244  * @vsi: VSI to be checked
245  *
246  * returns true if filter state has changed, false otherwise.
247  */
248 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
249 {
250 	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
251 	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
252 }
253 
254 /**
255  * ice_set_promisc - Enable promiscuous mode for a given PF
256  * @vsi: the VSI being configured
257  * @promisc_m: mask of promiscuous config bits
258  *
259  */
260 static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
261 {
262 	int status;
263 
264 	if (vsi->type != ICE_VSI_PF)
265 		return 0;
266 
267 	if (ice_vsi_has_non_zero_vlans(vsi)) {
268 		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
269 		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
270 						       promisc_m);
271 	} else {
272 		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
273 						  promisc_m, 0);
274 	}
275 	if (status && status != -EEXIST)
276 		return status;
277 
278 	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
279 		   vsi->vsi_num, promisc_m);
280 	return 0;
281 }
282 
283 /**
284  * ice_clear_promisc - Disable promiscuous mode for a given PF
285  * @vsi: the VSI being configured
286  * @promisc_m: mask of promiscuous config bits
287  *
288  */
289 static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
290 {
291 	int status;
292 
293 	if (vsi->type != ICE_VSI_PF)
294 		return 0;
295 
296 	if (ice_vsi_has_non_zero_vlans(vsi)) {
297 		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
298 		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
299 							 promisc_m);
300 	} else {
301 		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
302 						    promisc_m, 0);
303 	}
304 
305 	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
306 		   vsi->vsi_num, promisc_m);
307 	return status;
308 }
309 
310 /**
311  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
312  * @vsi: ptr to the VSI
313  *
314  * Push any outstanding VSI filter changes through the AdminQ.
315  */
316 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
317 {
318 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
319 	struct device *dev = ice_pf_to_dev(vsi->back);
320 	struct net_device *netdev = vsi->netdev;
321 	bool promisc_forced_on = false;
322 	struct ice_pf *pf = vsi->back;
323 	struct ice_hw *hw = &pf->hw;
324 	u32 changed_flags = 0;
325 	int err;
326 
327 	if (!vsi->netdev)
328 		return -EINVAL;
329 
330 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
331 		usleep_range(1000, 2000);
332 
333 	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
334 	vsi->current_netdev_flags = vsi->netdev->flags;
335 
336 	INIT_LIST_HEAD(&vsi->tmp_sync_list);
337 	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
338 
339 	if (ice_vsi_fltr_changed(vsi)) {
340 		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
341 		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
342 
343 		/* grab the netdev's addr_list_lock */
344 		netif_addr_lock_bh(netdev);
345 		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
346 			      ice_add_mac_to_unsync_list);
347 		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
348 			      ice_add_mac_to_unsync_list);
349 		/* our temp lists are populated. release lock */
350 		netif_addr_unlock_bh(netdev);
351 	}
352 
353 	/* Remove MAC addresses in the unsync list */
354 	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
355 	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
356 	if (err) {
357 		netdev_err(netdev, "Failed to delete MAC filters\n");
358 		/* if we failed because of alloc failures, just bail */
359 		if (err == -ENOMEM)
360 			goto out;
361 	}
362 
363 	/* Add MAC addresses in the sync list */
364 	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
365 	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
366 	/* If a filter was added successfully or already exists, don't treat
367 	 * it as an error. Instead, continue processing the rest of the
368 	 * function.
369 	 */
370 	if (err && err != -EEXIST) {
371 		netdev_err(netdev, "Failed to add MAC filters\n");
372 		/* If there is no more space for new umac filters, VSI
373 		 * should go into promiscuous mode. There should be some
374 		 * space reserved for promiscuous filters.
375 		 */
376 		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
377 		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
378 				      vsi->state)) {
379 			promisc_forced_on = true;
380 			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
381 				    vsi->vsi_num);
382 		} else {
383 			goto out;
384 		}
385 	}
386 	err = 0;
387 	/* check for changes in promiscuous modes */
388 	if (changed_flags & IFF_ALLMULTI) {
389 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
390 			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
391 			if (err) {
392 				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
393 				goto out_promisc;
394 			}
395 		} else {
396 			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
397 			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
398 			if (err) {
399 				vsi->current_netdev_flags |= IFF_ALLMULTI;
400 				goto out_promisc;
401 			}
402 		}
403 	}
404 
405 	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
406 	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
407 		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
408 		if (vsi->current_netdev_flags & IFF_PROMISC) {
409 			/* Apply Rx filter rule to get traffic from wire */
410 			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
411 				err = ice_set_dflt_vsi(vsi);
412 				if (err && err != -EEXIST) {
413 					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
414 						   err, vsi->vsi_num);
415 					vsi->current_netdev_flags &=
416 						~IFF_PROMISC;
417 					goto out_promisc;
418 				}
419 				err = 0;
420 				vlan_ops->dis_rx_filtering(vsi);
421 
422 				/* promiscuous mode implies allmulticast so
423 				 * that VSIs that are in promiscuous mode are
424 				 * subscribed to multicast packets coming to
425 				 * the port
426 				 */
427 				err = ice_set_promisc(vsi,
428 						      ICE_MCAST_PROMISC_BITS);
429 				if (err)
430 					goto out_promisc;
431 			}
432 		} else {
433 			/* Clear Rx filter to remove traffic from wire */
434 			if (ice_is_vsi_dflt_vsi(vsi)) {
435 				err = ice_clear_dflt_vsi(vsi);
436 				if (err) {
437 					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
438 						   err, vsi->vsi_num);
439 					vsi->current_netdev_flags |=
440 						IFF_PROMISC;
441 					goto out_promisc;
442 				}
443 				if (vsi->netdev->features &
444 				    NETIF_F_HW_VLAN_CTAG_FILTER)
445 					vlan_ops->ena_rx_filtering(vsi);
446 			}
447 
448 			/* disable allmulti here, but only if allmulti is not
449 			 * still enabled for the netdev
450 			 */
451 			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
452 				err = ice_clear_promisc(vsi,
453 							ICE_MCAST_PROMISC_BITS);
454 				if (err) {
455 					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
456 						   err, vsi->vsi_num);
457 				}
458 			}
459 		}
460 	}
461 	goto exit;
462 
463 out_promisc:
464 	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
465 	goto exit;
466 out:
467 	/* if something went wrong then set the changed flag so we try again */
468 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
469 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
470 exit:
471 	clear_bit(ICE_CFG_BUSY, vsi->state);
472 	return err;
473 }
474 
475 /**
476  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
477  * @pf: board private structure
478  */
479 static void ice_sync_fltr_subtask(struct ice_pf *pf)
480 {
481 	int v;
482 
483 	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
484 		return;
485 
486 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
487 
488 	ice_for_each_vsi(pf, v)
489 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
490 		    ice_vsi_sync_fltr(pf->vsi[v])) {
491 			/* come back and try again later */
492 			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
493 			break;
494 		}
495 }
496 
497 /**
498  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
499  * @pf: the PF
500  * @locked: is the rtnl_lock already held
501  */
502 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
503 {
504 	int node;
505 	int v;
506 
507 	ice_for_each_vsi(pf, v)
508 		if (pf->vsi[v])
509 			ice_dis_vsi(pf->vsi[v], locked);
510 
511 	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
512 		pf->pf_agg_node[node].num_vsis = 0;
513 
514 	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
515 		pf->vf_agg_node[node].num_vsis = 0;
516 }
517 
518 /**
519  * ice_clear_sw_switch_recipes - clear switch recipes
520  * @pf: board private structure
521  *
522  * Mark switch recipes as not created in sw structures. There are cases where
523  * rules (especially advanced rules) need to be restored, either re-read from
524  * hardware or added again, for example after a reset. The 'recp_created'
525  * flag prevents that and needs to be cleared upfront.
526  */
527 static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
528 {
529 	struct ice_sw_recipe *recp;
530 	u8 i;
531 
532 	recp = pf->hw.switch_info->recp_list;
533 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
534 		recp[i].recp_created = false;
535 }
536 
537 /**
538  * ice_prepare_for_reset - prep for reset
539  * @pf: board private structure
540  * @reset_type: reset type requested
541  *
542  * Inform or close all dependent features in prep for reset.
543  */
544 static void
545 ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
546 {
547 	struct ice_hw *hw = &pf->hw;
548 	struct ice_vsi *vsi;
549 	struct ice_vf *vf;
550 	unsigned int bkt;
551 
552 	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
553 
554 	/* already prepared for reset */
555 	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
556 		return;
557 
558 	ice_unplug_aux_dev(pf);
559 
560 	/* Notify VFs of impending reset */
561 	if (ice_check_sq_alive(hw, &hw->mailboxq))
562 		ice_vc_notify_reset(pf);
563 
564 	/* Disable VFs until reset is completed */
565 	mutex_lock(&pf->vfs.table_lock);
566 	ice_for_each_vf(pf, bkt, vf)
567 		ice_set_vf_state_dis(vf);
568 	mutex_unlock(&pf->vfs.table_lock);
569 
570 	if (ice_is_eswitch_mode_switchdev(pf)) {
571 		if (reset_type != ICE_RESET_PFR)
572 			ice_clear_sw_switch_recipes(pf);
573 	}
574 
575 	/* release ADQ specific HW and SW resources */
576 	vsi = ice_get_main_vsi(pf);
577 	if (!vsi)
578 		goto skip;
579 
580 	/* To be on the safe side, reset orig_rss_size so that the normal
581 	 * flow of deciding rss_size can take precedence
582 	 */
583 	vsi->orig_rss_size = 0;
584 
585 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
586 		if (reset_type == ICE_RESET_PFR) {
587 			vsi->old_ena_tc = vsi->all_enatc;
588 			vsi->old_numtc = vsi->all_numtc;
589 		} else {
590 			ice_remove_q_channels(vsi, true);
591 
592 			/* Other reset types do not support channel rebuild,
593 			 * so reset the channel-related info
594 			 */
595 			vsi->old_ena_tc = 0;
596 			vsi->all_enatc = 0;
597 			vsi->old_numtc = 0;
598 			vsi->all_numtc = 0;
599 			vsi->req_txq = 0;
600 			vsi->req_rxq = 0;
601 			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
602 			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
603 		}
604 	}
605 skip:
606 
607 	/* clear SW filtering DB */
608 	ice_clear_hw_tbls(hw);
609 	/* disable the VSIs and their queues that are not already DOWN */
610 	ice_pf_dis_all_vsi(pf, false);
611 
612 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
613 		ice_ptp_prepare_for_reset(pf);
614 
615 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
616 		ice_gnss_exit(pf);
617 
618 	if (hw->port_info)
619 		ice_sched_clear_port(hw->port_info);
620 
621 	ice_shutdown_all_ctrlq(hw);
622 
623 	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
624 }
625 
626 /**
627  * ice_do_reset - Initiate one of many types of resets
628  * @pf: board private structure
629  * @reset_type: reset type requested before this function was called.
630  */
631 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
632 {
633 	struct device *dev = ice_pf_to_dev(pf);
634 	struct ice_hw *hw = &pf->hw;
635 
636 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
637 
638 	ice_prepare_for_reset(pf, reset_type);
639 
640 	/* trigger the reset */
641 	if (ice_reset(hw, reset_type)) {
642 		dev_err(dev, "reset %d failed\n", reset_type);
643 		set_bit(ICE_RESET_FAILED, pf->state);
644 		clear_bit(ICE_RESET_OICR_RECV, pf->state);
645 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
646 		clear_bit(ICE_PFR_REQ, pf->state);
647 		clear_bit(ICE_CORER_REQ, pf->state);
648 		clear_bit(ICE_GLOBR_REQ, pf->state);
649 		wake_up(&pf->reset_wait_queue);
650 		return;
651 	}
652 
653 	/* PFR is a bit of a special case because it doesn't result in an OICR
654 	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
655 	 * associated state bits.
656 	 */
657 	if (reset_type == ICE_RESET_PFR) {
658 		pf->pfr_count++;
659 		ice_rebuild(pf, reset_type);
660 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
661 		clear_bit(ICE_PFR_REQ, pf->state);
662 		wake_up(&pf->reset_wait_queue);
663 		ice_reset_all_vfs(pf);
664 	}
665 }
666 
667 /**
668  * ice_reset_subtask - Set up for resetting the device and driver
669  * @pf: board private structure
670  */
671 static void ice_reset_subtask(struct ice_pf *pf)
672 {
673 	enum ice_reset_req reset_type = ICE_RESET_INVAL;
674 
675 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
676 	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
677 	 * of reset is pending and sets bits in pf->state indicating the reset
678 	 * type and ICE_RESET_OICR_RECV. If the latter bit is set, prepare for
679 	 * the pending reset unless that is already done: a PF software-initiated
680 	 * global reset is already prepared, as indicated by
681 	 * ICE_PREPARED_FOR_RESET, while a global reset initiated by firmware or
682 	 * by software on another PF is not, so prepare for it now. Then poll
683 	 * for reset completion, rebuild, and return.
684 	 */
685 	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
686 		/* Perform the largest reset requested */
687 		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
688 			reset_type = ICE_RESET_CORER;
689 		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
690 			reset_type = ICE_RESET_GLOBR;
691 		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
692 			reset_type = ICE_RESET_EMPR;
693 		/* return if no valid reset type requested */
694 		if (reset_type == ICE_RESET_INVAL)
695 			return;
696 		ice_prepare_for_reset(pf, reset_type);
697 
698 		/* make sure we are ready to rebuild */
699 		if (ice_check_reset(&pf->hw)) {
700 			set_bit(ICE_RESET_FAILED, pf->state);
701 		} else {
702 			/* done with reset. start rebuild */
703 			pf->hw.reset_ongoing = false;
704 			ice_rebuild(pf, reset_type);
705 			/* clear bit to resume normal operations, but
706 			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
707 			 */
708 			clear_bit(ICE_RESET_OICR_RECV, pf->state);
709 			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
710 			clear_bit(ICE_PFR_REQ, pf->state);
711 			clear_bit(ICE_CORER_REQ, pf->state);
712 			clear_bit(ICE_GLOBR_REQ, pf->state);
713 			wake_up(&pf->reset_wait_queue);
714 			ice_reset_all_vfs(pf);
715 		}
716 
717 		return;
718 	}
719 
720 	/* No pending resets to finish processing. Check for new resets */
721 	if (test_bit(ICE_PFR_REQ, pf->state))
722 		reset_type = ICE_RESET_PFR;
723 	if (test_bit(ICE_CORER_REQ, pf->state))
724 		reset_type = ICE_RESET_CORER;
725 	if (test_bit(ICE_GLOBR_REQ, pf->state))
726 		reset_type = ICE_RESET_GLOBR;
727 	/* If no valid reset type requested just return */
728 	if (reset_type == ICE_RESET_INVAL)
729 		return;
730 
731 	/* reset if not already down or busy */
732 	if (!test_bit(ICE_DOWN, pf->state) &&
733 	    !test_bit(ICE_CFG_BUSY, pf->state)) {
734 		ice_do_reset(pf, reset_type);
735 	}
736 }
737 
738 /**
739  * ice_print_topo_conflict - print topology conflict message
740  * @vsi: the VSI whose topology status is being checked
741  */
742 static void ice_print_topo_conflict(struct ice_vsi *vsi)
743 {
744 	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
745 	case ICE_AQ_LINK_TOPO_CONFLICT:
746 	case ICE_AQ_LINK_MEDIA_CONFLICT:
747 	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
748 	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
749 	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
750 		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
751 		break;
752 	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
753 		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
754 			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
755 		else
756 			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
757 		break;
758 	default:
759 		break;
760 	}
761 }
762 
763 /**
764  * ice_print_link_msg - print link up or down message
765  * @vsi: the VSI whose link status is being queried
766  * @isup: boolean for if the link is now up or down
767  */
768 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
769 {
770 	struct ice_aqc_get_phy_caps_data *caps;
771 	const char *an_advertised;
772 	const char *fec_req;
773 	const char *speed;
774 	const char *fec;
775 	const char *fc;
776 	const char *an;
777 	int status;
778 
779 	if (!vsi)
780 		return;
781 
782 	if (vsi->current_isup == isup)
783 		return;
784 
785 	vsi->current_isup = isup;
786 
787 	if (!isup) {
788 		netdev_info(vsi->netdev, "NIC Link is Down\n");
789 		return;
790 	}
791 
792 	switch (vsi->port_info->phy.link_info.link_speed) {
793 	case ICE_AQ_LINK_SPEED_100GB:
794 		speed = "100 G";
795 		break;
796 	case ICE_AQ_LINK_SPEED_50GB:
797 		speed = "50 G";
798 		break;
799 	case ICE_AQ_LINK_SPEED_40GB:
800 		speed = "40 G";
801 		break;
802 	case ICE_AQ_LINK_SPEED_25GB:
803 		speed = "25 G";
804 		break;
805 	case ICE_AQ_LINK_SPEED_20GB:
806 		speed = "20 G";
807 		break;
808 	case ICE_AQ_LINK_SPEED_10GB:
809 		speed = "10 G";
810 		break;
811 	case ICE_AQ_LINK_SPEED_5GB:
812 		speed = "5 G";
813 		break;
814 	case ICE_AQ_LINK_SPEED_2500MB:
815 		speed = "2.5 G";
816 		break;
817 	case ICE_AQ_LINK_SPEED_1000MB:
818 		speed = "1 G";
819 		break;
820 	case ICE_AQ_LINK_SPEED_100MB:
821 		speed = "100 M";
822 		break;
823 	default:
824 		speed = "Unknown ";
825 		break;
826 	}
827 
828 	switch (vsi->port_info->fc.current_mode) {
829 	case ICE_FC_FULL:
830 		fc = "Rx/Tx";
831 		break;
832 	case ICE_FC_TX_PAUSE:
833 		fc = "Tx";
834 		break;
835 	case ICE_FC_RX_PAUSE:
836 		fc = "Rx";
837 		break;
838 	case ICE_FC_NONE:
839 		fc = "None";
840 		break;
841 	default:
842 		fc = "Unknown";
843 		break;
844 	}
845 
846 	/* Get FEC mode based on negotiated link info */
847 	switch (vsi->port_info->phy.link_info.fec_info) {
848 	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
849 	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
850 		fec = "RS-FEC";
851 		break;
852 	case ICE_AQ_LINK_25G_KR_FEC_EN:
853 		fec = "FC-FEC/BASE-R";
854 		break;
855 	default:
856 		fec = "NONE";
857 		break;
858 	}
859 
860 	/* check if autoneg completed, might be false due to not supported */
861 	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
862 		an = "True";
863 	else
864 		an = "False";
865 
866 	/* Get FEC mode requested based on PHY caps last SW configuration */
867 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
868 	if (!caps) {
869 		fec_req = "Unknown";
870 		an_advertised = "Unknown";
871 		goto done;
872 	}
873 
874 	status = ice_aq_get_phy_caps(vsi->port_info, false,
875 				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
876 	if (status)
877 		netdev_info(vsi->netdev, "Get phy capability failed.\n");
878 
879 	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
880 
881 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
882 	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
883 		fec_req = "RS-FEC";
884 	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
885 		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
886 		fec_req = "FC-FEC/BASE-R";
887 	else
888 		fec_req = "NONE";
889 
890 	kfree(caps);
891 
892 done:
893 	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
894 		    speed, fec_req, fec, an_advertised, an, fc);
895 	ice_print_topo_conflict(vsi);
896 }
897 
898 /**
899  * ice_vsi_link_event - update the VSI's netdev
900  * @vsi: the VSI on which the link event occurred
901  * @link_up: whether or not the VSI needs to be set up or down
902  */
903 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
904 {
905 	if (!vsi)
906 		return;
907 
908 	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
909 		return;
910 
911 	if (vsi->type == ICE_VSI_PF) {
912 		if (link_up == netif_carrier_ok(vsi->netdev))
913 			return;
914 
915 		if (link_up) {
916 			netif_carrier_on(vsi->netdev);
917 			netif_tx_wake_all_queues(vsi->netdev);
918 		} else {
919 			netif_carrier_off(vsi->netdev);
920 			netif_tx_stop_all_queues(vsi->netdev);
921 		}
922 	}
923 }
924 
925 /**
926  * ice_set_dflt_mib - send a default config MIB to the FW
927  * @pf: private PF struct
928  *
929  * This function sends a default configuration MIB to the FW.
930  *
931  * If this function errors out at any point, the driver is still able to
932  * function. The main impact is that LFC may not operate as expected.
933  * Therefore, errors here should only be logged with a DBG message, and
934  * the driver should continue on with rebuild/re-enable.
935  */
936 static void ice_set_dflt_mib(struct ice_pf *pf)
937 {
938 	struct device *dev = ice_pf_to_dev(pf);
939 	u8 mib_type, *buf, *lldpmib = NULL;
940 	u16 len, typelen, offset = 0;
941 	struct ice_lldp_org_tlv *tlv;
942 	struct ice_hw *hw = &pf->hw;
943 	u32 ouisubtype;
944 
945 	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
946 	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
947 	if (!lldpmib) {
948 		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
949 			__func__);
950 		return;
951 	}
952 
953 	/* Add ETS CFG TLV */
954 	tlv = (struct ice_lldp_org_tlv *)lldpmib;
955 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
956 		   ICE_IEEE_ETS_TLV_LEN);
957 	tlv->typelen = htons(typelen);
958 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
959 		      ICE_IEEE_SUBTYPE_ETS_CFG);
960 	tlv->ouisubtype = htonl(ouisubtype);
961 
962 	buf = tlv->tlvinfo;
963 	buf[0] = 0;
964 
965 	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
966 	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
967 	 * Octets 13 - 20 are TSA values - leave as zeros
968 	 */
969 	buf[5] = 0x64;
970 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
971 	offset += len + 2;
972 	tlv = (struct ice_lldp_org_tlv *)
973 		((char *)tlv + sizeof(tlv->typelen) + len);
974 
975 	/* Add ETS REC TLV */
976 	buf = tlv->tlvinfo;
977 	tlv->typelen = htons(typelen);
978 
979 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
980 		      ICE_IEEE_SUBTYPE_ETS_REC);
981 	tlv->ouisubtype = htonl(ouisubtype);
982 
983 	/* First octet of buf is reserved
984 	 * Octets 1 - 4 map UP to TC - all UPs map to zero
985 	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
986 	 * Octets 13 - 20 are TSA value - leave as zeros
987 	 */
988 	buf[5] = 0x64;
989 	offset += len + 2;
990 	tlv = (struct ice_lldp_org_tlv *)
991 		((char *)tlv + sizeof(tlv->typelen) + len);
992 
993 	/* Add PFC CFG TLV */
994 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
995 		   ICE_IEEE_PFC_TLV_LEN);
996 	tlv->typelen = htons(typelen);
997 
998 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
999 		      ICE_IEEE_SUBTYPE_PFC_CFG);
1000 	tlv->ouisubtype = htonl(ouisubtype);
1001 
1002 	/* Point buf at this TLV's info area; it was left pointing into the
	 * ETS REC TLV above. Octet 0 sets the PFC capability to 8 TCs and
	 * octet 1 (the enable bitmap) is left as all zeros - PFC disabled.
	 */
	buf = tlv->tlvinfo;
1003 	buf[0] = 0x08;
1004 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
1005 	offset += len + 2;
1006 
1007 	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
1008 		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
1009 
1010 	kfree(lldpmib);
1011 }
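
/* Every LLDP TLV built above starts with a 16-bit header of 7 type bits
 * followed by 9 length bits, which is why the type is shifted by the
 * length-field width before being OR'd with the length. A worked example
 * for the ETS CFG TLV, assuming the standard values ICE_TLV_TYPE_ORG = 127,
 * ICE_LLDP_TLV_TYPE_S = 9 and ICE_IEEE_ETS_TLV_LEN = 25:
 *
 *	unsigned short typelen = (127u << 9) | 25;	// 0xFE19
 *	unsigned short len = typelen & 0x1ff;		// recovers 25
 */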
1012 
1013 /**
1014  * ice_check_phy_fw_load - check if PHY FW load failed
1015  * @pf: pointer to PF struct
1016  * @link_cfg_err: bitmap from the link info structure
1017  *
1018  * check if external PHY FW load failed and print an error message if it did
1019  */
1020 static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
1021 {
1022 	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
1023 		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1024 		return;
1025 	}
1026 
1027 	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
1028 		return;
1029 
1030 	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
1031 		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
1032 		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1033 	}
1034 }
1035 
1036 /**
1037  * ice_check_module_power
1038  * @pf: pointer to PF struct
1039  * @link_cfg_err: bitmap from the link info structure
1040  *
1041  * check module power level returned by a previous call to aq_get_link_info
1042  * and print error messages if module power level is not supported
1043  */
1044 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
1045 {
1046 	/* if module power level is supported, clear the flag */
1047 	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
1048 			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
1049 		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1050 		return;
1051 	}
1052 
1053 	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
1054 	 * above block didn't clear this bit, there's nothing to do
1055 	 */
1056 	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
1057 		return;
1058 
1059 	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
1060 		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
1061 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1062 	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
1063 		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
1064 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1065 	}
1066 }
1067 
1068 /**
1069  * ice_check_link_cfg_err - check if link configuration failed
1070  * @pf: pointer to the PF struct
1071  * @link_cfg_err: bitmap from the link info structure
1072  *
1073  * print if any link configuration failure happens due to the value in the
1074  * link_cfg_err parameter in the link info structure
1075  */
1076 static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
1077 {
1078 	ice_check_module_power(pf, link_cfg_err);
1079 	ice_check_phy_fw_load(pf, link_cfg_err);
1080 }
1081 
1082 /**
1083  * ice_link_event - process the link event
1084  * @pf: PF that the link event is associated with
1085  * @pi: port_info for the port that the link event is associated with
1086  * @link_up: true if the physical link is up and false if it is down
1087  * @link_speed: current link speed received from the link event
1088  *
1089  * Returns 0 on success and negative on failure
1090  */
1091 static int
1092 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
1093 	       u16 link_speed)
1094 {
1095 	struct device *dev = ice_pf_to_dev(pf);
1096 	struct ice_phy_info *phy_info;
1097 	struct ice_vsi *vsi;
1098 	u16 old_link_speed;
1099 	bool old_link;
1100 	int status;
1101 
1102 	phy_info = &pi->phy;
1103 	phy_info->link_info_old = phy_info->link_info;
1104 
1105 	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
1106 	old_link_speed = phy_info->link_info_old.link_speed;
1107 
1108 	/* update the link info structures and re-enable link events;
1109 	 * don't bail on failure since other bookkeeping still needs to be done
1110 	 */
1111 	status = ice_update_link_info(pi);
1112 	if (status)
1113 		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
1114 			pi->lport, status,
1115 			ice_aq_str(pi->hw->adminq.sq_last_status));
1116 
1117 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
1118 
1119 	/* Check if the link state is up after updating link info, and treat
1120 	 * this event as an UP event since the link is actually UP now.
1121 	 */
1122 	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
1123 		link_up = true;
1124 
1125 	vsi = ice_get_main_vsi(pf);
1126 	if (!vsi || !vsi->port_info)
1127 		return -EINVAL;
1128 
1129 	/* turn off PHY if media was removed */
1130 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
1131 	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
1132 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
1133 		ice_set_link(vsi, false);
1134 	}
1135 
1136 	/* nothing more to do if both link state and speed are unchanged */
1137 	if (link_up == old_link && link_speed == old_link_speed)
1138 		return 0;
1139 
1140 	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
1141 
1142 	if (ice_is_dcb_active(pf)) {
1143 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1144 			ice_dcb_rebuild(pf);
1145 	} else {
1146 		if (link_up)
1147 			ice_set_dflt_mib(pf);
1148 	}
1149 	ice_vsi_link_event(vsi, link_up);
1150 	ice_print_link_msg(vsi, link_up);
1151 
1152 	ice_vc_notify_link_state(pf);
1153 
1154 	return 0;
1155 }
1156 
1157 /**
1158  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
1159  * @pf: board private structure
1160  */
1161 static void ice_watchdog_subtask(struct ice_pf *pf)
1162 {
1163 	int i;
1164 
1165 	/* if interface is down do nothing */
1166 	if (test_bit(ICE_DOWN, pf->state) ||
1167 	    test_bit(ICE_CFG_BUSY, pf->state))
1168 		return;
1169 
1170 	/* make sure we don't do these things too often */
1171 	if (time_before(jiffies,
1172 			pf->serv_tmr_prev + pf->serv_tmr_period))
1173 		return;
1174 
1175 	pf->serv_tmr_prev = jiffies;
1176 
1177 	/* Update the stats for active netdevs so the network stack
1178 	 * can look at updated numbers whenever it cares to
1179 	 */
1180 	ice_update_pf_stats(pf);
1181 	ice_for_each_vsi(pf, i)
1182 		if (pf->vsi[i] && pf->vsi[i]->netdev)
1183 			ice_update_vsi_stats(pf->vsi[i]);
1184 }
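
/* time_before() compares jiffies safely across counter wrap-around by
 * testing the signed difference instead of the raw values. A standalone
 * sketch of the underlying trick:
 *
 *	static int before(unsigned long a, unsigned long b)
 *	{
 *		return (long)(a - b) < 0;	// true if a is earlier than b
 *	}
 *
 * This stays correct when the counter wraps between the two timestamps,
 * which a plain a < b comparison would get wrong.
 */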
1185 
1186 /**
1187  * ice_init_link_events - enable/initialize link events
1188  * @pi: pointer to the port_info instance
1189  *
1190  * Returns -EIO on failure, 0 on success
1191  */
1192 static int ice_init_link_events(struct ice_port_info *pi)
1193 {
1194 	u16 mask;
1195 
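	/* The AQ event mask is a suppression mask - a set bit disables the
	 * corresponding event - so invert to leave only the listed link
	 * events enabled.
	 */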
1196 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1197 		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
1198 		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
1199 
1200 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1201 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1202 			pi->lport);
1203 		return -EIO;
1204 	}
1205 
1206 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1207 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1208 			pi->lport);
1209 		return -EIO;
1210 	}
1211 
1212 	return 0;
1213 }
1214 
1215 /**
1216  * ice_handle_link_event - handle link event via ARQ
1217  * @pf: PF that the link event is associated with
1218  * @event: event structure containing link status info
1219  */
1220 static int
1221 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1222 {
1223 	struct ice_aqc_get_link_status_data *link_data;
1224 	struct ice_port_info *port_info;
1225 	int status;
1226 
1227 	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1228 	port_info = pf->hw.port_info;
1229 	if (!port_info)
1230 		return -EINVAL;
1231 
1232 	status = ice_link_event(pf, port_info,
1233 				!!(link_data->link_info & ICE_AQ_LINK_UP),
1234 				le16_to_cpu(link_data->link_speed));
1235 	if (status)
1236 		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1237 			status);
1238 
1239 	return status;
1240 }
1241 
1242 enum ice_aq_task_state {
1243 	ICE_AQ_TASK_WAITING = 0,
1244 	ICE_AQ_TASK_COMPLETE,
1245 	ICE_AQ_TASK_CANCELED,
1246 };
1247 
1248 struct ice_aq_task {
1249 	struct hlist_node entry;
1250 
1251 	u16 opcode;
1252 	struct ice_rq_event_info *event;
1253 	enum ice_aq_task_state state;
1254 };
1255 
1256 /**
1257  * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1258  * @pf: pointer to the PF private structure
1259  * @opcode: the opcode to wait for
1260  * @timeout: how long to wait, in jiffies
1261  * @event: storage for the event info
1262  *
1263  * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1264  * current thread will be put to sleep until the specified event occurs or
1265  * until the given timeout is reached.
1266  *
1267  * To obtain only the descriptor contents, pass an event without an allocated
1268  * msg_buf. If the complete data buffer is desired, allocate the
1269  * event->msg_buf with enough space ahead of time.
1270  *
1271  * Returns: zero on success, or a negative error code on failure.
1272  */
1273 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1274 			  struct ice_rq_event_info *event)
1275 {
1276 	struct device *dev = ice_pf_to_dev(pf);
1277 	struct ice_aq_task *task;
1278 	unsigned long start;
1279 	long ret;
1280 	int err;
1281 
1282 	task = kzalloc(sizeof(*task), GFP_KERNEL);
1283 	if (!task)
1284 		return -ENOMEM;
1285 
1286 	INIT_HLIST_NODE(&task->entry);
1287 	task->opcode = opcode;
1288 	task->event = event;
1289 	task->state = ICE_AQ_TASK_WAITING;
1290 
1291 	spin_lock_bh(&pf->aq_wait_lock);
1292 	hlist_add_head(&task->entry, &pf->aq_wait_list);
1293 	spin_unlock_bh(&pf->aq_wait_lock);
1294 
1295 	start = jiffies;
1296 
1297 	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
1298 					       timeout);
1299 	switch (task->state) {
1300 	case ICE_AQ_TASK_WAITING:
1301 		err = ret < 0 ? ret : -ETIMEDOUT;
1302 		break;
1303 	case ICE_AQ_TASK_CANCELED:
1304 		err = ret < 0 ? ret : -ECANCELED;
1305 		break;
1306 	case ICE_AQ_TASK_COMPLETE:
1307 		err = ret < 0 ? ret : 0;
1308 		break;
1309 	default:
1310 		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1311 		err = -EINVAL;
1312 		break;
1313 	}
1314 
1315 	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1316 		jiffies_to_msecs(jiffies - start),
1317 		jiffies_to_msecs(timeout),
1318 		opcode);
1319 
1320 	spin_lock_bh(&pf->aq_wait_lock);
1321 	hlist_del(&task->entry);
1322 	spin_unlock_bh(&pf->aq_wait_lock);
1323 	kfree(task);
1324 
1325 	return err;
1326 }
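
/* Usage sketch for a caller of ice_aq_wait_for_event() (the opcode, buffer
 * size, timeout and process() helper below are illustrative; only the
 * calling pattern comes from this file):
 *
 *	struct ice_rq_event_info event = { };
 *	int err;
 *
 *	event.buf_len = 1024;		// optional, for the full data buffer
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	if (!event.msg_buf)
 *		return -ENOMEM;
 *
 *	err = ice_aq_wait_for_event(pf, opcode, 3 * HZ, &event);
 *	if (!err)
 *		process(&event.desc, event.msg_buf, event.msg_len);
 *	kfree(event.msg_buf);
 */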
1327 
1328 /**
1329  * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1330  * @pf: pointer to the PF private structure
1331  * @opcode: the opcode of the event
1332  * @event: the event to check
1333  *
1334  * Loops over the current list of pending threads waiting for an AdminQ event.
1335  * For each matching task, copy the contents of the event into the task
1336  * structure and wake up the thread.
1337  *
1338  * If multiple threads wait for the same opcode, they will all be woken up.
1339  *
1340  * Note that event->msg_buf will only be duplicated if the event has a buffer
1341  * with enough space already allocated. Otherwise, only the descriptor and
1342  * message length will be copied.
1343  *
1344  * If a matching task is found, it is marked complete and the wait queue is woken.
1345  */
1346 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1347 				struct ice_rq_event_info *event)
1348 {
1349 	struct ice_aq_task *task;
1350 	bool found = false;
1351 
1352 	spin_lock_bh(&pf->aq_wait_lock);
1353 	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1354 		if (task->state || task->opcode != opcode)
1355 			continue;
1356 
1357 		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
1358 		task->event->msg_len = event->msg_len;
1359 
1360 		/* Only copy the data buffer if a destination was set */
1361 		if (task->event->msg_buf &&
1362 		    task->event->buf_len > event->buf_len) {
1363 			memcpy(task->event->msg_buf, event->msg_buf,
1364 			       event->buf_len);
1365 			task->event->buf_len = event->buf_len;
1366 		}
1367 
1368 		task->state = ICE_AQ_TASK_COMPLETE;
1369 		found = true;
1370 	}
1371 	spin_unlock_bh(&pf->aq_wait_lock);
1372 
1373 	if (found)
1374 		wake_up(&pf->aq_wait_queue);
1375 }
1376 
1377 /**
1378  * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1379  * @pf: the PF private structure
1380  *
1381  * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1382  * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1383  */
1384 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1385 {
1386 	struct ice_aq_task *task;
1387 
1388 	spin_lock_bh(&pf->aq_wait_lock);
1389 	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1390 		task->state = ICE_AQ_TASK_CANCELED;
1391 	spin_unlock_bh(&pf->aq_wait_lock);
1392 
1393 	wake_up(&pf->aq_wait_queue);
1394 }
1395 
1396 /**
1397  * __ice_clean_ctrlq - helper function to clean controlq rings
1398  * @pf: ptr to struct ice_pf
1399  * @q_type: specific Control queue type
1400  */
1401 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1402 {
1403 	struct device *dev = ice_pf_to_dev(pf);
1404 	struct ice_rq_event_info event;
1405 	struct ice_hw *hw = &pf->hw;
1406 	struct ice_ctl_q_info *cq;
1407 	u16 pending, i = 0;
1408 	const char *qtype;
1409 	u32 oldval, val;
1410 
1411 	/* Do not clean control queue if/when PF reset fails */
1412 	if (test_bit(ICE_RESET_FAILED, pf->state))
1413 		return 0;
1414 
1415 	switch (q_type) {
1416 	case ICE_CTL_Q_ADMIN:
1417 		cq = &hw->adminq;
1418 		qtype = "Admin";
1419 		break;
1420 	case ICE_CTL_Q_SB:
1421 		cq = &hw->sbq;
1422 		qtype = "Sideband";
1423 		break;
1424 	case ICE_CTL_Q_MAILBOX:
1425 		cq = &hw->mailboxq;
1426 		qtype = "Mailbox";
1427 		/* we are going to try to detect a malicious VF, so set the
1428 		 * state to begin detection
1429 		 */
1430 		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1431 		break;
1432 	default:
1433 		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1434 		return 0;
1435 	}
1436 
1437 	/* check for error indications - the PF_xx_AxQLEN register layout is
1438 	 * identical for FW/MBX/SB, so just use the PF_FW_AxQLEN defines
1439 	 */
1440 	val = rd32(hw, cq->rq.len);
1441 	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1442 		   PF_FW_ARQLEN_ARQCRIT_M)) {
1443 		oldval = val;
1444 		if (val & PF_FW_ARQLEN_ARQVFE_M)
1445 			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1446 				qtype);
1447 		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1448 			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1449 				qtype);
1450 		}
1451 		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1452 			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1453 				qtype);
1454 		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1455 			 PF_FW_ARQLEN_ARQCRIT_M);
1456 		if (oldval != val)
1457 			wr32(hw, cq->rq.len, val);
1458 	}
1459 
1460 	val = rd32(hw, cq->sq.len);
1461 	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1462 		   PF_FW_ATQLEN_ATQCRIT_M)) {
1463 		oldval = val;
1464 		if (val & PF_FW_ATQLEN_ATQVFE_M)
1465 			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1466 				qtype);
1467 		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1468 			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1469 				qtype);
1470 		}
1471 		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1472 			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1473 				qtype);
1474 		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1475 			 PF_FW_ATQLEN_ATQCRIT_M);
1476 		if (oldval != val)
1477 			wr32(hw, cq->sq.len, val);
1478 	}
1479 
1480 	event.buf_len = cq->rq_buf_size;
1481 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1482 	if (!event.msg_buf)
1483 		return 0;
1484 
1485 	do {
1486 		u16 opcode;
1487 		int ret;
1488 
1489 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1490 		if (ret == -EALREADY)
1491 			break;
1492 		if (ret) {
1493 			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
1494 				ret);
1495 			break;
1496 		}
1497 
1498 		opcode = le16_to_cpu(event.desc.opcode);
1499 
1500 		/* Notify any thread that might be waiting for this event */
1501 		ice_aq_check_events(pf, opcode, &event);
1502 
1503 		switch (opcode) {
1504 		case ice_aqc_opc_get_link_status:
1505 			if (ice_handle_link_event(pf, &event))
1506 				dev_err(dev, "Could not handle link event\n");
1507 			break;
1508 		case ice_aqc_opc_event_lan_overflow:
1509 			ice_vf_lan_overflow_event(pf, &event);
1510 			break;
1511 		case ice_mbx_opc_send_msg_to_pf:
1512 			if (!ice_is_malicious_vf(pf, &event, i, pending))
1513 				ice_vc_process_vf_msg(pf, &event);
1514 			break;
1515 		case ice_aqc_opc_fw_logging:
1516 			ice_output_fw_log(hw, &event.desc, event.msg_buf);
1517 			break;
1518 		case ice_aqc_opc_lldp_set_mib_change:
1519 			ice_dcb_process_lldp_set_mib_change(pf, &event);
1520 			break;
1521 		default:
1522 			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1523 				qtype, opcode);
1524 			break;
1525 		}
1526 	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1527 
1528 	kfree(event.msg_buf);
1529 
1530 	return pending && (i == ICE_DFLT_IRQ_WORK);
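	/* Nonzero only when the work budget ran out with messages still
	 * pending; the caller then leaves its EVENT_PENDING bit set so the
	 * queue is serviced again on the next pass.
	 */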
1531 }
1532 
1533 /**
1534  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1535  * @hw: pointer to hardware info
1536  * @cq: control queue information
1537  *
1538  * returns true if there are pending messages in a queue, false if there aren't
1539  */
1540 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1541 {
1542 	u16 ntu;
1543 
1544 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1545 	return cq->rq.next_to_clean != ntu;
1546 }
1547 
1548 /**
1549  * ice_clean_adminq_subtask - clean the AdminQ rings
1550  * @pf: board private structure
1551  */
1552 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1553 {
1554 	struct ice_hw *hw = &pf->hw;
1555 
1556 	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1557 		return;
1558 
1559 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1560 		return;
1561 
1562 	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1563 
1564 	/* There might be a situation where new messages arrive to a control
1565 	 * queue between processing the last message and clearing the
1566 	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1567 	 * ice_ctrlq_pending) and process new messages if any.
1568 	 */
1569 	if (ice_ctrlq_pending(hw, &hw->adminq))
1570 		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1571 
1572 	ice_flush(hw);
1573 }
1574 
1575 /**
1576  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1577  * @pf: board private structure
1578  */
1579 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1580 {
1581 	struct ice_hw *hw = &pf->hw;
1582 
1583 	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1584 		return;
1585 
1586 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1587 		return;
1588 
1589 	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1590 
1591 	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1592 		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1593 
1594 	ice_flush(hw);
1595 }
1596 
1597 /**
1598  * ice_clean_sbq_subtask - clean the Sideband Queue rings
1599  * @pf: board private structure
1600  */
1601 static void ice_clean_sbq_subtask(struct ice_pf *pf)
1602 {
1603 	struct ice_hw *hw = &pf->hw;
1604 
1605 	/* Nothing to do here if sideband queue is not supported */
1606 	if (!ice_is_sbq_supported(hw)) {
1607 		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1608 		return;
1609 	}
1610 
1611 	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1612 		return;
1613 
1614 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1615 		return;
1616 
1617 	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1618 
1619 	if (ice_ctrlq_pending(hw, &hw->sbq))
1620 		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1621 
1622 	ice_flush(hw);
1623 }
1624 
1625 /**
1626  * ice_service_task_schedule - schedule the service task to wake up
1627  * @pf: board private structure
1628  *
1629  * If not already scheduled, this puts the task into the work queue.
1630  */
1631 void ice_service_task_schedule(struct ice_pf *pf)
1632 {
1633 	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1634 	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1635 	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1636 		queue_work(ice_wq, &pf->serv_task);
1637 }
1638 
1639 /**
1640  * ice_service_task_complete - finish up the service task
1641  * @pf: board private structure
1642  */
1643 static void ice_service_task_complete(struct ice_pf *pf)
1644 {
1645 	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1646 
1647 	/* force memory (pf->state) to sync before next service task */
1648 	smp_mb__before_atomic();
1649 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1650 }
1651 
1652 /**
1653  * ice_service_task_stop - stop service task and cancel works
1654  * @pf: board private structure
1655  *
1656  * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1657  * 1 otherwise.
1658  */
1659 static int ice_service_task_stop(struct ice_pf *pf)
1660 {
1661 	int ret;
1662 
1663 	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1664 
1665 	if (pf->serv_tmr.function)
1666 		del_timer_sync(&pf->serv_tmr);
1667 	if (pf->serv_task.func)
1668 		cancel_work_sync(&pf->serv_task);
1669 
1670 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1671 	return ret;
1672 }
1673 
1674 /**
1675  * ice_service_task_restart - restart service task and schedule works
1676  * @pf: board private structure
1677  *
1678  * This function is needed for suspend and resume flows (e.g. the WoL scenario)
1679  */
1680 static void ice_service_task_restart(struct ice_pf *pf)
1681 {
1682 	clear_bit(ICE_SERVICE_DIS, pf->state);
1683 	ice_service_task_schedule(pf);
1684 }
1685 
1686 /**
1687  * ice_service_timer - timer callback to schedule service task
1688  * @t: pointer to timer_list
1689  */
1690 static void ice_service_timer(struct timer_list *t)
1691 {
1692 	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1693 
1694 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1695 	ice_service_task_schedule(pf);
1696 }
1697 
1698 /**
1699  * ice_handle_mdd_event - handle malicious driver detect event
1700  * @pf: pointer to the PF structure
1701  *
1702  * Called from the service task when the OICR interrupt handler indicates an
1703  * MDD event. VF MDD logging is rate-limited via net_ratelimit. Additional
1704  * PF and VF log messages are gated by netif_msg_[rx|tx]_err. Since VF Rx
1705  * MDD events disable the queue, the PF can be configured to reset the VF
1706  * using the ethtool private flag mdd-auto-reset-vf.
1707  */
1708 static void ice_handle_mdd_event(struct ice_pf *pf)
1709 {
1710 	struct device *dev = ice_pf_to_dev(pf);
1711 	struct ice_hw *hw = &pf->hw;
1712 	struct ice_vf *vf;
1713 	unsigned int bkt;
1714 	u32 reg;
1715 
1716 	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1717 		/* Since the VF MDD event logging is rate limited, check if
1718 		 * there are pending MDD events.
1719 		 */
1720 		ice_print_vfs_mdd_events(pf);
1721 		return;
1722 	}
1723 
1724 	/* find what triggered an MDD event */
1725 	reg = rd32(hw, GL_MDET_TX_PQM);
1726 	if (reg & GL_MDET_TX_PQM_VALID_M) {
1727 		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1728 				GL_MDET_TX_PQM_PF_NUM_S;
1729 		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1730 				GL_MDET_TX_PQM_VF_NUM_S;
1731 		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1732 				GL_MDET_TX_PQM_MAL_TYPE_S;
1733 		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1734 				GL_MDET_TX_PQM_QNUM_S);
1735 
1736 		if (netif_msg_tx_err(pf))
1737 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1738 				 event, queue, pf_num, vf_num);
1739 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1740 	}
1741 
1742 	reg = rd32(hw, GL_MDET_TX_TCLAN);
1743 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1744 		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1745 				GL_MDET_TX_TCLAN_PF_NUM_S;
1746 		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1747 				GL_MDET_TX_TCLAN_VF_NUM_S;
1748 		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1749 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1750 		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1751 				GL_MDET_TX_TCLAN_QNUM_S);
1752 
1753 		if (netif_msg_tx_err(pf))
1754 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1755 				 event, queue, pf_num, vf_num);
1756 		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1757 	}
1758 
1759 	reg = rd32(hw, GL_MDET_RX);
1760 	if (reg & GL_MDET_RX_VALID_M) {
1761 		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1762 				GL_MDET_RX_PF_NUM_S;
1763 		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1764 				GL_MDET_RX_VF_NUM_S;
1765 		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1766 				GL_MDET_RX_MAL_TYPE_S;
1767 		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1768 				GL_MDET_RX_QNUM_S);
1769 
1770 		if (netif_msg_rx_err(pf))
1771 			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1772 				 event, queue, pf_num, vf_num);
1773 		wr32(hw, GL_MDET_RX, 0xffffffff);
1774 	}
1775 
1776 	/* check to see if this PF caused an MDD event */
1777 	reg = rd32(hw, PF_MDET_TX_PQM);
1778 	if (reg & PF_MDET_TX_PQM_VALID_M) {
1779 		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1780 		if (netif_msg_tx_err(pf))
1781 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1782 	}
1783 
1784 	reg = rd32(hw, PF_MDET_TX_TCLAN);
1785 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1786 		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1787 		if (netif_msg_tx_err(pf))
1788 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1789 	}
1790 
1791 	reg = rd32(hw, PF_MDET_RX);
1792 	if (reg & PF_MDET_RX_VALID_M) {
1793 		wr32(hw, PF_MDET_RX, 0xFFFF);
1794 		if (netif_msg_rx_err(pf))
1795 			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1796 	}
1797 
1798 	/* Check to see if one of the VFs caused an MDD event, and then
1799 	 * increment counters and set print pending
1800 	 */
1801 	mutex_lock(&pf->vfs.table_lock);
1802 	ice_for_each_vf(pf, bkt, vf) {
1803 		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
1804 		if (reg & VP_MDET_TX_PQM_VALID_M) {
1805 			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
1806 			vf->mdd_tx_events.count++;
1807 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1808 			if (netif_msg_tx_err(pf))
1809 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1810 					 vf->vf_id);
1811 		}
1812 
1813 		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
1814 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1815 			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
1816 			vf->mdd_tx_events.count++;
1817 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1818 			if (netif_msg_tx_err(pf))
1819 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1820 					 vf->vf_id);
1821 		}
1822 
1823 		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
1824 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1825 			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
1826 			vf->mdd_tx_events.count++;
1827 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1828 			if (netif_msg_tx_err(pf))
1829 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1830 					 vf->vf_id);
1831 		}
1832 
1833 		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1834 		if (reg & VP_MDET_RX_VALID_M) {
1835 			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1836 			vf->mdd_rx_events.count++;
1837 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1838 			if (netif_msg_rx_err(pf))
1839 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1840 					 vf->vf_id);
1841 
1842 			/* Since the queue is disabled on VF Rx MDD events, the
1843 			 * PF can be configured to reset the VF through ethtool
1844 			 * private flag mdd-auto-reset-vf.
1845 			 */
1846 			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1847 				/* VF MDD event counters will be cleared by
1848 				 * reset, so print the event prior to reset.
1849 				 */
1850 				ice_print_vf_rx_mdd_event(vf);
1851 				ice_reset_vf(vf, ICE_VF_RESET_LOCK);
1852 			}
1853 		}
1854 	}
1855 	mutex_unlock(&pf->vfs.table_lock);
1856 
1857 	ice_print_vfs_mdd_events(pf);
1858 }
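
/* A note on the decodes above: each (reg & FIELD_M) >> FIELD_S pair is
 * equivalent to FIELD_GET() from <linux/bitfield.h>, since the _M masks
 * are contiguous bitfields whose _S shifts match the mask's lowest set
 * bit. A sketch of the same extraction:
 *
 *	u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
 *	u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);
 */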
1859 
1860 /**
1861  * ice_force_phys_link_state - Force the physical link state
1862  * @vsi: VSI to force the physical link state to up/down
1863  * @link_up: true/false indicates to set the physical link to up/down
1864  *
1865  * Force the physical link state by getting the current PHY capabilities from
1866  * hardware and setting the PHY config based on the determined capabilities. If
1867  * link changes, a link event will be triggered because both the Enable Automatic
1868  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1869  *
1870  * Returns 0 on success, negative on failure
1871  */
1872 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1873 {
1874 	struct ice_aqc_get_phy_caps_data *pcaps;
1875 	struct ice_aqc_set_phy_cfg_data *cfg;
1876 	struct ice_port_info *pi;
1877 	struct device *dev;
1878 	int retcode;
1879 
1880 	if (!vsi || !vsi->port_info || !vsi->back)
1881 		return -EINVAL;
1882 	if (vsi->type != ICE_VSI_PF)
1883 		return 0;
1884 
1885 	dev = ice_pf_to_dev(vsi->back);
1886 
1887 	pi = vsi->port_info;
1888 
1889 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1890 	if (!pcaps)
1891 		return -ENOMEM;
1892 
1893 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1894 				      NULL);
1895 	if (retcode) {
1896 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1897 			vsi->vsi_num, retcode);
1898 		retcode = -EIO;
1899 		goto out;
1900 	}
1901 
1902 	/* No change in link */
1903 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1904 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1905 		goto out;
1906 
1907 	/* Use the current user PHY configuration. The current user PHY
1908 	 * configuration is initialized during probe from the PHY capabilities
1909 	 * (software mode) and updated whenever the PHY configuration is set.
1910 	 */
1911 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1912 	if (!cfg) {
1913 		retcode = -ENOMEM;
1914 		goto out;
1915 	}
1916 
1917 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1918 	if (link_up)
1919 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1920 	else
1921 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1922 
1923 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1924 	if (retcode) {
1925 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1926 			vsi->vsi_num, retcode);
1927 		retcode = -EIO;
1928 	}
1929 
1930 	kfree(cfg);
1931 out:
1932 	kfree(pcaps);
1933 	return retcode;
1934 }
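
/* Usage sketch (hedged; the actual call sites are elsewhere in the
 * driver): callers toggle the physical link around interface up/down when
 * the link-down-on-close private flag is set, along the lines of:
 *
 *	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
 *		err = ice_force_phys_link_state(vsi, false);
 *
 * Passing link_up == true re-enables ICE_AQ_PHY_ENA_LINK using the saved
 * curr_user_phy_cfg, so user speed/FEC/FC requests survive the down/up.
 */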
1935 
1936 /**
1937  * ice_init_nvm_phy_type - Initialize the NVM PHY type
1938  * @pi: port info structure
1939  *
1940  * Initialize nvm_phy_type_[low|high] for link lenient mode support
1941  */
1942 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1943 {
1944 	struct ice_aqc_get_phy_caps_data *pcaps;
1945 	struct ice_pf *pf = pi->hw->back;
1946 	int err;
1947 
1948 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1949 	if (!pcaps)
1950 		return -ENOMEM;
1951 
1952 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
1953 				  pcaps, NULL);
1954 
1955 	if (err) {
1956 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1957 		goto out;
1958 	}
1959 
1960 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
1961 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
1962 
1963 out:
1964 	kfree(pcaps);
1965 	return err;
1966 }
1967 
1968 /**
1969  * ice_init_link_dflt_override - Initialize link default override
1970  * @pi: port info structure
1971  *
1972  * Initialize link default override and PHY total port shutdown during probe
1973  */
1974 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1975 {
1976 	struct ice_link_default_override_tlv *ldo;
1977 	struct ice_pf *pf = pi->hw->back;
1978 
1979 	ldo = &pf->link_dflt_override;
1980 	if (ice_get_link_default_override(ldo, pi))
1981 		return;
1982 
1983 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1984 		return;
1985 
1986 	/* Enable Total Port Shutdown (override/replace link-down-on-close
1987 	 * ethtool private flag) for ports with Port Disable bit set.
1988 	 */
1989 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1990 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1991 }
1992 
1993 /**
1994  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1995  * @pi: port info structure
1996  *
1997  * If default override is enabled, initialize the user PHY cfg speed and FEC
1998  * settings using the default override mask from the NVM.
1999  *
2000  * The PHY should only be configured with the default override settings the
2001  * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2002  * is used to indicate that the user PHY cfg default override is initialized
2003  * and the PHY has not been configured with the default override settings. The
2004  * state is set here, and cleared in ice_configure_phy the first time the PHY is
2005  * configured.
2006  *
2007  * This function should be called only if the FW doesn't support default
2008  * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2009  */
2010 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2011 {
2012 	struct ice_link_default_override_tlv *ldo;
2013 	struct ice_aqc_set_phy_cfg_data *cfg;
2014 	struct ice_phy_info *phy = &pi->phy;
2015 	struct ice_pf *pf = pi->hw->back;
2016 
2017 	ldo = &pf->link_dflt_override;
2018 
2019 	/* If link default override is enabled, use it to mask the NVM PHY
2020 	 * capabilities for the speed and FEC default configuration.
2021 	 */
2022 	cfg = &phy->curr_user_phy_cfg;
2023 
2024 	if (ldo->phy_type_low || ldo->phy_type_high) {
2025 		cfg->phy_type_low = pf->nvm_phy_type_lo &
2026 				    cpu_to_le64(ldo->phy_type_low);
2027 		cfg->phy_type_high = pf->nvm_phy_type_hi &
2028 				     cpu_to_le64(ldo->phy_type_high);
2029 	}
2030 	cfg->link_fec_opt = ldo->fec_options;
2031 	phy->curr_user_fec_req = ICE_FEC_AUTO;
2032 
2033 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2034 }
2035 
2036 /**
2037  * ice_init_phy_user_cfg - Initialize the PHY user configuration
2038  * @pi: port info structure
2039  *
2040  * Initialize the current user PHY configuration, speed, FEC, and FC requested
2041  * mode to default. The PHY defaults are from get PHY capabilities topology
2042  * with media so call when media is first available. An error is returned if
2043  * called when media is not available. The PHY initialization completed state is
2044  * set here.
2045  *
2046  * These configurations are used when setting PHY
2047  * configuration. The user PHY configuration is updated on set PHY
2048  * configuration. Returns 0 on success, negative on failure
2049  */
2050 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2051 {
2052 	struct ice_aqc_get_phy_caps_data *pcaps;
2053 	struct ice_phy_info *phy = &pi->phy;
2054 	struct ice_pf *pf = pi->hw->back;
2055 	int err;
2056 
2057 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2058 		return -EIO;
2059 
2060 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2061 	if (!pcaps)
2062 		return -ENOMEM;
2063 
2064 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2065 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2066 					  pcaps, NULL);
2067 	else
2068 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2069 					  pcaps, NULL);
2070 	if (err) {
2071 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2072 		goto err_out;
2073 	}
2074 
2075 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2076 
2077 	/* check if lenient mode is supported and enabled */
2078 	if (ice_fw_supports_link_override(pi->hw) &&
2079 	    !(pcaps->module_compliance_enforcement &
2080 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2081 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2082 
2083 		/* if the FW supports default PHY configuration mode, then the driver
2084 		 * does not have to apply link override settings. If not,
2085 		 * initialize user PHY configuration with link override values
2086 		 */
2087 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2088 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2089 			ice_init_phy_cfg_dflt_override(pi);
2090 			goto out;
2091 		}
2092 	}
2093 
2094 	/* if link default override is not enabled, set user flow control and
2095 	 * FEC settings based on what get_phy_caps returned
2096 	 */
2097 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2098 						      pcaps->link_fec_options);
2099 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2100 
2101 out:
2102 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2103 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2104 err_out:
2105 	kfree(pcaps);
2106 	return err;
2107 }
2108 
2109 /**
2110  * ice_configure_phy - configure PHY
2111  * @vsi: VSI of PHY
2112  *
2113  * Set the PHY configuration. If the current PHY configuration is the same as
2114  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
2115  * configure the based get PHY capabilities for topology with media.
2116  */
2117 static int ice_configure_phy(struct ice_vsi *vsi)
2118 {
2119 	struct device *dev = ice_pf_to_dev(vsi->back);
2120 	struct ice_port_info *pi = vsi->port_info;
2121 	struct ice_aqc_get_phy_caps_data *pcaps;
2122 	struct ice_aqc_set_phy_cfg_data *cfg;
2123 	struct ice_phy_info *phy = &pi->phy;
2124 	struct ice_pf *pf = vsi->back;
2125 	int err;
2126 
2127 	/* Ensure we have media as we cannot configure a medialess port */
2128 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2129 		return -EPERM;
2130 
2131 	ice_print_topo_conflict(vsi);
2132 
2133 	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2134 	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2135 		return -EPERM;
2136 
2137 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2138 		return ice_force_phys_link_state(vsi, true);
2139 
2140 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2141 	if (!pcaps)
2142 		return -ENOMEM;
2143 
2144 	/* Get current PHY config */
2145 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2146 				  NULL);
2147 	if (err) {
2148 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2149 			vsi->vsi_num, err);
2150 		goto done;
2151 	}
2152 
2153 	/* If PHY enable link is configured and configuration has not changed,
2154 	 * there's nothing to do
2155 	 */
2156 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2157 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2158 		goto done;
2159 
2160 	/* Use PHY topology as baseline for configuration */
2161 	memset(pcaps, 0, sizeof(*pcaps));
2162 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2163 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2164 					  pcaps, NULL);
2165 	else
2166 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2167 					  pcaps, NULL);
2168 	if (err) {
2169 		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2170 			vsi->vsi_num, err);
2171 		goto done;
2172 	}
2173 
2174 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2175 	if (!cfg) {
2176 		err = -ENOMEM;
2177 		goto done;
2178 	}
2179 
2180 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2181 
2182 	/* Speed - If default override pending, use curr_user_phy_cfg set in
2183 	 * ice_init_phy_cfg_dflt_override.
2184 	 */
2185 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2186 			       vsi->back->state)) {
2187 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2188 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2189 	} else {
2190 		u64 phy_low = 0, phy_high = 0;
2191 
2192 		ice_update_phy_type(&phy_low, &phy_high,
2193 				    pi->phy.curr_user_speed_req);
2194 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2195 		cfg->phy_type_high = pcaps->phy_type_high &
2196 				     cpu_to_le64(phy_high);
2197 	}
2198 
2199 	/* Can't provide what was requested; use PHY capabilities */
2200 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2201 		cfg->phy_type_low = pcaps->phy_type_low;
2202 		cfg->phy_type_high = pcaps->phy_type_high;
2203 	}
2204 
2205 	/* FEC */
2206 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2207 
2208 	/* Can't provide what was requested; use PHY capabilities */
2209 	if (cfg->link_fec_opt !=
2210 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2211 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2212 		cfg->link_fec_opt = pcaps->link_fec_options;
2213 	}
2214 
2215 	/* Flow Control - always supported; no need to check against
2216 	 * capabilities
2217 	 */
2218 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2219 
2220 	/* Enable link and link update */
2221 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2222 
2223 	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2224 	if (err)
2225 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2226 			vsi->vsi_num, err);
2227 
2228 	kfree(cfg);
2229 done:
2230 	kfree(pcaps);
2231 	return err;
2232 }
2233 
2234 /**
2235  * ice_check_media_subtask - Check for media
2236  * @pf: pointer to PF struct
2237  *
2238  * If media is available, initialize the PHY user configuration if it has
2239  * not been done yet, and configure the PHY if the interface is up.
2240  */
2241 static void ice_check_media_subtask(struct ice_pf *pf)
2242 {
2243 	struct ice_port_info *pi;
2244 	struct ice_vsi *vsi;
2245 	int err;
2246 
2247 	/* No need to check for media if it's already present */
2248 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2249 		return;
2250 
2251 	vsi = ice_get_main_vsi(pf);
2252 	if (!vsi)
2253 		return;
2254 
2255 	/* Refresh link info and check if media is present */
2256 	pi = vsi->port_info;
2257 	err = ice_update_link_info(pi);
2258 	if (err)
2259 		return;
2260 
2261 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2262 
2263 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2264 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2265 			ice_init_phy_user_cfg(pi);
2266 
2267 		/* PHY settings are reset on media insertion, reconfigure
2268 		 * PHY to preserve settings.
2269 		 */
2270 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2271 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2272 			return;
2273 
2274 		err = ice_configure_phy(vsi);
2275 		if (!err)
2276 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2277 
2278 		/* A Link Status Event will be generated; the event handler
2279 		 * will complete bringing the interface up
2280 		 */
2281 	}
2282 }
2283 
2284 /**
2285  * ice_service_task - manage and run subtasks
2286  * @work: pointer to work_struct contained by the PF struct
2287  */
2288 static void ice_service_task(struct work_struct *work)
2289 {
2290 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2291 	unsigned long start_time = jiffies;
2292 
2293 	/* subtasks */
2294 
2295 	/* process reset requests first */
2296 	ice_reset_subtask(pf);
2297 
2298 	/* bail if reset/recovery is pending, suspended, or rebuild failed */
2299 	if (ice_is_reset_in_progress(pf->state) ||
2300 	    test_bit(ICE_SUSPENDED, pf->state) ||
2301 	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2302 		ice_service_task_complete(pf);
2303 		return;
2304 	}
2305 
2306 	if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2307 		struct iidc_event *event;
2308 
2309 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2310 		if (event) {
2311 			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2312 			/* report the entire OICR value to AUX driver */
2313 			swap(event->reg, pf->oicr_err_reg);
2314 			ice_send_event_to_aux(pf, event);
2315 			kfree(event);
2316 		}
2317 	}
2318 
2319 	/* Unplug the aux dev per request; if an unplug request came in
2320 	 * while processing a plug request, this will handle it.
2321 	 */
2322 	if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2323 		ice_unplug_aux_dev(pf);
2324 
2325 	/* Plug aux device per request */
2326 	if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2327 		ice_plug_aux_dev(pf);
2328 
2329 	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2330 		struct iidc_event *event;
2331 
2332 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2333 		if (event) {
2334 			set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2335 			ice_send_event_to_aux(pf, event);
2336 			kfree(event);
2337 		}
2338 	}
2339 
2340 	ice_clean_adminq_subtask(pf);
2341 	ice_check_media_subtask(pf);
2342 	ice_check_for_hang_subtask(pf);
2343 	ice_sync_fltr_subtask(pf);
2344 	ice_handle_mdd_event(pf);
2345 	ice_watchdog_subtask(pf);
2346 
2347 	if (ice_is_safe_mode(pf)) {
2348 		ice_service_task_complete(pf);
2349 		return;
2350 	}
2351 
2352 	ice_process_vflr_event(pf);
2353 	ice_clean_mailboxq_subtask(pf);
2354 	ice_clean_sbq_subtask(pf);
2355 	ice_sync_arfs_fltrs(pf);
2356 	ice_flush_fdir_ctx(pf);
2357 
2358 	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2359 	ice_service_task_complete(pf);
2360 
2361 	/* If the tasks have taken longer than one service timer period
2362 	 * or there is more work to be done, reset the service timer to
2363 	 * schedule the service task now.
2364 	 */
2365 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2366 	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2367 	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2368 	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2369 	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2370 	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2371 	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2372 		mod_timer(&pf->serv_tmr, jiffies);
2373 }
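
/* The subtask pattern above is a convention rather than a formal API: a
 * producer (often ice_misc_intr()) marks work and kicks the task,
 *
 *	set_bit(ICE_MDD_EVENT_PENDING, pf->state);
 *	ice_service_task_schedule(pf);
 *
 * and the matching subtask claims the bit exactly once on the next pass,
 *
 *	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state))
 *		return;
 *
 * which is why each subtask here is safe to call unconditionally.
 */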
2374 
2375 /**
2376  * ice_set_ctrlq_len - helper function to set controlq length
2377  * @hw: pointer to the HW instance
2378  */
2379 static void ice_set_ctrlq_len(struct ice_hw *hw)
2380 {
2381 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2382 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2383 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2384 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
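	/* The mailbox Rx queue depth is set to the ARQLEN field mask, i.e.
	 * the largest entry count the register can express.
	 */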
2385 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2386 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2387 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2388 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2389 	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2390 	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2391 	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2392 	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2393 }
2394 
2395 /**
2396  * ice_schedule_reset - schedule a reset
2397  * @pf: board private structure
2398  * @reset: reset being requested
2399  */
2400 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2401 {
2402 	struct device *dev = ice_pf_to_dev(pf);
2403 
2404 	/* bail out if earlier reset has failed */
2405 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2406 		dev_dbg(dev, "earlier reset has failed\n");
2407 		return -EIO;
2408 	}
2409 	/* bail if reset/recovery already in progress */
2410 	if (ice_is_reset_in_progress(pf->state)) {
2411 		dev_dbg(dev, "Reset already in progress\n");
2412 		return -EBUSY;
2413 	}
2414 
2415 	switch (reset) {
2416 	case ICE_RESET_PFR:
2417 		set_bit(ICE_PFR_REQ, pf->state);
2418 		break;
2419 	case ICE_RESET_CORER:
2420 		set_bit(ICE_CORER_REQ, pf->state);
2421 		break;
2422 	case ICE_RESET_GLOBR:
2423 		set_bit(ICE_GLOBR_REQ, pf->state);
2424 		break;
2425 	default:
2426 		return -EINVAL;
2427 	}
2428 
2429 	ice_service_task_schedule(pf);
2430 	return 0;
2431 }
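
/* Minimal usage sketch: request a PF-level reset and let the service task
 * drive it; the caller only learns whether the request was accepted:
 *
 *	err = ice_schedule_reset(pf, ICE_RESET_PFR);
 *	if (err)
 *		dev_dbg(ice_pf_to_dev(pf), "reset not scheduled: %d\n", err);
 *
 * The reset itself then runs asynchronously from ice_reset_subtask() in
 * the service task, which is why this helper only sets a request bit.
 */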
2432 
2433 /**
2434  * ice_irq_affinity_notify - Callback for affinity changes
2435  * @notify: context as to what irq was changed
2436  * @mask: the new affinity mask
2437  *
2438  * This is a callback function used by the irq_set_affinity_notifier function
2439  * so that we may register to receive changes to the irq affinity masks.
2440  */
2441 static void
2442 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2443 			const cpumask_t *mask)
2444 {
2445 	struct ice_q_vector *q_vector =
2446 		container_of(notify, struct ice_q_vector, affinity_notify);
2447 
2448 	cpumask_copy(&q_vector->affinity_mask, mask);
2449 }
2450 
2451 /**
2452  * ice_irq_affinity_release - Callback for affinity notifier release
2453  * @ref: internal core kernel usage
2454  *
2455  * This is a callback function used by the irq_set_affinity_notifier function
2456  * to inform the current notification subscriber that they will no longer
2457  * receive notifications.
2458  */
2459 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2460 
2461 /**
2462  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2463  * @vsi: the VSI being configured
2464  */
2465 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2466 {
2467 	struct ice_hw *hw = &vsi->back->hw;
2468 	int i;
2469 
2470 	ice_for_each_q_vector(vsi, i)
2471 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2472 
2473 	ice_flush(hw);
2474 	return 0;
2475 }
2476 
2477 /**
2478  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2479  * @vsi: the VSI being configured
2480  * @basename: name for the vector
2481  */
2482 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2483 {
2484 	int q_vectors = vsi->num_q_vectors;
2485 	struct ice_pf *pf = vsi->back;
2486 	int base = vsi->base_vector;
2487 	struct device *dev;
2488 	int rx_int_idx = 0;
2489 	int tx_int_idx = 0;
2490 	int vector, err;
2491 	int irq_num;
2492 
2493 	dev = ice_pf_to_dev(pf);
2494 	for (vector = 0; vector < q_vectors; vector++) {
2495 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2496 
2497 		irq_num = pf->msix_entries[base + vector].vector;
2498 
2499 		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2500 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2501 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2502 			tx_int_idx++;
2503 		} else if (q_vector->rx.rx_ring) {
2504 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2505 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2506 		} else if (q_vector->tx.tx_ring) {
2507 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2508 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2509 		} else {
2510 			/* skip this unused q_vector */
2511 			continue;
2512 		}
2513 		if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2514 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2515 					       IRQF_SHARED, q_vector->name,
2516 					       q_vector);
2517 		else
2518 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2519 					       0, q_vector->name, q_vector);
2520 		if (err) {
2521 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2522 				   err);
2523 			goto free_q_irqs;
2524 		}
2525 
2526 		/* register for affinity change notifications */
2527 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2528 			struct irq_affinity_notify *affinity_notify;
2529 
2530 			affinity_notify = &q_vector->affinity_notify;
2531 			affinity_notify->notify = ice_irq_affinity_notify;
2532 			affinity_notify->release = ice_irq_affinity_release;
2533 			irq_set_affinity_notifier(irq_num, affinity_notify);
2534 		}
2535 
2536 		/* assign the mask for this irq */
2537 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2538 	}
2539 
2540 	err = ice_set_cpu_rx_rmap(vsi);
2541 	if (err) {
2542 		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2543 			   vsi->vsi_num, ERR_PTR(err));
2544 		goto free_q_irqs;
2545 	}
2546 
2547 	vsi->irqs_ready = true;
2548 	return 0;
2549 
2550 free_q_irqs:
2551 	while (vector) {
2552 		vector--;
2553 		irq_num = pf->msix_entries[base + vector].vector;
2554 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2555 			irq_set_affinity_notifier(irq_num, NULL);
2556 		irq_set_affinity_hint(irq_num, NULL);
2557 		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2558 	}
2559 	return err;
2560 }
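
/* Naming sketch (hedged): with a basename such as "ice-eth0", typically
 * "<driver>-<netdev>" built by the caller, the snprintf() calls above
 * produce per-vector IRQ names like
 *
 *	ice-eth0-TxRx-0
 *	ice-eth0-TxRx-1
 *	ice-eth0-rx-2	(only if a vector carries Rx but no Tx rings)
 *
 * which are the names that appear in /proc/interrupts, one line per MSI-X
 * vector requested here.
 */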
2561 
2562 /**
2563  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2564  * @vsi: VSI to setup Tx rings used by XDP
2565  *
2566  * Return 0 on success and negative value on error
2567  */
2568 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2569 {
2570 	struct device *dev = ice_pf_to_dev(vsi->back);
2571 	struct ice_tx_desc *tx_desc;
2572 	int i, j;
2573 
2574 	ice_for_each_xdp_txq(vsi, i) {
2575 		u16 xdp_q_idx = vsi->alloc_txq + i;
2576 		struct ice_ring_stats *ring_stats;
2577 		struct ice_tx_ring *xdp_ring;
2578 
2579 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2580 		if (!xdp_ring)
2581 			goto free_xdp_rings;
2582 
2583 		ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2584 		if (!ring_stats) {
2585 			ice_free_tx_ring(xdp_ring);
2586 			goto free_xdp_rings;
2587 		}
2588 
2589 		xdp_ring->ring_stats = ring_stats;
2590 		xdp_ring->q_index = xdp_q_idx;
2591 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2592 		xdp_ring->vsi = vsi;
2593 		xdp_ring->netdev = NULL;
2594 		xdp_ring->dev = dev;
2595 		xdp_ring->count = vsi->num_tx_desc;
2596 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2597 		if (ice_setup_tx_ring(xdp_ring))
2598 			goto free_xdp_rings;
2599 		ice_set_ring_xdp(xdp_ring);
2600 		spin_lock_init(&xdp_ring->tx_lock);
2601 		for (j = 0; j < xdp_ring->count; j++) {
2602 			tx_desc = ICE_TX_DESC(xdp_ring, j);
2603 			tx_desc->cmd_type_offset_bsz = 0;
2604 		}
2605 	}
2606 
2607 	return 0;
2608 
2609 free_xdp_rings:
2610 	for (; i >= 0; i--) {
2611 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2612 			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2613 			vsi->xdp_rings[i]->ring_stats = NULL;
2614 			ice_free_tx_ring(vsi->xdp_rings[i]);
2615 		}
2616 	}
2617 	return -ENOMEM;
2618 }
2619 
2620 /**
2621  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2622  * @vsi: VSI to set the bpf prog on
2623  * @prog: the bpf prog pointer
2624  */
2625 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2626 {
2627 	struct bpf_prog *old_prog;
2628 	int i;
2629 
2630 	old_prog = xchg(&vsi->xdp_prog, prog);
2631 	if (old_prog)
2632 		bpf_prog_put(old_prog);
2633 
2634 	ice_for_each_rxq(vsi, i)
2635 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2636 }
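
/* The xchg() + WRITE_ONCE() pairing above is a lockless publication
 * pattern: the writer swaps the VSI-level pointer atomically and then
 * publishes each per-ring copy; hot-path readers pair this with
 * READ_ONCE(), along the lines of this hedged reader-side sketch:
 *
 *	struct bpf_prog *xdp_prog = READ_ONCE(rx_ring->xdp_prog);
 *
 *	if (xdp_prog)
 *		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 *
 * Dropping the old program with bpf_prog_put() is expected to be safe
 * here because final program freeing is deferred past in-flight runs.
 */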
2637 
2638 /**
2639  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2640  * @vsi: VSI to bring up Tx rings used by XDP
2641  * @prog: bpf program that will be assigned to VSI
2642  *
2643  * Return 0 on success and negative value on error
2644  */
2645 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2646 {
2647 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2648 	int xdp_rings_rem = vsi->num_xdp_txq;
2649 	struct ice_pf *pf = vsi->back;
2650 	struct ice_qs_cfg xdp_qs_cfg = {
2651 		.qs_mutex = &pf->avail_q_mutex,
2652 		.pf_map = pf->avail_txqs,
2653 		.pf_map_size = pf->max_pf_txqs,
2654 		.q_count = vsi->num_xdp_txq,
2655 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2656 		.vsi_map = vsi->txq_map,
2657 		.vsi_map_offset = vsi->alloc_txq,
2658 		.mapping_mode = ICE_VSI_MAP_CONTIG
2659 	};
2660 	struct device *dev;
2661 	int i, v_idx;
2662 	int status;
2663 
2664 	dev = ice_pf_to_dev(pf);
2665 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2666 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2667 	if (!vsi->xdp_rings)
2668 		return -ENOMEM;
2669 
2670 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2671 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2672 		goto err_map_xdp;
2673 
2674 	if (static_key_enabled(&ice_xdp_locking_key))
2675 		netdev_warn(vsi->netdev,
2676 			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2677 
2678 	if (ice_xdp_alloc_setup_rings(vsi))
2679 		goto clear_xdp_rings;
2680 
2681 	/* follow the logic from ice_vsi_map_rings_to_vectors */
2682 	ice_for_each_q_vector(vsi, v_idx) {
2683 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2684 		int xdp_rings_per_v, q_id, q_base;
2685 
2686 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2687 					       vsi->num_q_vectors - v_idx);
2688 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2689 
2690 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2691 			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2692 
2693 			xdp_ring->q_vector = q_vector;
2694 			xdp_ring->next = q_vector->tx.tx_ring;
2695 			q_vector->tx.tx_ring = xdp_ring;
2696 		}
2697 		xdp_rings_rem -= xdp_rings_per_v;
2698 	}
2699 
2700 	ice_for_each_rxq(vsi, i) {
2701 		if (static_key_enabled(&ice_xdp_locking_key)) {
2702 			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2703 		} else {
2704 			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
2705 			struct ice_tx_ring *ring;
2706 
2707 			ice_for_each_tx_ring(ring, q_vector->tx) {
2708 				if (ice_ring_is_xdp(ring)) {
2709 					vsi->rx_rings[i]->xdp_ring = ring;
2710 					break;
2711 				}
2712 			}
2713 		}
2714 		ice_tx_xsk_pool(vsi, i);
2715 	}
2716 
2717 	/* omit the scheduler update if in reset path; XDP queues will be
2718 	 * taken into account at the end of ice_vsi_rebuild, where
2719 	 * ice_cfg_vsi_lan is being called
2720 	 */
2721 	if (ice_is_reset_in_progress(pf->state))
2722 		return 0;
2723 
2724 	/* tell the Tx scheduler that right now we have
2725 	 * additional queues
2726 	 */
2727 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2728 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2729 
2730 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2731 				 max_txqs);
2732 	if (status) {
2733 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2734 			status);
2735 		goto clear_xdp_rings;
2736 	}
2737 
2738 	/* assign the prog only when it's not already present on VSI;
2739 	 * this flow is reached from both the ethtool -L and ndo_bpf flows;
2740 	 * a VSI rebuild that happens under ethtool -L can expose us to
2741 	 * bpf_prog refcount issues, as we would be swapping the same
2742 	 * bpf_prog pointer out of vsi->xdp_prog and calling bpf_prog_put
2743 	 * on it, since it would be treated as an 'old_prog'; for ndo_bpf
2744 	 * this is not harmful, as dev_xdp_install bumps the refcount
2745 	 * before calling the op exposed by the driver;
2746 	 */
2747 	if (!ice_is_xdp_ena_vsi(vsi))
2748 		ice_vsi_assign_bpf_prog(vsi, prog);
2749 
2750 	return 0;
2751 clear_xdp_rings:
2752 	ice_for_each_xdp_txq(vsi, i)
2753 		if (vsi->xdp_rings[i]) {
2754 			kfree_rcu(vsi->xdp_rings[i], rcu);
2755 			vsi->xdp_rings[i] = NULL;
2756 		}
2757 
2758 err_map_xdp:
2759 	mutex_lock(&pf->avail_q_mutex);
2760 	ice_for_each_xdp_txq(vsi, i) {
2761 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2762 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2763 	}
2764 	mutex_unlock(&pf->avail_q_mutex);
2765 
2766 	devm_kfree(dev, vsi->xdp_rings);
2767 	return -ENOMEM;
2768 }
2769 
2770 /**
2771  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2772  * @vsi: VSI to remove XDP rings
2773  *
2774  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2775  * resources
2776  */
2777 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2778 {
2779 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2780 	struct ice_pf *pf = vsi->back;
2781 	int i, v_idx;
2782 
2783 	/* q_vectors are freed in the reset path, so there's no point in
2784 	 * detaching rings there; if the rebuild was triggered outside of a
2785 	 * reset, the reset bits in pf->state won't be set, so additionally
2786 	 * check the first q_vector against NULL
2787 	 */
2788 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2789 		goto free_qmap;
2790 
2791 	ice_for_each_q_vector(vsi, v_idx) {
2792 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2793 		struct ice_tx_ring *ring;
2794 
2795 		ice_for_each_tx_ring(ring, q_vector->tx)
2796 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2797 				break;
2798 
2799 		/* restore the value of last node prior to XDP setup */
2800 		q_vector->tx.tx_ring = ring;
2801 	}
2802 
2803 free_qmap:
2804 	mutex_lock(&pf->avail_q_mutex);
2805 	ice_for_each_xdp_txq(vsi, i) {
2806 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2807 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2808 	}
2809 	mutex_unlock(&pf->avail_q_mutex);
2810 
2811 	ice_for_each_xdp_txq(vsi, i)
2812 		if (vsi->xdp_rings[i]) {
2813 			if (vsi->xdp_rings[i]->desc) {
2814 				synchronize_rcu();
2815 				ice_free_tx_ring(vsi->xdp_rings[i]);
2816 			}
2817 			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2818 			vsi->xdp_rings[i]->ring_stats = NULL;
2819 			kfree_rcu(vsi->xdp_rings[i], rcu);
2820 			vsi->xdp_rings[i] = NULL;
2821 		}
2822 
2823 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2824 	vsi->xdp_rings = NULL;
2825 
2826 	if (static_key_enabled(&ice_xdp_locking_key))
2827 		static_branch_dec(&ice_xdp_locking_key);
2828 
2829 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2830 		return 0;
2831 
2832 	ice_vsi_assign_bpf_prog(vsi, NULL);
2833 
2834 	/* notify Tx scheduler that we destroyed XDP queues and bring
2835 	 * back the old number of child nodes
2836 	 */
2837 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2838 		max_txqs[i] = vsi->num_txq;
2839 
2840 	/* change number of XDP Tx queues to 0 */
2841 	vsi->num_xdp_txq = 0;
2842 
2843 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2844 			       max_txqs);
2845 }
2846 
2847 /**
2848  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2849  * @vsi: VSI to schedule napi on
2850  */
2851 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2852 {
2853 	int i;
2854 
2855 	ice_for_each_rxq(vsi, i) {
2856 		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2857 
2858 		if (rx_ring->xsk_pool)
2859 			napi_schedule(&rx_ring->q_vector->napi);
2860 	}
2861 }
2862 
2863 /**
2864  * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
2865  * @vsi: VSI to determine the count of XDP Tx qs
2866  *
2867  * returns 0 if Tx qs count is higher than at least half of CPU count,
2868  * -ENOMEM otherwise
2869  */
2870 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2871 {
2872 	u16 avail = ice_get_avail_txq_count(vsi->back);
2873 	u16 cpus = num_possible_cpus();
2874 
2875 	if (avail < cpus / 2)
2876 		return -ENOMEM;
2877 
2878 	vsi->num_xdp_txq = min_t(u16, avail, cpus);
2879 
2880 	if (vsi->num_xdp_txq < cpus)
2881 		static_branch_inc(&ice_xdp_locking_key);
2882 
2883 	return 0;
2884 }
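
/* Worked example of the sizing rule above: on a 16-CPU system with 10 Tx
 * queues still available, 10 >= 16 / 2, so the check passes and
 * vsi->num_xdp_txq = min(10, 16) = 10. Since 10 < 16, the
 * ice_xdp_locking_key static branch is enabled and XDP Tx rings are
 * shared between CPUs under xdp_ring->tx_lock. With only 4 available
 * queues (4 < 16 / 2) the function would instead return -ENOMEM.
 */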
2885 
2886 /**
2887  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2888  * @vsi: Pointer to VSI structure
2889  */
2890 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
2891 {
2892 	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
2893 		return ICE_RXBUF_1664;
2894 	else
2895 		return ICE_RXBUF_3072;
2896 }
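
/* Arithmetic sketch for the MTU check in ice_xdp_setup_prog() below:
 * frame_size = netdev->mtu + ICE_ETH_PKT_HDR_PAD must fit the buffer size
 * returned here. Assuming ICE_ETH_PKT_HDR_PAD covers the Ethernet header,
 * FCS and VLAN tags (its exact value is defined elsewhere), the largest
 * MTU a prog without xdp_has_frags can accept is roughly
 *
 *	ICE_RXBUF_3072 - ICE_ETH_PKT_HDR_PAD	(legacy-rx off)
 *	ICE_RXBUF_1664 - ICE_ETH_PKT_HDR_PAD	(legacy-rx on)
 *
 * Programs that set xdp_has_frags skip this limit entirely.
 */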
2897 
2898 /**
2899  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2900  * @vsi: VSI to setup XDP for
2901  * @prog: XDP program
2902  * @extack: netlink extended ack
2903  */
2904 static int
2905 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2906 		   struct netlink_ext_ack *extack)
2907 {
2908 	unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2909 	bool if_running = netif_running(vsi->netdev);
2910 	int ret = 0, xdp_ring_err = 0;
2911 
2912 	if (prog && !prog->aux->xdp_has_frags) {
2913 		if (frame_size > ice_max_xdp_frame_size(vsi)) {
2914 			NL_SET_ERR_MSG_MOD(extack,
2915 					   "MTU is too large for linear frames and XDP prog does not support frags");
2916 			return -EOPNOTSUPP;
2917 		}
2918 	}
2919 
2920 	/* need to stop netdev while setting up the program for Rx rings */
2921 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2922 		ret = ice_down(vsi);
2923 		if (ret) {
2924 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2925 			return ret;
2926 		}
2927 	}
2928 
2929 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2930 		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2931 		if (xdp_ring_err) {
2932 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
2933 		} else {
2934 			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2935 			if (xdp_ring_err)
2936 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2937 		}
2938 		xdp_features_set_redirect_target(vsi->netdev, true);
2939 		/* reallocate Rx queues that are used for zero-copy */
2940 		xdp_ring_err = ice_realloc_zc_buf(vsi, true);
2941 		if (xdp_ring_err)
2942 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
2943 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2944 		xdp_features_clear_redirect_target(vsi->netdev);
2945 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2946 		if (xdp_ring_err)
2947 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2948 		/* reallocate Rx queues that were used for zero-copy */
2949 		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
2950 		if (xdp_ring_err)
2951 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
2952 	} else {
2953 		/* safe to call even when prog == vsi->xdp_prog as
2954 		 * dev_xdp_install in net/core/dev.c incremented prog's
2955 		 * refcount so corresponding bpf_prog_put won't cause
2956 		 * underflow
2957 		 */
2958 		ice_vsi_assign_bpf_prog(vsi, prog);
2959 	}
2960 
2961 	if (if_running)
2962 		ret = ice_up(vsi);
2963 
2964 	if (!ret && prog)
2965 		ice_vsi_rx_napi_schedule(vsi);
2966 
2967 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2968 }
2969 
2970 /**
2971  * ice_xdp_safe_mode - XDP handler for safe mode
2972  * @dev: netdevice
2973  * @xdp: XDP command
2974  */
2975 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2976 			     struct netdev_bpf *xdp)
2977 {
2978 	NL_SET_ERR_MSG_MOD(xdp->extack,
2979 			   "Please provide working DDP firmware package in order to use XDP\n"
2980 			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2981 	return -EOPNOTSUPP;
2982 }
2983 
2984 /**
2985  * ice_xdp - implements XDP handler
2986  * @dev: netdevice
2987  * @xdp: XDP command
2988  */
2989 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2990 {
2991 	struct ice_netdev_priv *np = netdev_priv(dev);
2992 	struct ice_vsi *vsi = np->vsi;
2993 
2994 	if (vsi->type != ICE_VSI_PF) {
2995 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2996 		return -EINVAL;
2997 	}
2998 
2999 	switch (xdp->command) {
3000 	case XDP_SETUP_PROG:
3001 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3002 	case XDP_SETUP_XSK_POOL:
3003 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
3004 					  xdp->xsk.queue_id);
3005 	default:
3006 		return -EINVAL;
3007 	}
3008 }
3009 
3010 /**
3011  * ice_ena_misc_vector - enable the non-queue interrupts
3012  * @pf: board private structure
3013  */
3014 static void ice_ena_misc_vector(struct ice_pf *pf)
3015 {
3016 	struct ice_hw *hw = &pf->hw;
3017 	u32 val;
3018 
3019 	/* Disable anti-spoof detection interrupt to prevent spurious event
3020 	 * interrupts during a function reset. Anti-spoof functionality is
3021 	 * still supported.
3022 	 */
3023 	val = rd32(hw, GL_MDCK_TX_TDPU);
3024 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3025 	wr32(hw, GL_MDCK_TX_TDPU, val);
3026 
3027 	/* clear things first */
3028 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
3029 	rd32(hw, PFINT_OICR);		/* read to clear */
3030 
3031 	val = (PFINT_OICR_ECC_ERR_M |
3032 	       PFINT_OICR_MAL_DETECT_M |
3033 	       PFINT_OICR_GRST_M |
3034 	       PFINT_OICR_PCI_EXCEPTION_M |
3035 	       PFINT_OICR_VFLR_M |
3036 	       PFINT_OICR_HMC_ERR_M |
3037 	       PFINT_OICR_PE_PUSH_M |
3038 	       PFINT_OICR_PE_CRITERR_M);
3039 
3040 	wr32(hw, PFINT_OICR_ENA, val);
3041 
3042 	/* SW_ITR_IDX = 0, but don't change INTENA */
3043 	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
3044 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3045 }
3046 
3047 /**
3048  * ice_misc_intr - misc interrupt handler
3049  * @irq: interrupt number
3050  * @data: pointer to a q_vector
3051  */
3052 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3053 {
3054 	struct ice_pf *pf = (struct ice_pf *)data;
3055 	struct ice_hw *hw = &pf->hw;
3056 	irqreturn_t ret = IRQ_NONE;
3057 	struct device *dev;
3058 	u32 oicr, ena_mask;
3059 
3060 	dev = ice_pf_to_dev(pf);
3061 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3062 	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3063 	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3064 
3065 	oicr = rd32(hw, PFINT_OICR);
3066 	ena_mask = rd32(hw, PFINT_OICR_ENA);
3067 
3068 	if (oicr & PFINT_OICR_SWINT_M) {
3069 		ena_mask &= ~PFINT_OICR_SWINT_M;
3070 		pf->sw_int_count++;
3071 	}
3072 
3073 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
3074 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3075 		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3076 	}
3077 	if (oicr & PFINT_OICR_VFLR_M) {
3078 		/* disable any further VFLR event notifications */
3079 		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3080 			u32 reg = rd32(hw, PFINT_OICR_ENA);
3081 
3082 			reg &= ~PFINT_OICR_VFLR_M;
3083 			wr32(hw, PFINT_OICR_ENA, reg);
3084 		} else {
3085 			ena_mask &= ~PFINT_OICR_VFLR_M;
3086 			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3087 		}
3088 	}
3089 
3090 	if (oicr & PFINT_OICR_GRST_M) {
3091 		u32 reset;
3092 
3093 		/* we have a reset warning */
3094 		ena_mask &= ~PFINT_OICR_GRST_M;
3095 		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
3096 			GLGEN_RSTAT_RESET_TYPE_S;
3097 
3098 		if (reset == ICE_RESET_CORER)
3099 			pf->corer_count++;
3100 		else if (reset == ICE_RESET_GLOBR)
3101 			pf->globr_count++;
3102 		else if (reset == ICE_RESET_EMPR)
3103 			pf->empr_count++;
3104 		else
3105 			dev_dbg(dev, "Invalid reset type %d\n", reset);
3106 
3107 		/* If a reset cycle isn't already in progress, we set a bit in
3108 		 * pf->state so that the service task can start a reset/rebuild.
3109 		 */
3110 		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3111 			if (reset == ICE_RESET_CORER)
3112 				set_bit(ICE_CORER_RECV, pf->state);
3113 			else if (reset == ICE_RESET_GLOBR)
3114 				set_bit(ICE_GLOBR_RECV, pf->state);
3115 			else
3116 				set_bit(ICE_EMPR_RECV, pf->state);
3117 
3118 			/* There are a couple of different bits at play here.
3119 			 * hw->reset_ongoing indicates whether the hardware is
3120 			 * in reset. This is set to true when a reset interrupt
3121 			 * is received and set back to false after the driver
3122 			 * has determined that the hardware is out of reset.
3123 			 *
3124 			 * ICE_RESET_OICR_RECV in pf->state indicates
3125 			 * that a post reset rebuild is required before the
3126 			 * driver is operational again. This is set above.
3127 			 *
3128 			 * As this is the start of the reset/rebuild cycle, set
3129 			 * both to indicate that.
3130 			 */
3131 			hw->reset_ongoing = true;
3132 		}
3133 	}
3134 
3135 	if (oicr & PFINT_OICR_TSYN_TX_M) {
3136 		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3137 		if (!hw->reset_ongoing)
3138 			ret = IRQ_WAKE_THREAD;
3139 	}
3140 
3141 	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3142 		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3143 		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3144 
3145 		/* Save EVENTs from GTSYN register */
3146 		pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
3147 						     GLTSYN_STAT_EVENT1_M |
3148 						     GLTSYN_STAT_EVENT2_M);
3149 		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3150 		kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
3151 	}
3152 
3153 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3154 	if (oicr & ICE_AUX_CRIT_ERR) {
3155 		pf->oicr_err_reg |= oicr;
3156 		set_bit(ICE_AUX_ERR_PENDING, pf->state);
3157 		ena_mask &= ~ICE_AUX_CRIT_ERR;
3158 	}
3159 
3160 	/* Report any remaining unexpected interrupts */
3161 	oicr &= ena_mask;
3162 	if (oicr) {
3163 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3164 		/* If a critical error is pending there is no choice but to
3165 		 * reset the device.
3166 		 */
3167 		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3168 			    PFINT_OICR_ECC_ERR_M)) {
3169 			set_bit(ICE_PFR_REQ, pf->state);
3170 			ice_service_task_schedule(pf);
3171 		}
3172 	}
3173 	if (!ret)
3174 		ret = IRQ_HANDLED;
3175 
3176 	ice_service_task_schedule(pf);
3177 	ice_irq_dynamic_ena(hw, NULL, NULL);
3178 
3179 	return ret;
3180 }
3181 
3182 /**
3183  * ice_misc_intr_thread_fn - misc interrupt thread function
3184  * @irq: interrupt number
3185  * @data: pointer to the PF structure
3186  */
3187 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3188 {
3189 	struct ice_pf *pf = data;
3190 
3191 	if (ice_is_reset_in_progress(pf->state))
3192 		return IRQ_HANDLED;
3193 
3194 	while (!ice_ptp_process_ts(pf))
3195 		usleep_range(50, 100);
3196 
3197 	return IRQ_HANDLED;
3198 }
3199 
3200 /**
3201  * ice_dis_ctrlq_interrupts - disable control queue interrupts
3202  * @hw: pointer to HW structure
3203  */
3204 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3205 {
3206 	/* disable Admin queue Interrupt causes */
3207 	wr32(hw, PFINT_FW_CTL,
3208 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3209 
3210 	/* disable Mailbox queue Interrupt causes */
3211 	wr32(hw, PFINT_MBX_CTL,
3212 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3213 
3214 	wr32(hw, PFINT_SB_CTL,
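	/* disable Sideband queue Interrupt causes */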
3215 	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3216 
3217 	/* disable Control queue Interrupt causes */
3218 	wr32(hw, PFINT_OICR_CTL,
3219 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3220 
3221 	ice_flush(hw);
3222 }
3223 
3224 /**
3225  * ice_free_irq_msix_misc - Unroll misc vector setup
3226  * @pf: board private structure
3227  */
3228 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3229 {
3230 	struct ice_hw *hw = &pf->hw;
3231 
3232 	ice_dis_ctrlq_interrupts(hw);
3233 
3234 	/* disable OICR interrupt */
3235 	wr32(hw, PFINT_OICR_ENA, 0);
3236 	ice_flush(hw);
3237 
3238 	if (pf->msix_entries) {
3239 		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
3240 		devm_free_irq(ice_pf_to_dev(pf),
3241 			      pf->msix_entries[pf->oicr_idx].vector, pf);
3242 	}
3243 
3244 	pf->num_avail_sw_msix += 1;
3245 	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
3246 }
3247 
3248 /**
3249  * ice_ena_ctrlq_interrupts - enable control queue interrupts
3250  * @hw: pointer to HW structure
3251  * @reg_idx: HW vector index to associate the control queue interrupts with
3252  */
3253 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3254 {
3255 	u32 val;
3256 
3257 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3258 	       PFINT_OICR_CTL_CAUSE_ENA_M);
3259 	wr32(hw, PFINT_OICR_CTL, val);
3260 
3261 	/* enable Admin queue Interrupt causes */
3262 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3263 	       PFINT_FW_CTL_CAUSE_ENA_M);
3264 	wr32(hw, PFINT_FW_CTL, val);
3265 
3266 	/* enable Mailbox queue Interrupt causes */
3267 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3268 	       PFINT_MBX_CTL_CAUSE_ENA_M);
3269 	wr32(hw, PFINT_MBX_CTL, val);
3270 
3271 	/* This enables Sideband queue Interrupt causes */
3272 	val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3273 	       PFINT_SB_CTL_CAUSE_ENA_M);
3274 	wr32(hw, PFINT_SB_CTL, val);
3275 
3276 	ice_flush(hw);
3277 }
3278 
3279 /**
3280  * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3281  * @pf: board private structure
3282  *
3283  * This sets up the handler for MSIX 0, which is used to manage the
3284  * non-queue interrupts, e.g. AdminQ and errors. This is not used
3285  * when in MSI or Legacy interrupt mode.
3286  */
3287 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3288 {
3289 	struct device *dev = ice_pf_to_dev(pf);
3290 	struct ice_hw *hw = &pf->hw;
3291 	int oicr_idx, err = 0;
3292 
3293 	if (!pf->int_name[0])
3294 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3295 			 dev_driver_string(dev), dev_name(dev));
3296 
3297 	/* Do not request IRQ but do enable OICR interrupt since settings are
3298 	 * lost during reset. Note that this function is called only during
3299 	 * rebuild path and not while reset is in progress.
3300 	 */
3301 	if (ice_is_reset_in_progress(pf->state))
3302 		goto skip_req_irq;
3303 
3304 	/* reserve one vector in irq_tracker for misc interrupts */
3305 	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3306 	if (oicr_idx < 0)
3307 		return oicr_idx;
3308 
3309 	pf->num_avail_sw_msix -= 1;
3310 	pf->oicr_idx = (u16)oicr_idx;
3311 
3312 	err = devm_request_threaded_irq(dev,
3313 					pf->msix_entries[pf->oicr_idx].vector,
3314 					ice_misc_intr, ice_misc_intr_thread_fn,
3315 					0, pf->int_name, pf);
3316 	if (err) {
3317 		dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3318 			pf->int_name, err);
3319 		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3320 		pf->num_avail_sw_msix += 1;
3321 		return err;
3322 	}
3323 
3324 skip_req_irq:
3325 	ice_ena_misc_vector(pf);
3326 
3327 	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
3328 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
3329 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3330 
3331 	ice_flush(hw);
3332 	ice_irq_dynamic_ena(hw, NULL, NULL);
3333 
3334 	return 0;
3335 }
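
/* Note on the threaded request above: devm_request_threaded_irq() wires
 * ice_misc_intr() as the hard handler and ice_misc_intr_thread_fn() as
 * its thread. The hard handler returns IRQ_WAKE_THREAD only for the Tx
 * timestamp cause (PFINT_OICR_TSYN_TX_M), so the ice_ptp_process_ts()
 * polling loop, which sleeps via usleep_range(), runs in process context
 * where sleeping is allowed, not in hard-IRQ context.
 */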
3336 
3337 /**
3338  * ice_napi_add - register NAPI handler for the VSI
3339  * @vsi: VSI for which NAPI handler is to be registered
3340  *
3341  * This function is only called in the driver's load path. Registering the NAPI
3342  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3343  * reset/rebuild, etc.)
3344  */
3345 static void ice_napi_add(struct ice_vsi *vsi)
3346 {
3347 	int v_idx;
3348 
3349 	if (!vsi->netdev)
3350 		return;
3351 
3352 	ice_for_each_q_vector(vsi, v_idx)
3353 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3354 			       ice_napi_poll);
3355 }
3356 
3357 /**
3358  * ice_set_ops - set netdev and ethtool ops for the given netdev
3359  * @vsi: the VSI associated with the new netdev
3360  */
3361 static void ice_set_ops(struct ice_vsi *vsi)
3362 {
3363 	struct net_device *netdev = vsi->netdev;
3364 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3365 
3366 	if (ice_is_safe_mode(pf)) {
3367 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3368 		ice_set_ethtool_safe_mode_ops(netdev);
3369 		return;
3370 	}
3371 
3372 	netdev->netdev_ops = &ice_netdev_ops;
3373 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3374 	ice_set_ethtool_ops(netdev);
3375 
3376 	if (vsi->type != ICE_VSI_PF)
3377 		return;
3378 
3379 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
3380 			       NETDEV_XDP_ACT_XSK_ZEROCOPY |
3381 			       NETDEV_XDP_ACT_RX_SG;
3382 }
3383 
3384 /**
3385  * ice_set_netdev_features - set features for the given netdev
3386  * @netdev: netdev instance
3387  */
3388 static void ice_set_netdev_features(struct net_device *netdev)
3389 {
3390 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3391 	bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3392 	netdev_features_t csumo_features;
3393 	netdev_features_t vlano_features;
3394 	netdev_features_t dflt_features;
3395 	netdev_features_t tso_features;
3396 
3397 	if (ice_is_safe_mode(pf)) {
3398 		/* safe mode */
3399 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3400 		netdev->hw_features = netdev->features;
3401 		return;
3402 	}
3403 
3404 	dflt_features = NETIF_F_SG	|
3405 			NETIF_F_HIGHDMA	|
3406 			NETIF_F_NTUPLE	|
3407 			NETIF_F_RXHASH;
3408 
3409 	csumo_features = NETIF_F_RXCSUM	  |
3410 			 NETIF_F_IP_CSUM  |
3411 			 NETIF_F_SCTP_CRC |
3412 			 NETIF_F_IPV6_CSUM;
3413 
3414 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3415 			 NETIF_F_HW_VLAN_CTAG_TX     |
3416 			 NETIF_F_HW_VLAN_CTAG_RX;
3417 
3418 	/* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3419 	if (is_dvm_ena)
3420 		vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3421 
3422 	tso_features = NETIF_F_TSO			|
3423 		       NETIF_F_TSO_ECN			|
3424 		       NETIF_F_TSO6			|
3425 		       NETIF_F_GSO_GRE			|
3426 		       NETIF_F_GSO_UDP_TUNNEL		|
3427 		       NETIF_F_GSO_GRE_CSUM		|
3428 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3429 		       NETIF_F_GSO_PARTIAL		|
3430 		       NETIF_F_GSO_IPXIP4		|
3431 		       NETIF_F_GSO_IPXIP6		|
3432 		       NETIF_F_GSO_UDP_L4;
3433 
3434 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3435 					NETIF_F_GSO_GRE_CSUM;
3436 	/* set features that user can change */
3437 	netdev->hw_features = dflt_features | csumo_features |
3438 			      vlano_features | tso_features;
3439 
3440 	/* add support for HW_CSUM on packets with MPLS header */
3441 	netdev->mpls_features =  NETIF_F_HW_CSUM |
3442 				 NETIF_F_TSO     |
3443 				 NETIF_F_TSO6;
3444 
3445 	/* enable features */
3446 	netdev->features |= netdev->hw_features;
3447 
3448 	netdev->hw_features |= NETIF_F_HW_TC;
3449 	netdev->hw_features |= NETIF_F_LOOPBACK;
3450 
3451 	/* encap and VLAN devices inherit default, csumo and tso features */
3452 	netdev->hw_enc_features |= dflt_features | csumo_features |
3453 				   tso_features;
3454 	netdev->vlan_features |= dflt_features | csumo_features |
3455 				 tso_features;
3456 
3457 	/* advertise support but don't enable by default since only one type of
3458 	 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3459 	 * type turns on, the other has to be turned off. This is enforced by the
3460 	 * ice_fix_features() ndo callback.
3461 	 */
3462 	if (is_dvm_ena)
3463 		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3464 			NETIF_F_HW_VLAN_STAG_TX;
3465 
3466 	/* Leave CRC / FCS stripping enabled by default, but allow the value to
3467 	 * be changed at runtime
3468 	 */
3469 	netdev->hw_features |= NETIF_F_RXFCS;
3470 
3471 	netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
3472 }
3473 
3474 /**
3475  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3476  * @lut: Lookup table
3477  * @rss_table_size: Lookup table size
3478  * @rss_size: Range of queue number for hashing
3479  */
3480 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3481 {
3482 	u16 i;
3483 
3484 	for (i = 0; i < rss_table_size; i++)
3485 		lut[i] = i % rss_size;
3486 }
3487 
3488 /**
3489  * ice_pf_vsi_setup - Set up a PF VSI
3490  * @pf: board private structure
3491  * @pi: pointer to the port_info instance
3492  *
3493  * Returns pointer to the successfully allocated VSI software struct
3494  * on success, otherwise returns NULL on failure.
3495  */
3496 static struct ice_vsi *
3497 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3498 {
3499 	struct ice_vsi_cfg_params params = {};
3500 
3501 	params.type = ICE_VSI_PF;
3502 	params.pi = pi;
3503 	params.flags = ICE_VSI_FLAG_INIT;
3504 
3505 	return ice_vsi_setup(pf, &params);
3506 }
3507 
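/**
 * ice_chnl_vsi_setup - Set up a channel VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @ch: pointer to the channel structure
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */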
3508 static struct ice_vsi *
3509 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3510 		   struct ice_channel *ch)
3511 {
3512 	struct ice_vsi_cfg_params params = {};
3513 
3514 	params.type = ICE_VSI_CHNL;
3515 	params.pi = pi;
3516 	params.ch = ch;
3517 	params.flags = ICE_VSI_FLAG_INIT;
3518 
3519 	return ice_vsi_setup(pf, &params);
3520 }
3521 
3522 /**
3523  * ice_ctrl_vsi_setup - Set up a control VSI
3524  * @pf: board private structure
3525  * @pi: pointer to the port_info instance
3526  *
3527  * Returns pointer to the successfully allocated VSI software struct
3528  * on success, otherwise returns NULL on failure.
3529  */
3530 static struct ice_vsi *
3531 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3532 {
3533 	struct ice_vsi_cfg_params params = {};
3534 
3535 	params.type = ICE_VSI_CTRL;
3536 	params.pi = pi;
3537 	params.flags = ICE_VSI_FLAG_INIT;
3538 
3539 	return ice_vsi_setup(pf, &params);
3540 }
3541 
3542 /**
3543  * ice_lb_vsi_setup - Set up a loopback VSI
3544  * @pf: board private structure
3545  * @pi: pointer to the port_info instance
3546  *
3547  * Returns pointer to the successfully allocated VSI software struct
3548  * on success, otherwise returns NULL on failure.
3549  */
3550 struct ice_vsi *
3551 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3552 {
3553 	struct ice_vsi_cfg_params params = {};
3554 
3555 	params.type = ICE_VSI_LB;
3556 	params.pi = pi;
3557 	params.flags = ICE_VSI_FLAG_INIT;
3558 
3559 	return ice_vsi_setup(pf, &params);
3560 }
3561 
3562 /**
3563  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3564  * @netdev: network interface to be adjusted
3565  * @proto: VLAN TPID
3566  * @vid: VLAN ID to be added
3567  *
3568  * net_device_ops implementation for adding VLAN IDs
3569  */
3570 static int
3571 ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3572 {
3573 	struct ice_netdev_priv *np = netdev_priv(netdev);
3574 	struct ice_vsi_vlan_ops *vlan_ops;
3575 	struct ice_vsi *vsi = np->vsi;
3576 	struct ice_vlan vlan;
3577 	int ret;
3578 
3579 	/* VLAN 0 is added by default during load/reset */
3580 	if (!vid)
3581 		return 0;
3582 
3583 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3584 		usleep_range(1000, 2000);
3585 
3586 	/* Add multicast promisc rule for the VLAN ID to be added if
3587 	 * all-multicast is currently enabled.
3588 	 */
3589 	if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3590 		ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3591 					       ICE_MCAST_VLAN_PROMISC_BITS,
3592 					       vid);
3593 		if (ret)
3594 			goto finish;
3595 	}
3596 
3597 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3598 
3599 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3600 	 * packets aren't pruned by the device's internal switch on Rx
3601 	 */
3602 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3603 	ret = vlan_ops->add_vlan(vsi, &vlan);
3604 	if (ret)
3605 		goto finish;
3606 
3607 	/* If all-multicast is currently enabled and this VLAN ID is the only
3608 	 * one besides VLAN-0, update the look-up type of the multicast promisc
3609 	 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3610 	 */
3611 	if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3612 	    ice_vsi_num_non_zero_vlans(vsi) == 1) {
3613 		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3614 					   ICE_MCAST_PROMISC_BITS, 0);
3615 		ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3616 					 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3617 	}
3618 
3619 finish:
3620 	clear_bit(ICE_CFG_BUSY, vsi->state);
3621 
3622 	return ret;
3623 }
3624 
3625 /**
3626  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3627  * @netdev: network interface to be adjusted
3628  * @proto: VLAN TPID
3629  * @vid: VLAN ID to be removed
3630  *
3631  * net_device_ops implementation for removing VLAN IDs
3632  */
3633 static int
3634 ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3635 {
3636 	struct ice_netdev_priv *np = netdev_priv(netdev);
3637 	struct ice_vsi_vlan_ops *vlan_ops;
3638 	struct ice_vsi *vsi = np->vsi;
3639 	struct ice_vlan vlan;
3640 	int ret;
3641 
3642 	/* don't allow removal of VLAN 0 */
3643 	if (!vid)
3644 		return 0;
3645 
3646 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3647 		usleep_range(1000, 2000);
3648 
3649 	ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3650 				    ICE_MCAST_VLAN_PROMISC_BITS, vid);
3651 	if (ret) {
3652 		netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3653 			   vsi->vsi_num);
3654 		vsi->current_netdev_flags |= IFF_ALLMULTI;
3655 	}
3656 
3657 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3658 
3659 	/* Make sure VLAN delete is successful before updating VLAN
3660 	 * information
3661 	 */
3662 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3663 	ret = vlan_ops->del_vlan(vsi, &vlan);
3664 	if (ret)
3665 		goto finish;
3666 
3667 	/* Remove multicast promisc rule for the removed VLAN ID if
3668 	 * all-multicast is enabled.
3669 	 */
3670 	if (vsi->current_netdev_flags & IFF_ALLMULTI)
3671 		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3672 					   ICE_MCAST_VLAN_PROMISC_BITS, vid);
3673 
3674 	if (!ice_vsi_has_non_zero_vlans(vsi)) {
3675 		/* Update look-up type of multicast promisc rule for VLAN 0
3676 		 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3677 		 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3678 		 */
3679 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3680 			ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3681 						   ICE_MCAST_VLAN_PROMISC_BITS,
3682 						   0);
3683 			ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3684 						 ICE_MCAST_PROMISC_BITS, 0);
3685 		}
3686 	}
3687 
3688 finish:
3689 	clear_bit(ICE_CFG_BUSY, vsi->state);
3690 
3691 	return ret;
3692 }
3693 
3694 /**
3695  * ice_rep_indr_tc_block_unbind - free the indirect block private data
3696  * @cb_priv: indirection block private data
3697  */
3698 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3699 {
3700 	struct ice_indr_block_priv *indr_priv = cb_priv;
3701 
3702 	list_del(&indr_priv->list);
3703 	kfree(indr_priv);
3704 }
3705 
3706 /**
3707  * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3708  * @vsi: VSI struct which has the netdev
3709  */
3710 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3711 {
3712 	struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3713 
3714 	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3715 				 ice_rep_indr_tc_block_unbind);
3716 }
3717 
3718 /**
3719  * ice_tc_indir_block_register - Register TC indirect block notifications
3720  * @vsi: VSI struct which has the netdev
3721  *
3722  * Returns 0 on success, negative value on failure
3723  */
3724 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3725 {
3726 	struct ice_netdev_priv *np;
3727 
3728 	if (!vsi || !vsi->netdev)
3729 		return -EINVAL;
3730 
3731 	np = netdev_priv(vsi->netdev);
3732 
3733 	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3734 	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3735 }
3736 
3737 /**
3738  * ice_get_avail_q_count - Get count of available queues
3739  * @pf_qmap: bitmap to get the available queue count from
3740  * @lock: pointer to a mutex that protects access to pf_qmap
3741  * @size: size of the bitmap
3742  */
3743 static u16
3744 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3745 {
3746 	unsigned long bit;
3747 	u16 count = 0;
3748 
3749 	mutex_lock(lock);
3750 	for_each_clear_bit(bit, pf_qmap, size)
3751 		count++;
3752 	mutex_unlock(lock);
3753 
3754 	return count;
3755 }
3756 
3757 /**
3758  * ice_get_avail_txq_count - Get count of available Tx queues
3759  * @pf: pointer to an ice_pf instance
3760  */
3761 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3762 {
3763 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3764 				     pf->max_pf_txqs);
3765 }
3766 
3767 /**
3768  * ice_get_avail_rxq_count - Get count of available Rx queues
3769  * @pf: pointer to an ice_pf instance
3770  */
3771 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3772 {
3773 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3774 				     pf->max_pf_rxqs);
3775 }
3776 
3777 /**
3778  * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3779  * @pf: board private structure to de-initialize
3780  */
3781 static void ice_deinit_pf(struct ice_pf *pf)
3782 {
3783 	ice_service_task_stop(pf);
3784 	mutex_destroy(&pf->adev_mutex);
3785 	mutex_destroy(&pf->sw_mutex);
3786 	mutex_destroy(&pf->tc_mutex);
3787 	mutex_destroy(&pf->avail_q_mutex);
3788 	mutex_destroy(&pf->vfs.table_lock);
3789 
3790 	if (pf->avail_txqs) {
3791 		bitmap_free(pf->avail_txqs);
3792 		pf->avail_txqs = NULL;
3793 	}
3794 
3795 	if (pf->avail_rxqs) {
3796 		bitmap_free(pf->avail_rxqs);
3797 		pf->avail_rxqs = NULL;
3798 	}
3799 
3800 	if (pf->ptp.clock)
3801 		ptp_clock_unregister(pf->ptp.clock);
3802 }
3803 
3804 /**
3805  * ice_set_pf_caps - set PFs capability flags
3806  * @pf: pointer to the PF instance
3807  */
3808 static void ice_set_pf_caps(struct ice_pf *pf)
3809 {
3810 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3811 
3812 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3813 	if (func_caps->common_cap.rdma)
3814 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3815 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3816 	if (func_caps->common_cap.dcb)
3817 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3818 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3819 	if (func_caps->common_cap.sr_iov_1_1) {
3820 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3821 		pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3822 					      ICE_MAX_SRIOV_VFS);
3823 	}
3824 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3825 	if (func_caps->common_cap.rss_table_size)
3826 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3827 
3828 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3829 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3830 		u16 unused;
3831 
3832 		/* ctrl_vsi_idx will be set to a valid value when flow director
3833 		 * is set up by ice_init_fdir
3834 		 */
3835 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3836 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3837 		/* force guaranteed filter pool for PF */
3838 		ice_alloc_fd_guar_item(&pf->hw, &unused,
3839 				       func_caps->fd_fltr_guar);
3840 		/* force shared filter pool for PF */
3841 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3842 				       func_caps->fd_fltr_best_effort);
3843 	}
3844 
3845 	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3846 	if (func_caps->common_cap.ieee_1588)
3847 		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3848 
3849 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3850 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3851 }
3852 
3853 /**
3854  * ice_init_pf - Initialize general software structures (struct ice_pf)
3855  * @pf: board private structure to initialize
3856  */
3857 static int ice_init_pf(struct ice_pf *pf)
3858 {
3859 	ice_set_pf_caps(pf);
3860 
3861 	mutex_init(&pf->sw_mutex);
3862 	mutex_init(&pf->tc_mutex);
3863 	mutex_init(&pf->adev_mutex);
3864 
3865 	INIT_HLIST_HEAD(&pf->aq_wait_list);
3866 	spin_lock_init(&pf->aq_wait_lock);
3867 	init_waitqueue_head(&pf->aq_wait_queue);
3868 
3869 	init_waitqueue_head(&pf->reset_wait_queue);
3870 
3871 	/* setup service timer and periodic service task */
3872 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3873 	pf->serv_tmr_period = HZ;
3874 	INIT_WORK(&pf->serv_task, ice_service_task);
3875 	clear_bit(ICE_SERVICE_SCHED, pf->state);
3876 
3877 	mutex_init(&pf->avail_q_mutex);
3878 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3879 	if (!pf->avail_txqs)
3880 		return -ENOMEM;
3881 
3882 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3883 	if (!pf->avail_rxqs) {
3884 		bitmap_free(pf->avail_txqs);
3885 		pf->avail_txqs = NULL;
3886 		return -ENOMEM;
3887 	}
3888 
3889 	mutex_init(&pf->vfs.table_lock);
3890 	hash_init(pf->vfs.table);
3891 
3892 	return 0;
3893 }
3894 
3895 /**
3896  * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
3897  * @pf: board private structure
3898  * @v_remain: number of remaining MSI-X vectors to be distributed
3899  *
3900  * Reduce the usage of MSI-X vectors when the entire request cannot be
3901  * fulfilled. pf->num_lan_msix and pf->num_rdma_msix are set based on the
3902  * number of remaining vectors.
3903  */
3904 static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
3905 {
3906 	int v_rdma;
3907 
3908 	if (!ice_is_rdma_ena(pf)) {
3909 		pf->num_lan_msix = v_remain;
3910 		return;
3911 	}
3912 
3913 	/* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
3914 	v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
3915 
3916 	if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
3917 		dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
3918 		clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3919 
3920 		pf->num_rdma_msix = 0;
3921 		pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3922 	} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
3923 		   (v_remain - v_rdma < v_rdma)) {
3924 		/* Support minimum RDMA and give remaining vectors to LAN MSIX */
3925 		pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
3926 		pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
3927 	} else {
3928 		/* Split remaining MSIX with RDMA after accounting for AEQ MSIX
3929 		 */
3930 		pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
3931 				    ICE_RDMA_NUM_AEQ_MSIX;
3932 		pf->num_lan_msix = v_remain - pf->num_rdma_msix;
3933 	}
3934 }
3935 
3936 /**
3937  * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3938  * @pf: board private structure
3939  *
3940  * Compute the number of MSI-X vectors wanted and request them from the OS.
3941  * Adjust device usage if there are not enough vectors. Return the number of
3942  * vectors reserved, or negative on failure.
3943  */
3944 static int ice_ena_msix_range(struct ice_pf *pf)
3945 {
3946 	int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
3947 	struct device *dev = ice_pf_to_dev(pf);
3948 	int err, i;
3949 
3950 	hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
3951 	num_cpus = num_online_cpus();
3952 
3953 	/* LAN miscellaneous handler */
3954 	v_other = ICE_MIN_LAN_OICR_MSIX;
3955 
3956 	/* Flow Director */
3957 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
3958 		v_other += ICE_FDIR_MSIX;
3959 
3960 	/* switchdev */
3961 	v_other += ICE_ESWITCH_MSIX;
3962 
3963 	v_wanted = v_other;
3964 
3965 	/* LAN traffic */
3966 	pf->num_lan_msix = num_cpus;
3967 	v_wanted += pf->num_lan_msix;
3968 
3969 	/* RDMA auxiliary driver */
3970 	if (ice_is_rdma_ena(pf)) {
3971 		pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
3972 		v_wanted += pf->num_rdma_msix;
3973 	}
3974 
3975 	if (v_wanted > hw_num_msix) {
3976 		int v_remain;
3977 
3978 		dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
3979 			 v_wanted, hw_num_msix);
3980 
3981 		if (hw_num_msix < ICE_MIN_MSIX) {
3982 			err = -ERANGE;
3983 			goto exit_err;
3984 		}
3985 
3986 		v_remain = hw_num_msix - v_other;
3987 		if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
3988 			v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
3989 			v_remain = ICE_MIN_LAN_TXRX_MSIX;
3990 		}
3991 
3992 		ice_reduce_msix_usage(pf, v_remain);
3993 		v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
3994 
3995 		dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
3996 			   pf->num_lan_msix);
3997 		if (ice_is_rdma_ena(pf))
3998 			dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
3999 				   pf->num_rdma_msix);
4000 	}
4001 
4002 	pf->msix_entries = devm_kcalloc(dev, v_wanted,
4003 					sizeof(*pf->msix_entries), GFP_KERNEL);
4004 	if (!pf->msix_entries) {
4005 		err = -ENOMEM;
4006 		goto exit_err;
4007 	}
4008 
4009 	for (i = 0; i < v_wanted; i++)
4010 		pf->msix_entries[i].entry = i;
4011 
4012 	/* actually reserve the vectors */
4013 	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
4014 					 ICE_MIN_MSIX, v_wanted);
4015 	if (v_actual < 0) {
4016 		dev_err(dev, "unable to reserve MSI-X vectors\n");
4017 		err = v_actual;
4018 		goto msix_err;
4019 	}
4020 
4021 	if (v_actual < v_wanted) {
4022 		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
4023 			 v_wanted, v_actual);
4024 
4025 		if (v_actual < ICE_MIN_MSIX) {
4026 			/* error if we can't get minimum vectors */
4027 			pci_disable_msix(pf->pdev);
4028 			err = -ERANGE;
4029 			goto msix_err;
4030 		} else {
4031 			int v_remain = v_actual - v_other;
4032 
4033 			if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
4034 				v_remain = ICE_MIN_LAN_TXRX_MSIX;
4035 
4036 			ice_reduce_msix_usage(pf, v_remain);
4037 
4038 			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
4039 				   pf->num_lan_msix);
4040 
4041 			if (ice_is_rdma_ena(pf))
4042 				dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
4043 					   pf->num_rdma_msix);
4044 		}
4045 	}
4046 
4047 	return v_actual;
4048 
4049 msix_err:
4050 	devm_kfree(dev, pf->msix_entries);
4051 
4052 exit_err:
4053 	pf->num_rdma_msix = 0;
4054 	pf->num_lan_msix = 0;
4055 	return err;
4056 }
4057 
4058 /**
4059  * ice_dis_msix - Disable MSI-X interrupt setup in OS
4060  * @pf: board private structure
4061  */
4062 static void ice_dis_msix(struct ice_pf *pf)
4063 {
4064 	pci_disable_msix(pf->pdev);
4065 	devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
4066 	pf->msix_entries = NULL;
4067 }
4068 
4069 /**
4070  * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
4071  * @pf: board private structure
4072  */
4073 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
4074 {
4075 	ice_dis_msix(pf);
4076 
4077 	if (pf->irq_tracker) {
4078 		devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
4079 		pf->irq_tracker = NULL;
4080 	}
4081 }
4082 
4083 /**
4084  * ice_init_interrupt_scheme - Determine proper interrupt scheme
4085  * @pf: board private structure to initialize
4086  */
4087 static int ice_init_interrupt_scheme(struct ice_pf *pf)
4088 {
4089 	int vectors;
4090 
4091 	vectors = ice_ena_msix_range(pf);
4092 
4093 	if (vectors < 0)
4094 		return vectors;
4095 
4096 	/* set up vector assignment tracking */
4097 	pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
4098 				       struct_size(pf->irq_tracker, list, vectors),
4099 				       GFP_KERNEL);
4100 	if (!pf->irq_tracker) {
4101 		ice_dis_msix(pf);
4102 		return -ENOMEM;
4103 	}
4104 
4105 	/* populate the SW interrupt pool with the number of OS-granted IRQs */
4106 	pf->num_avail_sw_msix = (u16)vectors;
4107 	pf->irq_tracker->num_entries = (u16)vectors;
4108 	pf->irq_tracker->end = pf->irq_tracker->num_entries;
4109 
4110 	return 0;
4111 }
4112 
4113 /**
4114  * ice_is_wol_supported - check if WoL is supported
4115  * @hw: pointer to hardware info
4116  *
4117  * Check if WoL is supported based on the HW configuration.
4118  * Returns true if NVM supports and enables WoL for this port, false otherwise
4119  */
4120 bool ice_is_wol_supported(struct ice_hw *hw)
4121 {
4122 	u16 wol_ctrl;
4123 
4124 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
4125 	 * word) indicates WoL is not supported on the corresponding PF ID.
4126 	 */
4127 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
4128 		return false;
4129 
4130 	return !(BIT(hw->port_info->lport) & wol_ctrl);
4131 }
4132 
4133 /**
4134  * ice_vsi_recfg_qs - Change the number of queues on a VSI
4135  * @vsi: VSI being changed
4136  * @new_rx: new number of Rx queues
4137  * @new_tx: new number of Tx queues
4138  * @locked: whether the adev device_lock is held
4139  *
4140  * Only change the number of queues if new_tx or new_rx is non-zero.
4141  *
4142  * Returns 0 on success.
4143  */
4144 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
4145 {
4146 	struct ice_pf *pf = vsi->back;
4147 	int err = 0, timeout = 50;
4148 
4149 	if (!new_rx && !new_tx)
4150 		return -EINVAL;
4151 
4152 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
4153 		timeout--;
4154 		if (!timeout)
4155 			return -EBUSY;
4156 		usleep_range(1000, 2000);
4157 	}
4158 
4159 	if (new_tx)
4160 		vsi->req_txq = (u16)new_tx;
4161 	if (new_rx)
4162 		vsi->req_rxq = (u16)new_rx;
4163 
4164 	/* set for the next time the netdev is started */
4165 	if (!netif_running(vsi->netdev)) {
4166 		ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4167 		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
4168 		goto done;
4169 	}
4170 
4171 	ice_vsi_close(vsi);
4172 	ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4173 	ice_pf_dcb_recfg(pf, locked);
4174 	ice_vsi_open(vsi);
4175 done:
4176 	clear_bit(ICE_CFG_BUSY, pf->state);
4177 	return err;
4178 }
4179 
4180 /**
4181  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4182  * @pf: PF to configure
4183  *
4184  * No VLAN offloads/filtering are advertised in safe mode, so make sure the PF
4185  * VSI can still Tx/Rx VLAN-tagged packets.
4186  */
4187 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4188 {
4189 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4190 	struct ice_vsi_ctx *ctxt;
4191 	struct ice_hw *hw;
4192 	int status;
4193 
4194 	if (!vsi)
4195 		return;
4196 
4197 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4198 	if (!ctxt)
4199 		return;
4200 
4201 	hw = &pf->hw;
4202 	ctxt->info = vsi->info;
4203 
4204 	ctxt->info.valid_sections =
4205 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4206 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
4207 			    ICE_AQ_VSI_PROP_SW_VALID);
4208 
4209 	/* disable VLAN anti-spoof */
4210 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4211 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4212 
4213 	/* disable VLAN pruning and keep all other settings */
4214 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4215 
4216 	/* allow all VLANs on Tx and don't strip on Rx */
4217 	ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4218 		ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4219 
4220 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4221 	if (status) {
4222 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4223 			status, ice_aq_str(hw->adminq.sq_last_status));
4224 	} else {
4225 		vsi->info.sec_flags = ctxt->info.sec_flags;
4226 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4227 		vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4228 	}
4229 
4230 	kfree(ctxt);
4231 }
4232 
4233 /**
4234  * ice_log_pkg_init - log result of DDP package load
4235  * @hw: pointer to hardware info
4236  * @state: state of package load
4237  */
4238 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4239 {
4240 	struct ice_pf *pf = hw->back;
4241 	struct device *dev;
4242 
4243 	dev = ice_pf_to_dev(pf);
4244 
4245 	switch (state) {
4246 	case ICE_DDP_PKG_SUCCESS:
4247 		dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4248 			 hw->active_pkg_name,
4249 			 hw->active_pkg_ver.major,
4250 			 hw->active_pkg_ver.minor,
4251 			 hw->active_pkg_ver.update,
4252 			 hw->active_pkg_ver.draft);
4253 		break;
4254 	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4255 		dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4256 			 hw->active_pkg_name,
4257 			 hw->active_pkg_ver.major,
4258 			 hw->active_pkg_ver.minor,
4259 			 hw->active_pkg_ver.update,
4260 			 hw->active_pkg_ver.draft);
4261 		break;
4262 	case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4263 		dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
4264 			hw->active_pkg_name,
4265 			hw->active_pkg_ver.major,
4266 			hw->active_pkg_ver.minor,
4267 			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4268 		break;
4269 	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4270 		dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4271 			 hw->active_pkg_name,
4272 			 hw->active_pkg_ver.major,
4273 			 hw->active_pkg_ver.minor,
4274 			 hw->active_pkg_ver.update,
4275 			 hw->active_pkg_ver.draft,
4276 			 hw->pkg_name,
4277 			 hw->pkg_ver.major,
4278 			 hw->pkg_ver.minor,
4279 			 hw->pkg_ver.update,
4280 			 hw->pkg_ver.draft);
4281 		break;
4282 	case ICE_DDP_PKG_FW_MISMATCH:
4283 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
4284 		break;
4285 	case ICE_DDP_PKG_INVALID_FILE:
4286 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4287 		break;
4288 	case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4289 		dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
4290 		break;
4291 	case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4292 		dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
4293 			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4294 		break;
4295 	case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4296 		dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
4297 		break;
4298 	case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4299 		dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
4300 		break;
4301 	case ICE_DDP_PKG_LOAD_ERROR:
4302 		dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
4303 		/* poll for reset to complete */
4304 		if (ice_check_reset(hw))
4305 			dev_err(dev, "Error resetting device. Please reload the driver\n");
4306 		break;
4307 	case ICE_DDP_PKG_ERR:
4308 	default:
4309 		dev_err(dev, "An unknown error occurred when loading the DDP package.  Entering Safe Mode.\n");
4310 		break;
4311 	}
4312 }
4313 
4314 /**
4315  * ice_load_pkg - load/reload the DDP Package file
4316  * @firmware: firmware structure when firmware requested or NULL for reload
4317  * @pf: pointer to the PF instance
4318  *
4319  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4320  * initialize HW tables.
4321  */
4322 static void
4323 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4324 {
4325 	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4326 	struct device *dev = ice_pf_to_dev(pf);
4327 	struct ice_hw *hw = &pf->hw;
4328 
4329 	/* Load DDP Package */
4330 	if (firmware && !hw->pkg_copy) {
4331 		state = ice_copy_and_init_pkg(hw, firmware->data,
4332 					      firmware->size);
4333 		ice_log_pkg_init(hw, state);
4334 	} else if (!firmware && hw->pkg_copy) {
4335 		/* Reload package during rebuild after CORER/GLOBR reset */
4336 		state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4337 		ice_log_pkg_init(hw, state);
4338 	} else {
4339 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4340 	}
4341 
4342 	if (!ice_is_init_pkg_successful(state)) {
4343 		/* Safe Mode */
4344 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4345 		return;
4346 	}
4347 
4348 	/* A successful package download is the precondition for advanced
4349 	 * features, hence set the ICE_FLAG_ADV_FEATURES flag
4350 	 */
4351 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4352 }
4353 
4354 /**
4355  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4356  * @pf: pointer to the PF structure
4357  *
4358  * There is no error returned here because the driver should be able to handle
4359  * 128 Byte cache lines, so we only print a warning in case issues are seen,
4360  * specifically with Tx.
4361  */
4362 static void ice_verify_cacheline_size(struct ice_pf *pf)
4363 {
4364 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4365 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4366 			 ICE_CACHE_LINE_BYTES);
4367 }
4368 
4369 /**
4370  * ice_send_version - update firmware with driver version
4371  * @pf: PF struct
4372  *
4373  * Returns 0 on success, else error code
4374  */
4375 static int ice_send_version(struct ice_pf *pf)
4376 {
4377 	struct ice_driver_ver dv;
4378 
4379 	dv.major_ver = 0xff;
4380 	dv.minor_ver = 0xff;
4381 	dv.build_ver = 0xff;
4382 	dv.subbuild_ver = 0;
4383 	strscpy((char *)dv.driver_string, UTS_RELEASE,
4384 		sizeof(dv.driver_string));
4385 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4386 }
4387 
4388 /**
4389  * ice_init_fdir - Initialize flow director VSI and configuration
4390  * @pf: pointer to the PF instance
4391  *
4392  * returns 0 on success, negative on error
4393  */
4394 static int ice_init_fdir(struct ice_pf *pf)
4395 {
4396 	struct device *dev = ice_pf_to_dev(pf);
4397 	struct ice_vsi *ctrl_vsi;
4398 	int err;
4399 
4400 	/* Side Band Flow Director needs to have a control VSI.
4401 	 * Allocate it and store it in the PF.
4402 	 */
4403 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4404 	if (!ctrl_vsi) {
4405 		dev_dbg(dev, "could not create control VSI\n");
4406 		return -ENOMEM;
4407 	}
4408 
4409 	err = ice_vsi_open_ctrl(ctrl_vsi);
4410 	if (err) {
4411 		dev_dbg(dev, "could not open control VSI\n");
4412 		goto err_vsi_open;
4413 	}
4414 
4415 	mutex_init(&pf->hw.fdir_fltr_lock);
4416 
4417 	err = ice_fdir_create_dflt_rules(pf);
4418 	if (err)
4419 		goto err_fdir_rule;
4420 
4421 	return 0;
4422 
4423 err_fdir_rule:
4424 	ice_fdir_release_flows(&pf->hw);
4425 	ice_vsi_close(ctrl_vsi);
4426 err_vsi_open:
4427 	ice_vsi_release(ctrl_vsi);
4428 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4429 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4430 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4431 	}
4432 	return err;
4433 }
4434 
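/**
 * ice_deinit_fdir - release the flow director control VSI
 * @pf: pointer to the PF instance
 */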
4435 static void ice_deinit_fdir(struct ice_pf *pf)
4436 {
4437 	struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4438 
4439 	if (!vsi)
4440 		return;
4441 
4442 	ice_vsi_manage_fdir(vsi, false);
4443 	ice_vsi_release(vsi);
4444 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4445 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4446 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4447 	}
4448 
4449 	mutex_destroy(&pf->hw.fdir_fltr_lock);
4450 }
4451 
4452 /**
4453  * ice_get_opt_fw_name - return optional firmware file name or NULL
4454  * @pf: pointer to the PF instance
4455  */
4456 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4457 {
4458 	/* Optional firmware name is the same as the default, with an additional
4459 	 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
4460 	 */
4461 	struct pci_dev *pdev = pf->pdev;
4462 	char *opt_fw_filename;
4463 	u64 dsn;
4464 
4465 	/* Determine the name of the optional file using the DSN (two
4466 	 * dwords following the start of the DSN Capability).
4467 	 */
4468 	dsn = pci_get_dsn(pdev);
4469 	if (!dsn)
4470 		return NULL;
4471 
4472 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4473 	if (!opt_fw_filename)
4474 		return NULL;
4475 
4476 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4477 		 ICE_DDP_PKG_PATH, dsn);
4478 
4479 	return opt_fw_filename;
4480 }
4481 
4482 /**
4483  * ice_request_fw - request and load the DDP package file
4484  * @pf: pointer to the PF instance
4485  */
4486 static void ice_request_fw(struct ice_pf *pf)
4487 {
4488 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4489 	const struct firmware *firmware = NULL;
4490 	struct device *dev = ice_pf_to_dev(pf);
4491 	int err = 0;
4492 
4493 	/* An optional device-specific DDP (if present) overrides the default
4494 	 * DDP package file. The kernel logs a debug message if the file doesn't
4495 	 * exist, and warning messages for other errors.
4496 	 */
4497 	if (opt_fw_filename) {
4498 		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4499 		if (err) {
4500 			kfree(opt_fw_filename);
4501 			goto dflt_pkg_load;
4502 		}
4503 
4504 		/* request for firmware was successful. Download to device */
4505 		ice_load_pkg(firmware, pf);
4506 		kfree(opt_fw_filename);
4507 		release_firmware(firmware);
4508 		return;
4509 	}
4510 
4511 dflt_pkg_load:
4512 	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4513 	if (err) {
4514 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4515 		return;
4516 	}
4517 
4518 	/* request for firmware was successful. Download to device */
4519 	ice_load_pkg(firmware, pf);
4520 	release_firmware(firmware);
4521 }
4522 
4523 /**
4524  * ice_print_wake_reason - show the wake up cause in the log
4525  * @pf: pointer to the PF struct
4526  */
4527 static void ice_print_wake_reason(struct ice_pf *pf)
4528 {
4529 	u32 wus = pf->wakeup_reason;
4530 	const char *wake_str;
4531 
4532 	/* if no wake event, nothing to print */
4533 	if (!wus)
4534 		return;
4535 
4536 	if (wus & PFPM_WUS_LNKC_M)
4537 		wake_str = "Link\n";
4538 	else if (wus & PFPM_WUS_MAG_M)
4539 		wake_str = "Magic Packet\n";
4540 	else if (wus & PFPM_WUS_MNG_M)
4541 		wake_str = "Management\n";
4542 	else if (wus & PFPM_WUS_FW_RST_WK_M)
4543 		wake_str = "Firmware Reset\n";
4544 	else
4545 		wake_str = "Unknown\n";
4546 
4547 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4548 }
4549 
4550 /**
4551  * ice_register_netdev - register netdev
4552  * @vsi: pointer to the VSI struct
4553  */
4554 static int ice_register_netdev(struct ice_vsi *vsi)
4555 {
4556 	int err;
4557 
4558 	if (!vsi || !vsi->netdev)
4559 		return -EIO;
4560 
4561 	err = register_netdev(vsi->netdev);
4562 	if (err)
4563 		return err;
4564 
4565 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4566 	netif_carrier_off(vsi->netdev);
4567 	netif_tx_stop_all_queues(vsi->netdev);
4568 
4569 	return 0;
4570 }
4571 
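/**
 * ice_unregister_netdev - unregister the netdev of the given VSI
 * @vsi: pointer to the VSI struct
 */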
4572 static void ice_unregister_netdev(struct ice_vsi *vsi)
4573 {
4574 	if (!vsi || !vsi->netdev)
4575 		return;
4576 
4577 	unregister_netdev(vsi->netdev);
4578 	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4579 }
4580 
4581 /**
4582  * ice_cfg_netdev - Allocate and configure a netdev
4583  * @vsi: the VSI associated with the new netdev
4584  *
4585  * Returns 0 on success, negative value on failure
4586  */
4587 static int ice_cfg_netdev(struct ice_vsi *vsi)
4588 {
4589 	struct ice_netdev_priv *np;
4590 	struct net_device *netdev;
4591 	u8 mac_addr[ETH_ALEN];
4592 
4593 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4594 				    vsi->alloc_rxq);
4595 	if (!netdev)
4596 		return -ENOMEM;
4597 
4598 	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4599 	vsi->netdev = netdev;
4600 	np = netdev_priv(netdev);
4601 	np->vsi = vsi;
4602 
4603 	ice_set_netdev_features(netdev);
4604 	ice_set_ops(vsi);
4605 
4606 	if (vsi->type == ICE_VSI_PF) {
4607 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4608 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4609 		eth_hw_addr_set(netdev, mac_addr);
4610 	}
4611 
4612 	netdev->priv_flags |= IFF_UNICAST_FLT;
4613 
4614 	/* Setup netdev TC information */
4615 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4616 
4617 	netdev->max_mtu = ICE_MAX_MTU;
4618 
4619 	return 0;
4620 }
4621 
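/**
 * ice_decfg_netdev - free the netdev allocated by ice_cfg_netdev
 * @vsi: the VSI associated with the netdev
 */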
4622 static void ice_decfg_netdev(struct ice_vsi *vsi)
4623 {
4624 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4625 	free_netdev(vsi->netdev);
4626 	vsi->netdev = NULL;
4627 }
4628 
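/**
 * ice_start_eth - initialize MAC filters and open the given VSI
 * @vsi: VSI to bring up
 *
 * Returns 0 on success, negative value on failure.
 */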
4629 static int ice_start_eth(struct ice_vsi *vsi)
4630 {
4631 	int err;
4632 
4633 	err = ice_init_mac_fltr(vsi->back);
4634 	if (err)
4635 		return err;
4636 
4637 	rtnl_lock();
4638 	err = ice_vsi_open(vsi);
4639 	rtnl_unlock();
4640 
4641 	return err;
4642 }
4643 
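/**
 * ice_init_eth - set up the netdev for the main VSI
 * @pf: pointer to the PF instance
 *
 * Allocate and register the netdev, then set up the DCB netlink interface,
 * MAC filters, the devlink PF port, TC indirect block notifications and NAPI.
 *
 * Returns 0 on success, negative value on failure.
 */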
4644 static int ice_init_eth(struct ice_pf *pf)
4645 {
4646 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4647 	int err;
4648 
4649 	if (!vsi)
4650 		return -EINVAL;
4651 
4652 	/* init channel list */
4653 	INIT_LIST_HEAD(&vsi->ch_list);
4654 
4655 	err = ice_cfg_netdev(vsi);
4656 	if (err)
4657 		return err;
4658 	/* Setup DCB netlink interface */
4659 	ice_dcbnl_setup(vsi);
4660 
4661 	err = ice_init_mac_fltr(pf);
4662 	if (err)
4663 		goto err_init_mac_fltr;
4664 
4665 	err = ice_devlink_create_pf_port(pf);
4666 	if (err)
4667 		goto err_devlink_create_pf_port;
4668 
4669 	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
4670 
4671 	err = ice_register_netdev(vsi);
4672 	if (err)
4673 		goto err_register_netdev;
4674 
4675 	err = ice_tc_indir_block_register(vsi);
4676 	if (err)
4677 		goto err_tc_indir_block_register;
4678 
4679 	ice_napi_add(vsi);
4680 
4681 	return 0;
4682 
4683 err_tc_indir_block_register:
4684 	ice_unregister_netdev(vsi);
4685 err_register_netdev:
4686 	ice_devlink_destroy_pf_port(pf);
4687 err_devlink_create_pf_port:
4688 err_init_mac_fltr:
4689 	ice_decfg_netdev(vsi);
4690 	return err;
4691 }
4692 
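/**
 * ice_deinit_eth - unroll what was done by ice_init_eth
 * @pf: pointer to the PF instance
 */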
4693 static void ice_deinit_eth(struct ice_pf *pf)
4694 {
4695 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4696 
4697 	if (!vsi)
4698 		return;
4699 
4700 	ice_vsi_close(vsi);
4701 	ice_unregister_netdev(vsi);
4702 	ice_devlink_destroy_pf_port(pf);
4703 	ice_tc_indir_block_unregister(vsi);
4704 	ice_decfg_netdev(vsi);
4705 }
4706 
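/**
 * ice_init_dev - initialize HW, the DDP package and the interrupt scheme
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative value on failure.
 */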
4707 static int ice_init_dev(struct ice_pf *pf)
4708 {
4709 	struct device *dev = ice_pf_to_dev(pf);
4710 	struct ice_hw *hw = &pf->hw;
4711 	int err;
4712 
4713 	err = ice_init_hw(hw);
4714 	if (err) {
4715 		dev_err(dev, "ice_init_hw failed: %d\n", err);
4716 		return err;
4717 	}
4718 
4719 	ice_init_feature_support(pf);
4720 
4721 	ice_request_fw(pf);
4722 
4723 	/* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
4724 	 * set in pf->flags, which will cause ice_is_safe_mode to return
4725 	 * true
4726 	 */
4727 	if (ice_is_safe_mode(pf)) {
4728 		/* we already got function/device capabilities but these don't
4729 		 * reflect what the driver needs to do in safe mode. Instead of
4730 		 * adding conditional logic everywhere to ignore these
4731 		 * device/function capabilities, override them.
4732 		 */
4733 		ice_set_safe_mode_caps(hw);
4734 	}
4735 
4736 	err = ice_init_pf(pf);
4737 	if (err) {
4738 		dev_err(dev, "ice_init_pf failed: %d\n", err);
4739 		goto err_init_pf;
4740 	}
4741 
4742 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4743 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4744 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4745 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4746 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4747 		pf->hw.udp_tunnel_nic.tables[0].n_entries =
4748 			pf->hw.tnl.valid_count[TNL_VXLAN];
4749 		pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
4750 			UDP_TUNNEL_TYPE_VXLAN;
4751 	}
4752 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4753 		pf->hw.udp_tunnel_nic.tables[1].n_entries =
4754 			pf->hw.tnl.valid_count[TNL_GENEVE];
4755 		pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
4756 			UDP_TUNNEL_TYPE_GENEVE;
4757 	}
4758 
4759 	err = ice_init_interrupt_scheme(pf);
4760 	if (err) {
4761 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4762 		err = -EIO;
4763 		goto err_init_interrupt_scheme;
4764 	}
4765 
4766 	/* In case of MSI-X, we are going to set up the misc vector right here
4767 	 * to handle admin queue events etc. In case of legacy and MSI,
4768 	 * the misc functionality and queue processing are combined in
4769 	 * the same vector, and that gets set up at open.
4770 	 */
4771 	err = ice_req_irq_msix_misc(pf);
4772 	if (err) {
4773 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4774 		goto err_req_irq_msix_misc;
4775 	}
4776 
4777 	return 0;
4778 
4779 err_req_irq_msix_misc:
4780 	ice_clear_interrupt_scheme(pf);
4781 err_init_interrupt_scheme:
4782 	ice_deinit_pf(pf);
4783 err_init_pf:
4784 	ice_deinit_hw(hw);
4785 	return err;
4786 }
4787 
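/**
 * ice_deinit_dev - unroll what was done by ice_init_dev
 * @pf: pointer to the PF instance
 */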
4788 static void ice_deinit_dev(struct ice_pf *pf)
4789 {
4790 	ice_free_irq_msix_misc(pf);
4791 	ice_clear_interrupt_scheme(pf);
4792 	ice_deinit_pf(pf);
4793 	ice_deinit_hw(&pf->hw);
4794 }
4795 
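/**
 * ice_init_features - initialize DDP-driven features (PTP, GNSS, FD, DCB, LAG)
 * @pf: pointer to the PF instance
 *
 * Does nothing in safe mode. Individual feature init failures are non-fatal
 * to driver load.
 */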
4796 static void ice_init_features(struct ice_pf *pf)
4797 {
4798 	struct device *dev = ice_pf_to_dev(pf);
4799 
4800 	if (ice_is_safe_mode(pf))
4801 		return;
4802 
4803 	/* initialize DDP driven features */
4804 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4805 		ice_ptp_init(pf);
4806 
4807 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4808 		ice_gnss_init(pf);
4809 
4810 	/* Note: Flow director init failure is non-fatal to load */
4811 	if (ice_init_fdir(pf))
4812 		dev_err(dev, "could not initialize flow director\n");
4813 
4814 	/* Note: DCB init failure is non-fatal to load */
4815 	if (ice_init_pf_dcb(pf, false)) {
4816 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4817 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4818 	} else {
4819 		ice_cfg_lldp_mib_change(&pf->hw, true);
4820 	}
4821 
4822 	if (ice_init_lag(pf))
4823 		dev_warn(dev, "Failed to init link aggregation support\n");
4824 }
4825 
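/**
 * ice_deinit_features - release the features set up by ice_init_features
 * @pf: pointer to the PF instance
 */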
4826 static void ice_deinit_features(struct ice_pf *pf)
4827 {
4828 	ice_deinit_lag(pf);
4829 	if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4830 		ice_cfg_lldp_mib_change(&pf->hw, false);
4831 	ice_deinit_fdir(pf);
4832 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4833 		ice_gnss_exit(pf);
4834 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4835 		ice_ptp_release(pf);
4836 }
4837 
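/**
 * ice_init_wakeup - save and log the wake reason, then disable WoL
 * @pf: pointer to the PF instance
 */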
4838 static void ice_init_wakeup(struct ice_pf *pf)
4839 {
4840 	/* Save wakeup reason register for later use */
4841 	pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4842 
4843 	/* check for a power management event */
4844 	ice_print_wake_reason(pf);
4845 
4846 	/* clear wake status, all bits */
4847 	wr32(&pf->hw, PFPM_WUS, U32_MAX);
4848 
4849 	/* Disable WoL at init, wait for user to enable */
4850 	device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4851 }
4852 
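/**
 * ice_init_link - set up link events and initial PHY settings
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative value on failure.
 */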
4853 static int ice_init_link(struct ice_pf *pf)
4854 {
4855 	struct device *dev = ice_pf_to_dev(pf);
4856 	int err;
4857 
4858 	err = ice_init_link_events(pf->hw.port_info);
4859 	if (err) {
4860 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4861 		return err;
4862 	}
4863 
4864 	/* not a fatal error if this fails */
4865 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4866 	if (err)
4867 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4868 
4869 	/* not a fatal error if this fails */
4870 	err = ice_update_link_info(pf->hw.port_info);
4871 	if (err)
4872 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4873 
4874 	ice_init_link_dflt_override(pf->hw.port_info);
4875 
4876 	ice_check_link_cfg_err(pf,
4877 			       pf->hw.port_info->phy.link_info.link_cfg_err);
4878 
4879 	/* if media available, initialize PHY settings */
4880 	if (pf->hw.port_info->phy.link_info.link_info &
4881 	    ICE_AQ_MEDIA_AVAILABLE) {
4882 		/* not a fatal error if this fails */
4883 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4884 		if (err)
4885 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4886 
4887 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4888 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4889 
4890 			if (vsi)
4891 				ice_configure_phy(vsi);
4892 		}
4893 	} else {
4894 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4895 	}
4896 
4897 	return err;
4898 }
4899 
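/**
 * ice_init_pf_sw - set up the first switch struct and the PF VSI
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative value on failure.
 */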
4900 static int ice_init_pf_sw(struct ice_pf *pf)
4901 {
4902 	bool dvm = ice_is_dvm_ena(&pf->hw);
4903 	struct ice_vsi *vsi;
4904 	int err;
4905 
4906 	/* create switch struct for the switch element created by FW on boot */
4907 	pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
4908 	if (!pf->first_sw)
4909 		return -ENOMEM;
4910 
4911 	if (pf->hw.evb_veb)
4912 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4913 	else
4914 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4915 
4916 	pf->first_sw->pf = pf;
4917 
4918 	/* record the sw_id available for later use */
4919 	pf->first_sw->sw_id = pf->hw.port_info->sw_id;
4920 
4921 	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
4922 	if (err)
4923 		goto err_aq_set_port_params;
4924 
4925 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
4926 	if (!vsi) {
4927 		err = -ENOMEM;
4928 		goto err_pf_vsi_setup;
4929 	}
4930 
4931 	return 0;
4932 
4933 err_pf_vsi_setup:
4934 err_aq_set_port_params:
4935 	kfree(pf->first_sw);
4936 	return err;
4937 }
4938 
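/**
 * ice_deinit_pf_sw - release the main VSI and the first switch struct
 * @pf: pointer to the PF instance
 */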
4939 static void ice_deinit_pf_sw(struct ice_pf *pf)
4940 {
4941 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4942 
4943 	if (!vsi)
4944 		return;
4945 
4946 	ice_vsi_release(vsi);
4947 	kfree(pf->first_sw);
4948 }
4949 
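/**
 * ice_alloc_vsis - allocate the PF VSI array and per-VSI stats
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative value on failure.
 */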
4950 static int ice_alloc_vsis(struct ice_pf *pf)
4951 {
4952 	struct device *dev = ice_pf_to_dev(pf);
4953 
4954 	pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
4955 	if (!pf->num_alloc_vsi)
4956 		return -EIO;
4957 
4958 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4959 		dev_warn(dev,
4960 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4961 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4962 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4963 	}
4964 
4965 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4966 			       GFP_KERNEL);
4967 	if (!pf->vsi)
4968 		return -ENOMEM;
4969 
4970 	pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
4971 				     sizeof(*pf->vsi_stats), GFP_KERNEL);
4972 	if (!pf->vsi_stats) {
4973 		devm_kfree(dev, pf->vsi);
4974 		return -ENOMEM;
4975 	}
4976 
4977 	return 0;
4978 }
4979 
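/**
 * ice_dealloc_vsis - free the arrays allocated by ice_alloc_vsis
 * @pf: pointer to the PF instance
 */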
4980 static void ice_dealloc_vsis(struct ice_pf *pf)
4981 {
4982 	devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
4983 	pf->vsi_stats = NULL;
4984 
4985 	pf->num_alloc_vsi = 0;
4986 	devm_kfree(ice_pf_to_dev(pf), pf->vsi);
4987 	pf->vsi = NULL;
4988 }
4989 
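/**
 * ice_init_devlink - register devlink params, regions and the instance
 * @pf: pointer to the PF instance
 *
 * Returns 0 on success, negative value on failure.
 */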
4990 static int ice_init_devlink(struct ice_pf *pf)
4991 {
4992 	int err;
4993 
4994 	err = ice_devlink_register_params(pf);
4995 	if (err)
4996 		return err;
4997 
4998 	ice_devlink_init_regions(pf);
4999 	ice_devlink_register(pf);
5000 
5001 	return 0;
5002 }
5003 
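/**
 * ice_deinit_devlink - unroll what was done by ice_init_devlink
 * @pf: pointer to the PF instance
 */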
5004 static void ice_deinit_devlink(struct ice_pf *pf)
5005 {
5006 	ice_devlink_unregister(pf);
5007 	ice_devlink_destroy_regions(pf);
5008 	ice_devlink_unregister_params(pf);
5009 }
5010 
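/**
 * ice_init - perform the common PF initialization sequence
 * @pf: pointer to the PF instance
 *
 * Initialize the device, allocate VSIs, set up the PF switch, wakeup, link
 * and driver version, then start the service timer.
 *
 * Returns 0 on success, negative value on failure.
 */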
5011 static int ice_init(struct ice_pf *pf)
5012 {
5013 	int err;
5014 
5015 	err = ice_init_dev(pf);
5016 	if (err)
5017 		return err;
5018 
5019 	err = ice_alloc_vsis(pf);
5020 	if (err)
5021 		goto err_alloc_vsis;
5022 
5023 	err = ice_init_pf_sw(pf);
5024 	if (err)
5025 		goto err_init_pf_sw;
5026 
5027 	ice_init_wakeup(pf);
5028 
5029 	err = ice_init_link(pf);
5030 	if (err)
5031 		goto err_init_link;
5032 
5033 	err = ice_send_version(pf);
5034 	if (err)
5035 		goto err_init_link;
5036 
5037 	ice_verify_cacheline_size(pf);
5038 
5039 	if (ice_is_safe_mode(pf))
5040 		ice_set_safe_mode_vlan_cfg(pf);
5041 	else
5042 		/* print PCIe link speed and width */
5043 		pcie_print_link_status(pf->pdev);
5044 
5045 	/* ready to go, so clear down state bit */
5046 	clear_bit(ICE_DOWN, pf->state);
5047 	clear_bit(ICE_SERVICE_DIS, pf->state);
5048 
5049 	/* since everything is good, start the service timer */
5050 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5051 
5052 	return 0;
5053 
5054 err_init_link:
5055 	ice_deinit_pf_sw(pf);
5056 err_init_pf_sw:
5057 	ice_dealloc_vsis(pf);
5058 err_alloc_vsis:
5059 	ice_deinit_dev(pf);
5060 	return err;
5061 }
5062 
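/**
 * ice_deinit - unroll what was done by ice_init
 * @pf: pointer to the PF instance
 */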
5063 static void ice_deinit(struct ice_pf *pf)
5064 {
5065 	set_bit(ICE_SERVICE_DIS, pf->state);
5066 	set_bit(ICE_DOWN, pf->state);
5067 
5068 	ice_deinit_pf_sw(pf);
5069 	ice_dealloc_vsis(pf);
5070 	ice_deinit_dev(pf);
5071 }
5072 
5073 /**
5074  * ice_load - load the PF by initializing HW and starting the main VSI
5075  * @pf: pointer to the PF instance
5076  */
5077 int ice_load(struct ice_pf *pf)
5078 {
5079 	struct ice_vsi_cfg_params params = {};
5080 	struct ice_vsi *vsi;
5081 	int err;
5082 
5083 	err = ice_reset(&pf->hw, ICE_RESET_PFR);
5084 	if (err)
5085 		return err;
5086 
5087 	err = ice_init_dev(pf);
5088 	if (err)
5089 		return err;
5090 
5091 	vsi = ice_get_main_vsi(pf);
5092 
5093 	params = ice_vsi_to_params(vsi);
5094 	params.flags = ICE_VSI_FLAG_INIT;
5095 
5096 	err = ice_vsi_cfg(vsi, &params);
5097 	if (err)
5098 		goto err_vsi_cfg;
5099 
5100 	err = ice_start_eth(ice_get_main_vsi(pf));
5101 	if (err)
5102 		goto err_start_eth;
5103 
5104 	err = ice_init_rdma(pf);
5105 	if (err)
5106 		goto err_init_rdma;
5107 
5108 	ice_init_features(pf);
5109 	ice_service_task_restart(pf);
5110 
5111 	clear_bit(ICE_DOWN, pf->state);
5112 
5113 	return 0;
5114 
5115 err_init_rdma:
5116 	ice_vsi_close(ice_get_main_vsi(pf));
5117 err_start_eth:
5118 	ice_vsi_decfg(ice_get_main_vsi(pf));
5119 err_vsi_cfg:
5120 	ice_deinit_dev(pf);
5121 	return err;
5122 }
5123 
5124 /**
5125  * ice_unload - unload the PF by stopping the main VSI and deinitializing HW
5126  * @pf: pointer to the PF instance
5127  */
5128 void ice_unload(struct ice_pf *pf)
5129 {
5130 	ice_deinit_features(pf);
5131 	ice_deinit_rdma(pf);
5132 	ice_vsi_close(ice_get_main_vsi(pf));
5133 	ice_vsi_decfg(ice_get_main_vsi(pf));
5134 	ice_deinit_dev(pf);
5135 }
5136 
5137 /**
5138  * ice_probe - Device initialization routine
5139  * @pdev: PCI device information struct
5140  * @ent: entry in ice_pci_tbl
5141  *
5142  * Returns 0 on success, negative on failure
5143  */
5144 static int
5145 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
5146 {
5147 	struct device *dev = &pdev->dev;
5148 	struct ice_pf *pf;
5149 	struct ice_hw *hw;
5150 	int err;
5151 
5152 	if (pdev->is_virtfn) {
5153 		dev_err(dev, "can't probe a virtual function\n");
5154 		return -EINVAL;
5155 	}
5156 
5157 	/* this driver uses devres, see
5158 	 * Documentation/driver-api/driver-model/devres.rst
5159 	 */
5160 	err = pcim_enable_device(pdev);
5161 	if (err)
5162 		return err;
5163 
5164 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
5165 	if (err) {
5166 		dev_err(dev, "BAR0 I/O map error %d\n", err);
5167 		return err;
5168 	}
5169 
5170 	pf = ice_allocate_pf(dev);
5171 	if (!pf)
5172 		return -ENOMEM;
5173 
5174 	/* initialize Auxiliary index to invalid value */
5175 	pf->aux_idx = -1;
5176 
5177 	/* set up for high or low DMA */
5178 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
5179 	if (err) {
5180 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
5181 		return err;
5182 	}
5183 
5184 	pci_set_master(pdev);
5185 
5186 	pf->pdev = pdev;
5187 	pci_set_drvdata(pdev, pf);
5188 	set_bit(ICE_DOWN, pf->state);
5189 	/* Disable service task until DOWN bit is cleared */
5190 	set_bit(ICE_SERVICE_DIS, pf->state);
5191 
5192 	hw = &pf->hw;
5193 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
5194 	pci_save_state(pdev);
5195 
5196 	hw->back = pf;
5197 	hw->port_info = NULL;
5198 	hw->vendor_id = pdev->vendor;
5199 	hw->device_id = pdev->device;
5200 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
5201 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
5202 	hw->subsystem_device_id = pdev->subsystem_device;
5203 	hw->bus.device = PCI_SLOT(pdev->devfn);
5204 	hw->bus.func = PCI_FUNC(pdev->devfn);
5205 	ice_set_ctrlq_len(hw);
5206 
5207 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
5208 
5209 #ifndef CONFIG_DYNAMIC_DEBUG
5210 	if (debug < -1)
5211 		hw->debug_mask = debug;
5212 #endif
5213 
5214 	err = ice_init(pf);
5215 	if (err)
5216 		goto err_init;
5217 
5218 	err = ice_init_eth(pf);
5219 	if (err)
5220 		goto err_init_eth;
5221 
5222 	err = ice_init_rdma(pf);
5223 	if (err)
5224 		goto err_init_rdma;
5225 
5226 	err = ice_init_devlink(pf);
5227 	if (err)
5228 		goto err_init_devlink;
5229 
5230 	ice_init_features(pf);
5231 
5232 	return 0;
5233 
5234 err_init_devlink:
5235 	ice_deinit_rdma(pf);
5236 err_init_rdma:
5237 	ice_deinit_eth(pf);
5238 err_init_eth:
5239 	ice_deinit(pf);
5240 err_init:
5241 	pci_disable_device(pdev);
5242 	return err;
5243 }
5244 
5245 /**
5246  * ice_set_wake - enable or disable Wake on LAN
5247  * @pf: pointer to the PF struct
5248  *
5249  * Simple helper for WoL control
5250  */
5251 static void ice_set_wake(struct ice_pf *pf)
5252 {
5253 	struct ice_hw *hw = &pf->hw;
5254 	bool wol = pf->wol_ena;
5255 
5256 	/* clear wake state, otherwise new wake events won't fire */
5257 	wr32(hw, PFPM_WUS, U32_MAX);
5258 
5259 	/* enable / disable APM wake up, no RMW needed */
5260 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5261 
5262 	/* set magic packet filter enabled */
5263 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5264 }
5265 
5266 /**
5267  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5268  * @pf: pointer to the PF struct
5269  *
5270  * Issue firmware command to enable multicast magic wake, making
5271  * sure that any locally administered address (LAA) is used for
5272  * wake, and that PF reset doesn't undo the LAA.
5273  */
5274 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5275 {
5276 	struct device *dev = ice_pf_to_dev(pf);
5277 	struct ice_hw *hw = &pf->hw;
5278 	u8 mac_addr[ETH_ALEN];
5279 	struct ice_vsi *vsi;
5280 	int status;
5281 	u8 flags;
5282 
5283 	if (!pf->wol_ena)
5284 		return;
5285 
5286 	vsi = ice_get_main_vsi(pf);
5287 	if (!vsi)
5288 		return;
5289 
5290 	/* Get current MAC address in case it's an LAA */
5291 	if (vsi->netdev)
5292 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5293 	else
5294 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5295 
5296 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5297 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5298 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5299 
5300 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5301 	if (status)
5302 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5303 			status, ice_aq_str(hw->adminq.sq_last_status));
5304 }
5305 
5306 /**
5307  * ice_remove - Device removal routine
5308  * @pdev: PCI device information struct
5309  */
5310 static void ice_remove(struct pci_dev *pdev)
5311 {
5312 	struct ice_pf *pf = pci_get_drvdata(pdev);
5313 	int i;
5314 
5315 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5316 		if (!ice_is_reset_in_progress(pf->state))
5317 			break;
5318 		msleep(100);
5319 	}
5320 
5321 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5322 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5323 		ice_free_vfs(pf);
5324 	}
5325 
5326 	ice_service_task_stop(pf);
5327 	ice_aq_cancel_waiting_tasks(pf);
5328 	set_bit(ICE_DOWN, pf->state);
5329 
5330 	if (!ice_is_safe_mode(pf))
5331 		ice_remove_arfs(pf);
5332 	ice_deinit_features(pf);
5333 	ice_deinit_devlink(pf);
5334 	ice_deinit_rdma(pf);
5335 	ice_deinit_eth(pf);
5336 	ice_deinit(pf);
5337 
5338 	ice_vsi_release_all(pf);
5339 
5340 	ice_setup_mc_magic_wake(pf);
5341 	ice_set_wake(pf);
5342 
5343 	/* Issue a PFR as part of the prescribed driver unload flow.  Do not
5344 	 * do it via ice_schedule_reset() since there is no need to rebuild
5345 	 * and the service task is already stopped.
5346 	 */
5347 	ice_reset(&pf->hw, ICE_RESET_PFR);
5348 	pci_wait_for_pending_transaction(pdev);
5349 	pci_disable_device(pdev);
5350 }
5351 
5352 /**
5353  * ice_shutdown - PCI callback for shutting down device
5354  * @pdev: PCI device information struct
5355  */
5356 static void ice_shutdown(struct pci_dev *pdev)
5357 {
5358 	struct ice_pf *pf = pci_get_drvdata(pdev);
5359 
5360 	ice_remove(pdev);
5361 
5362 	if (system_state == SYSTEM_POWER_OFF) {
5363 		pci_wake_from_d3(pdev, pf->wol_ena);
5364 		pci_set_power_state(pdev, PCI_D3hot);
5365 	}
5366 }
5367 
5368 #ifdef CONFIG_PM
5369 /**
5370  * ice_prepare_for_shutdown - prep for PCI shutdown
5371  * @pf: board private structure
5372  *
5373  * Inform or close all dependent features in prep for PCI device shutdown
5374  */
5375 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5376 {
5377 	struct ice_hw *hw = &pf->hw;
5378 	u32 v;
5379 
5380 	/* Notify VFs of impending reset */
5381 	if (ice_check_sq_alive(hw, &hw->mailboxq))
5382 		ice_vc_notify_reset(pf);
5383 
5384 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5385 
5386 	/* disable the VSIs and their queues that are not already DOWN */
5387 	ice_pf_dis_all_vsi(pf, false);
5388 
5389 	ice_for_each_vsi(pf, v)
5390 		if (pf->vsi[v])
5391 			pf->vsi[v]->vsi_num = 0;
5392 
5393 	ice_shutdown_all_ctrlq(hw);
5394 }
5395 
5396 /**
5397  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5398  * @pf: board private structure to reinitialize
5399  *
5400  * This routine reinitializes the interrupt scheme that was cleared during
5401  * the power management suspend callback.
5402  *
5403  * This should be called during resume routine to re-allocate the q_vectors
5404  * and reacquire interrupts.
5405  */
5406 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5407 {
5408 	struct device *dev = ice_pf_to_dev(pf);
5409 	int ret, v;
5410 
5411 	/* Since we clear the MSIX flag during suspend, we need to
5412 	 * set it back during resume.
5413 	 */
5414 
5415 	ret = ice_init_interrupt_scheme(pf);
5416 	if (ret) {
5417 		dev_err(dev, "Failed to re-initialize interrupt scheme, error %d\n", ret);
5418 		return ret;
5419 	}
5420 
5421 	/* Remap vectors and rings, after successful re-init interrupts */
5422 	ice_for_each_vsi(pf, v) {
5423 		if (!pf->vsi[v])
5424 			continue;
5425 
5426 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5427 		if (ret)
5428 			goto err_reinit;
5429 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5430 	}
5431 
5432 	ret = ice_req_irq_msix_misc(pf);
5433 	if (ret) {
5434 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5435 			ret);
5436 		goto err_reinit;
5437 	}
5438 
5439 	return 0;
5440 
5441 err_reinit:
5442 	while (v--)
5443 		if (pf->vsi[v])
5444 			ice_vsi_free_q_vectors(pf->vsi[v]);
5445 
5446 	return ret;
5447 }
5448 
5449 /**
5450  * ice_suspend - PM callback for suspending the device
5451  * @dev: generic device information structure
5452  *
5453  * Power Management callback to quiesce the device and prepare
5454  * for D3 transition.
5455  */
5456 static int __maybe_unused ice_suspend(struct device *dev)
5457 {
5458 	struct pci_dev *pdev = to_pci_dev(dev);
5459 	struct ice_pf *pf;
5460 	int disabled, v;
5461 
5462 	pf = pci_get_drvdata(pdev);
5463 
5464 	if (!ice_pf_state_is_nominal(pf)) {
5465 		dev_err(dev, "Device is not ready, no need to suspend it\n");
5466 		return -EBUSY;
5467 	}
5468 
5469 	/* Stop watchdog tasks until resume completion.
5470 	 * Even though it is most likely that the service task is
5471 	 * disabled if the device is suspended or down, the service task's
5472 	 * state is controlled by a different state bit, and we should
5473 	 * store and honor whatever state that bit is in at this point.
5474 	 */
5475 	disabled = ice_service_task_stop(pf);
5476 
5477 	ice_unplug_aux_dev(pf);
5478 
5479 	/* Already suspended? Then there is nothing to do */
5480 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5481 		if (!disabled)
5482 			ice_service_task_restart(pf);
5483 		return 0;
5484 	}
5485 
5486 	if (test_bit(ICE_DOWN, pf->state) ||
5487 	    ice_is_reset_in_progress(pf->state)) {
5488 		dev_err(dev, "can't suspend device in reset or already down\n");
5489 		if (!disabled)
5490 			ice_service_task_restart(pf);
5491 		return 0;
5492 	}
5493 
5494 	ice_setup_mc_magic_wake(pf);
5495 
5496 	ice_prepare_for_shutdown(pf);
5497 
5498 	ice_set_wake(pf);
5499 
5500 	/* Free vectors, clear the interrupt scheme and release IRQs
5501 	 * for proper hibernation, especially with large number of CPUs.
5502 	 * Otherwise hibernation might fail when mapping all the vectors back
5503 	 * to CPU0.
5504 	 */
5505 	ice_free_irq_msix_misc(pf);
5506 	ice_for_each_vsi(pf, v) {
5507 		if (!pf->vsi[v])
5508 			continue;
5509 		ice_vsi_free_q_vectors(pf->vsi[v]);
5510 	}
5511 	ice_clear_interrupt_scheme(pf);
5512 
5513 	pci_save_state(pdev);
5514 	pci_wake_from_d3(pdev, pf->wol_ena);
5515 	pci_set_power_state(pdev, PCI_D3hot);
5516 	return 0;
5517 }
5518 
5519 /**
5520  * ice_resume - PM callback for waking up from D3
5521  * @dev: generic device information structure
5522  */
5523 static int __maybe_unused ice_resume(struct device *dev)
5524 {
5525 	struct pci_dev *pdev = to_pci_dev(dev);
5526 	enum ice_reset_req reset_type;
5527 	struct ice_pf *pf;
5528 	struct ice_hw *hw;
5529 	int ret;
5530 
5531 	pci_set_power_state(pdev, PCI_D0);
5532 	pci_restore_state(pdev);
5533 	pci_save_state(pdev);
5534 
5535 	if (!pci_device_is_present(pdev))
5536 		return -ENODEV;
5537 
5538 	ret = pci_enable_device_mem(pdev);
5539 	if (ret) {
5540 		dev_err(dev, "Cannot enable device after suspend\n");
5541 		return ret;
5542 	}
5543 
5544 	pf = pci_get_drvdata(pdev);
5545 	hw = &pf->hw;
5546 
5547 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
5548 	ice_print_wake_reason(pf);
5549 
5550 	/* We cleared the interrupt scheme when we suspended, so we need to
5551 	 * restore it now to resume device functionality.
5552 	 */
5553 	ret = ice_reinit_interrupt_scheme(pf);
5554 	if (ret)
5555 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5556 
5557 	clear_bit(ICE_DOWN, pf->state);
5558 	/* Now perform PF reset and rebuild */
5559 	reset_type = ICE_RESET_PFR;
5560 	/* re-enable service task for reset, but allow reset to schedule it */
5561 	clear_bit(ICE_SERVICE_DIS, pf->state);
5562 
5563 	if (ice_schedule_reset(pf, reset_type))
5564 		dev_err(dev, "Reset during resume failed.\n");
5565 
5566 	clear_bit(ICE_SUSPENDED, pf->state);
5567 	ice_service_task_restart(pf);
5568 
5569 	/* Restart the service timer */
5570 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5571 
5572 	return 0;
5573 }
5574 #endif /* CONFIG_PM */
5575 
5576 /**
5577  * ice_pci_err_detected - warning that PCI error has been detected
5578  * @pdev: PCI device information struct
5579  * @err: the type of PCI error
5580  *
5581  * Called to warn that something happened on the PCI bus and the error handling
5582  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
5583  */
5584 static pci_ers_result_t
5585 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5586 {
5587 	struct ice_pf *pf = pci_get_drvdata(pdev);
5588 
5589 	if (!pf) {
5590 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5591 			__func__, err);
5592 		return PCI_ERS_RESULT_DISCONNECT;
5593 	}
5594 
5595 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5596 		ice_service_task_stop(pf);
5597 
5598 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5599 			set_bit(ICE_PFR_REQ, pf->state);
5600 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5601 		}
5602 	}
5603 
5604 	return PCI_ERS_RESULT_NEED_RESET;
5605 }
5606 
5607 /**
5608  * ice_pci_err_slot_reset - a PCI slot reset has just happened
5609  * @pdev: PCI device information struct
5610  *
5611  * Called to determine if the driver can recover from the PCI slot reset by
5612  * using a register read to determine if the device is recoverable.
5613  */
5614 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5615 {
5616 	struct ice_pf *pf = pci_get_drvdata(pdev);
5617 	pci_ers_result_t result;
5618 	int err;
5619 	u32 reg;
5620 
5621 	err = pci_enable_device_mem(pdev);
5622 	if (err) {
5623 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5624 			err);
5625 		result = PCI_ERS_RESULT_DISCONNECT;
5626 	} else {
5627 		pci_set_master(pdev);
5628 		pci_restore_state(pdev);
5629 		pci_save_state(pdev);
5630 		pci_wake_from_d3(pdev, false);
5631 
5632 		/* Check for life */
5633 		reg = rd32(&pf->hw, GLGEN_RTRIG);
5634 		if (!reg)
5635 			result = PCI_ERS_RESULT_RECOVERED;
5636 		else
5637 			result = PCI_ERS_RESULT_DISCONNECT;
5638 	}
5639 
5640 	return result;
5641 }
5642 
5643 /**
5644  * ice_pci_err_resume - restart operations after PCI error recovery
5645  * @pdev: PCI device information struct
5646  *
5647  * Called to allow the driver to bring things back up after PCI error and/or
5648  * reset recovery have finished
5649  */
5650 static void ice_pci_err_resume(struct pci_dev *pdev)
5651 {
5652 	struct ice_pf *pf = pci_get_drvdata(pdev);
5653 
5654 	if (!pf) {
5655 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5656 			__func__);
5657 		return;
5658 	}
5659 
5660 	if (test_bit(ICE_SUSPENDED, pf->state)) {
5661 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5662 			__func__);
5663 		return;
5664 	}
5665 
5666 	ice_restore_all_vfs_msi_state(pdev);
5667 
5668 	ice_do_reset(pf, ICE_RESET_PFR);
5669 	ice_service_task_restart(pf);
5670 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5671 }
5672 
5673 /**
5674  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5675  * @pdev: PCI device information struct
5676  */
5677 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5678 {
5679 	struct ice_pf *pf = pci_get_drvdata(pdev);
5680 
5681 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5682 		ice_service_task_stop(pf);
5683 
5684 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5685 			set_bit(ICE_PFR_REQ, pf->state);
5686 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5687 		}
5688 	}
5689 }
5690 
5691 /**
5692  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5693  * @pdev: PCI device information struct
5694  */
5695 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5696 {
5697 	ice_pci_err_resume(pdev);
5698 }
5699 
5700 /* ice_pci_tbl - PCI Device ID Table
5701  *
5702  * Wildcard entries (PCI_ANY_ID) should come last
5703  * Last entry must be all 0s
5704  *
5705  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5706  *   Class, Class Mask, private data (not used) }
5707  */
5708 static const struct pci_device_id ice_pci_tbl[] = {
5709 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5710 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5711 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5712 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5713 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5714 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5715 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5716 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5717 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5718 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5719 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5720 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5721 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5722 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5723 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5724 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5725 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5726 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5727 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5728 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5729 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5730 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5731 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5732 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5733 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5734 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
5735 	/* required last entry */
5736 	{ 0, }
5737 };
5738 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
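
/* MODULE_DEVICE_TABLE() exports each ID above as a module alias of the form
 * "pci:v00008086d...", which is what lets udev/modprobe autoload this driver
 * when a matching function appears on the bus; the full list can be checked
 * with e.g. "modinfo ice | grep alias".
 */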
5739 
5740 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5741 
5742 static const struct pci_error_handlers ice_pci_err_handler = {
5743 	.error_detected = ice_pci_err_detected,
5744 	.slot_reset = ice_pci_err_slot_reset,
5745 	.reset_prepare = ice_pci_err_reset_prepare,
5746 	.reset_done = ice_pci_err_reset_done,
5747 	.resume = ice_pci_err_resume
5748 };
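
/* For AER-style recovery the PCI core walks these hooks roughly in order:
 * .error_detected() (quiesce, return NEED_RESET), a slot/bus reset,
 * .slot_reset() (re-enable and probe for life), then .resume() (PFR and
 * service task restart). .reset_prepare()/.reset_done() bracket resets
 * initiated locally, e.g. via sysfs.
 */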
5749 
5750 static struct pci_driver ice_driver = {
5751 	.name = KBUILD_MODNAME,
5752 	.id_table = ice_pci_tbl,
5753 	.probe = ice_probe,
5754 	.remove = ice_remove,
5755 #ifdef CONFIG_PM
5756 	.driver.pm = &ice_pm_ops,
5757 #endif /* CONFIG_PM */
5758 	.shutdown = ice_shutdown,
5759 	.sriov_configure = ice_sriov_configure,
5760 	.err_handler = &ice_pci_err_handler
5761 };
5762 
5763 /**
5764  * ice_module_init - Driver registration routine
5765  *
5766  * ice_module_init is the first routine called when the driver is
5767  * loaded. It creates the driver workqueue and registers with the PCI subsystem.
5768  */
5769 static int __init ice_module_init(void)
5770 {
5771 	int status;
5772 
5773 	pr_info("%s\n", ice_driver_string);
5774 	pr_info("%s\n", ice_copyright);
5775 
5776 	ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
5777 	if (!ice_wq) {
5778 		pr_err("Failed to create workqueue\n");
5779 		return -ENOMEM;
5780 	}
5781 
5782 	status = pci_register_driver(&ice_driver);
5783 	if (status) {
5784 		pr_err("failed to register PCI driver, err %d\n", status);
5785 		destroy_workqueue(ice_wq);
5786 	}
5787 
5788 	return status;
5789 }
5790 module_init(ice_module_init);
5791 
5792 /**
5793  * ice_module_exit - Driver exit cleanup routine
5794  *
5795  * ice_module_exit is called just before the driver is removed
5796  * from memory.
5797  */
5798 static void __exit ice_module_exit(void)
5799 {
5800 	pci_unregister_driver(&ice_driver);
5801 	destroy_workqueue(ice_wq);
5802 	pr_info("module unloaded\n");
5803 }
5804 module_exit(ice_module_exit);
5805 
5806 /**
5807  * ice_set_mac_address - NDO callback to set MAC address
5808  * @netdev: network interface device structure
5809  * @pi: pointer to an address structure
5810  *
5811  * Returns 0 on success, negative on failure
5812  */
5813 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5814 {
5815 	struct ice_netdev_priv *np = netdev_priv(netdev);
5816 	struct ice_vsi *vsi = np->vsi;
5817 	struct ice_pf *pf = vsi->back;
5818 	struct ice_hw *hw = &pf->hw;
5819 	struct sockaddr *addr = pi;
5820 	u8 old_mac[ETH_ALEN];
5821 	u8 flags = 0;
5822 	u8 *mac;
5823 	int err;
5824 
5825 	mac = (u8 *)addr->sa_data;
5826 
5827 	if (!is_valid_ether_addr(mac))
5828 		return -EADDRNOTAVAIL;
5829 
5830 	if (ether_addr_equal(netdev->dev_addr, mac)) {
5831 		netdev_dbg(netdev, "already using mac %pM\n", mac);
5832 		return 0;
5833 	}
5834 
5835 	if (test_bit(ICE_DOWN, pf->state) ||
5836 	    ice_is_reset_in_progress(pf->state)) {
5837 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
5838 			   mac);
5839 		return -EBUSY;
5840 	}
5841 
5842 	if (ice_chnl_dmac_fltr_cnt(pf)) {
5843 		netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5844 			   mac);
5845 		return -EAGAIN;
5846 	}
5847 
5848 	netif_addr_lock_bh(netdev);
5849 	ether_addr_copy(old_mac, netdev->dev_addr);
5850 	/* change the netdev's MAC address */
5851 	eth_hw_addr_set(netdev, mac);
5852 	netif_addr_unlock_bh(netdev);
5853 
5854 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
5855 	err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5856 	if (err && err != -ENOENT) {
5857 		err = -EADDRNOTAVAIL;
5858 		goto err_update_filters;
5859 	}
5860 
5861 	/* Add filter for new MAC. If filter exists, return success */
5862 	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5863 	if (err == -EEXIST) {
5864 		/* Although this MAC filter is already present in hardware it's
5865 		 * possible in some cases (e.g. bonding) that dev_addr was
5866 		 * modified outside of the driver and needs to be restored back
5867 		 * to this value.
5868 		 */
5869 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5870 
5871 		return 0;
5872 	} else if (err) {
5873 		/* error if the new filter addition failed */
5874 		err = -EADDRNOTAVAIL;
5875 	}
5876 
5877 err_update_filters:
5878 	if (err) {
5879 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5880 			   mac);
5881 		netif_addr_lock_bh(netdev);
5882 		eth_hw_addr_set(netdev, old_mac);
5883 		netif_addr_unlock_bh(netdev);
5884 		return err;
5885 	}
5886 
5887 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5888 		   netdev->dev_addr);
5889 
5890 	/* write new MAC address to the firmware */
5891 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5892 	err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5893 	if (err) {
5894 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
5895 			   mac, err);
5896 	}
5897 	return 0;
5898 }
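
/* This ndo is reached via dev_set_mac_address(), e.g. from userspace
 * (interface name and address purely illustrative):
 *
 *	ip link set dev eth0 address 00:11:22:33:44:55
 *
 * Note the ordering above: the netdev address is swapped first, the unicast
 * filters are updated, and only then is the address committed to firmware,
 * rolling back the netdev address if the filter update fails.
 */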
5899 
5900 /**
5901  * ice_set_rx_mode - NDO callback to set the netdev filters
5902  * @netdev: network interface device structure
5903  */
5904 static void ice_set_rx_mode(struct net_device *netdev)
5905 {
5906 	struct ice_netdev_priv *np = netdev_priv(netdev);
5907 	struct ice_vsi *vsi = np->vsi;
5908 
5909 	if (!vsi)
5910 		return;
5911 
5912 	/* Set the flags to synchronize filters
5913 	 * ndo_set_rx_mode may be triggered even without a change in netdev
5914 	 * flags
5915 	 */
5916 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5917 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5918 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5919 
5920 	/* schedule our worker thread which will take care of
5921 	 * applying the new filter changes
5922 	 */
5923 	ice_service_task_schedule(vsi->back);
5924 }
5925 
5926 /**
5927  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5928  * @netdev: network interface device structure
5929  * @queue_index: Queue ID
5930  * @maxrate: maximum bandwidth in Mbps
5931  */
5932 static int
5933 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5934 {
5935 	struct ice_netdev_priv *np = netdev_priv(netdev);
5936 	struct ice_vsi *vsi = np->vsi;
5937 	u16 q_handle;
5938 	int status;
5939 	u8 tc;
5940 
5941 	/* Validate maxrate requested is within permitted range */
5942 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5943 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5944 			   maxrate, queue_index);
5945 		return -EINVAL;
5946 	}
5947 
5948 	q_handle = vsi->tx_rings[queue_index]->q_handle;
5949 	tc = ice_dcb_get_tc(vsi, queue_index);
5950 
5951 	/* Set BW back to default, when user set maxrate to 0 */
5952 	if (!maxrate)
5953 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5954 					       q_handle, ICE_MAX_BW);
5955 	else
5956 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5957 					  q_handle, ICE_MAX_BW, maxrate * 1000);
5958 	if (status)
5959 		netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
5960 			   status);
5961 
5962 	return status;
5963 }
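
/* ndo_set_tx_maxrate is normally exercised through the per-queue sysfs
 * attribute, in Mbps; an illustrative example (ifname hypothetical) capping
 * Tx queue 0 at 500 Mbps, then restoring the default:
 *
 *	echo 500 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *	echo 0 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 */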
5964 
5965 /**
5966  * ice_fdb_add - add an entry to the hardware database
5967  * @ndm: the input from the stack
5968  * @tb: pointer to array of nladdr (unused)
5969  * @dev: the net device pointer
5970  * @addr: the MAC address entry being added
5971  * @vid: VLAN ID
5972  * @flags: instructions from stack about fdb operation
5973  * @extack: netlink extended ack
5974  */
5975 static int
5976 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5977 	    struct net_device *dev, const unsigned char *addr, u16 vid,
5978 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5979 {
5980 	int err;
5981 
5982 	if (vid) {
5983 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5984 		return -EINVAL;
5985 	}
5986 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5987 		netdev_err(dev, "FDB only supports static addresses\n");
5988 		return -EINVAL;
5989 	}
5990 
5991 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5992 		err = dev_uc_add_excl(dev, addr);
5993 	else if (is_multicast_ether_addr(addr))
5994 		err = dev_mc_add_excl(dev, addr);
5995 	else
5996 		err = -EINVAL;
5997 
5998 	/* Only return duplicate errors if NLM_F_EXCL is set */
5999 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
6000 		err = 0;
6001 
6002 	return err;
6003 }
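
/* These FDB ops back the "bridge fdb" netlink interface. Since only static
 * (NUD_PERMANENT) entries are accepted, an illustrative invocation (address
 * and ifname hypothetical) would be:
 *
 *	bridge fdb add 01:00:00:00:00:42 dev eth0 self permanent
 */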
6004 
6005 /**
6006  * ice_fdb_del - delete an entry from the hardware database
6007  * @ndm: the input from the stack
6008  * @tb: pointer to array of nladdr (unused)
6009  * @dev: the net device pointer
6010  * @addr: the MAC address entry being removed
6011  * @vid: VLAN ID
6012  * @extack: netlink extended ack
6013  */
6014 static int
6015 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
6016 	    struct net_device *dev, const unsigned char *addr,
6017 	    __always_unused u16 vid, struct netlink_ext_ack *extack)
6018 {
6019 	int err;
6020 
6021 	if (ndm->ndm_state & NUD_PERMANENT) {
6022 		netdev_err(dev, "FDB only supports static addresses\n");
6023 		return -EINVAL;
6024 	}
6025 
6026 	if (is_unicast_ether_addr(addr))
6027 		err = dev_uc_del(dev, addr);
6028 	else if (is_multicast_ether_addr(addr))
6029 		err = dev_mc_del(dev, addr);
6030 	else
6031 		err = -EINVAL;
6032 
6033 	return err;
6034 }
6035 
6036 #define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
6037 					 NETIF_F_HW_VLAN_CTAG_TX | \
6038 					 NETIF_F_HW_VLAN_STAG_RX | \
6039 					 NETIF_F_HW_VLAN_STAG_TX)
6040 
6041 #define NETIF_VLAN_STRIPPING_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
6042 					 NETIF_F_HW_VLAN_STAG_RX)
6043 
6044 #define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | \
6045 					 NETIF_F_HW_VLAN_STAG_FILTER)
6046 
6047 /**
6048  * ice_fix_features - fix the netdev features flags based on device limitations
6049  * @netdev: ptr to the netdev that flags are being fixed on
6050  * @features: features that need to be checked and possibly fixed
6051  *
6052  * Make sure any fixups are made to features in this callback. This enables the
6053  * driver to not have to check unsupported configurations throughout the driver
6054  * because that's the responsibility of this callback.
6055  *
6056  * Single VLAN Mode (SVM) Supported Features:
6057  *	NETIF_F_HW_VLAN_CTAG_FILTER
6058  *	NETIF_F_HW_VLAN_CTAG_RX
6059  *	NETIF_F_HW_VLAN_CTAG_TX
6060  *
6061  * Double VLAN Mode (DVM) Supported Features:
6062  *	NETIF_F_HW_VLAN_CTAG_FILTER
6063  *	NETIF_F_HW_VLAN_CTAG_RX
6064  *	NETIF_F_HW_VLAN_CTAG_TX
6065  *
6066  *	NETIF_F_HW_VLAN_STAG_FILTER
6067  *	NETIF_F_HW_VLAN_STAG_RX
6068  *	NETIF_F_HW_VLAN_STAG_TX
6069  *
6070  * Features that need fixing:
6071  *	Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
6072  *	These are mutually exclusive as the VSI context cannot support multiple
6073  *	VLAN ethertypes simultaneously for stripping and/or insertion. If this
6074  *	is not done, then default to clearing the requested STAG offload
6075  *	settings.
6076  *
6077  *	All supported filtering has to be enabled or disabled together. For
6078  *	example, in DVM, CTAG and STAG filtering have to be enabled and disabled
6079  *	together. If this is not done, then default to VLAN filtering disabled.
6080  *	These are mutually exclusive as there is currently no way to
6081  *	enable/disable VLAN filtering based on VLAN ethertype when using VLAN
6082  *	prune rules.
6083  */
6084 static netdev_features_t
6085 ice_fix_features(struct net_device *netdev, netdev_features_t features)
6086 {
6087 	struct ice_netdev_priv *np = netdev_priv(netdev);
6088 	netdev_features_t req_vlan_fltr, cur_vlan_fltr;
6089 	bool cur_ctag, cur_stag, req_ctag, req_stag;
6090 
6091 	cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
6092 	cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6093 	cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6094 
6095 	req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
6096 	req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6097 	req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6098 
6099 	if (req_vlan_fltr != cur_vlan_fltr) {
6100 		if (ice_is_dvm_ena(&np->vsi->back->hw)) {
6101 			if (req_ctag && req_stag) {
6102 				features |= NETIF_VLAN_FILTERING_FEATURES;
6103 			} else if (!req_ctag && !req_stag) {
6104 				features &= ~NETIF_VLAN_FILTERING_FEATURES;
6105 			} else if ((!cur_ctag && req_ctag && !cur_stag) ||
6106 				   (!cur_stag && req_stag && !cur_ctag)) {
6107 				features |= NETIF_VLAN_FILTERING_FEATURES;
6108 				netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
6109 			} else if ((cur_ctag && !req_ctag && cur_stag) ||
6110 				   (cur_stag && !req_stag && cur_ctag)) {
6111 				features &= ~NETIF_VLAN_FILTERING_FEATURES;
6112 				netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
6113 			}
6114 		} else {
6115 			if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
6116 				netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
6117 
6118 			if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
6119 				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6120 		}
6121 	}
6122 
6123 	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
6124 	    (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
6125 		netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
6126 		features &= ~(NETIF_F_HW_VLAN_STAG_RX |
6127 			      NETIF_F_HW_VLAN_STAG_TX);
6128 	}
6129 
6130 	if (!(netdev->features & NETIF_F_RXFCS) &&
6131 	    (features & NETIF_F_RXFCS) &&
6132 	    (features & NETIF_VLAN_STRIPPING_FEATURES) &&
6133 	    !ice_vsi_has_non_zero_vlans(np->vsi)) {
6134 		netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
6135 		features &= ~NETIF_VLAN_STRIPPING_FEATURES;
6136 	}
6137 
6138 	return features;
6139 }
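
/* Example of the filtering fixup above (feature names as shown by
 * "ethtool -k", ifname illustrative): with DVM active and both filter types
 * enabled, a request such as
 *
 *	ethtool -K eth0 rx-vlan-filter off
 *
 * would leave CTAG and STAG filtering split, so the callback clears both
 * filtering bits and emits the corresponding warning instead.
 */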
6140 
6141 /**
6142  * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6143  * @vsi: PF's VSI
6144  * @features: features used to determine VLAN offload settings
6145  *
6146  * First, determine the vlan_ethertype based on the VLAN offload bits in
6147  * features. Then determine if stripping and insertion should be enabled or
6148  * disabled. Finally enable or disable VLAN stripping and insertion.
6149  */
6150 static int
6151 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6152 {
6153 	bool enable_stripping = true, enable_insertion = true;
6154 	struct ice_vsi_vlan_ops *vlan_ops;
6155 	int strip_err = 0, insert_err = 0;
6156 	u16 vlan_ethertype = 0;
6157 
6158 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6159 
6160 	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
6161 		vlan_ethertype = ETH_P_8021AD;
6162 	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
6163 		vlan_ethertype = ETH_P_8021Q;
6164 
6165 	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
6166 		enable_stripping = false;
6167 	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
6168 		enable_insertion = false;
6169 
6170 	if (enable_stripping)
6171 		strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6172 	else
6173 		strip_err = vlan_ops->dis_stripping(vsi);
6174 
6175 	if (enable_insertion)
6176 		insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6177 	else
6178 		insert_err = vlan_ops->dis_insertion(vsi);
6179 
6180 	if (strip_err || insert_err)
6181 		return -EIO;
6182 
6183 	return 0;
6184 }
6185 
6186 /**
6187  * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6188  * @vsi: PF's VSI
6189  * @features: features used to determine VLAN filtering settings
6190  *
6191  * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
6192  * features.
6193  */
6194 static int
6195 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6196 {
6197 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6198 	int err = 0;
6199 
6200 	/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
6201 	 * if either bit is set
6202 	 */
6203 	if (features &
6204 	    (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
6205 		err = vlan_ops->ena_rx_filtering(vsi);
6206 	else
6207 		err = vlan_ops->dis_rx_filtering(vsi);
6208 
6209 	return err;
6210 }
6211 
6212 /**
6213  * ice_set_vlan_features - set VLAN settings based on suggested feature set
6214  * @netdev: ptr to the netdev being adjusted
6215  * @features: the feature set that the stack is suggesting
6216  *
6217  * Only update VLAN settings if the requested_vlan_features are different than
6218  * the current_vlan_features.
6219  */
6220 static int
6221 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6222 {
6223 	netdev_features_t current_vlan_features, requested_vlan_features;
6224 	struct ice_netdev_priv *np = netdev_priv(netdev);
6225 	struct ice_vsi *vsi = np->vsi;
6226 	int err;
6227 
6228 	current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6229 	requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6230 	if (current_vlan_features ^ requested_vlan_features) {
6231 		if ((features & NETIF_F_RXFCS) &&
6232 		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6233 			dev_err(ice_pf_to_dev(vsi->back),
6234 				"To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6235 			return -EIO;
6236 		}
6237 
6238 		err = ice_set_vlan_offload_features(vsi, features);
6239 		if (err)
6240 			return err;
6241 	}
6242 
6243 	current_vlan_features = netdev->features &
6244 		NETIF_VLAN_FILTERING_FEATURES;
6245 	requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6246 	if (current_vlan_features ^ requested_vlan_features) {
6247 		err = ice_set_vlan_filtering_features(vsi, features);
6248 		if (err)
6249 			return err;
6250 	}
6251 
6252 	return 0;
6253 }
6254 
6255 /**
6256  * ice_set_loopback - turn on/off loopback mode on underlying PF
6257  * @vsi: ptr to VSI
6258  * @ena: flag to indicate the on/off setting
6259  */
6260 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6261 {
6262 	bool if_running = netif_running(vsi->netdev);
6263 	int ret;
6264 
6265 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6266 		ret = ice_down(vsi);
6267 		if (ret) {
6268 			netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6269 			return ret;
6270 		}
6271 	}
6272 	ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6273 	if (ret)
6274 		netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6275 	if (if_running)
6276 		ret = ice_up(vsi);
6277 
6278 	return ret;
6279 }
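
/* NETIF_F_LOOPBACK is exposed to userspace as the "loopback" feature, e.g.
 * "ethtool -K eth0 loopback on" (ifname illustrative), causing transmitted
 * frames to be received back on the same port, which is mainly useful for
 * self-tests.
 */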
6280 
6281 /**
6282  * ice_set_features - set the netdev feature flags
6283  * @netdev: ptr to the netdev being adjusted
6284  * @features: the feature set that the stack is suggesting
6285  */
6286 static int
6287 ice_set_features(struct net_device *netdev, netdev_features_t features)
6288 {
6289 	netdev_features_t changed = netdev->features ^ features;
6290 	struct ice_netdev_priv *np = netdev_priv(netdev);
6291 	struct ice_vsi *vsi = np->vsi;
6292 	struct ice_pf *pf = vsi->back;
6293 	int ret = 0;
6294 
6295 	/* Don't set any netdev advanced features with device in Safe Mode */
6296 	if (ice_is_safe_mode(pf)) {
6297 		dev_err(ice_pf_to_dev(pf),
6298 			"Device is in Safe Mode - not enabling advanced netdev features\n");
6299 		return ret;
6300 	}
6301 
6302 	/* Do not change setting during reset */
6303 	if (ice_is_reset_in_progress(pf->state)) {
6304 		dev_err(ice_pf_to_dev(pf),
6305 			"Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6306 		return -EBUSY;
6307 	}
6308 
6309 	/* Multiple features can be changed in one call so keep features in
6310 	 * separate if/else statements to guarantee each feature is checked
6311 	 */
6312 	if (changed & NETIF_F_RXHASH)
6313 		ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6314 
6315 	ret = ice_set_vlan_features(netdev, features);
6316 	if (ret)
6317 		return ret;
6318 
6319 	/* Turn on receive of FCS aka CRC, and after setting this
6320 	 * flag the packet data will have the 4 byte CRC appended
6321 	 */
6322 	if (changed & NETIF_F_RXFCS) {
6323 		if ((features & NETIF_F_RXFCS) &&
6324 		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6325 			dev_err(ice_pf_to_dev(vsi->back),
6326 				"To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6327 			return -EIO;
6328 		}
6329 
6330 		ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6331 		ret = ice_down_up(vsi);
6332 		if (ret)
6333 			return ret;
6334 	}
6335 
6336 	if (changed & NETIF_F_NTUPLE) {
6337 		bool ena = !!(features & NETIF_F_NTUPLE);
6338 
6339 		ice_vsi_manage_fdir(vsi, ena);
6340 		ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6341 	}
6342 
6343 	/* don't turn off hw_tc_offload when ADQ is already enabled */
6344 	if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6345 		dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6346 		return -EACCES;
6347 	}
6348 
6349 	if (changed & NETIF_F_HW_TC) {
6350 		bool ena = !!(features & NETIF_F_HW_TC);
6351 
6352 		ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6353 		      clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6354 	}
6355 
6356 	if (changed & NETIF_F_LOOPBACK)
6357 		ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6358 
6359 	return ret;
6360 }
6361 
6362 /**
6363  * ice_vsi_vlan_setup - Set up VLAN offload properties on a PF VSI
6364  * @vsi: VSI to setup VLAN properties for
6365  */
6366 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6367 {
6368 	int err;
6369 
6370 	err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6371 	if (err)
6372 		return err;
6373 
6374 	err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6375 	if (err)
6376 		return err;
6377 
6378 	return ice_vsi_add_vlan_zero(vsi);
6379 }
6380 
6381 /**
6382  * ice_vsi_cfg_lan - Setup the VSI lan related config
6383  * @vsi: the VSI being configured
6384  *
6385  * Return 0 on success and negative value on error
6386  */
6387 int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6388 {
6389 	int err;
6390 
6391 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6392 		ice_set_rx_mode(vsi->netdev);
6393 
6394 		err = ice_vsi_vlan_setup(vsi);
6395 		if (err)
6396 			return err;
6397 	}
6398 	ice_vsi_cfg_dcb_rings(vsi);
6399 
6400 	err = ice_vsi_cfg_lan_txqs(vsi);
6401 	if (!err && ice_is_xdp_ena_vsi(vsi))
6402 		err = ice_vsi_cfg_xdp_txqs(vsi);
6403 	if (!err)
6404 		err = ice_vsi_cfg_rxqs(vsi);
6405 
6406 	return err;
6407 }
6408 
6409 /* THEORY OF MODERATION:
6410  * The ice driver hardware works differently from the hardware that DIMLIB was
6411  * originally made for. ice hardware doesn't have packet count limits that
6412  * can trigger an interrupt, but it *does* have interrupt rate limit support,
6413  * which is hard-coded to a limit of 250,000 ints/second.
6414  * If not using dynamic moderation, the INTRL value can be modified
6415  * by ethtool rx-usecs-high.
6416  */
6417 struct ice_dim {
6418 	/* the throttle rate for interrupts, basically worst case delay before
6419 	 * an initial interrupt fires, value is stored in microseconds.
6420 	 */
6421 	u16 itr;
6422 };
6423 
6424 /* Make a different profile for Rx that doesn't allow quite so aggressive
6425  * moderation at the high end (it maxes out at 126us or about 8k interrupts a
6426  * second).
6427  */
6428 static const struct ice_dim rx_profile[] = {
6429 	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6430 	{8},    /* 125,000 ints/s */
6431 	{16},   /*  62,500 ints/s */
6432 	{62},   /*  16,129 ints/s */
6433 	{126}   /*   7,936 ints/s */
6434 };
6435 
6436 /* The transmit profile, which has the same sorts of values
6437  * as the previous struct
6438  */
6439 static const struct ice_dim tx_profile[] = {
6440 	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6441 	{8},    /* 125,000 ints/s */
6442 	{40},   /*  25,000 ints/s */
6443 	{128},  /*   7,812 ints/s */
6444 	{256}   /*   3,906 ints/s */
6445 };
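
/* For reference, the ints/s figures in both tables follow directly from the
 * ITR period: a vector that re-arms every itr microseconds fires at most
 * 1,000,000 / itr times per second, e.g. 8 us -> 125,000 ints/s and
 * 126 us -> ~7,936 ints/s, before the 250K INTRL cap is applied.
 */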
6446 
6447 static void ice_tx_dim_work(struct work_struct *work)
6448 {
6449 	struct ice_ring_container *rc;
6450 	struct dim *dim;
6451 	u16 itr;
6452 
6453 	dim = container_of(work, struct dim, work);
6454 	rc = (struct ice_ring_container *)dim->priv;
6455 
6456 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6457 
6458 	/* look up the values in our local table */
6459 	itr = tx_profile[dim->profile_ix].itr;
6460 
6461 	ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6462 	ice_write_itr(rc, itr);
6463 
6464 	dim->state = DIM_START_MEASURE;
6465 }
6466 
6467 static void ice_rx_dim_work(struct work_struct *work)
6468 {
6469 	struct ice_ring_container *rc;
6470 	struct dim *dim;
6471 	u16 itr;
6472 
6473 	dim = container_of(work, struct dim, work);
6474 	rc = (struct ice_ring_container *)dim->priv;
6475 
6476 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6477 
6478 	/* look up the values in our local table */
6479 	itr = rx_profile[dim->profile_ix].itr;
6480 
6481 	ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6482 	ice_write_itr(rc, itr);
6483 
6484 	dim->state = DIM_START_MEASURE;
6485 }
6486 
6487 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6488 
6489 /**
6490  * ice_init_moderation - set up interrupt moderation
6491  * @q_vector: the vector containing rings to be configured
6492  *
6493  * Set up interrupt moderation registers, with the intent to do the right thing
6494  * when called from reset or from probe, whether or not dynamic moderation
6495  * is enabled. Take special care to write all the registers both with and
6496  * without dynamic moderation enabled, in order to make sure hardware is in
6497  * a known state.
6498  */
6499 static void ice_init_moderation(struct ice_q_vector *q_vector)
6500 {
6501 	struct ice_ring_container *rc;
6502 	bool tx_dynamic, rx_dynamic;
6503 
6504 	rc = &q_vector->tx;
6505 	INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6506 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6507 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6508 	rc->dim.priv = rc;
6509 	tx_dynamic = ITR_IS_DYNAMIC(rc);
6510 
6511 	/* set the initial TX ITR to match the above */
6512 	ice_write_itr(rc, tx_dynamic ?
6513 		      tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6514 
6515 	rc = &q_vector->rx;
6516 	INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6517 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6518 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6519 	rc->dim.priv = rc;
6520 	rx_dynamic = ITR_IS_DYNAMIC(rc);
6521 
6522 	/* set the initial RX ITR to match the above */
6523 	ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6524 				       rc->itr_setting);
6525 
6526 	ice_set_q_vector_intrl(q_vector);
6527 }
6528 
6529 /**
6530  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6531  * @vsi: the VSI being configured
6532  */
6533 static void ice_napi_enable_all(struct ice_vsi *vsi)
6534 {
6535 	int q_idx;
6536 
6537 	if (!vsi->netdev)
6538 		return;
6539 
6540 	ice_for_each_q_vector(vsi, q_idx) {
6541 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6542 
6543 		ice_init_moderation(q_vector);
6544 
6545 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6546 			napi_enable(&q_vector->napi);
6547 	}
6548 }
6549 
6550 /**
6551  * ice_up_complete - Finish the last steps of bringing up a connection
6552  * @vsi: The VSI being configured
6553  *
6554  * Return 0 on success and negative value on error
6555  */
6556 static int ice_up_complete(struct ice_vsi *vsi)
6557 {
6558 	struct ice_pf *pf = vsi->back;
6559 	int err;
6560 
6561 	ice_vsi_cfg_msix(vsi);
6562 
6563 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
6564 	 * Tx queue group list was configured and the context bits were
6565 	 * programmed using ice_vsi_cfg_txqs
6566 	 */
6567 	err = ice_vsi_start_all_rx_rings(vsi);
6568 	if (err)
6569 		return err;
6570 
6571 	clear_bit(ICE_VSI_DOWN, vsi->state);
6572 	ice_napi_enable_all(vsi);
6573 	ice_vsi_ena_irq(vsi);
6574 
6575 	if (vsi->port_info &&
6576 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6577 	    vsi->netdev && vsi->type == ICE_VSI_PF) {
6578 		ice_print_link_msg(vsi, true);
6579 		netif_tx_start_all_queues(vsi->netdev);
6580 		netif_carrier_on(vsi->netdev);
6581 		ice_ptp_link_change(pf, pf->hw.pf_id, true);
6582 	}
6583 
6584 	/* Perform an initial read of the statistics registers now to
6585 	 * set the baseline so counters are ready when interface is up
6586 	 */
6587 	ice_update_eth_stats(vsi);
6588 
6589 	if (vsi->type == ICE_VSI_PF)
6590 		ice_service_task_schedule(pf);
6591 
6592 	return 0;
6593 }
6594 
6595 /**
6596  * ice_up - Bring the connection back up after being down
6597  * @vsi: VSI being configured
6598  */
6599 int ice_up(struct ice_vsi *vsi)
6600 {
6601 	int err;
6602 
6603 	err = ice_vsi_cfg_lan(vsi);
6604 	if (!err)
6605 		err = ice_up_complete(vsi);
6606 
6607 	return err;
6608 }
6609 
6610 /**
6611  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6612  * @syncp: pointer to u64_stats_sync
6613  * @stats: stats that pkts and bytes count will be taken from
6614  * @pkts: packets stats counter
6615  * @bytes: bytes stats counter
6616  *
6617  * This function fetches stats from the ring considering the atomic operations
6618  * that need to be performed to read u64 values on a 32-bit machine.
6619  */
6620 void
6621 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6622 			     struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6623 {
6624 	unsigned int start;
6625 
6626 	do {
6627 		start = u64_stats_fetch_begin(syncp);
6628 		*pkts = stats.pkts;
6629 		*bytes = stats.bytes;
6630 	} while (u64_stats_fetch_retry(syncp, start));
6631 }
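
/* On 64-bit kernels the u64_stats_fetch_begin()/u64_stats_fetch_retry()
 * pair compiles down to plain loads and the loop body runs exactly once;
 * on 32-bit kernels it reads a seqcount and retries whenever a writer
 * updated the 64-bit counters mid-read.
 */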
6632 
6633 /**
6634  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6635  * @vsi: the VSI to be updated
6636  * @vsi_stats: the stats struct to be updated
6637  * @rings: rings to work on
6638  * @count: number of rings
6639  */
6640 static void
6641 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6642 			     struct rtnl_link_stats64 *vsi_stats,
6643 			     struct ice_tx_ring **rings, u16 count)
6644 {
6645 	u16 i;
6646 
6647 	for (i = 0; i < count; i++) {
6648 		struct ice_tx_ring *ring;
6649 		u64 pkts = 0, bytes = 0;
6650 
6651 		ring = READ_ONCE(rings[i]);
6652 		if (!ring || !ring->ring_stats)
6653 			continue;
6654 		ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6655 					     ring->ring_stats->stats, &pkts,
6656 					     &bytes);
6657 		vsi_stats->tx_packets += pkts;
6658 		vsi_stats->tx_bytes += bytes;
6659 		vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6660 		vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6661 		vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6662 	}
6663 }
6664 
6665 /**
6666  * ice_update_vsi_ring_stats - Update VSI ring stats counters
6667  * @vsi: the VSI to be updated
6668  */
6669 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6670 {
6671 	struct rtnl_link_stats64 *net_stats, *stats_prev;
6672 	struct rtnl_link_stats64 *vsi_stats;
6673 	u64 pkts, bytes;
6674 	int i;
6675 
6676 	vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6677 	if (!vsi_stats)
6678 		return;
6679 
6680 	/* reset non-netdev (extended) stats */
6681 	vsi->tx_restart = 0;
6682 	vsi->tx_busy = 0;
6683 	vsi->tx_linearize = 0;
6684 	vsi->rx_buf_failed = 0;
6685 	vsi->rx_page_failed = 0;
6686 
6687 	rcu_read_lock();
6688 
6689 	/* update Tx rings counters */
6690 	ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6691 				     vsi->num_txq);
6692 
6693 	/* update Rx rings counters */
6694 	ice_for_each_rxq(vsi, i) {
6695 		struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6696 		struct ice_ring_stats *ring_stats;
6697 
6698 		ring_stats = ring->ring_stats;
6699 		ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6700 					     ring_stats->stats, &pkts,
6701 					     &bytes);
6702 		vsi_stats->rx_packets += pkts;
6703 		vsi_stats->rx_bytes += bytes;
6704 		vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6705 		vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6706 	}
6707 
6708 	/* update XDP Tx rings counters */
6709 	if (ice_is_xdp_ena_vsi(vsi))
6710 		ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6711 					     vsi->num_xdp_txq);
6712 
6713 	rcu_read_unlock();
6714 
6715 	net_stats = &vsi->net_stats;
6716 	stats_prev = &vsi->net_stats_prev;
6717 
6718 	/* clear prev counters after reset */
6719 	if (vsi_stats->tx_packets < stats_prev->tx_packets ||
6720 	    vsi_stats->rx_packets < stats_prev->rx_packets) {
6721 		stats_prev->tx_packets = 0;
6722 		stats_prev->tx_bytes = 0;
6723 		stats_prev->rx_packets = 0;
6724 		stats_prev->rx_bytes = 0;
6725 	}
6726 
6727 	/* update netdev counters */
6728 	net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6729 	net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6730 	net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6731 	net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6732 
6733 	stats_prev->tx_packets = vsi_stats->tx_packets;
6734 	stats_prev->tx_bytes = vsi_stats->tx_bytes;
6735 	stats_prev->rx_packets = vsi_stats->rx_packets;
6736 	stats_prev->rx_bytes = vsi_stats->rx_bytes;
6737 
6738 	kfree(vsi_stats);
6739 }
6740 
6741 /**
6742  * ice_update_vsi_stats - Update VSI stats counters
6743  * @vsi: the VSI to be updated
6744  */
6745 void ice_update_vsi_stats(struct ice_vsi *vsi)
6746 {
6747 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6748 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
6749 	struct ice_pf *pf = vsi->back;
6750 
6751 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6752 	    test_bit(ICE_CFG_BUSY, pf->state))
6753 		return;
6754 
6755 	/* get stats as recorded by Tx/Rx rings */
6756 	ice_update_vsi_ring_stats(vsi);
6757 
6758 	/* get VSI stats as recorded by the hardware */
6759 	ice_update_eth_stats(vsi);
6760 
6761 	cur_ns->tx_errors = cur_es->tx_errors;
6762 	cur_ns->rx_dropped = cur_es->rx_discards;
6763 	cur_ns->tx_dropped = cur_es->tx_discards;
6764 	cur_ns->multicast = cur_es->rx_multicast;
6765 
6766 	/* update some more netdev stats if this is the main VSI */
6767 	if (vsi->type == ICE_VSI_PF) {
6768 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
6769 		cur_ns->rx_errors = pf->stats.crc_errors +
6770 				    pf->stats.illegal_bytes +
6771 				    pf->stats.rx_len_errors +
6772 				    pf->stats.rx_undersize +
6773 				    pf->hw_csum_rx_error +
6774 				    pf->stats.rx_jabber +
6775 				    pf->stats.rx_fragments +
6776 				    pf->stats.rx_oversize;
6777 		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
6778 		/* record drops from the port level */
6779 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6780 	}
6781 }
6782 
6783 /**
6784  * ice_update_pf_stats - Update PF port stats counters
6785  * @pf: PF whose stats need to be updated
6786  */
6787 void ice_update_pf_stats(struct ice_pf *pf)
6788 {
6789 	struct ice_hw_port_stats *prev_ps, *cur_ps;
6790 	struct ice_hw *hw = &pf->hw;
6791 	u16 fd_ctr_base;
6792 	u8 port;
6793 
6794 	port = hw->port_info->lport;
6795 	prev_ps = &pf->stats_prev;
6796 	cur_ps = &pf->stats;
6797 
6798 	if (ice_is_reset_in_progress(pf->state))
6799 		pf->stat_prev_loaded = false;
6800 
6801 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6802 			  &prev_ps->eth.rx_bytes,
6803 			  &cur_ps->eth.rx_bytes);
6804 
6805 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6806 			  &prev_ps->eth.rx_unicast,
6807 			  &cur_ps->eth.rx_unicast);
6808 
6809 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6810 			  &prev_ps->eth.rx_multicast,
6811 			  &cur_ps->eth.rx_multicast);
6812 
6813 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6814 			  &prev_ps->eth.rx_broadcast,
6815 			  &cur_ps->eth.rx_broadcast);
6816 
6817 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6818 			  &prev_ps->eth.rx_discards,
6819 			  &cur_ps->eth.rx_discards);
6820 
6821 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6822 			  &prev_ps->eth.tx_bytes,
6823 			  &cur_ps->eth.tx_bytes);
6824 
6825 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6826 			  &prev_ps->eth.tx_unicast,
6827 			  &cur_ps->eth.tx_unicast);
6828 
6829 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6830 			  &prev_ps->eth.tx_multicast,
6831 			  &cur_ps->eth.tx_multicast);
6832 
6833 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6834 			  &prev_ps->eth.tx_broadcast,
6835 			  &cur_ps->eth.tx_broadcast);
6836 
6837 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6838 			  &prev_ps->tx_dropped_link_down,
6839 			  &cur_ps->tx_dropped_link_down);
6840 
6841 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6842 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
6843 
6844 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6845 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
6846 
6847 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6848 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
6849 
6850 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6851 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
6852 
6853 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
6854 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
6855 
6856 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
6857 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
6858 
6859 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
6860 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
6861 
6862 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
6863 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
6864 
6865 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
6866 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
6867 
6868 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
6869 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
6870 
6871 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
6872 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
6873 
6874 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
6875 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
6876 
6877 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
6878 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
6879 
6880 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
6881 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
6882 
6883 	fd_ctr_base = hw->fd_ctr_base;
6884 
6885 	ice_stat_update40(hw,
6886 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
6887 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6888 			  &cur_ps->fd_sb_match);
6889 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
6890 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
6891 
6892 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
6893 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
6894 
6895 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
6896 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
6897 
6898 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
6899 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
6900 
6901 	ice_update_dcb_stats(pf);
6902 
6903 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
6904 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
6905 
6906 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
6907 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
6908 
6909 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
6910 			  &prev_ps->mac_local_faults,
6911 			  &cur_ps->mac_local_faults);
6912 
6913 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
6914 			  &prev_ps->mac_remote_faults,
6915 			  &cur_ps->mac_remote_faults);
6916 
6917 	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
6918 			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
6919 
6920 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
6921 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
6922 
6923 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
6924 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
6925 
6926 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
6927 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
6928 
6929 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
6930 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
6931 
6932 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6933 
6934 	pf->stat_prev_loaded = true;
6935 }
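
/* The GLPRT_* registers read above are 32/40-bit rollover counters; the
 * ice_stat_update32/40 helpers accumulate a wrap-corrected delta from the
 * previous snapshot into the software stats, which is why stat_prev_loaded
 * is dropped while a reset is in progress to re-baseline instead of
 * computing a bogus delta.
 */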
6936 
6937 /**
6938  * ice_get_stats64 - get statistics for network device structure
6939  * @netdev: network interface device structure
6940  * @stats: main device statistics structure
6941  */
6942 static
6943 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6944 {
6945 	struct ice_netdev_priv *np = netdev_priv(netdev);
6946 	struct rtnl_link_stats64 *vsi_stats;
6947 	struct ice_vsi *vsi = np->vsi;
6948 
6949 	vsi_stats = &vsi->net_stats;
6950 
6951 	if (!vsi->num_txq || !vsi->num_rxq)
6952 		return;
6953 
6954 	/* netdev packet/byte stats come from ring counters. These are obtained
6955 	 * by summing up ring counters (done by ice_update_vsi_ring_stats).
6956 	 * But, only call the update routine and read the registers if VSI is
6957 	 * not down.
6958 	 */
6959 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
6960 		ice_update_vsi_ring_stats(vsi);
6961 	stats->tx_packets = vsi_stats->tx_packets;
6962 	stats->tx_bytes = vsi_stats->tx_bytes;
6963 	stats->rx_packets = vsi_stats->rx_packets;
6964 	stats->rx_bytes = vsi_stats->rx_bytes;
6965 
6966 	/* The rest of the stats can be read from the hardware but instead we
6967 	 * just return values that the watchdog task has already obtained from
6968 	 * the hardware.
6969 	 */
6970 	stats->multicast = vsi_stats->multicast;
6971 	stats->tx_errors = vsi_stats->tx_errors;
6972 	stats->tx_dropped = vsi_stats->tx_dropped;
6973 	stats->rx_errors = vsi_stats->rx_errors;
6974 	stats->rx_dropped = vsi_stats->rx_dropped;
6975 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6976 	stats->rx_length_errors = vsi_stats->rx_length_errors;
6977 }
6978 
6979 /**
6980  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6981  * @vsi: VSI having NAPI disabled
6982  */
6983 static void ice_napi_disable_all(struct ice_vsi *vsi)
6984 {
6985 	int q_idx;
6986 
6987 	if (!vsi->netdev)
6988 		return;
6989 
6990 	ice_for_each_q_vector(vsi, q_idx) {
6991 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6992 
6993 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6994 			napi_disable(&q_vector->napi);
6995 
6996 		cancel_work_sync(&q_vector->tx.dim.work);
6997 		cancel_work_sync(&q_vector->rx.dim.work);
6998 	}
6999 }
7000 
7001 /**
7002  * ice_down - Shutdown the connection
7003  * @vsi: The VSI being stopped
7004  *
7005  * Caller of this function is expected to set the ICE_VSI_DOWN bit in vsi->state
7006  */
7007 int ice_down(struct ice_vsi *vsi)
7008 {
7009 	int i, tx_err, rx_err, vlan_err = 0;
7010 
7011 	WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
7012 
7013 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
7014 		vlan_err = ice_vsi_del_vlan_zero(vsi);
7015 		ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
7016 		netif_carrier_off(vsi->netdev);
7017 		netif_tx_disable(vsi->netdev);
7018 	} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
7019 		ice_eswitch_stop_all_tx_queues(vsi->back);
7020 	}
7021 
7022 	ice_vsi_dis_irq(vsi);
7023 
7024 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
7025 	if (tx_err)
7026 		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
7027 			   vsi->vsi_num, tx_err);
7028 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
7029 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
7030 		if (tx_err)
7031 			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
7032 				   vsi->vsi_num, tx_err);
7033 	}
7034 
7035 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
7036 	if (rx_err)
7037 		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
7038 			   vsi->vsi_num, rx_err);
7039 
7040 	ice_napi_disable_all(vsi);
7041 
7042 	ice_for_each_txq(vsi, i)
7043 		ice_clean_tx_ring(vsi->tx_rings[i]);
7044 
7045 	ice_for_each_rxq(vsi, i)
7046 		ice_clean_rx_ring(vsi->rx_rings[i]);
7047 
7048 	if (tx_err || rx_err || vlan_err) {
7049 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
7050 			   vsi->vsi_num, vsi->vsw->sw_id);
7051 		return -EIO;
7052 	}
7053 
7054 	return 0;
7055 }
7056 
7057 /**
7058  * ice_down_up - shutdown the VSI connection and bring it up
7059  * @vsi: the VSI to be reconnected
7060  */
7061 int ice_down_up(struct ice_vsi *vsi)
7062 {
7063 	int ret;
7064 
7065 	/* if DOWN already set, nothing to do */
7066 	if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
7067 		return 0;
7068 
7069 	ret = ice_down(vsi);
7070 	if (ret)
7071 		return ret;
7072 
7073 	ret = ice_up(vsi);
7074 	if (ret) {
7075 		netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
7076 		return ret;
7077 	}
7078 
7079 	return 0;
7080 }
7081 
7082 /**
7083  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
7084  * @vsi: VSI having resources allocated
7085  *
7086  * Return 0 on success, negative on failure
7087  */
7088 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
7089 {
7090 	int i, err = 0;
7091 
7092 	if (!vsi->num_txq) {
7093 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
7094 			vsi->vsi_num);
7095 		return -EINVAL;
7096 	}
7097 
7098 	ice_for_each_txq(vsi, i) {
7099 		struct ice_tx_ring *ring = vsi->tx_rings[i];
7100 
7101 		if (!ring)
7102 			return -EINVAL;
7103 
7104 		if (vsi->netdev)
7105 			ring->netdev = vsi->netdev;
7106 		err = ice_setup_tx_ring(ring);
7107 		if (err)
7108 			break;
7109 	}
7110 
7111 	return err;
7112 }
7113 
7114 /**
7115  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
7116  * @vsi: VSI having resources allocated
7117  *
7118  * Return 0 on success, negative on failure
7119  */
7120 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
7121 {
7122 	int i, err = 0;
7123 
7124 	if (!vsi->num_rxq) {
7125 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
7126 			vsi->vsi_num);
7127 		return -EINVAL;
7128 	}
7129 
7130 	ice_for_each_rxq(vsi, i) {
7131 		struct ice_rx_ring *ring = vsi->rx_rings[i];
7132 
7133 		if (!ring)
7134 			return -EINVAL;
7135 
7136 		if (vsi->netdev)
7137 			ring->netdev = vsi->netdev;
7138 		err = ice_setup_rx_ring(ring);
7139 		if (err)
7140 			break;
7141 	}
7142 
7143 	return err;
7144 }
7145 
7146 /**
7147  * ice_vsi_open_ctrl - open control VSI for use
7148  * @vsi: the VSI to open
7149  *
7150  * Initialization of the Control VSI
7151  *
7152  * Returns 0 on success, negative value on error
7153  */
7154 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
7155 {
7156 	char int_name[ICE_INT_NAME_STR_LEN];
7157 	struct ice_pf *pf = vsi->back;
7158 	struct device *dev;
7159 	int err;
7160 
7161 	dev = ice_pf_to_dev(pf);
7162 	/* allocate descriptors */
7163 	err = ice_vsi_setup_tx_rings(vsi);
7164 	if (err)
7165 		goto err_setup_tx;
7166 
7167 	err = ice_vsi_setup_rx_rings(vsi);
7168 	if (err)
7169 		goto err_setup_rx;
7170 
7171 	err = ice_vsi_cfg_lan(vsi);
7172 	if (err)
7173 		goto err_setup_rx;
7174 
7175 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
7176 		 dev_driver_string(dev), dev_name(dev));
7177 	err = ice_vsi_req_irq_msix(vsi, int_name);
7178 	if (err)
7179 		goto err_setup_rx;
7180 
7181 	ice_vsi_cfg_msix(vsi);
7182 
7183 	err = ice_vsi_start_all_rx_rings(vsi);
7184 	if (err)
7185 		goto err_up_complete;
7186 
7187 	clear_bit(ICE_VSI_DOWN, vsi->state);
7188 	ice_vsi_ena_irq(vsi);
7189 
7190 	return 0;
7191 
7192 err_up_complete:
7193 	ice_down(vsi);
7194 err_setup_rx:
7195 	ice_vsi_free_rx_rings(vsi);
7196 err_setup_tx:
7197 	ice_vsi_free_tx_rings(vsi);
7198 
7199 	return err;
7200 }
7201 
7202 /**
7203  * ice_vsi_open - Called when a network interface is made active
7204  * @vsi: the VSI to open
7205  *
7206  * Initialization of the VSI
7207  *
7208  * Returns 0 on success, negative value on error
7209  */
7210 int ice_vsi_open(struct ice_vsi *vsi)
7211 {
7212 	char int_name[ICE_INT_NAME_STR_LEN];
7213 	struct ice_pf *pf = vsi->back;
7214 	int err;
7215 
7216 	/* allocate descriptors */
7217 	err = ice_vsi_setup_tx_rings(vsi);
7218 	if (err)
7219 		goto err_setup_tx;
7220 
7221 	err = ice_vsi_setup_rx_rings(vsi);
7222 	if (err)
7223 		goto err_setup_rx;
7224 
7225 	err = ice_vsi_cfg_lan(vsi);
7226 	if (err)
7227 		goto err_setup_rx;
7228 
7229 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7230 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7231 	err = ice_vsi_req_irq_msix(vsi, int_name);
7232 	if (err)
7233 		goto err_setup_rx;
7234 
7235 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7236 
7237 	if (vsi->type == ICE_VSI_PF) {
7238 		/* Notify the stack of the actual queue counts. */
7239 		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7240 		if (err)
7241 			goto err_set_qs;
7242 
7243 		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7244 		if (err)
7245 			goto err_set_qs;
7246 	}
7247 
7248 	err = ice_up_complete(vsi);
7249 	if (err)
7250 		goto err_up_complete;
7251 
7252 	return 0;
7253 
7254 err_up_complete:
7255 	ice_down(vsi);
7256 err_set_qs:
7257 	ice_vsi_free_irq(vsi);
7258 err_setup_rx:
7259 	ice_vsi_free_rx_rings(vsi);
7260 err_setup_tx:
7261 	ice_vsi_free_tx_rings(vsi);
7262 
7263 	return err;
7264 }
7265 
7266 /**
7267  * ice_vsi_release_all - Delete all VSIs
7268  * @pf: PF from which all VSIs are being removed
7269  */
7270 static void ice_vsi_release_all(struct ice_pf *pf)
7271 {
7272 	int err, i;
7273 
7274 	if (!pf->vsi)
7275 		return;
7276 
7277 	ice_for_each_vsi(pf, i) {
7278 		if (!pf->vsi[i])
7279 			continue;
7280 
7281 		if (pf->vsi[i]->type == ICE_VSI_CHNL)
7282 			continue;
7283 
7284 		err = ice_vsi_release(pf->vsi[i]);
7285 		if (err)
7286 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7287 				i, err, pf->vsi[i]->vsi_num);
7288 	}
7289 }
7290 
7291 /**
7292  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7293  * @pf: pointer to the PF instance
7294  * @type: VSI type to rebuild
7295  *
7296  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7297  */
7298 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7299 {
7300 	struct device *dev = ice_pf_to_dev(pf);
7301 	int i, err;
7302 
7303 	ice_for_each_vsi(pf, i) {
7304 		struct ice_vsi *vsi = pf->vsi[i];
7305 
7306 		if (!vsi || vsi->type != type)
7307 			continue;
7308 
7309 		/* rebuild the VSI */
7310 		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7311 		if (err) {
7312 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7313 				err, vsi->idx, ice_vsi_type_str(type));
7314 			return err;
7315 		}
7316 
7317 		/* replay filters for the VSI */
7318 		err = ice_replay_vsi(&pf->hw, vsi->idx);
7319 		if (err) {
7320 			dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7321 				err, vsi->idx, ice_vsi_type_str(type));
7322 			return err;
7323 		}
7324 
7325 		/* Re-map HW VSI number, using VSI handle that has been
7326 		 * previously validated in ice_replay_vsi() call above
7327 		 */
7328 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7329 
7330 		/* enable the VSI */
7331 		err = ice_ena_vsi(vsi, false);
7332 		if (err) {
7333 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7334 				err, vsi->idx, ice_vsi_type_str(type));
7335 			return err;
7336 		}
7337 
7338 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7339 			 ice_vsi_type_str(type));
7340 	}
7341 
7342 	return 0;
7343 }
7344 
7345 /**
7346  * ice_update_pf_netdev_link - Update PF netdev link status
7347  * @pf: pointer to the PF instance
7348  */
7349 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7350 {
7351 	bool link_up;
7352 	int i;
7353 
7354 	ice_for_each_vsi(pf, i) {
7355 		struct ice_vsi *vsi = pf->vsi[i];
7356 
7357 		if (!vsi || vsi->type != ICE_VSI_PF)
7358 			return;
7359 
7360 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7361 		if (link_up) {
7362 			netif_carrier_on(pf->vsi[i]->netdev);
7363 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7364 		} else {
7365 			netif_carrier_off(pf->vsi[i]->netdev);
7366 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7367 		}
7368 	}
7369 }
7370 
7371 /**
7372  * ice_rebuild - rebuild after reset
7373  * @pf: PF to rebuild
7374  * @reset_type: type of reset
7375  *
7376  * Do not rebuild VF VSIs in this flow because that is already handled via
7377  * ice_reset_all_vfs(). This is because the requirements for resetting a VF
7378  * after a PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we
7379  * don't want to reset/rebuild all the VF VSIs twice.
7380  */
7381 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7382 {
7383 	struct device *dev = ice_pf_to_dev(pf);
7384 	struct ice_hw *hw = &pf->hw;
7385 	bool dvm;
7386 	int err;
7387 
7388 	if (test_bit(ICE_DOWN, pf->state))
7389 		goto clear_recovery;
7390 
7391 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7392 
7393 #define ICE_EMP_RESET_SLEEP_MS 5000
7394 	if (reset_type == ICE_RESET_EMPR) {
7395 		/* If an EMP reset has occurred, any previously pending flash
7396 		 * update will have completed. We no longer know whether or
7397 		 * not the NVM update EMP reset is restricted.
7398 		 */
7399 		pf->fw_emp_reset_disabled = false;
7400 
7401 		msleep(ICE_EMP_RESET_SLEEP_MS);
7402 	}
7403 
7404 	err = ice_init_all_ctrlq(hw);
7405 	if (err) {
7406 		dev_err(dev, "control queues init failed %d\n", err);
7407 		goto err_init_ctrlq;
7408 	}
7409 
7410 	/* if DDP was previously loaded successfully */
7411 	if (!ice_is_safe_mode(pf)) {
7412 		/* reload the SW DB of filter tables */
7413 		if (reset_type == ICE_RESET_PFR)
7414 			ice_fill_blk_tbls(hw);
7415 		else
7416 			/* Reload DDP Package after CORER/GLOBR reset */
7417 			ice_load_pkg(NULL, pf);
7418 	}
7419 
7420 	err = ice_clear_pf_cfg(hw);
7421 	if (err) {
7422 		dev_err(dev, "clear PF configuration failed %d\n", err);
7423 		goto err_init_ctrlq;
7424 	}
7425 
7426 	ice_clear_pxe_mode(hw);
7427 
7428 	err = ice_init_nvm(hw);
7429 	if (err) {
7430 		dev_err(dev, "ice_init_nvm failed %d\n", err);
7431 		goto err_init_ctrlq;
7432 	}
7433 
7434 	err = ice_get_caps(hw);
7435 	if (err) {
7436 		dev_err(dev, "ice_get_caps failed %d\n", err);
7437 		goto err_init_ctrlq;
7438 	}
7439 
7440 	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7441 	if (err) {
7442 		dev_err(dev, "set_mac_cfg failed %d\n", err);
7443 		goto err_init_ctrlq;
7444 	}
7445 
7446 	dvm = ice_is_dvm_ena(hw);
7447 
7448 	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7449 	if (err)
7450 		goto err_init_ctrlq;
7451 
7452 	err = ice_sched_init_port(hw->port_info);
7453 	if (err)
7454 		goto err_sched_init_port;
7455 
7456 	/* start misc vector */
7457 	err = ice_req_irq_msix_misc(pf);
7458 	if (err) {
7459 		dev_err(dev, "misc vector setup failed: %d\n", err);
7460 		goto err_sched_init_port;
7461 	}
7462 
7463 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7464 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7465 		if (!rd32(hw, PFQF_FD_SIZE)) {
7466 			u16 unused, guar, b_effort;
7467 
7468 			guar = hw->func_caps.fd_fltr_guar;
7469 			b_effort = hw->func_caps.fd_fltr_best_effort;
7470 
7471 			/* force guaranteed filter pool for PF */
7472 			ice_alloc_fd_guar_item(hw, &unused, guar);
7473 			/* force shared filter pool for PF */
7474 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7475 		}
7476 	}
7477 
7478 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7479 		ice_dcb_rebuild(pf);
7480 
7481 	/* If the PF previously had PTP enabled, PTP init needs to happen
7482 	 * before the VSI rebuild; otherwise, the PTP link status events
7483 	 * will fail.
7484 	 */
7485 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7486 		ice_ptp_reset(pf);
7487 
7488 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
7489 		ice_gnss_init(pf);
7490 
7491 	/* rebuild PF VSI */
7492 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7493 	if (err) {
7494 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7495 		goto err_vsi_rebuild;
7496 	}
7497 
7498 	/* configure PTP timestamping after VSI rebuild */
7499 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7500 		ice_ptp_cfg_timestamp(pf, false);
7501 
7502 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
7503 	if (err) {
7504 		dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
7505 		goto err_vsi_rebuild;
7506 	}
7507 
7508 	if (reset_type == ICE_RESET_PFR) {
7509 		err = ice_rebuild_channels(pf);
7510 		if (err) {
7511 			dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7512 				err);
7513 			goto err_vsi_rebuild;
7514 		}
7515 	}
7516 
7517 	/* If Flow Director is active */
7518 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7519 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7520 		if (err) {
7521 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
7522 			goto err_vsi_rebuild;
7523 		}
7524 
7525 		/* replay HW Flow Director recipes */
7526 		if (hw->fdir_prof)
7527 			ice_fdir_replay_flows(hw);
7528 
7529 		/* replay Flow Director filters */
7530 		ice_fdir_replay_fltrs(pf);
7531 
7532 		ice_rebuild_arfs(pf);
7533 	}
7534 
7535 	ice_update_pf_netdev_link(pf);
7536 
7537 	/* tell the firmware we are up */
7538 	err = ice_send_version(pf);
7539 	if (err) {
7540 		dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7541 			err);
7542 		goto err_vsi_rebuild;
7543 	}
7544 
7545 	ice_replay_post(hw);
7546 
7547 	/* if we get here, reset flow is successful */
7548 	clear_bit(ICE_RESET_FAILED, pf->state);
7549 
7550 	ice_plug_aux_dev(pf);
7551 	return;
7552 
7553 err_vsi_rebuild:
7554 err_sched_init_port:
7555 	ice_sched_cleanup_all(hw);
7556 err_init_ctrlq:
7557 	ice_shutdown_all_ctrlq(hw);
7558 	set_bit(ICE_RESET_FAILED, pf->state);
7559 clear_recovery:
7560 	/* set this bit in PF state to control service task scheduling */
7561 	set_bit(ICE_NEEDS_RESTART, pf->state);
7562 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
7563 }
7564 
7565 /**
7566  * ice_change_mtu - NDO callback to change the MTU
7567  * @netdev: network interface device structure
7568  * @new_mtu: new value for maximum frame size
7569  *
7570  * Returns 0 on success, negative on failure
7571  */
7572 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7573 {
7574 	struct ice_netdev_priv *np = netdev_priv(netdev);
7575 	struct ice_vsi *vsi = np->vsi;
7576 	struct ice_pf *pf = vsi->back;
7577 	struct bpf_prog *prog;
7578 	u8 count = 0;
7579 	int err = 0;
7580 
7581 	if (new_mtu == (int)netdev->mtu) {
7582 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7583 		return 0;
7584 	}
7585 
7586 	prog = vsi->xdp_prog;
7587 	if (prog && !prog->aux->xdp_has_frags) {
7588 		int frame_size = ice_max_xdp_frame_size(vsi);
7589 
7590 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7591 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
7592 				   frame_size - ICE_ETH_PKT_HDR_PAD);
7593 			return -EINVAL;
7594 		}
7595 	} else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7596 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
7597 			netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
7598 				   ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
7599 			return -EINVAL;
7600 		}
7601 	}
7602 
7603 	/* if a reset is in progress, wait for some time for it to complete */
7604 	do {
7605 		if (ice_is_reset_in_progress(pf->state)) {
7606 			count++;
7607 			usleep_range(1000, 2000);
7608 		} else {
7609 			break;
7610 		}
7611 
7612 	} while (count < 100);
7613 
7614 	if (count == 100) {
7615 		netdev_err(netdev, "can't change MTU. Device is busy\n");
7616 		return -EBUSY;
7617 	}
7618 
7619 	netdev->mtu = (unsigned int)new_mtu;
7620 
7621 	/* if VSI is up, bring it down and then back up */
7622 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
7623 		err = ice_down(vsi);
7624 		if (err) {
7625 			netdev_err(netdev, "change MTU if_down err %d\n", err);
7626 			return err;
7627 		}
7628 
7629 		err = ice_up(vsi);
7630 		if (err) {
7631 			netdev_err(netdev, "change MTU if_up err %d\n", err);
7632 			return err;
7633 		}
7634 	}
7635 
7636 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7637 	set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7638 
7639 	return err;
7640 }
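
/* Illustrative usage from userspace (placeholder device name):
 *
 *	# ip link set dev eth0 mtu 3000
 *
 * Note that with a non-multi-buffer XDP program loaded, the request is
 * rejected when new_mtu + ICE_ETH_PKT_HDR_PAD exceeds the maximum XDP
 * frame size checked above.
 */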
7641 
7642 /**
7643  * ice_eth_ioctl - Access the hwtstamp interface
7644  * @netdev: network interface device structure
7645  * @ifr: interface request data
7646  * @cmd: ioctl command
7647  */
7648 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7649 {
7650 	struct ice_netdev_priv *np = netdev_priv(netdev);
7651 	struct ice_pf *pf = np->vsi->back;
7652 
7653 	switch (cmd) {
7654 	case SIOCGHWTSTAMP:
7655 		return ice_ptp_get_ts_config(pf, ifr);
7656 	case SIOCSHWTSTAMP:
7657 		return ice_ptp_set_ts_config(pf, ifr);
7658 	default:
7659 		return -EOPNOTSUPP;
7660 	}
7661 }
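
/* Illustrative userspace counterpart of the hwtstamp ioctls handled above,
 * using the standard <linux/net_tstamp.h> interface ("eth0" and sock_fd
 * are placeholders; sock_fd is any open AF_INET datagram socket):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */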
7662 
7663 /**
7664  * ice_aq_str - convert AQ err code to a string
7665  * @aq_err: the AQ error code to convert
7666  */
7667 const char *ice_aq_str(enum ice_aq_err aq_err)
7668 {
7669 	switch (aq_err) {
7670 	case ICE_AQ_RC_OK:
7671 		return "OK";
7672 	case ICE_AQ_RC_EPERM:
7673 		return "ICE_AQ_RC_EPERM";
7674 	case ICE_AQ_RC_ENOENT:
7675 		return "ICE_AQ_RC_ENOENT";
7676 	case ICE_AQ_RC_ENOMEM:
7677 		return "ICE_AQ_RC_ENOMEM";
7678 	case ICE_AQ_RC_EBUSY:
7679 		return "ICE_AQ_RC_EBUSY";
7680 	case ICE_AQ_RC_EEXIST:
7681 		return "ICE_AQ_RC_EEXIST";
7682 	case ICE_AQ_RC_EINVAL:
7683 		return "ICE_AQ_RC_EINVAL";
7684 	case ICE_AQ_RC_ENOSPC:
7685 		return "ICE_AQ_RC_ENOSPC";
7686 	case ICE_AQ_RC_ENOSYS:
7687 		return "ICE_AQ_RC_ENOSYS";
7688 	case ICE_AQ_RC_EMODE:
7689 		return "ICE_AQ_RC_EMODE";
7690 	case ICE_AQ_RC_ENOSEC:
7691 		return "ICE_AQ_RC_ENOSEC";
7692 	case ICE_AQ_RC_EBADSIG:
7693 		return "ICE_AQ_RC_EBADSIG";
7694 	case ICE_AQ_RC_ESVN:
7695 		return "ICE_AQ_RC_ESVN";
7696 	case ICE_AQ_RC_EBADMAN:
7697 		return "ICE_AQ_RC_EBADMAN";
7698 	case ICE_AQ_RC_EBADBUF:
7699 		return "ICE_AQ_RC_EBADBUF";
7700 	}
7701 
7702 	return "ICE_AQ_RC_UNKNOWN";
7703 }
7704 
7705 /**
7706  * ice_set_rss_lut - Set RSS LUT
7707  * @vsi: Pointer to VSI structure
7708  * @lut: Lookup table
7709  * @lut_size: Lookup table size
7710  *
7711  * Returns 0 on success, negative on failure
7712  */
7713 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7714 {
7715 	struct ice_aq_get_set_rss_lut_params params = {};
7716 	struct ice_hw *hw = &vsi->back->hw;
7717 	int status;
7718 
7719 	if (!lut)
7720 		return -EINVAL;
7721 
7722 	params.vsi_handle = vsi->idx;
7723 	params.lut_size = lut_size;
7724 	params.lut_type = vsi->rss_lut_type;
7725 	params.lut = lut;
7726 
7727 	status = ice_aq_set_rss_lut(hw, &params);
7728 	if (status)
7729 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7730 			status, ice_aq_str(hw->adminq.sq_last_status));
7731 
7732 	return status;
7733 }
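
/* Illustrative sketch of an ice_set_rss_lut() caller: fill a LUT
 * round-robin across the VSI's active RSS queues and push it. This helper
 * is an editorial assumption for illustration only, not an upstream
 * function; it relies on the rss_table_size/rss_size VSI fields used
 * elsewhere in this driver.
 */
static int __maybe_unused ice_example_fill_rss_lut(struct ice_vsi *vsi)
{
	u8 *lut;
	int err;
	u16 i;

	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* spread LUT entries evenly across the allocated RSS queues */
	for (i = 0; i < vsi->rss_table_size; i++)
		lut[i] = i % vsi->rss_size;

	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	kfree(lut);
	return err;
}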
7734 
7735 /**
7736  * ice_set_rss_key - Set RSS key
7737  * @vsi: Pointer to the VSI structure
7738  * @seed: RSS hash seed
7739  *
7740  * Returns 0 on success, negative on failure
7741  */
7742 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7743 {
7744 	struct ice_hw *hw = &vsi->back->hw;
7745 	int status;
7746 
7747 	if (!seed)
7748 		return -EINVAL;
7749 
7750 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7751 	if (status)
7752 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7753 			status, ice_aq_str(hw->adminq.sq_last_status));
7754 
7755 	return status;
7756 }
7757 
7758 /**
7759  * ice_get_rss_lut - Get RSS LUT
7760  * @vsi: Pointer to VSI structure
7761  * @lut: Buffer to store the lookup table entries
7762  * @lut_size: Size of buffer to store the lookup table entries
7763  *
7764  * Returns 0 on success, negative on failure
7765  */
7766 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7767 {
7768 	struct ice_aq_get_set_rss_lut_params params = {};
7769 	struct ice_hw *hw = &vsi->back->hw;
7770 	int status;
7771 
7772 	if (!lut)
7773 		return -EINVAL;
7774 
7775 	params.vsi_handle = vsi->idx;
7776 	params.lut_size = lut_size;
7777 	params.lut_type = vsi->rss_lut_type;
7778 	params.lut = lut;
7779 
7780 	status = ice_aq_get_rss_lut(hw, &params);
7781 	if (status)
7782 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7783 			status, ice_aq_str(hw->adminq.sq_last_status));
7784 
7785 	return status;
7786 }
7787 
7788 /**
7789  * ice_get_rss_key - Get RSS key
7790  * @vsi: Pointer to VSI structure
7791  * @seed: Buffer to store the key in
7792  *
7793  * Returns 0 on success, negative on failure
7794  */
7795 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7796 {
7797 	struct ice_hw *hw = &vsi->back->hw;
7798 	int status;
7799 
7800 	if (!seed)
7801 		return -EINVAL;
7802 
7803 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7804 	if (status)
7805 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7806 			status, ice_aq_str(hw->adminq.sq_last_status));
7807 
7808 	return status;
7809 }
7810 
7811 /**
7812  * ice_bridge_getlink - Get the hardware bridge mode
7813  * @skb: skb buff
7814  * @pid: process ID
7815  * @seq: RTNL message seq
7816  * @dev: the netdev being configured
7817  * @filter_mask: filter mask passed in
7818  * @nlflags: netlink flags passed in
7819  *
7820  * Return the bridge mode (VEB/VEPA)
7821  */
7822 static int
7823 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7824 		   struct net_device *dev, u32 filter_mask, int nlflags)
7825 {
7826 	struct ice_netdev_priv *np = netdev_priv(dev);
7827 	struct ice_vsi *vsi = np->vsi;
7828 	struct ice_pf *pf = vsi->back;
7829 	u16 bmode;
7830 
7831 	bmode = pf->first_sw->bridge_mode;
7832 
7833 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7834 				       filter_mask, NULL);
7835 }
7836 
7837 /**
7838  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7839  * @vsi: Pointer to VSI structure
7840  * @bmode: Hardware bridge mode (VEB/VEPA)
7841  *
7842  * Returns 0 on success, negative on failure
7843  */
7844 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7845 {
7846 	struct ice_aqc_vsi_props *vsi_props;
7847 	struct ice_hw *hw = &vsi->back->hw;
7848 	struct ice_vsi_ctx *ctxt;
7849 	int ret;
7850 
7851 	vsi_props = &vsi->info;
7852 
7853 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7854 	if (!ctxt)
7855 		return -ENOMEM;
7856 
7857 	ctxt->info = vsi->info;
7858 
7859 	if (bmode == BRIDGE_MODE_VEB)
7860 		/* change from VEPA to VEB mode */
7861 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7862 	else
7863 		/* change from VEB to VEPA mode */
7864 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7865 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
7866 
7867 	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7868 	if (ret) {
7869 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7870 			bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7871 		goto out;
7872 	}
7873 	/* Update sw flags for book keeping */
7874 	vsi_props->sw_flags = ctxt->info.sw_flags;
7875 
7876 out:
7877 	kfree(ctxt);
7878 	return ret;
7879 }
7880 
7881 /**
7882  * ice_bridge_setlink - Set the hardware bridge mode
7883  * @dev: the netdev being configured
7884  * @nlh: RTNL message
7885  * @flags: bridge setlink flags
7886  * @extack: netlink extended ack
7887  *
7888  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
7889  * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
7890  * not already set) for all VSIs connected to this switch, and also updates
7891  * the unicast switch filter rules for the corresponding switch of the netdev.
7892  */
7893 static int
7894 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7895 		   u16 __always_unused flags,
7896 		   struct netlink_ext_ack __always_unused *extack)
7897 {
7898 	struct ice_netdev_priv *np = netdev_priv(dev);
7899 	struct ice_pf *pf = np->vsi->back;
7900 	struct nlattr *attr, *br_spec;
7901 	struct ice_hw *hw = &pf->hw;
7902 	struct ice_sw *pf_sw;
7903 	int rem, v, err = 0;
7904 
7905 	pf_sw = pf->first_sw;
7906 	/* find the attribute in the netlink message */
7907 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7908 
7909 	nla_for_each_nested(attr, br_spec, rem) {
7910 		__u16 mode;
7911 
7912 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
7913 			continue;
7914 		mode = nla_get_u16(attr);
7915 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7916 			return -EINVAL;
7917 		/* Continue if bridge mode is not being flipped */
7918 		if (mode == pf_sw->bridge_mode)
7919 			continue;
7920 		/* Iterates through the PF VSI list and update the loopback
7921 		 * mode of the VSI
7922 		 */
7923 		ice_for_each_vsi(pf, v) {
7924 			if (!pf->vsi[v])
7925 				continue;
7926 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7927 			if (err)
7928 				return err;
7929 		}
7930 
7931 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7932 		/* Update the unicast switch filter rules for the corresponding
7933 		 * switch of the netdev
7934 		 */
7935 		err = ice_update_sw_rule_bridge_mode(hw);
7936 		if (err) {
7937 			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
7938 				   mode, err,
7939 				   ice_aq_str(hw->adminq.sq_last_status));
7940 			/* revert hw->evb_veb */
7941 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7942 			return err;
7943 		}
7944 
7945 		pf_sw->bridge_mode = mode;
7946 	}
7947 
7948 	return 0;
7949 }
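
/* Illustrative iproute2 commands that exercise the setlink/getlink paths
 * above (placeholder device name):
 *
 *	# bridge link set dev eth0 hwmode veb
 *	# bridge link set dev eth0 hwmode vepa
 *	# bridge link show dev eth0
 */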
7950 
7951 /**
7952  * ice_tx_timeout - Respond to a Tx Hang
7953  * @netdev: network interface device structure
7954  * @txqueue: Tx queue
7955  */
7956 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
7957 {
7958 	struct ice_netdev_priv *np = netdev_priv(netdev);
7959 	struct ice_tx_ring *tx_ring = NULL;
7960 	struct ice_vsi *vsi = np->vsi;
7961 	struct ice_pf *pf = vsi->back;
7962 	u32 i;
7963 
7964 	pf->tx_timeout_count++;
7965 
7966 	/* Check if PFC is enabled for the TC to which the queue belongs.
7967 	 * If so, the Tx timeout is not caused by a hung queue and there is
7968 	 * no need to reset and rebuild.
7969 	 */
7970 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7971 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7972 			 txqueue);
7973 		return;
7974 	}
7975 
7976 	/* now that we have an index, find the tx_ring struct */
7977 	ice_for_each_txq(vsi, i)
7978 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7979 			if (txqueue == vsi->tx_rings[i]->q_index) {
7980 				tx_ring = vsi->tx_rings[i];
7981 				break;
7982 			}
7983 
7984 	/* Reset recovery level if enough time has elapsed after last timeout.
7985 	 * Also ensure no new reset action happens before next timeout period.
7986 	 */
7987 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7988 		pf->tx_timeout_recovery_level = 1;
7989 	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7990 				       netdev->watchdog_timeo)))
7991 		return;
7992 
7993 	if (tx_ring) {
7994 		struct ice_hw *hw = &pf->hw;
7995 		u32 head, val = 0;
7996 
7997 		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7998 			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7999 		/* Read interrupt register */
8000 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
8001 
8002 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
8003 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
8004 			    head, tx_ring->next_to_use, val);
8005 	}
8006 
8007 	pf->tx_timeout_last_recovery = jiffies;
8008 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
8009 		    pf->tx_timeout_recovery_level, txqueue);
8010 
8011 	switch (pf->tx_timeout_recovery_level) {
8012 	case 1:
8013 		set_bit(ICE_PFR_REQ, pf->state);
8014 		break;
8015 	case 2:
8016 		set_bit(ICE_CORER_REQ, pf->state);
8017 		break;
8018 	case 3:
8019 		set_bit(ICE_GLOBR_REQ, pf->state);
8020 		break;
8021 	default:
8022 		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
8023 		set_bit(ICE_DOWN, pf->state);
8024 		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
8025 		set_bit(ICE_SERVICE_DIS, pf->state);
8026 		break;
8027 	}
8028 
8029 	ice_service_task_schedule(pf);
8030 	pf->tx_timeout_recovery_level++;
8031 }
8032 
8033 /**
8034  * ice_setup_tc_cls_flower - flower classifier offloads
8035  * @np: net device to configure
8036  * @filter_dev: device on which filter is added
8037  * @cls_flower: offload data
8038  */
8039 static int
8040 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
8041 			struct net_device *filter_dev,
8042 			struct flow_cls_offload *cls_flower)
8043 {
8044 	struct ice_vsi *vsi = np->vsi;
8045 
8046 	if (cls_flower->common.chain_index)
8047 		return -EOPNOTSUPP;
8048 
8049 	switch (cls_flower->command) {
8050 	case FLOW_CLS_REPLACE:
8051 		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
8052 	case FLOW_CLS_DESTROY:
8053 		return ice_del_cls_flower(vsi, cls_flower);
8054 	default:
8055 		return -EINVAL;
8056 	}
8057 }
8058 
8059 /**
8060  * ice_setup_tc_block_cb - callback handler registered for TC block
8061  * @type: TC SETUP type
8062  * @type_data: TC flower offload data that contains user input
8063  * @cb_priv: netdev private data
8064  */
8065 static int
8066 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
8067 {
8068 	struct ice_netdev_priv *np = cb_priv;
8069 
8070 	switch (type) {
8071 	case TC_SETUP_CLSFLOWER:
8072 		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
8073 					       type_data);
8074 	default:
8075 		return -EOPNOTSUPP;
8076 	}
8077 }
8078 
8079 /**
8080  * ice_validate_mqprio_qopt - Validate TCF input parameters
8081  * @vsi: Pointer to VSI
8082  * @mqprio_qopt: input parameters for mqprio queue configuration
8083  *
8084  * This function validates MQPRIO params, such as qcount (power of 2 wherever
8085  * needed), and makes sure the user doesn't specify qcount and BW rate limits
8086  * for more TCs than "num_tc"
8087  */
8088 static int
8089 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
8090 			 struct tc_mqprio_qopt_offload *mqprio_qopt)
8091 {
8092 	u64 sum_max_rate = 0, sum_min_rate = 0;
8093 	int non_power_of_2_qcount = 0;
8094 	struct ice_pf *pf = vsi->back;
8095 	int max_rss_q_cnt = 0;
8096 	struct device *dev;
8097 	int i, speed;
8098 	u8 num_tc;
8099 
8100 	if (vsi->type != ICE_VSI_PF)
8101 		return -EINVAL;
8102 
8103 	if (mqprio_qopt->qopt.offset[0] != 0 ||
8104 	    mqprio_qopt->qopt.num_tc < 1 ||
8105 	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
8106 		return -EINVAL;
8107 
8108 	dev = ice_pf_to_dev(pf);
8109 	vsi->ch_rss_size = 0;
8110 	num_tc = mqprio_qopt->qopt.num_tc;
8111 
8112 	for (i = 0; num_tc; i++) {
8113 		int qcount = mqprio_qopt->qopt.count[i];
8114 		u64 max_rate, min_rate, rem;
8115 
8116 		if (!qcount)
8117 			return -EINVAL;
8118 
8119 		if (is_power_of_2(qcount)) {
8120 			if (non_power_of_2_qcount &&
8121 			    qcount > non_power_of_2_qcount) {
8122 				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
8123 					qcount, non_power_of_2_qcount);
8124 				return -EINVAL;
8125 			}
8126 			if (qcount > max_rss_q_cnt)
8127 				max_rss_q_cnt = qcount;
8128 		} else {
8129 			if (non_power_of_2_qcount &&
8130 			    qcount != non_power_of_2_qcount) {
8131 				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
8132 					qcount, non_power_of_2_qcount);
8133 				return -EINVAL;
8134 			}
8135 			if (qcount < max_rss_q_cnt) {
8136 				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
8137 					qcount, max_rss_q_cnt);
8138 				return -EINVAL;
8139 			}
8140 			max_rss_q_cnt = qcount;
8141 			non_power_of_2_qcount = qcount;
8142 		}
8143 
8144 		/* TC command takes input in K/M/Gbps or K/M/Gbit etc. but
8145 		 * converts the bandwidth rate limit into Bytes/s when
8146 		 * passing it down to the driver. So convert input bandwidth
8147 		 * from Bytes/s to Kbps
8148 		 */
8149 		max_rate = mqprio_qopt->max_rate[i];
8150 		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
8151 		sum_max_rate += max_rate;
8152 
8153 		/* min_rate is minimum guaranteed rate and it can't be zero */
8154 		min_rate = mqprio_qopt->min_rate[i];
8155 		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
8156 		sum_min_rate += min_rate;
8157 
8158 		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
8159 			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
8160 				min_rate, ICE_MIN_BW_LIMIT);
8161 			return -EINVAL;
8162 		}
8163 
8164 		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
8165 		if (rem) {
8166 			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
8167 				i, ICE_MIN_BW_LIMIT);
8168 			return -EINVAL;
8169 		}
8170 
8171 		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
8172 		if (rem) {
8173 			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
8174 				i, ICE_MIN_BW_LIMIT);
8175 			return -EINVAL;
8176 		}
8177 
8178 		/* min_rate can't be more than max_rate, except when max_rate
8179 		 * is zero (implies max_rate sought is max line rate). In such
8180 		 * a case min_rate can be more than max.
8181 		 */
8182 		if (max_rate && min_rate > max_rate) {
8183 			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
8184 				min_rate, max_rate);
8185 			return -EINVAL;
8186 		}
8187 
8188 		if (i >= mqprio_qopt->qopt.num_tc - 1)
8189 			break;
8190 		if (mqprio_qopt->qopt.offset[i + 1] !=
8191 		    (mqprio_qopt->qopt.offset[i] + qcount))
8192 			return -EINVAL;
8193 	}
8194 	if (vsi->num_rxq <
8195 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8196 		return -EINVAL;
8197 	if (vsi->num_txq <
8198 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8199 		return -EINVAL;
8200 
8201 	speed = ice_get_link_speed_kbps(vsi);
8202 	if (sum_max_rate && sum_max_rate > (u64)speed) {
8203 		dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
8204 			sum_max_rate, speed);
8205 		return -EINVAL;
8206 	}
8207 	if (sum_min_rate && sum_min_rate > (u64)speed) {
8208 		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
8209 			sum_min_rate, speed);
8210 		return -EINVAL;
8211 	}
8212 
8213 	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8214 	vsi->ch_rss_size = max_rss_q_cnt;
8215 
8216 	return 0;
8217 }
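
/* Illustrative tc invocation that produces the mqprio_qopt validated above
 * (ADQ-style channel mode; device name, queue counts and rates are
 * placeholders):
 *
 *	# tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel \
 *		shaper bw_rlimit min_rate 1Gbit 2Gbit max_rate 4Gbit 5Gbit
 *
 * Deleting the root qdisc ("tc qdisc del dev eth0 root") takes the !hw
 * path in ice_setup_tc_mqprio_qdisc() and tears the channels back down.
 */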
8218 
8219 /**
8220  * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8221  * @pf: ptr to PF device
8222  * @vsi: ptr to VSI
8223  */
8224 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8225 {
8226 	struct device *dev = ice_pf_to_dev(pf);
8227 	bool added = false;
8228 	struct ice_hw *hw;
8229 	int flow;
8230 
8231 	if (!(vsi->num_gfltr || vsi->num_bfltr))
8232 		return -EINVAL;
8233 
8234 	hw = &pf->hw;
8235 	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8236 		struct ice_fd_hw_prof *prof;
8237 		int tun, status;
8238 		u64 entry_h;
8239 
8240 		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8241 		      hw->fdir_prof[flow]->cnt))
8242 			continue;
8243 
8244 		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8245 			enum ice_flow_priority prio;
8246 			u64 prof_id;
8247 
8248 			/* add this VSI to FDir profile for this flow */
8249 			prio = ICE_FLOW_PRIO_NORMAL;
8250 			prof = hw->fdir_prof[flow];
8251 			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
8252 			status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
8253 						    prof->vsi_h[0], vsi->idx,
8254 						    prio, prof->fdir_seg[tun],
8255 						    &entry_h);
8256 			if (status) {
8257 				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8258 					vsi->idx, flow);
8259 				continue;
8260 			}
8261 
8262 			prof->entry_h[prof->cnt][tun] = entry_h;
8263 		}
8264 
8265 		/* store VSI for filter replay and delete */
8266 		prof->vsi_h[prof->cnt] = vsi->idx;
8267 		prof->cnt++;
8268 
8269 		added = true;
8270 		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8271 			flow);
8272 	}
8273 
8274 	if (!added)
8275 		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8276 
8277 	return 0;
8278 }
8279 
8280 /**
8281  * ice_add_channel - add a channel by adding VSI
8282  * @pf: ptr to PF device
8283  * @sw_id: underlying HW switching element ID
8284  * @ch: ptr to channel structure
8285  *
8286  * Add a channel (VSI) using add_vsi and queue_map
8287  */
8288 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8289 {
8290 	struct device *dev = ice_pf_to_dev(pf);
8291 	struct ice_vsi *vsi;
8292 
8293 	if (ch->type != ICE_VSI_CHNL) {
8294 		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8295 		return -EINVAL;
8296 	}
8297 
8298 	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8299 	if (!vsi || vsi->type != ICE_VSI_CHNL) {
8300 		dev_err(dev, "create chnl VSI failure\n");
8301 		return -EINVAL;
8302 	}
8303 
8304 	ice_add_vsi_to_fdir(pf, vsi);
8305 
8306 	ch->sw_id = sw_id;
8307 	ch->vsi_num = vsi->vsi_num;
8308 	ch->info.mapping_flags = vsi->info.mapping_flags;
8309 	ch->ch_vsi = vsi;
8310 	/* set the back pointer of channel for newly created VSI */
8311 	vsi->ch = ch;
8312 
8313 	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8314 	       sizeof(vsi->info.q_mapping));
8315 	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8316 	       sizeof(vsi->info.tc_mapping));
8317 
8318 	return 0;
8319 }
8320 
8321 /**
8322  * ice_chnl_cfg_res - configure channel-specific resources
8323  * @vsi: the VSI being setup
8324  * @ch: ptr to channel structure
8325  *
8326  * Configure channel-specific resources such as rings and vectors.
8327  */
8328 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8329 {
8330 	int i;
8331 
8332 	for (i = 0; i < ch->num_txq; i++) {
8333 		struct ice_q_vector *tx_q_vector, *rx_q_vector;
8334 		struct ice_ring_container *rc;
8335 		struct ice_tx_ring *tx_ring;
8336 		struct ice_rx_ring *rx_ring;
8337 
8338 		tx_ring = vsi->tx_rings[ch->base_q + i];
8339 		rx_ring = vsi->rx_rings[ch->base_q + i];
8340 		if (!tx_ring || !rx_ring)
8341 			continue;
8342 
8343 		/* setup ring being channel enabled */
8344 		tx_ring->ch = ch;
8345 		rx_ring->ch = ch;
8346 
8347 		/* following code block sets up vector specific attributes */
8348 		tx_q_vector = tx_ring->q_vector;
8349 		rx_q_vector = rx_ring->q_vector;
8350 		if (!tx_q_vector && !rx_q_vector)
8351 			continue;
8352 
8353 		if (tx_q_vector) {
8354 			tx_q_vector->ch = ch;
8355 			/* setup Tx and Rx ITR setting if DIM is off */
8356 			rc = &tx_q_vector->tx;
8357 			if (!ITR_IS_DYNAMIC(rc))
8358 				ice_write_itr(rc, rc->itr_setting);
8359 		}
8360 		if (rx_q_vector) {
8361 			rx_q_vector->ch = ch;
8362 			/* setup Tx and Rx ITR setting if DIM is off */
8363 			rc = &rx_q_vector->rx;
8364 			if (!ITR_IS_DYNAMIC(rc))
8365 				ice_write_itr(rc, rc->itr_setting);
8366 		}
8367 	}
8368 
8369 	/* It is safe to assume that, if the channel has a non-zero num_txq or
8370 	 * num_rxq, the GLINT_ITR register would have been written to perform
8371 	 * an in-context update, hence perform a flush.
8372 	 */
8373 	if (ch->num_txq || ch->num_rxq)
8374 		ice_flush(&vsi->back->hw);
8375 }
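
/* Illustrative sketch of the write-then-flush pattern relied on above
 * (an approximation, not the exact driver code): ice_write_itr() ends in
 * a posted wr32() of the vector's GLINT_ITR register, roughly
 *
 *	wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), itr_val);
 *
 * and ice_flush() performs a register read-back, which guarantees the
 * posted PCI writes have reached the device before proceeding.
 */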
8376 
8377 /**
8378  * ice_cfg_chnl_all_res - configure channel resources
8379  * @vsi: ptr to main VSI
8380  * @ch: ptr to channel structure
8381  *
8382  * This function configures channel specific resources such as flow-director
8383  * counter index, and other resources such as queues, vectors, ITR settings
8384  */
8385 static void
8386 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8387 {
8388 	/* configure channel (aka ADQ) resources such as queues, vectors,
8389 	 * ITR settings for channel specific vectors and anything else
8390 	 */
8391 	ice_chnl_cfg_res(vsi, ch);
8392 }
8393 
8394 /**
8395  * ice_setup_hw_channel - setup new channel
8396  * @pf: ptr to PF device
8397  * @vsi: the VSI being setup
8398  * @ch: ptr to channel structure
8399  * @sw_id: underlying HW switching element ID
8400  * @type: type of channel to be created (VMDq2/VF)
8401  *
8402  * Setup new channel (VSI) based on specified type (VMDq2/VF)
8403  * and configures Tx rings accordingly
8404  */
8405 static int
8406 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8407 		     struct ice_channel *ch, u16 sw_id, u8 type)
8408 {
8409 	struct device *dev = ice_pf_to_dev(pf);
8410 	int ret;
8411 
8412 	ch->base_q = vsi->next_base_q;
8413 	ch->type = type;
8414 
8415 	ret = ice_add_channel(pf, sw_id, ch);
8416 	if (ret) {
8417 		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8418 		return ret;
8419 	}
8420 
8421 	/* configure/setup ADQ specific resources */
8422 	ice_cfg_chnl_all_res(vsi, ch);
8423 
8424 	/* make sure to update the next_base_q so that subsequent channel's
8425 	 * (aka ADQ) VSI queue map is correct
8426 	 */
8427 	vsi->next_base_q += ch->num_rxq;
8428 	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8429 		ch->num_rxq);
8430 
8431 	return 0;
8432 }
8433 
8434 /**
8435  * ice_setup_channel - setup new channel using uplink element
8436  * @pf: ptr to PF device
8437  * @vsi: the VSI being setup
8438  * @ch: ptr to channel structure
8439  *
8440  * Setup new channel (VSI) based on specified type (VMDq2/VF)
8441  * and uplink switching element
8442  */
8443 static bool
8444 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8445 		  struct ice_channel *ch)
8446 {
8447 	struct device *dev = ice_pf_to_dev(pf);
8448 	u16 sw_id;
8449 	int ret;
8450 
8451 	if (vsi->type != ICE_VSI_PF) {
8452 		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8453 		return false;
8454 	}
8455 
8456 	sw_id = pf->first_sw->sw_id;
8457 
8458 	/* create channel (VSI) */
8459 	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8460 	if (ret) {
8461 		dev_err(dev, "failed to setup hw_channel\n");
8462 		return false;
8463 	}
8464 	dev_dbg(dev, "successfully created channel()\n");
8465 
8466 	return ch->ch_vsi ? true : false;
8467 }
8468 
8469 /**
8470  * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8471  * @vsi: VSI to be configured
8472  * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8473  * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8474  */
8475 static int
8476 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8477 {
8478 	int err;
8479 
8480 	err = ice_set_min_bw_limit(vsi, min_tx_rate);
8481 	if (err)
8482 		return err;
8483 
8484 	return ice_set_max_bw_limit(vsi, max_tx_rate);
8485 }
8486 
8487 /**
8488  * ice_create_q_channel - function to create channel
8489  * @vsi: VSI to be configured
8490  * @ch: ptr to channel (it contains channel specific params)
8491  *
8492  * This function creates channel (VSI) using num_queues specified by user,
8493  * reconfigs RSS if needed.
8494  */
8495 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8496 {
8497 	struct ice_pf *pf = vsi->back;
8498 	struct device *dev;
8499 
8500 	if (!ch)
8501 		return -EINVAL;
8502 
8503 	dev = ice_pf_to_dev(pf);
8504 	if (!ch->num_txq || !ch->num_rxq) {
8505 		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8506 		return -EINVAL;
8507 	}
8508 
8509 	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8510 		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8511 			vsi->cnt_q_avail, ch->num_txq);
8512 		return -EINVAL;
8513 	}
8514 
8515 	if (!ice_setup_channel(pf, vsi, ch)) {
8516 		dev_info(dev, "Failed to setup channel\n");
8517 		return -EINVAL;
8518 	}
8519 	/* configure BW rate limit */
8520 	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8521 		int ret;
8522 
8523 		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8524 				       ch->min_tx_rate);
8525 		if (ret)
8526 			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8527 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8528 		else
8529 			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8530 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8531 	}
8532 
8533 	vsi->cnt_q_avail -= ch->num_txq;
8534 
8535 	return 0;
8536 }
8537 
8538 /**
8539  * ice_rem_all_chnl_fltrs - removes all channel filters
8540  * @pf: ptr to PF; TC-flower based filters are tracked at PF level
8541  *
8542  * Remove all advanced switch filters, but only if they are channel-specific
8543  * tc-flower based filters
8544  */
8545 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8546 {
8547 	struct ice_tc_flower_fltr *fltr;
8548 	struct hlist_node *node;
8549 
8550 	/* to remove all channel filters, iterate an ordered list of filters */
8551 	hlist_for_each_entry_safe(fltr, node,
8552 				  &pf->tc_flower_fltr_list,
8553 				  tc_flower_node) {
8554 		struct ice_rule_query_data rule;
8555 		int status;
8556 
8557 		/* for now process only channel specific filters */
8558 		if (!ice_is_chnl_fltr(fltr))
8559 			continue;
8560 
8561 		rule.rid = fltr->rid;
8562 		rule.rule_id = fltr->rule_id;
8563 		rule.vsi_handle = fltr->dest_vsi_handle;
8564 		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8565 		if (status) {
8566 			if (status == -ENOENT)
8567 				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8568 					rule.rule_id);
8569 			else
8570 				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8571 					status);
8572 		} else if (fltr->dest_vsi) {
8573 			/* update advanced switch filter count */
8574 			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8575 				u32 flags = fltr->flags;
8576 
8577 				fltr->dest_vsi->num_chnl_fltr--;
8578 				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8579 					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8580 					pf->num_dmac_chnl_fltrs--;
8581 			}
8582 		}
8583 
8584 		hlist_del(&fltr->tc_flower_node);
8585 		kfree(fltr);
8586 	}
8587 }
8588 
8589 /**
8590  * ice_remove_q_channels - Remove queue channels for the TCs
8591  * @vsi: VSI to be configured
8592  * @rem_fltr: delete advanced switch filter or not
8593  *
8594  * Remove queue channels for the TCs
8595  */
8596 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8597 {
8598 	struct ice_channel *ch, *ch_tmp;
8599 	struct ice_pf *pf = vsi->back;
8600 	int i;
8601 
8602 	/* remove all tc-flower based filters if they are channel-specific filters */
8603 	if (rem_fltr)
8604 		ice_rem_all_chnl_fltrs(pf);
8605 
8606 	/* remove ntuple filters since queue configuration is being changed */
8607 	if (vsi->netdev->features & NETIF_F_NTUPLE) {
8608 		struct ice_hw *hw = &pf->hw;
8609 
8610 		mutex_lock(&hw->fdir_fltr_lock);
8611 		ice_fdir_del_all_fltrs(vsi);
8612 		mutex_unlock(&hw->fdir_fltr_lock);
8613 	}
8614 
8615 	/* perform cleanup for channels if they exist */
8616 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8617 		struct ice_vsi *ch_vsi;
8618 
8619 		list_del(&ch->list);
8620 		ch_vsi = ch->ch_vsi;
8621 		if (!ch_vsi) {
8622 			kfree(ch);
8623 			continue;
8624 		}
8625 
8626 		/* Reset queue contexts */
8627 		for (i = 0; i < ch->num_rxq; i++) {
8628 			struct ice_tx_ring *tx_ring;
8629 			struct ice_rx_ring *rx_ring;
8630 
8631 			tx_ring = vsi->tx_rings[ch->base_q + i];
8632 			rx_ring = vsi->rx_rings[ch->base_q + i];
8633 			if (tx_ring) {
8634 				tx_ring->ch = NULL;
8635 				if (tx_ring->q_vector)
8636 					tx_ring->q_vector->ch = NULL;
8637 			}
8638 			if (rx_ring) {
8639 				rx_ring->ch = NULL;
8640 				if (rx_ring->q_vector)
8641 					rx_ring->q_vector->ch = NULL;
8642 			}
8643 		}
8644 
8645 		/* Release FD resources for the channel VSI */
8646 		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8647 
8648 		/* clear the VSI from scheduler tree */
8649 		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8650 
8651 		/* Delete VSI from FW, PF and HW VSI arrays */
8652 		ice_vsi_delete(ch->ch_vsi);
8653 
8654 		/* free the channel */
8655 		kfree(ch);
8656 	}
8657 
8658 	/* clear the channel VSI map which is stored in main VSI */
8659 	ice_for_each_chnl_tc(i)
8660 		vsi->tc_map_vsi[i] = NULL;
8661 
8662 	/* reset main VSI's all TC information */
8663 	vsi->all_enatc = 0;
8664 	vsi->all_numtc = 0;
8665 }
8666 
8667 /**
8668  * ice_rebuild_channels - rebuild channel
8669  * @pf: ptr to PF
8670  *
8671  * Recreate channel VSIs and replay filters
8672  */
8673 static int ice_rebuild_channels(struct ice_pf *pf)
8674 {
8675 	struct device *dev = ice_pf_to_dev(pf);
8676 	struct ice_vsi *main_vsi;
8677 	bool rem_adv_fltr = true;
8678 	struct ice_channel *ch;
8679 	struct ice_vsi *vsi;
8680 	int tc_idx = 1;
8681 	int i, err;
8682 
8683 	main_vsi = ice_get_main_vsi(pf);
8684 	if (!main_vsi)
8685 		return 0;
8686 
8687 	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8688 	    main_vsi->old_numtc == 1)
8689 		return 0; /* nothing to be done */
8690 
8691 	/* reconfigure main VSI based on old value of TC and cached values
8692 	 * for MQPRIO opts
8693 	 */
8694 	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8695 	if (err) {
8696 		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8697 			main_vsi->old_ena_tc, main_vsi->vsi_num);
8698 		return err;
8699 	}
8700 
8701 	/* rebuild ADQ VSIs */
8702 	ice_for_each_vsi(pf, i) {
8703 		enum ice_vsi_type type;
8704 
8705 		vsi = pf->vsi[i];
8706 		if (!vsi || vsi->type != ICE_VSI_CHNL)
8707 			continue;
8708 
8709 		type = vsi->type;
8710 
8711 		/* rebuild ADQ VSI */
8712 		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
8713 		if (err) {
8714 			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8715 				ice_vsi_type_str(type), vsi->idx, err);
8716 			goto cleanup;
8717 		}
8718 
8719 		/* Re-map HW VSI number, using the VSI handle that is
8720 		 * validated by the ice_replay_vsi() call below
8721 		 */
8722 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8723 
8724 		/* replay filters for the VSI */
8725 		err = ice_replay_vsi(&pf->hw, vsi->idx);
8726 		if (err) {
8727 			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8728 				ice_vsi_type_str(type), err, vsi->idx);
8729 			rem_adv_fltr = false;
8730 			goto cleanup;
8731 		}
8732 		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8733 			 ice_vsi_type_str(type), vsi->idx);
8734 
8735 		/* store ADQ VSI at correct TC index in main VSI's
8736 		 * map of TC to VSI
8737 		 */
8738 		main_vsi->tc_map_vsi[tc_idx++] = vsi;
8739 	}
8740 
8741 	/* ADQ VSI(s) have been rebuilt successfully, so set up the
8742 	 * channel for the main VSI's Tx and Rx rings
8743 	 */
8744 	list_for_each_entry(ch, &main_vsi->ch_list, list) {
8745 		struct ice_vsi *ch_vsi;
8746 
8747 		ch_vsi = ch->ch_vsi;
8748 		if (!ch_vsi)
8749 			continue;
8750 
8751 		/* reconfig channel resources */
8752 		ice_cfg_chnl_all_res(main_vsi, ch);
8753 
8754 		/* replay BW rate limit if it is non-zero */
8755 		if (!ch->max_tx_rate && !ch->min_tx_rate)
8756 			continue;
8757 
8758 		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8759 				       ch->min_tx_rate);
8760 		if (err)
8761 			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8762 				err, ch->max_tx_rate, ch->min_tx_rate,
8763 				ch_vsi->vsi_num);
8764 		else
8765 			dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8766 				ch->max_tx_rate, ch->min_tx_rate,
8767 				ch_vsi->vsi_num);
8768 	}
8769 
8770 	/* reconfig RSS for main VSI */
8771 	if (main_vsi->ch_rss_size)
8772 		ice_vsi_cfg_rss_lut_key(main_vsi);
8773 
8774 	return 0;
8775 
8776 cleanup:
8777 	ice_remove_q_channels(main_vsi, rem_adv_fltr);
8778 	return err;
8779 }
8780 
8781 /**
8782  * ice_create_q_channels - Add queue channel for the given TCs
8783  * @vsi: VSI to be configured
8784  *
8785  * Configures queue channel mapping to the given TCs
8786  */
8787 static int ice_create_q_channels(struct ice_vsi *vsi)
8788 {
8789 	struct ice_pf *pf = vsi->back;
8790 	struct ice_channel *ch;
8791 	int ret = 0, i;
8792 
8793 	ice_for_each_chnl_tc(i) {
8794 		if (!(vsi->all_enatc & BIT(i)))
8795 			continue;
8796 
8797 		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8798 		if (!ch) {
8799 			ret = -ENOMEM;
8800 			goto err_free;
8801 		}
8802 		INIT_LIST_HEAD(&ch->list);
8803 		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8804 		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8805 		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8806 		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8807 		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8808 
8809 		/* convert to Kbits/s */
8810 		if (ch->max_tx_rate)
8811 			ch->max_tx_rate = div_u64(ch->max_tx_rate,
8812 						  ICE_BW_KBPS_DIVISOR);
8813 		if (ch->min_tx_rate)
8814 			ch->min_tx_rate = div_u64(ch->min_tx_rate,
8815 						  ICE_BW_KBPS_DIVISOR);
8816 
8817 		ret = ice_create_q_channel(vsi, ch);
8818 		if (ret) {
8819 			dev_err(ice_pf_to_dev(pf),
8820 				"failed creating channel TC:%d\n", i);
8821 			kfree(ch);
8822 			goto err_free;
8823 		}
8824 		list_add_tail(&ch->list, &vsi->ch_list);
8825 		vsi->tc_map_vsi[i] = ch->ch_vsi;
8826 		dev_dbg(ice_pf_to_dev(pf),
8827 			"successfully created channel: VSI %pK\n", ch->ch_vsi);
8828 	}
8829 	return 0;
8830 
8831 err_free:
8832 	ice_remove_q_channels(vsi, false);
8833 
8834 	return ret;
8835 }
8836 
8837 /**
8838  * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
8839  * @netdev: net device to configure
8840  * @type_data: TC offload data
8841  */
8842 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8843 {
8844 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8845 	struct ice_netdev_priv *np = netdev_priv(netdev);
8846 	struct ice_vsi *vsi = np->vsi;
8847 	struct ice_pf *pf = vsi->back;
8848 	u16 mode, ena_tc_qdisc = 0;
8849 	int cur_txq, cur_rxq;
8850 	u8 hw = 0, num_tcf;
8851 	struct device *dev;
8852 	int ret, i;
8853 
8854 	dev = ice_pf_to_dev(pf);
8855 	num_tcf = mqprio_qopt->qopt.num_tc;
8856 	hw = mqprio_qopt->qopt.hw;
8857 	mode = mqprio_qopt->mode;
8858 	if (!hw) {
8859 		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8860 		vsi->ch_rss_size = 0;
8861 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8862 		goto config_tcf;
8863 	}
8864 
8865 	/* Generate queue region map for number of TCF requested */
8866 	for (i = 0; i < num_tcf; i++)
8867 		ena_tc_qdisc |= BIT(i);
8868 
8869 	switch (mode) {
8870 	case TC_MQPRIO_MODE_CHANNEL:
8871 
8872 		if (pf->hw.port_info->is_custom_tx_enabled) {
8873 			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
8874 			return -EBUSY;
8875 		}
8876 		ice_tear_down_devlink_rate_tree(pf);
8877 
8878 		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8879 		if (ret) {
8880 			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8881 				   ret);
8882 			return ret;
8883 		}
8884 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8885 		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8886 		/* don't assume the state of hw_tc_offload during driver load
8887 		 * and set the flag for TC flower filters if hw_tc_offload is
8888 		 * already ON
8889 		 */
8890 		if (vsi->netdev->features & NETIF_F_HW_TC)
8891 			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8892 		break;
8893 	default:
8894 		return -EINVAL;
8895 	}
8896 
8897 config_tcf:
8898 
8899 	/* Requesting same TCF configuration as already enabled */
8900 	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8901 	    mode != TC_MQPRIO_MODE_CHANNEL)
8902 		return 0;
8903 
8904 	/* Pause VSI queues */
8905 	ice_dis_vsi(vsi, true);
8906 
8907 	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8908 		ice_remove_q_channels(vsi, true);
8909 
8911 		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
8912 				     num_online_cpus());
8913 		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
8914 				     num_online_cpus());
8915 	} else {
8916 		/* logic to rebuild the VSI, same as with ethtool -L */
8917 		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
8918 
8919 		for (i = 0; i < num_tcf; i++) {
8920 			if (!(ena_tc_qdisc & BIT(i)))
8921 				continue;
8922 
8923 			offset = vsi->mqprio_qopt.qopt.offset[i];
8924 			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
8925 			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
8926 		}
8927 		vsi->req_txq = offset + qcount_tx;
8928 		vsi->req_rxq = offset + qcount_rx;
8929 
8930 		/* store away original rss_size info, so that it gets reused
8931 		 * from ice_vsi_rebuild during the tc-qdisc delete stage - to
8932 		 * determine what the rss_size for the main VSI should be
8933 		 */
8934 		vsi->orig_rss_size = vsi->rss_size;
8935 	}
8936 
8937 	/* save the current Tx and Rx queue counts before calling VSI rebuild,
8938 	 * so we can fall back to them if the rebuild fails
8939 	 */
8940 	cur_txq = vsi->num_txq;
8941 	cur_rxq = vsi->num_rxq;
8942 
8943 	/* proceed with rebuilding the main VSI using the correct number of queues */
8944 	ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
8945 	if (ret) {
8946 		/* fallback to current number of queues */
8947 		dev_info(dev, "Rebuild failed with new queues, trying with current number of queues\n");
8948 		vsi->req_txq = cur_txq;
8949 		vsi->req_rxq = cur_rxq;
8950 		clear_bit(ICE_RESET_FAILED, pf->state);
8951 		if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
8952 			dev_err(dev, "Rebuild of main VSI failed again\n");
8953 			return ret;
8954 		}
8955 	}
8956 
8957 	vsi->all_numtc = num_tcf;
8958 	vsi->all_enatc = ena_tc_qdisc;
8959 	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
8960 	if (ret) {
8961 		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
8962 			   vsi->vsi_num);
8963 		goto exit;
8964 	}
8965 
8966 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8967 		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8968 		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
8969 
8970 		/* set TC0 rate limit if specified */
8971 		if (max_tx_rate || min_tx_rate) {
8972 			/* convert to Kbits/s */
8973 			if (max_tx_rate)
8974 				max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
8975 			if (min_tx_rate)
8976 				min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
8977 
8978 			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
8979 			if (!ret) {
8980 				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
8981 					max_tx_rate, min_tx_rate, vsi->vsi_num);
8982 			} else {
8983 				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
8984 					max_tx_rate, min_tx_rate, vsi->vsi_num);
8985 				goto exit;
8986 			}
8987 		}
8988 		ret = ice_create_q_channels(vsi);
8989 		if (ret) {
8990 			netdev_err(netdev, "failed configuring queue channels\n");
8991 			goto exit;
8992 		}
8993 		netdev_dbg(netdev, "successfully configured channels\n");
8995 	}
8996 
8997 	if (vsi->ch_rss_size)
8998 		ice_vsi_cfg_rss_lut_key(vsi);
8999 
9000 exit:
9001 	/* on error, reset all_numtc and all_enatc */
9002 	if (ret) {
9003 		vsi->all_numtc = 0;
9004 		vsi->all_enatc = 0;
9005 	}
9006 	/* resume VSI */
9007 	ice_ena_vsi(vsi, true);
9008 
9009 	return ret;
9010 }
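
/* Teardown sketch (interface name hypothetical): "tc qdisc del dev eth0
 * root" invokes the handler above with qopt.hw == 0, which clears
 * ICE_FLAG_TC_MQPRIO and removes the channel VSIs via
 * ice_remove_q_channels() before the main VSI is rebuilt with its
 * default queue counts.
 */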
9011 
9012 static LIST_HEAD(ice_block_cb_list);
9013 
9014 static int
9015 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
9016 	     void *type_data)
9017 {
9018 	struct ice_netdev_priv *np = netdev_priv(netdev);
9019 	struct ice_pf *pf = np->vsi->back;
9020 	int err;
9021 
9022 	switch (type) {
9023 	case TC_SETUP_BLOCK:
9024 		return flow_block_cb_setup_simple(type_data,
9025 						  &ice_block_cb_list,
9026 						  ice_setup_tc_block_cb,
9027 						  np, np, true);
9028 	case TC_SETUP_QDISC_MQPRIO:
9029 		/* set up the traffic classifier for the receive side */
9030 		mutex_lock(&pf->tc_mutex);
9031 		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
9032 		mutex_unlock(&pf->tc_mutex);
9033 		return err;
9034 	default:
9035 		return -EOPNOTSUPP;
9036 	}
9038 }
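
/* A TC flower filter steering traffic into an ADQ class, e.g. (names and
 * addresses hypothetical)
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 protocol ip ingress flower dst_ip 192.168.0.1 \
 *      ip_proto tcp dst_port 80 skip_sw hw_tc 1
 *
 * arrives here as TC_SETUP_BLOCK and is dispatched to
 * ice_setup_tc_block_cb() through flow_block_cb_setup_simple().
 */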
9039 
9040 static struct ice_indr_block_priv *
9041 ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
9042 			   struct net_device *netdev)
9043 {
9044 	struct ice_indr_block_priv *cb_priv;
9045 
9046 	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
9047 		if (!cb_priv->netdev)
9048 			return NULL;
9049 		if (cb_priv->netdev == netdev)
9050 			return cb_priv;
9051 	}
9052 	return NULL;
9053 }
9054 
9055 static int
9056 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
9057 			void *indr_priv)
9058 {
9059 	struct ice_indr_block_priv *priv = indr_priv;
9060 	struct ice_netdev_priv *np = priv->np;
9061 
9062 	switch (type) {
9063 	case TC_SETUP_CLSFLOWER:
9064 		return ice_setup_tc_cls_flower(np, priv->netdev,
9065 					       (struct flow_cls_offload *)
9066 					       type_data);
9067 	default:
9068 		return -EOPNOTSUPP;
9069 	}
9070 }
9071 
9072 static int
9073 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
9074 			struct ice_netdev_priv *np,
9075 			struct flow_block_offload *f, void *data,
9076 			void (*cleanup)(struct flow_block_cb *block_cb))
9077 {
9078 	struct ice_indr_block_priv *indr_priv;
9079 	struct flow_block_cb *block_cb;
9080 
9081 	if (!ice_is_tunnel_supported(netdev) &&
9082 	    !(is_vlan_dev(netdev) &&
9083 	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
9084 		return -EOPNOTSUPP;
9085 
9086 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9087 		return -EOPNOTSUPP;
9088 
9089 	switch (f->command) {
9090 	case FLOW_BLOCK_BIND:
9091 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
9092 		if (indr_priv)
9093 			return -EEXIST;
9094 
9095 		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
9096 		if (!indr_priv)
9097 			return -ENOMEM;
9098 
9099 		indr_priv->netdev = netdev;
9100 		indr_priv->np = np;
9101 		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
9102 
9103 		block_cb =
9104 			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
9105 						 indr_priv, indr_priv,
9106 						 ice_rep_indr_tc_block_unbind,
9107 						 f, netdev, sch, data, np,
9108 						 cleanup);
9109 
9110 		if (IS_ERR(block_cb)) {
9111 			list_del(&indr_priv->list);
9112 			kfree(indr_priv);
9113 			return PTR_ERR(block_cb);
9114 		}
9115 		flow_block_cb_add(block_cb, f);
9116 		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
9117 		break;
9118 	case FLOW_BLOCK_UNBIND:
9119 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
9120 		if (!indr_priv)
9121 			return -ENOENT;
9122 
9123 		block_cb = flow_block_cb_lookup(f->block,
9124 						ice_indr_setup_block_cb,
9125 						indr_priv);
9126 		if (!block_cb)
9127 			return -ENOENT;
9128 
9129 		flow_indr_block_cb_remove(block_cb, f);
9130 
9131 		list_del(&block_cb->driver_list);
9132 		break;
9133 	default:
9134 		return -EOPNOTSUPP;
9135 	}
9136 	return 0;
9137 }
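
/* Indirect blocks cover filters installed on netdevs the driver does not
 * own, e.g. (device names hypothetical) after
 *
 *   ip link add vxlan42 type vxlan id 42 dstport 4789 ...
 *   tc filter add dev vxlan42 ingress flower enc_key_id 42 ...
 *
 * the stack calls back into the handler above for the VXLAN netdev; the
 * FLOW_BLOCK_BIND case allocates an ice_indr_block_priv so that later
 * CLSFLOWER requests on that block can be routed to
 * ice_setup_tc_cls_flower().
 */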
9138 
9139 static int
9140 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
9141 		     void *cb_priv, enum tc_setup_type type, void *type_data,
9142 		     void *data,
9143 		     void (*cleanup)(struct flow_block_cb *block_cb))
9144 {
9145 	switch (type) {
9146 	case TC_SETUP_BLOCK:
9147 		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
9148 					       data, cleanup);
9149 
9150 	default:
9151 		return -EOPNOTSUPP;
9152 	}
9153 }
9154 
9155 /**
9156  * ice_open - Called when a network interface becomes active
9157  * @netdev: network interface device structure
9158  *
9159  * The open entry point is called when a network interface is made
9160  * active by the system (IFF_UP). At this point all resources needed
9161  * for transmit and receive operations are allocated, the interrupt
9162  * handler is registered with the OS, the netdev watchdog is enabled,
9163  * and the stack is notified that the interface is ready.
9164  *
9165  * Returns 0 on success, negative value on failure
9166  */
9167 int ice_open(struct net_device *netdev)
9168 {
9169 	struct ice_netdev_priv *np = netdev_priv(netdev);
9170 	struct ice_pf *pf = np->vsi->back;
9171 
9172 	if (ice_is_reset_in_progress(pf->state)) {
9173 		netdev_err(netdev, "can't open net device while reset is in progress\n");
9174 		return -EBUSY;
9175 	}
9176 
9177 	return ice_open_internal(netdev);
9178 }
9179 
9180 /**
9181  * ice_open_internal - Called when a network interface becomes active
9182  * @netdev: network interface device structure
9183  *
9184  * Internal ice_open implementation. Should not be called directly except by
9185  * ice_open and the reset handling routine.
9186  *
9187  * Returns 0 on success, negative value on failure
9188  */
9189 int ice_open_internal(struct net_device *netdev)
9190 {
9191 	struct ice_netdev_priv *np = netdev_priv(netdev);
9192 	struct ice_vsi *vsi = np->vsi;
9193 	struct ice_pf *pf = vsi->back;
9194 	struct ice_port_info *pi;
9195 	int err;
9196 
9197 	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
9198 		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
9199 		return -EIO;
9200 	}
9201 
9202 	netif_carrier_off(netdev);
9203 
9204 	pi = vsi->port_info;
9205 	err = ice_update_link_info(pi);
9206 	if (err) {
9207 		netdev_err(netdev, "Failed to get link info, error %d\n", err);
9208 		return err;
9209 	}
9210 
9211 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
9212 
9213 	/* Set the PHY up if there is media; otherwise turn the PHY off */
9214 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
9215 		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9216 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
9217 			err = ice_init_phy_user_cfg(pi);
9218 			if (err) {
9219 				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
9220 					   err);
9221 				return err;
9222 			}
9223 		}
9224 
9225 		err = ice_configure_phy(vsi);
9226 		if (err) {
9227 			netdev_err(netdev, "Failed to set physical link up, error %d\n",
9228 				   err);
9229 			return err;
9230 		}
9231 	} else {
9232 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9233 		ice_set_link(vsi, false);
9234 	}
9235 
9236 	err = ice_vsi_open(vsi);
9237 	if (err)
9238 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
9239 			   vsi->vsi_num, vsi->vsw->sw_id);
9240 
9241 	/* Update existing tunnels information */
9242 	udp_tunnel_get_rx_info(netdev);
9243 
9244 	return err;
9245 }
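
/* Note on the udp_tunnel_get_rx_info() call above: it asks the core UDP
 * tunnel infrastructure to replay the currently known tunnel ports (e.g.
 * VXLAN/GENEVE), so ports added while the interface was down are
 * reprogrammed into the hardware once it comes up.
 */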
9246 
9247 /**
9248  * ice_stop - Disables a network interface
9249  * @netdev: network interface device structure
9250  *
9251  * The stop entry point is called when an interface is de-activated by the OS,
9252  * and the netdevice enters the DOWN state. The hardware is still under the
9253  * driver's control, but the netdev interface is disabled.
9254  *
9255  * Returns success only - not allowed to fail
9256  */
9257 int ice_stop(struct net_device *netdev)
9258 {
9259 	struct ice_netdev_priv *np = netdev_priv(netdev);
9260 	struct ice_vsi *vsi = np->vsi;
9261 	struct ice_pf *pf = vsi->back;
9262 
9263 	if (ice_is_reset_in_progress(pf->state)) {
9264 		netdev_err(netdev, "can't stop net device while reset is in progress\n");
9265 		return -EBUSY;
9266 	}
9267 
9268 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
9269 		int link_err = ice_force_phys_link_state(vsi, false);
9270 
9271 		if (link_err) {
9272 			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
9273 				   vsi->vsi_num, link_err);
9274 			return -EIO;
9275 		}
9276 	}
9277 
9278 	ice_vsi_close(vsi);
9279 
9280 	return 0;
9281 }
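
/* The physical-link teardown in ice_stop() only runs when the
 * "link-down-on-close" private flag is set, e.g. (interface name
 * hypothetical):
 *
 *   ethtool --set-priv-flags eth0 link-down-on-close on
 */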
9282 
9283 /**
9284  * ice_features_check - Validate encapsulated packet conforms to limits
9285  * @skb: skb buffer
9286  * @netdev: This port's netdev
9287  * @features: Offload features that the stack believes apply
9288  */
9289 static netdev_features_t
9290 ice_features_check(struct sk_buff *skb,
9291 		   struct net_device __always_unused *netdev,
9292 		   netdev_features_t features)
9293 {
9294 	bool gso = skb_is_gso(skb);
9295 	size_t len;
9296 
9297 	/* No point in doing any of this if neither checksum nor GSO are
9298 	 * being requested for this frame. We can rule out both by just
9299 	 * checking for CHECKSUM_PARTIAL
9300 	 */
9301 	if (skb->ip_summed != CHECKSUM_PARTIAL)
9302 		return features;
9303 
9304 	/* We cannot support GSO if the MSS is going to be less than
9305 	 * 64 bytes. If it is, drop GSO support for this frame.
9306 	 */
9307 	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
9308 		features &= ~NETIF_F_GSO_MASK;
9309 
9310 	len = skb_network_offset(skb);
9311 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
9312 		goto out_rm_features;
9313 
9314 	len = skb_network_header_len(skb);
9315 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9316 		goto out_rm_features;
9317 
9318 	if (skb->encapsulation) {
9319 		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
9320 		 * the case of IPIP frames, the transport header pointer is
9321 		 * after the inner header! So check to make sure that this
9322 		 * is a GRE or UDP_TUNNEL frame before doing that math.
9323 		 */
9324 		if (gso && (skb_shinfo(skb)->gso_type &
9325 			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
9326 			len = skb_inner_network_header(skb) -
9327 			      skb_transport_header(skb);
9328 			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
9329 				goto out_rm_features;
9330 		}
9331 
9332 		len = skb_inner_network_header_len(skb);
9333 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9334 			goto out_rm_features;
9335 	}
9336 
9337 	return features;
9338 out_rm_features:
9339 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9340 }
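
/* Worked example of the checks above: a plain TCP/IPv4 frame with a
 * 14 byte L2 header (network offset 14, even) and a 20 byte IPv4 header
 * (even) keeps its offloads; any odd header length cannot be described
 * to the hardware (the Tx context descriptor presumably encodes header
 * lengths in 2 byte units), so checksum and GSO are cleared for it.
 */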
9341 
9342 static const struct net_device_ops ice_netdev_safe_mode_ops = {
9343 	.ndo_open = ice_open,
9344 	.ndo_stop = ice_stop,
9345 	.ndo_start_xmit = ice_start_xmit,
9346 	.ndo_set_mac_address = ice_set_mac_address,
9347 	.ndo_validate_addr = eth_validate_addr,
9348 	.ndo_change_mtu = ice_change_mtu,
9349 	.ndo_get_stats64 = ice_get_stats64,
9350 	.ndo_tx_timeout = ice_tx_timeout,
9351 	.ndo_bpf = ice_xdp_safe_mode,
9352 };
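
/* The reduced table above is used in safe mode (entered when the DDP
 * package cannot be loaded): basic open/stop/xmit and MTU/MAC handling
 * remain, while the VLAN, TC, VF and flow-steering callbacks from the
 * full table below are left out.
 */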
9353 
9354 static const struct net_device_ops ice_netdev_ops = {
9355 	.ndo_open = ice_open,
9356 	.ndo_stop = ice_stop,
9357 	.ndo_start_xmit = ice_start_xmit,
9358 	.ndo_select_queue = ice_select_queue,
9359 	.ndo_features_check = ice_features_check,
9360 	.ndo_fix_features = ice_fix_features,
9361 	.ndo_set_rx_mode = ice_set_rx_mode,
9362 	.ndo_set_mac_address = ice_set_mac_address,
9363 	.ndo_validate_addr = eth_validate_addr,
9364 	.ndo_change_mtu = ice_change_mtu,
9365 	.ndo_get_stats64 = ice_get_stats64,
9366 	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
9367 	.ndo_eth_ioctl = ice_eth_ioctl,
9368 	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
9369 	.ndo_set_vf_mac = ice_set_vf_mac,
9370 	.ndo_get_vf_config = ice_get_vf_cfg,
9371 	.ndo_set_vf_trust = ice_set_vf_trust,
9372 	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
9373 	.ndo_set_vf_link_state = ice_set_vf_link_state,
9374 	.ndo_get_vf_stats = ice_get_vf_stats,
9375 	.ndo_set_vf_rate = ice_set_vf_bw,
9376 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
9377 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
9378 	.ndo_setup_tc = ice_setup_tc,
9379 	.ndo_set_features = ice_set_features,
9380 	.ndo_bridge_getlink = ice_bridge_getlink,
9381 	.ndo_bridge_setlink = ice_bridge_setlink,
9382 	.ndo_fdb_add = ice_fdb_add,
9383 	.ndo_fdb_del = ice_fdb_del,
9384 #ifdef CONFIG_RFS_ACCEL
9385 	.ndo_rx_flow_steer = ice_rx_flow_steer,
9386 #endif
9387 	.ndo_tx_timeout = ice_tx_timeout,
9388 	.ndo_bpf = ice_xdp,
9389 	.ndo_xdp_xmit = ice_xdp_xmit,
9390 	.ndo_xsk_wakeup = ice_xsk_wakeup,
9391 };
9392