xref: /linux/drivers/net/ethernet/intel/ice/ice_main.c (revision fefe5dc4afeafe896c90d5b20b605f2759343c3b)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <generated/utsrelease.h>
9 #include "ice.h"
10 #include "ice_base.h"
11 #include "ice_lib.h"
12 #include "ice_fltr.h"
13 #include "ice_dcb_lib.h"
14 #include "ice_dcb_nl.h"
15 #include "ice_devlink.h"
16 /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
17  * ice tracepoint functions. This must be done exactly once across the
18  * ice driver.
19  */
20 #define CREATE_TRACE_POINTS
21 #include "ice_trace.h"
22 #include "ice_eswitch.h"
23 #include "ice_tc_lib.h"
24 #include "ice_vsi_vlan_ops.h"
25 #include <net/xdp_sock_drv.h>
26 
27 #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
28 static const char ice_driver_string[] = DRV_SUMMARY;
29 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
30 
31 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
32 #define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
33 #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
34 
35 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
36 MODULE_DESCRIPTION(DRV_SUMMARY);
37 MODULE_LICENSE("GPL v2");
38 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
39 
40 static int debug = -1;
41 module_param(debug, int, 0644);
42 #ifndef CONFIG_DYNAMIC_DEBUG
43 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
44 #else
45 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
46 #endif /* !CONFIG_DYNAMIC_DEBUG */
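
/* Usage note: the "debug" parameter can be supplied at module load time, for
 * example:
 *
 *	modprobe ice debug=16
 *
 * Plain values select the netif message level, while values of the form
 * 0x8XXXXXXX select the hardware debug_mask on builds without
 * CONFIG_DYNAMIC_DEBUG, as the parameter descriptions above indicate.
 */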
47 
48 DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
49 EXPORT_SYMBOL(ice_xdp_locking_key);
50 
51 /**
52  * ice_hw_to_dev - Get device pointer from the hardware structure
53  * @hw: pointer to the device HW structure
54  *
55  * Used to access the device pointer from compilation units which can't easily
56  * include the definition of struct ice_pf without leading to circular header
57  * dependencies.
58  */
59 struct device *ice_hw_to_dev(struct ice_hw *hw)
60 {
61 	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
62 
63 	return &pf->pdev->dev;
64 }
65 
66 static struct workqueue_struct *ice_wq;
67 struct workqueue_struct *ice_lag_wq;
68 static const struct net_device_ops ice_netdev_safe_mode_ops;
69 static const struct net_device_ops ice_netdev_ops;
70 
71 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
72 
73 static void ice_vsi_release_all(struct ice_pf *pf);
74 
75 static int ice_rebuild_channels(struct ice_pf *pf);
76 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
77 
78 static int
79 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
80 		     void *cb_priv, enum tc_setup_type type, void *type_data,
81 		     void *data,
82 		     void (*cleanup)(struct flow_block_cb *block_cb));
83 
84 bool netif_is_ice(const struct net_device *dev)
85 {
86 	return dev && (dev->netdev_ops == &ice_netdev_ops);
87 }
88 
89 /**
90  * ice_get_tx_pending - returns number of Tx descriptors not processed
91  * @ring: the ring of descriptors
92  */
93 static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
94 {
95 	u16 head, tail;
96 
97 	head = ring->next_to_clean;
98 	tail = ring->next_to_use;
99 
100 	if (head != tail)
101 		return (head < tail) ?
102 			tail - head : (tail + ring->count - head);
103 	return 0;
104 }
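
/* Worked example of the wraparound arithmetic above: with ring->count == 8,
 * head == 6 and tail == 2 the ring has wrapped, so the pending count is
 * tail + ring->count - head == 2 + 8 - 6 == 4 descriptors.
 */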
105 
106 /**
107  * ice_check_for_hang_subtask - check for and recover hung queues
108  * @pf: pointer to PF struct
109  */
110 static void ice_check_for_hang_subtask(struct ice_pf *pf)
111 {
112 	struct ice_vsi *vsi = NULL;
113 	struct ice_hw *hw;
114 	unsigned int i;
115 	int packets;
116 	u32 v;
117 
118 	ice_for_each_vsi(pf, v)
119 		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
120 			vsi = pf->vsi[v];
121 			break;
122 		}
123 
124 	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
125 		return;
126 
127 	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
128 		return;
129 
130 	hw = &vsi->back->hw;
131 
132 	ice_for_each_txq(vsi, i) {
133 		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
134 		struct ice_ring_stats *ring_stats;
135 
136 		if (!tx_ring)
137 			continue;
138 		if (ice_ring_ch_enabled(tx_ring))
139 			continue;
140 
141 		ring_stats = tx_ring->ring_stats;
142 		if (!ring_stats)
143 			continue;
144 
145 		if (tx_ring->desc) {
146 			/* If packet counter has not changed the queue is
147 			 * likely stalled, so force an interrupt for this
148 			 * queue.
149 			 *
150 			 * prev_pkt would be negative if there was no
151 			 * pending work.
152 			 */
153 			packets = ring_stats->stats.pkts & INT_MAX;
154 			if (ring_stats->tx_stats.prev_pkt == packets) {
155 				/* Trigger sw interrupt to revive the queue */
156 				ice_trigger_sw_intr(hw, tx_ring->q_vector);
157 				continue;
158 			}
159 
160 			/* Memory barrier between read of packet count and call
161 			 * to ice_get_tx_pending()
162 			 */
163 			smp_rmb();
164 			ring_stats->tx_stats.prev_pkt =
165 			    ice_get_tx_pending(tx_ring) ? packets : -1;
166 		}
167 	}
168 }
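
/* Illustrative timeline for the stall detection above: one service-task pass
 * records prev_pkt while descriptors are still pending; if the next pass sees
 * the same packet count with work still outstanding, the queue is assumed
 * stalled and a SW interrupt is fired to revive it.
 */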
169 
170 /**
171  * ice_init_mac_fltr - Set initial MAC filters
172  * @pf: board private structure
173  *
174  * Set initial set of MAC filters for PF VSI; configure filters for permanent
175  * address and broadcast address. If an error is encountered, netdevice will be
176  * unregistered.
177  */
178 static int ice_init_mac_fltr(struct ice_pf *pf)
179 {
180 	struct ice_vsi *vsi;
181 	u8 *perm_addr;
182 
183 	vsi = ice_get_main_vsi(pf);
184 	if (!vsi)
185 		return -EINVAL;
186 
187 	perm_addr = vsi->port_info->mac.perm_addr;
188 	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
189 }
190 
191 /**
192  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
193  * @netdev: the net device on which the sync is happening
194  * @addr: MAC address to sync
195  *
196  * This is a callback function which is called by the in-kernel device sync
197  * functions (like __dev_uc_sync, __dev_mc_sync, etc.). This function only
198  * populates the tmp_sync_list, which is later used by ice_add_mac to add the
199  * MAC filters to the hardware.
200  */
201 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
202 {
203 	struct ice_netdev_priv *np = netdev_priv(netdev);
204 	struct ice_vsi *vsi = np->vsi;
205 
206 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
207 				     ICE_FWD_TO_VSI))
208 		return -EINVAL;
209 
210 	return 0;
211 }
212 
213 /**
214  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
215  * @netdev: the net device on which the unsync is happening
216  * @addr: MAC address to unsync
217  *
218  * This is a callback function which is called by the in-kernel device unsync
219  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc.). This function only
220  * populates the tmp_unsync_list, which is later used by ice_remove_mac to
221  * delete the MAC filters from the hardware.
222  */
223 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
224 {
225 	struct ice_netdev_priv *np = netdev_priv(netdev);
226 	struct ice_vsi *vsi = np->vsi;
227 
228 	/* Under some circumstances, we might receive a request to delete our
229 	 * own device address from our uc list. Because we store the device
230 	 * address in the VSI's MAC filter list, we need to ignore such
231 	 * requests and not delete our device address from this list.
232 	 */
233 	if (ether_addr_equal(addr, netdev->dev_addr))
234 		return 0;
235 
236 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
237 				     ICE_FWD_TO_VSI))
238 		return -EINVAL;
239 
240 	return 0;
241 }
242 
243 /**
244  * ice_vsi_fltr_changed - check if filter state changed
245  * @vsi: VSI to be checked
246  *
247  * returns true if filter state has changed, false otherwise.
248  */
249 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
250 {
251 	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
252 	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
253 }
254 
255 /**
256  * ice_set_promisc - Enable promiscuous mode for a given VSI
257  * @vsi: the VSI being configured
258  * @promisc_m: mask of promiscuous config bits
260  */
261 static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
262 {
263 	int status;
264 
265 	if (vsi->type != ICE_VSI_PF)
266 		return 0;
267 
268 	if (ice_vsi_has_non_zero_vlans(vsi)) {
269 		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
270 		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
271 						       promisc_m);
272 	} else {
273 		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
274 						  promisc_m, 0);
275 	}
276 	if (status && status != -EEXIST)
277 		return status;
278 
279 	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
280 		   vsi->vsi_num, promisc_m);
281 	return 0;
282 }
283 
284 /**
285  * ice_clear_promisc - Disable promiscuous mode for a given VSI
286  * @vsi: the VSI being configured
287  * @promisc_m: mask of promiscuous config bits
289  */
290 static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
291 {
292 	int status;
293 
294 	if (vsi->type != ICE_VSI_PF)
295 		return 0;
296 
297 	if (ice_vsi_has_non_zero_vlans(vsi)) {
298 		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
299 		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
300 							 promisc_m);
301 	} else {
302 		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
303 						    promisc_m, 0);
304 	}
305 
306 	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
307 		   vsi->vsi_num, promisc_m);
308 	return status;
309 }
310 
311 /**
312  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
313  * @vsi: ptr to the VSI
314  *
315  * Push any outstanding VSI filter changes through the AdminQ.
316  */
317 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
318 {
319 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
320 	struct device *dev = ice_pf_to_dev(vsi->back);
321 	struct net_device *netdev = vsi->netdev;
322 	bool promisc_forced_on = false;
323 	struct ice_pf *pf = vsi->back;
324 	struct ice_hw *hw = &pf->hw;
325 	u32 changed_flags = 0;
326 	int err;
327 
328 	if (!vsi->netdev)
329 		return -EINVAL;
330 
331 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
332 		usleep_range(1000, 2000);
333 
334 	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
335 	vsi->current_netdev_flags = vsi->netdev->flags;
336 
337 	INIT_LIST_HEAD(&vsi->tmp_sync_list);
338 	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
339 
340 	if (ice_vsi_fltr_changed(vsi)) {
341 		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
342 		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
343 
344 		/* grab the netdev's addr_list_lock */
345 		netif_addr_lock_bh(netdev);
346 		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
347 			      ice_add_mac_to_unsync_list);
348 		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
349 			      ice_add_mac_to_unsync_list);
350 		/* our temp lists are populated. release lock */
351 		netif_addr_unlock_bh(netdev);
352 	}
353 
354 	/* Remove MAC addresses in the unsync list */
355 	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
356 	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
357 	if (err) {
358 		netdev_err(netdev, "Failed to delete MAC filters\n");
359 		/* if we failed because of alloc failures, just bail */
360 		if (err == -ENOMEM)
361 			goto out;
362 	}
363 
364 	/* Add MAC addresses in the sync list */
365 	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
366 	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
367 	/* If the filter was added successfully or already exists, do not
368 	 * report it as an error. Instead, continue processing the rest of
369 	 * the function.
370 	 */
371 	if (err && err != -EEXIST) {
372 		netdev_err(netdev, "Failed to add MAC filters\n");
373 		/* If there is no more space for new umac filters, VSI
374 		 * should go into promiscuous mode. There should be some
375 		 * space reserved for promiscuous filters.
376 		 */
377 		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
378 		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
379 				      vsi->state)) {
380 			promisc_forced_on = true;
381 			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
382 				    vsi->vsi_num);
383 		} else {
384 			goto out;
385 		}
386 	}
387 	err = 0;
388 	/* check for changes in promiscuous modes */
389 	if (changed_flags & IFF_ALLMULTI) {
390 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
391 			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
392 			if (err) {
393 				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
394 				goto out_promisc;
395 			}
396 		} else {
397 			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
398 			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
399 			if (err) {
400 				vsi->current_netdev_flags |= IFF_ALLMULTI;
401 				goto out_promisc;
402 			}
403 		}
404 	}
405 
406 	if ((changed_flags & IFF_PROMISC) || promisc_forced_on ||
407 	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
408 		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
409 		if (vsi->current_netdev_flags & IFF_PROMISC) {
410 			/* Apply Rx filter rule to get traffic from wire */
411 			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
412 				err = ice_set_dflt_vsi(vsi);
413 				if (err && err != -EEXIST) {
414 					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
415 						   err, vsi->vsi_num);
416 					vsi->current_netdev_flags &=
417 						~IFF_PROMISC;
418 					goto out_promisc;
419 				}
420 				err = 0;
421 				vlan_ops->dis_rx_filtering(vsi);
422 
423 				/* promiscuous mode implies allmulticast so
424 				 * that VSIs that are in promiscuous mode are
425 				 * subscribed to multicast packets coming to
426 				 * the port
427 				 */
428 				err = ice_set_promisc(vsi,
429 						      ICE_MCAST_PROMISC_BITS);
430 				if (err)
431 					goto out_promisc;
432 			}
433 		} else {
434 			/* Clear Rx filter to remove traffic from wire */
435 			if (ice_is_vsi_dflt_vsi(vsi)) {
436 				err = ice_clear_dflt_vsi(vsi);
437 				if (err) {
438 					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
439 						   err, vsi->vsi_num);
440 					vsi->current_netdev_flags |=
441 						IFF_PROMISC;
442 					goto out_promisc;
443 				}
444 				if (vsi->netdev->features &
445 				    NETIF_F_HW_VLAN_CTAG_FILTER)
446 					vlan_ops->ena_rx_filtering(vsi);
447 			}
448 
449 			/* disable allmulti here, but only if allmulti is not
450 			 * still enabled for the netdev
451 			 */
452 			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
453 				err = ice_clear_promisc(vsi,
454 							ICE_MCAST_PROMISC_BITS);
455 				if (err) {
456 					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
457 						   err, vsi->vsi_num);
458 				}
459 			}
460 		}
461 	}
462 	goto exit;
463 
464 out_promisc:
465 	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
466 	goto exit;
467 out:
468 	/* if something went wrong then set the changed flag so we try again */
469 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
470 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
471 exit:
472 	clear_bit(ICE_CFG_BUSY, vsi->state);
473 	return err;
474 }
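
/* Note on the labels above: "out" re-arms both *_FLTR_CHANGED bits so the MAC
 * sync is retried on the next service-task pass, "out_promisc" re-arms only
 * ICE_VSI_PROMISC_CHANGED, and "exit" releases ICE_CFG_BUSY in every case.
 */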
475 
476 /**
477  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
478  * @pf: board private structure
479  */
480 static void ice_sync_fltr_subtask(struct ice_pf *pf)
481 {
482 	int v;
483 
484 	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
485 		return;
486 
487 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
488 
489 	ice_for_each_vsi(pf, v)
490 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
491 		    ice_vsi_sync_fltr(pf->vsi[v])) {
492 			/* come back and try again later */
493 			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
494 			break;
495 		}
496 }
497 
498 /**
499  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
500  * @pf: the PF
501  * @locked: is the rtnl_lock already held
502  */
503 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
504 {
505 	int node;
506 	int v;
507 
508 	ice_for_each_vsi(pf, v)
509 		if (pf->vsi[v])
510 			ice_dis_vsi(pf->vsi[v], locked);
511 
512 	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
513 		pf->pf_agg_node[node].num_vsis = 0;
514 
515 	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
516 		pf->vf_agg_node[node].num_vsis = 0;
517 }
518 
519 /**
520  * ice_clear_sw_switch_recipes - clear switch recipes
521  * @pf: board private structure
522  *
523  * Mark switch recipes as not created in software structures. There are cases
524  * where rules (especially advanced rules) need to be restored, either re-read
525  * from hardware or added again, for example after a reset. The 'recp_created'
526  * flag prevents that and therefore needs to be cleared upfront.
527  */
528 static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
529 {
530 	struct ice_sw_recipe *recp;
531 	u8 i;
532 
533 	recp = pf->hw.switch_info->recp_list;
534 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
535 		recp[i].recp_created = false;
536 }
537 
538 /**
539  * ice_prepare_for_reset - prep for reset
540  * @pf: board private structure
541  * @reset_type: reset type requested
542  *
543  * Inform or close all dependent features in prep for reset.
544  */
545 static void
546 ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
547 {
548 	struct ice_hw *hw = &pf->hw;
549 	struct ice_vsi *vsi;
550 	struct ice_vf *vf;
551 	unsigned int bkt;
552 
553 	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
554 
555 	/* already prepared for reset */
556 	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
557 		return;
558 
559 	ice_unplug_aux_dev(pf);
560 
561 	/* Notify VFs of impending reset */
562 	if (ice_check_sq_alive(hw, &hw->mailboxq))
563 		ice_vc_notify_reset(pf);
564 
565 	/* Disable VFs until reset is completed */
566 	mutex_lock(&pf->vfs.table_lock);
567 	ice_for_each_vf(pf, bkt, vf)
568 		ice_set_vf_state_dis(vf);
569 	mutex_unlock(&pf->vfs.table_lock);
570 
571 	if (ice_is_eswitch_mode_switchdev(pf)) {
572 		if (reset_type != ICE_RESET_PFR)
573 			ice_clear_sw_switch_recipes(pf);
574 	}
575 
576 	/* release ADQ specific HW and SW resources */
577 	vsi = ice_get_main_vsi(pf);
578 	if (!vsi)
579 		goto skip;
580 
581 	/* to be on the safe side, reset orig_rss_size so that the normal
582 	 * flow of deciding rss_size can take precedence
583 	 */
584 	vsi->orig_rss_size = 0;
585 
586 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
587 		if (reset_type == ICE_RESET_PFR) {
588 			vsi->old_ena_tc = vsi->all_enatc;
589 			vsi->old_numtc = vsi->all_numtc;
590 		} else {
591 			ice_remove_q_channels(vsi, true);
592 
593 			/* channel rebuild is not supported for other reset
594 			 * types, so reset the related channel info
595 			 */
596 			vsi->old_ena_tc = 0;
597 			vsi->all_enatc = 0;
598 			vsi->old_numtc = 0;
599 			vsi->all_numtc = 0;
600 			vsi->req_txq = 0;
601 			vsi->req_rxq = 0;
602 			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
603 			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
604 		}
605 	}
606 skip:
607 
608 	/* clear SW filtering DB */
609 	ice_clear_hw_tbls(hw);
610 	/* disable the VSIs and their queues that are not already DOWN */
611 	ice_pf_dis_all_vsi(pf, false);
612 
613 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
614 		ice_ptp_prepare_for_reset(pf);
615 
616 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
617 		ice_gnss_exit(pf);
618 
619 	if (hw->port_info)
620 		ice_sched_clear_port(hw->port_info);
621 
622 	ice_shutdown_all_ctrlq(hw);
623 
624 	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
625 }
626 
627 /**
628  * ice_do_reset - Initiate one of many types of resets
629  * @pf: board private structure
630  * @reset_type: reset type requested before this function was called.
631  */
632 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
633 {
634 	struct device *dev = ice_pf_to_dev(pf);
635 	struct ice_hw *hw = &pf->hw;
636 
637 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
638 
639 	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
640 		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
641 		reset_type = ICE_RESET_CORER;
642 	}
643 
644 	ice_prepare_for_reset(pf, reset_type);
645 
646 	/* trigger the reset */
647 	if (ice_reset(hw, reset_type)) {
648 		dev_err(dev, "reset %d failed\n", reset_type);
649 		set_bit(ICE_RESET_FAILED, pf->state);
650 		clear_bit(ICE_RESET_OICR_RECV, pf->state);
651 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
652 		clear_bit(ICE_PFR_REQ, pf->state);
653 		clear_bit(ICE_CORER_REQ, pf->state);
654 		clear_bit(ICE_GLOBR_REQ, pf->state);
655 		wake_up(&pf->reset_wait_queue);
656 		return;
657 	}
658 
659 	/* PFR is a bit of a special case because it doesn't result in an OICR
660 	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
661 	 * associated state bits.
662 	 */
663 	if (reset_type == ICE_RESET_PFR) {
664 		pf->pfr_count++;
665 		ice_rebuild(pf, reset_type);
666 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
667 		clear_bit(ICE_PFR_REQ, pf->state);
668 		wake_up(&pf->reset_wait_queue);
669 		ice_reset_all_vfs(pf);
670 	}
671 }
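
/* Note: CORER/GLOBR are not rebuilt here; they raise an OICR interrupt and
 * the rebuild is finished later by ice_reset_subtask() once
 * ICE_RESET_OICR_RECV is observed.
 */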
672 
673 /**
674  * ice_reset_subtask - Set up for resetting the device and driver
675  * @pf: board private structure
676  */
677 static void ice_reset_subtask(struct ice_pf *pf)
678 {
679 	enum ice_reset_req reset_type = ICE_RESET_INVAL;
680 
681 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
682 	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
683 	 * of reset is pending and sets bits in pf->state indicating the reset
684 	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set, prepare
685 	 * for the pending reset if not already prepared (for PF software-
686 	 * initiated global resets the software should already be prepared, as
687 	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated by
688 	 * firmware or by software on other PFs, that bit is not set, so prepare
689 	 * now), then poll for reset completion, rebuild and return.
690 	 */
691 	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
692 		/* Perform the largest reset requested */
693 		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
694 			reset_type = ICE_RESET_CORER;
695 		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
696 			reset_type = ICE_RESET_GLOBR;
697 		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
698 			reset_type = ICE_RESET_EMPR;
699 		/* return if no valid reset type requested */
700 		if (reset_type == ICE_RESET_INVAL)
701 			return;
702 		ice_prepare_for_reset(pf, reset_type);
703 
704 		/* make sure we are ready to rebuild */
705 		if (ice_check_reset(&pf->hw)) {
706 			set_bit(ICE_RESET_FAILED, pf->state);
707 		} else {
708 			/* done with reset. start rebuild */
709 			pf->hw.reset_ongoing = false;
710 			ice_rebuild(pf, reset_type);
711 			/* clear bit to resume normal operations, but
712 			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
713 			 */
714 			clear_bit(ICE_RESET_OICR_RECV, pf->state);
715 			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
716 			clear_bit(ICE_PFR_REQ, pf->state);
717 			clear_bit(ICE_CORER_REQ, pf->state);
718 			clear_bit(ICE_GLOBR_REQ, pf->state);
719 			wake_up(&pf->reset_wait_queue);
720 			ice_reset_all_vfs(pf);
721 		}
722 
723 		return;
724 	}
725 
726 	/* No pending resets to finish processing. Check for new resets */
727 	if (test_bit(ICE_PFR_REQ, pf->state)) {
728 		reset_type = ICE_RESET_PFR;
729 		if (pf->lag && pf->lag->bonded) {
730 			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
731 			reset_type = ICE_RESET_CORER;
732 		}
733 	}
734 	if (test_bit(ICE_CORER_REQ, pf->state))
735 		reset_type = ICE_RESET_CORER;
736 	if (test_bit(ICE_GLOBR_REQ, pf->state))
737 		reset_type = ICE_RESET_GLOBR;
738 	/* If no valid reset type requested just return */
739 	if (reset_type == ICE_RESET_INVAL)
740 		return;
741 
742 	/* reset if not already down or busy */
743 	if (!test_bit(ICE_DOWN, pf->state) &&
744 	    !test_bit(ICE_CFG_BUSY, pf->state)) {
745 		ice_do_reset(pf, reset_type);
746 	}
747 }
748 
749 /**
750  * ice_print_topo_conflict - print topology conflict message
751  * @vsi: the VSI whose topology status is being checked
752  */
753 static void ice_print_topo_conflict(struct ice_vsi *vsi)
754 {
755 	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
756 	case ICE_AQ_LINK_TOPO_CONFLICT:
757 	case ICE_AQ_LINK_MEDIA_CONFLICT:
758 	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
759 	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
760 	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
761 		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
762 		break;
763 	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
764 		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
765 			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
766 		else
767 			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
768 		break;
769 	default:
770 		break;
771 	}
772 }
773 
774 /**
775  * ice_print_link_msg - print link up or down message
776  * @vsi: the VSI whose link status is being queried
777  * @isup: true if the link is now up, false if it is down
778  */
779 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
780 {
781 	struct ice_aqc_get_phy_caps_data *caps;
782 	const char *an_advertised;
783 	const char *fec_req;
784 	const char *speed;
785 	const char *fec;
786 	const char *fc;
787 	const char *an;
788 	int status;
789 
790 	if (!vsi)
791 		return;
792 
793 	if (vsi->current_isup == isup)
794 		return;
795 
796 	vsi->current_isup = isup;
797 
798 	if (!isup) {
799 		netdev_info(vsi->netdev, "NIC Link is Down\n");
800 		return;
801 	}
802 
803 	switch (vsi->port_info->phy.link_info.link_speed) {
804 	case ICE_AQ_LINK_SPEED_100GB:
805 		speed = "100 G";
806 		break;
807 	case ICE_AQ_LINK_SPEED_50GB:
808 		speed = "50 G";
809 		break;
810 	case ICE_AQ_LINK_SPEED_40GB:
811 		speed = "40 G";
812 		break;
813 	case ICE_AQ_LINK_SPEED_25GB:
814 		speed = "25 G";
815 		break;
816 	case ICE_AQ_LINK_SPEED_20GB:
817 		speed = "20 G";
818 		break;
819 	case ICE_AQ_LINK_SPEED_10GB:
820 		speed = "10 G";
821 		break;
822 	case ICE_AQ_LINK_SPEED_5GB:
823 		speed = "5 G";
824 		break;
825 	case ICE_AQ_LINK_SPEED_2500MB:
826 		speed = "2.5 G";
827 		break;
828 	case ICE_AQ_LINK_SPEED_1000MB:
829 		speed = "1 G";
830 		break;
831 	case ICE_AQ_LINK_SPEED_100MB:
832 		speed = "100 M";
833 		break;
834 	default:
835 		speed = "Unknown ";
836 		break;
837 	}
838 
839 	switch (vsi->port_info->fc.current_mode) {
840 	case ICE_FC_FULL:
841 		fc = "Rx/Tx";
842 		break;
843 	case ICE_FC_TX_PAUSE:
844 		fc = "Tx";
845 		break;
846 	case ICE_FC_RX_PAUSE:
847 		fc = "Rx";
848 		break;
849 	case ICE_FC_NONE:
850 		fc = "None";
851 		break;
852 	default:
853 		fc = "Unknown";
854 		break;
855 	}
856 
857 	/* Get FEC mode based on negotiated link info */
858 	switch (vsi->port_info->phy.link_info.fec_info) {
859 	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
860 	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
861 		fec = "RS-FEC";
862 		break;
863 	case ICE_AQ_LINK_25G_KR_FEC_EN:
864 		fec = "FC-FEC/BASE-R";
865 		break;
866 	default:
867 		fec = "NONE";
868 		break;
869 	}
870 
871 	/* check if autoneg completed; may be false if autoneg is not supported */
872 	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
873 		an = "True";
874 	else
875 		an = "False";
876 
877 	/* Get the requested FEC mode based on the last SW PHY configuration */
878 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
879 	if (!caps) {
880 		fec_req = "Unknown";
881 		an_advertised = "Unknown";
882 		goto done;
883 	}
884 
885 	status = ice_aq_get_phy_caps(vsi->port_info, false,
886 				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
887 	if (status)
888 		netdev_info(vsi->netdev, "Get phy capability failed.\n");
889 
890 	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
891 
892 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
893 	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
894 		fec_req = "RS-FEC";
895 	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
896 		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
897 		fec_req = "FC-FEC/BASE-R";
898 	else
899 		fec_req = "NONE";
900 
901 	kfree(caps);
902 
903 done:
904 	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
905 		    speed, fec_req, fec, an_advertised, an, fc);
906 	ice_print_topo_conflict(vsi);
907 }
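
/* Example of the resulting log line (illustrative device and values):
 *
 *	ice 0000:3b:00.0 eth0: NIC Link is up 100 Gbps Full Duplex,
 *	Requested FEC: RS-FEC, Negotiated FEC: RS-FEC, Autoneg Advertised:
 *	On, Autoneg Negotiated: True, Flow Control: None
 */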
908 
909 /**
910  * ice_vsi_link_event - update the VSI's netdev
911  * @vsi: the VSI on which the link event occurred
912  * @link_up: true if the link is up, false if it is down
913  */
914 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
915 {
916 	if (!vsi)
917 		return;
918 
919 	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
920 		return;
921 
922 	if (vsi->type == ICE_VSI_PF) {
923 		if (link_up == netif_carrier_ok(vsi->netdev))
924 			return;
925 
926 		if (link_up) {
927 			netif_carrier_on(vsi->netdev);
928 			netif_tx_wake_all_queues(vsi->netdev);
929 		} else {
930 			netif_carrier_off(vsi->netdev);
931 			netif_tx_stop_all_queues(vsi->netdev);
932 		}
933 	}
934 }
935 
936 /**
937  * ice_set_dflt_mib - send a default config MIB to the FW
938  * @pf: private PF struct
939  *
940  * This function sends a default configuration MIB to the FW.
941  *
942  * If this function errors out at any point, the driver is still able to
943  * function.  The main impact is that LFC may not operate as expected.
944  * Therefore an error in this function should be reported with a DBG
945  * message, and the driver should continue on with rebuild/re-enable.
946  */
947 static void ice_set_dflt_mib(struct ice_pf *pf)
948 {
949 	struct device *dev = ice_pf_to_dev(pf);
950 	u8 mib_type, *buf, *lldpmib = NULL;
951 	u16 len, typelen, offset = 0;
952 	struct ice_lldp_org_tlv *tlv;
953 	struct ice_hw *hw = &pf->hw;
954 	u32 ouisubtype;
955 
956 	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
957 	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
958 	if (!lldpmib) {
959 		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
960 			__func__);
961 		return;
962 	}
963 
964 	/* Add ETS CFG TLV */
965 	tlv = (struct ice_lldp_org_tlv *)lldpmib;
966 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
967 		   ICE_IEEE_ETS_TLV_LEN);
968 	tlv->typelen = htons(typelen);
969 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
970 		      ICE_IEEE_SUBTYPE_ETS_CFG);
971 	tlv->ouisubtype = htonl(ouisubtype);
972 
973 	buf = tlv->tlvinfo;
974 	buf[0] = 0;
975 
976 	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
977 	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
978 	 * Octets 13 - 20 are TSA values - leave as zeros
979 	 */
980 	buf[5] = 0x64;
981 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
982 	offset += len + 2;
983 	tlv = (struct ice_lldp_org_tlv *)
984 		((char *)tlv + sizeof(tlv->typelen) + len);
985 
986 	/* Add ETS REC TLV */
987 	buf = tlv->tlvinfo;
988 	tlv->typelen = htons(typelen);
989 
990 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
991 		      ICE_IEEE_SUBTYPE_ETS_REC);
992 	tlv->ouisubtype = htonl(ouisubtype);
993 
994 	/* First octet of buf is reserved
995 	 * Octets 1 - 4 map UP to TC - all UPs map to zero
996 	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
997 	 * Octets 13 - 20 are TSA value - leave as zeros
998 	 */
999 	buf[5] = 0x64;
1000 	offset += len + 2;
1001 	tlv = (struct ice_lldp_org_tlv *)
1002 		((char *)tlv + sizeof(tlv->typelen) + len);
1003 
1004 	/* Add PFC CFG TLV */
1005 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
1006 		   ICE_IEEE_PFC_TLV_LEN);
1007 	tlv->typelen = htons(typelen);
1008 
1009 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
1010 		      ICE_IEEE_SUBTYPE_PFC_CFG);
1011 	tlv->ouisubtype = htonl(ouisubtype);
1012 
1013 	/* Octet 0 (0x08) advertises PFC capability; octet 1 is left as all zeros - PFC disabled */
1014 	buf[0] = 0x08;
1015 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
1016 	offset += len + 2;
1017 
1018 	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
1019 		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
1020 
1021 	kfree(lldpmib);
1022 }
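
/* Sketch of the TLV header packing used above, assuming the standard LLDP
 * layout (7-bit type in the upper bits of typelen, 9-bit length in the lower
 * bits) and ICE_TLV_TYPE_ORG being the organizationally specific type 127:
 *
 *	typelen = (127 << 9) | 25 == 0xFE19 for the ETS CFG TLV header
 *
 * ouisubtype is packed the same way: the 3-byte OUI shifted left by 8 bits,
 * OR'd with the 1-byte subtype.
 */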
1023 
1024 /**
1025  * ice_check_phy_fw_load - check if PHY FW load failed
1026  * @pf: pointer to PF struct
1027  * @link_cfg_err: bitmap from the link info structure
1028  *
1029  * check if external PHY FW load failed and print an error message if it did
1030  */
1031 static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
1032 {
1033 	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
1034 		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1035 		return;
1036 	}
1037 
1038 	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
1039 		return;
1040 
1041 	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
1042 		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
1043 		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1044 	}
1045 }
1046 
1047 /**
1048  * ice_check_module_power - check module power level
1049  * @pf: pointer to PF struct
1050  * @link_cfg_err: bitmap from the link info structure
1051  *
1052  * check module power level returned by a previous call to aq_get_link_info
1053  * and print error messages if module power level is not supported
1054  */
1055 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
1056 {
1057 	/* if module power level is supported, clear the flag */
1058 	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
1059 			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
1060 		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1061 		return;
1062 	}
1063 
1064 	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
1065 	 * above block didn't clear this bit, there's nothing to do
1066 	 */
1067 	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
1068 		return;
1069 
1070 	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
1071 		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
1072 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1073 	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
1074 		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
1075 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1076 	}
1077 }
1078 
1079 /**
1080  * ice_check_link_cfg_err - check if link configuration failed
1081  * @pf: pointer to the PF struct
1082  * @link_cfg_err: bitmap from the link info structure
1083  *
1084  * Print a message if any link configuration failure is indicated by the
1085  * link_cfg_err bitmap from the link info structure
1086  */
1087 static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
1088 {
1089 	ice_check_module_power(pf, link_cfg_err);
1090 	ice_check_phy_fw_load(pf, link_cfg_err);
1091 }
1092 
1093 /**
1094  * ice_link_event - process the link event
1095  * @pf: PF that the link event is associated with
1096  * @pi: port_info for the port that the link event is associated with
1097  * @link_up: true if the physical link is up and false if it is down
1098  * @link_speed: current link speed received from the link event
1099  *
1100  * Returns 0 on success and negative on failure
1101  */
1102 static int
1103 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
1104 	       u16 link_speed)
1105 {
1106 	struct device *dev = ice_pf_to_dev(pf);
1107 	struct ice_phy_info *phy_info;
1108 	struct ice_vsi *vsi;
1109 	u16 old_link_speed;
1110 	bool old_link;
1111 	int status;
1112 
1113 	phy_info = &pi->phy;
1114 	phy_info->link_info_old = phy_info->link_info;
1115 
1116 	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
1117 	old_link_speed = phy_info->link_info_old.link_speed;
1118 
1119 	/* update the link info structures and re-enable link events;
1120 	 * don't bail on failure since other bookkeeping still needs to be done
1121 	 */
1122 	status = ice_update_link_info(pi);
1123 	if (status)
1124 		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
1125 			pi->lport, status,
1126 			ice_aq_str(pi->hw->adminq.sq_last_status));
1127 
1128 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
1129 
1130 	/* Check if the link state is up after updating link info, and treat
1131 	 * this event as an UP event since the link is actually UP now.
1132 	 */
1133 	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
1134 		link_up = true;
1135 
1136 	vsi = ice_get_main_vsi(pf);
1137 	if (!vsi || !vsi->port_info)
1138 		return -EINVAL;
1139 
1140 	/* turn off PHY if media was removed */
1141 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
1142 	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
1143 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
1144 		ice_set_link(vsi, false);
1145 	}
1146 
1147 	/* if the old link up/down and speed is the same as the new */
1148 	if (link_up == old_link && link_speed == old_link_speed)
1149 		return 0;
1150 
1151 	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
1152 
1153 	if (ice_is_dcb_active(pf)) {
1154 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1155 			ice_dcb_rebuild(pf);
1156 	} else {
1157 		if (link_up)
1158 			ice_set_dflt_mib(pf);
1159 	}
1160 	ice_vsi_link_event(vsi, link_up);
1161 	ice_print_link_msg(vsi, link_up);
1162 
1163 	ice_vc_notify_link_state(pf);
1164 
1165 	return 0;
1166 }
1167 
1168 /**
1169  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
1170  * @pf: board private structure
1171  */
1172 static void ice_watchdog_subtask(struct ice_pf *pf)
1173 {
1174 	int i;
1175 
1176 	/* if interface is down do nothing */
1177 	if (test_bit(ICE_DOWN, pf->state) ||
1178 	    test_bit(ICE_CFG_BUSY, pf->state))
1179 		return;
1180 
1181 	/* make sure we don't do these things too often */
1182 	if (time_before(jiffies,
1183 			pf->serv_tmr_prev + pf->serv_tmr_period))
1184 		return;
1185 
1186 	pf->serv_tmr_prev = jiffies;
1187 
1188 	/* Update the stats for active netdevs so the network stack
1189 	 * can look at updated numbers whenever it cares to
1190 	 */
1191 	ice_update_pf_stats(pf);
1192 	ice_for_each_vsi(pf, i)
1193 		if (pf->vsi[i] && pf->vsi[i]->netdev)
1194 			ice_update_vsi_stats(pf->vsi[i]);
1195 }
1196 
1197 /**
1198  * ice_init_link_events - enable/initialize link events
1199  * @pi: pointer to the port_info instance
1200  *
1201  * Returns -EIO on failure, 0 on success
1202  */
1203 static int ice_init_link_events(struct ice_port_info *pi)
1204 {
1205 	u16 mask;
1206 
1207 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1208 		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
1209 		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
1210 
1211 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1212 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1213 			pi->lport);
1214 		return -EIO;
1215 	}
1216 
1217 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1218 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1219 			pi->lport);
1220 		return -EIO;
1221 	}
1222 
1223 	return 0;
1224 }
1225 
1226 /**
1227  * ice_handle_link_event - handle link event via ARQ
1228  * @pf: PF that the link event is associated with
1229  * @event: event structure containing link status info
1230  */
1231 static int
1232 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1233 {
1234 	struct ice_aqc_get_link_status_data *link_data;
1235 	struct ice_port_info *port_info;
1236 	int status;
1237 
1238 	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1239 	port_info = pf->hw.port_info;
1240 	if (!port_info)
1241 		return -EINVAL;
1242 
1243 	status = ice_link_event(pf, port_info,
1244 				!!(link_data->link_info & ICE_AQ_LINK_UP),
1245 				le16_to_cpu(link_data->link_speed));
1246 	if (status)
1247 		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1248 			status);
1249 
1250 	return status;
1251 }
1252 
1253 /**
1254  * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
1255  * @pf: pointer to the PF private structure
1256  * @task: intermediate helper storage and identifier for waiting
1257  * @opcode: the opcode to wait for
1258  *
1259  * Prepares to wait for a specific AdminQ completion event on the ARQ for
1260  * a given PF. The actual wait is done by a call to ice_aq_wait_for_event().
1261  *
1262  * The calls are separated so the caller can register for the event before
1263  * sending the command, mitigating a race with the FW response.
1264  *
1265  * To obtain only the descriptor contents, pass a task->event with a null
1266  * msg_buf. If the complete data buffer is desired, allocate the
1267  * task->event.msg_buf with enough space ahead of time.
1268  */
1269 void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
1270 			   u16 opcode)
1271 {
1272 	INIT_HLIST_NODE(&task->entry);
1273 	task->opcode = opcode;
1274 	task->state = ICE_AQ_TASK_WAITING;
1275 
1276 	spin_lock_bh(&pf->aq_wait_lock);
1277 	hlist_add_head(&task->entry, &pf->aq_wait_list);
1278 	spin_unlock_bh(&pf->aq_wait_lock);
1279 }
1280 
1281 /**
1282  * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1283  * @pf: pointer to the PF private structure
1284  * @task: ptr prepared by ice_aq_prep_for_event()
1285  * @timeout: how long to wait, in jiffies
1286  *
1287  * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1288  * current thread will be put to sleep until the specified event occurs or
1289  * until the given timeout is reached.
1290  *
1291  * Returns: zero on success, or a negative error code on failure.
1292  */
1293 int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
1294 			  unsigned long timeout)
1295 {
1296 	enum ice_aq_task_state *state = &task->state;
1297 	struct device *dev = ice_pf_to_dev(pf);
1298 	unsigned long start = jiffies;
1299 	long ret;
1300 	int err;
1301 
1302 	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
1303 					       *state != ICE_AQ_TASK_WAITING,
1304 					       timeout);
1305 	switch (*state) {
1306 	case ICE_AQ_TASK_NOT_PREPARED:
1307 		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
1308 		err = -EINVAL;
1309 		break;
1310 	case ICE_AQ_TASK_WAITING:
1311 		err = ret < 0 ? ret : -ETIMEDOUT;
1312 		break;
1313 	case ICE_AQ_TASK_CANCELED:
1314 		err = ret < 0 ? ret : -ECANCELED;
1315 		break;
1316 	case ICE_AQ_TASK_COMPLETE:
1317 		err = ret < 0 ? ret : 0;
1318 		break;
1319 	default:
1320 		WARN(1, "Unexpected AdminQ wait task state %u", *state);
1321 		err = -EINVAL;
1322 		break;
1323 	}
1324 
1325 	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1326 		jiffies_to_msecs(jiffies - start),
1327 		jiffies_to_msecs(timeout),
1328 		task->opcode);
1329 
1330 	spin_lock_bh(&pf->aq_wait_lock);
1331 	hlist_del(&task->entry);
1332 	spin_unlock_bh(&pf->aq_wait_lock);
1333 
1334 	return err;
1335 }
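
/* Minimal usage sketch for the two-step wait API above (the AQ descriptor
 * "desc" is assumed to be prepared elsewhere; the opcode is only an example):
 *
 *	struct ice_aq_task task = {};
 *	int err;
 *
 *	ice_aq_prep_for_event(pf, &task, ice_aqc_opc_lldp_set_mib_change);
 *	err = ice_aq_send_cmd(&pf->hw, &desc, NULL, 0, NULL);
 *	if (!err)
 *		err = ice_aq_wait_for_event(pf, &task, HZ);
 *
 * Registering before sending the command closes the race described in the
 * kernel-doc of ice_aq_prep_for_event().
 */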
1336 
1337 /**
1338  * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1339  * @pf: pointer to the PF private structure
1340  * @opcode: the opcode of the event
1341  * @event: the event to check
1342  *
1343  * Loops over the current list of pending threads waiting for an AdminQ event.
1344  * For each matching task, copy the contents of the event into the task
1345  * structure and wake up the thread.
1346  *
1347  * If multiple threads wait for the same opcode, they will all be woken up.
1348  *
1349  * Note that event->msg_buf will only be duplicated if the event has a buffer
1350  * with enough space already allocated. Otherwise, only the descriptor and
1351  * message length will be copied.
1354  */
1355 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1356 				struct ice_rq_event_info *event)
1357 {
1358 	struct ice_rq_event_info *task_ev;
1359 	struct ice_aq_task *task;
1360 	bool found = false;
1361 
1362 	spin_lock_bh(&pf->aq_wait_lock);
1363 	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1364 		if (task->state != ICE_AQ_TASK_WAITING)
1365 			continue;
1366 		if (task->opcode != opcode)
1367 			continue;
1368 
1369 		task_ev = &task->event;
1370 		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
1371 		task_ev->msg_len = event->msg_len;
1372 
1373 		/* Only copy the data buffer if a destination was set */
1374 		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
1375 			memcpy(task_ev->msg_buf, event->msg_buf,
1376 			       event->buf_len);
1377 			task_ev->buf_len = event->buf_len;
1378 		}
1379 
1380 		task->state = ICE_AQ_TASK_COMPLETE;
1381 		found = true;
1382 	}
1383 	spin_unlock_bh(&pf->aq_wait_lock);
1384 
1385 	if (found)
1386 		wake_up(&pf->aq_wait_queue);
1387 }
1388 
1389 /**
1390  * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1391  * @pf: the PF private structure
1392  *
1393  * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1394  * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1395  */
1396 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1397 {
1398 	struct ice_aq_task *task;
1399 
1400 	spin_lock_bh(&pf->aq_wait_lock);
1401 	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1402 		task->state = ICE_AQ_TASK_CANCELED;
1403 	spin_unlock_bh(&pf->aq_wait_lock);
1404 
1405 	wake_up(&pf->aq_wait_queue);
1406 }
1407 
1408 #define ICE_MBX_OVERFLOW_WATERMARK 64
1409 
1410 /**
1411  * __ice_clean_ctrlq - helper function to clean controlq rings
1412  * @pf: ptr to struct ice_pf
1413  * @q_type: specific Control queue type
1414  */
1415 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1416 {
1417 	struct device *dev = ice_pf_to_dev(pf);
1418 	struct ice_rq_event_info event;
1419 	struct ice_hw *hw = &pf->hw;
1420 	struct ice_ctl_q_info *cq;
1421 	u16 pending, i = 0;
1422 	const char *qtype;
1423 	u32 oldval, val;
1424 
1425 	/* Do not clean control queue if/when PF reset fails */
1426 	if (test_bit(ICE_RESET_FAILED, pf->state))
1427 		return 0;
1428 
1429 	switch (q_type) {
1430 	case ICE_CTL_Q_ADMIN:
1431 		cq = &hw->adminq;
1432 		qtype = "Admin";
1433 		break;
1434 	case ICE_CTL_Q_SB:
1435 		cq = &hw->sbq;
1436 		qtype = "Sideband";
1437 		break;
1438 	case ICE_CTL_Q_MAILBOX:
1439 		cq = &hw->mailboxq;
1440 		qtype = "Mailbox";
1441 		/* we are going to try to detect a malicious VF, so set the
1442 		 * state to begin detection
1443 		 */
1444 		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1445 		break;
1446 	default:
1447 		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1448 		return 0;
1449 	}
1450 
1451 	/* check for error indications - the PF_xx_AxQLEN register layouts for
1452 	 * FW/MBX/SB are identical, so just use the PF_FW_AxQLEN defines.
1453 	 */
1454 	val = rd32(hw, cq->rq.len);
1455 	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1456 		   PF_FW_ARQLEN_ARQCRIT_M)) {
1457 		oldval = val;
1458 		if (val & PF_FW_ARQLEN_ARQVFE_M)
1459 			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1460 				qtype);
1461 		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1462 			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1463 				qtype);
1464 		}
1465 		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1466 			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1467 				qtype);
1468 		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1469 			 PF_FW_ARQLEN_ARQCRIT_M);
1470 		if (oldval != val)
1471 			wr32(hw, cq->rq.len, val);
1472 	}
1473 
1474 	val = rd32(hw, cq->sq.len);
1475 	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1476 		   PF_FW_ATQLEN_ATQCRIT_M)) {
1477 		oldval = val;
1478 		if (val & PF_FW_ATQLEN_ATQVFE_M)
1479 			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1480 				qtype);
1481 		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1482 			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1483 				qtype);
1484 		}
1485 		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1486 			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1487 				qtype);
1488 		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1489 			 PF_FW_ATQLEN_ATQCRIT_M);
1490 		if (oldval != val)
1491 			wr32(hw, cq->sq.len, val);
1492 	}
1493 
1494 	event.buf_len = cq->rq_buf_size;
1495 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1496 	if (!event.msg_buf)
1497 		return 0;
1498 
1499 	do {
1500 		struct ice_mbx_data data = {};
1501 		u16 opcode;
1502 		int ret;
1503 
1504 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1505 		if (ret == -EALREADY)
1506 			break;
1507 		if (ret) {
1508 			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
1509 				ret);
1510 			break;
1511 		}
1512 
1513 		opcode = le16_to_cpu(event.desc.opcode);
1514 
1515 		/* Notify any thread that might be waiting for this event */
1516 		ice_aq_check_events(pf, opcode, &event);
1517 
1518 		switch (opcode) {
1519 		case ice_aqc_opc_get_link_status:
1520 			if (ice_handle_link_event(pf, &event))
1521 				dev_err(dev, "Could not handle link event\n");
1522 			break;
1523 		case ice_aqc_opc_event_lan_overflow:
1524 			ice_vf_lan_overflow_event(pf, &event);
1525 			break;
1526 		case ice_mbx_opc_send_msg_to_pf:
1527 			data.num_msg_proc = i;
1528 			data.num_pending_arq = pending;
1529 			data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
1530 			data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
1531 
1532 			ice_vc_process_vf_msg(pf, &event, &data);
1533 			break;
1534 		case ice_aqc_opc_fw_logging:
1535 			ice_output_fw_log(hw, &event.desc, event.msg_buf);
1536 			break;
1537 		case ice_aqc_opc_lldp_set_mib_change:
1538 			ice_dcb_process_lldp_set_mib_change(pf, &event);
1539 			break;
1540 		default:
1541 			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1542 				qtype, opcode);
1543 			break;
1544 		}
1545 	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1546 
1547 	kfree(event.msg_buf);
1548 
1549 	return pending && (i == ICE_DFLT_IRQ_WORK);
1550 }
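
/* Note on the return value above: a nonzero return means the
 * ICE_DFLT_IRQ_WORK budget was exhausted with messages still pending; callers
 * use this to leave their *_EVENT_PENDING bit set so the queue is serviced
 * again on a later pass.
 */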
1551 
1552 /**
1553  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1554  * @hw: pointer to hardware info
1555  * @cq: control queue information
1556  *
1557  * returns true if there are pending messages in a queue, false if there aren't
1558  */
1559 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1560 {
1561 	u16 ntu;
1562 
1563 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1564 	return cq->rq.next_to_clean != ntu;
1565 }
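
/* Worked example: if the head register reads 5 while rq.next_to_clean is 3,
 * two received descriptors are still unprocessed and the function returns
 * true.
 */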
1566 
1567 /**
1568  * ice_clean_adminq_subtask - clean the AdminQ rings
1569  * @pf: board private structure
1570  */
1571 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1572 {
1573 	struct ice_hw *hw = &pf->hw;
1574 
1575 	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1576 		return;
1577 
1578 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1579 		return;
1580 
1581 	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1582 
1583 	/* There might be a situation where new messages arrive to a control
1584 	 * queue between processing the last message and clearing the
1585 	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1586 	 * ice_ctrlq_pending) and process new messages if any.
1587 	 */
1588 	if (ice_ctrlq_pending(hw, &hw->adminq))
1589 		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1590 
1591 	ice_flush(hw);
1592 }
1593 
1594 /**
1595  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1596  * @pf: board private structure
1597  */
1598 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1599 {
1600 	struct ice_hw *hw = &pf->hw;
1601 
1602 	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1603 		return;
1604 
1605 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1606 		return;
1607 
1608 	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1609 
1610 	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1611 		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1612 
1613 	ice_flush(hw);
1614 }
1615 
1616 /**
1617  * ice_clean_sbq_subtask - clean the Sideband Queue rings
1618  * @pf: board private structure
1619  */
1620 static void ice_clean_sbq_subtask(struct ice_pf *pf)
1621 {
1622 	struct ice_hw *hw = &pf->hw;
1623 
1624 	/* Nothing to do here if sideband queue is not supported */
1625 	if (!ice_is_sbq_supported(hw)) {
1626 		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1627 		return;
1628 	}
1629 
1630 	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1631 		return;
1632 
1633 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1634 		return;
1635 
1636 	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1637 
1638 	if (ice_ctrlq_pending(hw, &hw->sbq))
1639 		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1640 
1641 	ice_flush(hw);
1642 }
1643 
1644 /**
1645  * ice_service_task_schedule - schedule the service task to wake up
1646  * @pf: board private structure
1647  *
1648  * If not already scheduled, this puts the task into the work queue.
1649  */
1650 void ice_service_task_schedule(struct ice_pf *pf)
1651 {
1652 	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1653 	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1654 	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1655 		queue_work(ice_wq, &pf->serv_task);
1656 }
1657 
1658 /**
1659  * ice_service_task_complete - finish up the service task
1660  * @pf: board private structure
1661  */
1662 static void ice_service_task_complete(struct ice_pf *pf)
1663 {
1664 	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1665 
1666 	/* force memory (pf->state) to sync before next service task */
1667 	smp_mb__before_atomic();
1668 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1669 }
1670 
1671 /**
1672  * ice_service_task_stop - stop service task and cancel works
1673  * @pf: board private structure
1674  *
1675  * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1676  * 1 otherwise.
1677  */
1678 static int ice_service_task_stop(struct ice_pf *pf)
1679 {
1680 	int ret;
1681 
1682 	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1683 
1684 	if (pf->serv_tmr.function)
1685 		del_timer_sync(&pf->serv_tmr);
1686 	if (pf->serv_task.func)
1687 		cancel_work_sync(&pf->serv_task);
1688 
1689 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1690 	return ret;
1691 }
1692 
1693 /**
1694  * ice_service_task_restart - restart service task and schedule works
1695  * @pf: board private structure
1696  *
1697  * This function is needed for suspend and resume paths (e.g. WoL scenarios)
1698  */
1699 static void ice_service_task_restart(struct ice_pf *pf)
1700 {
1701 	clear_bit(ICE_SERVICE_DIS, pf->state);
1702 	ice_service_task_schedule(pf);
1703 }
1704 
1705 /**
1706  * ice_service_timer - timer callback to schedule service task
1707  * @t: pointer to timer_list
1708  */
1709 static void ice_service_timer(struct timer_list *t)
1710 {
1711 	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1712 
1713 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1714 	ice_service_task_schedule(pf);
1715 }
1716 
1717 /**
1718  * ice_handle_mdd_event - handle malicious driver detect event
1719  * @pf: pointer to the PF structure
1720  *
1721  * Called from service task. OICR interrupt handler indicates MDD event.
1722  * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1723  * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1724  * disable the queue, the PF can be configured to reset the VF using the
1725  * ethtool private flag mdd-auto-reset-vf.
1726  */
1727 static void ice_handle_mdd_event(struct ice_pf *pf)
1728 {
1729 	struct device *dev = ice_pf_to_dev(pf);
1730 	struct ice_hw *hw = &pf->hw;
1731 	struct ice_vf *vf;
1732 	unsigned int bkt;
1733 	u32 reg;
1734 
1735 	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1736 		/* Since the VF MDD event logging is rate limited, check if
1737 		 * there are pending MDD events.
1738 		 */
1739 		ice_print_vfs_mdd_events(pf);
1740 		return;
1741 	}
1742 
1743 	/* find what triggered an MDD event */
1744 	reg = rd32(hw, GL_MDET_TX_PQM);
1745 	if (reg & GL_MDET_TX_PQM_VALID_M) {
1746 		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1747 				GL_MDET_TX_PQM_PF_NUM_S;
1748 		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1749 				GL_MDET_TX_PQM_VF_NUM_S;
1750 		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1751 				GL_MDET_TX_PQM_MAL_TYPE_S;
1752 		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1753 				GL_MDET_TX_PQM_QNUM_S);
1754 
1755 		if (netif_msg_tx_err(pf))
1756 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1757 				 event, queue, pf_num, vf_num);
1758 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1759 	}
1760 
1761 	reg = rd32(hw, GL_MDET_TX_TCLAN);
1762 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1763 		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1764 				GL_MDET_TX_TCLAN_PF_NUM_S;
1765 		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1766 				GL_MDET_TX_TCLAN_VF_NUM_S;
1767 		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1768 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1769 		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1770 				GL_MDET_TX_TCLAN_QNUM_S);
1771 
1772 		if (netif_msg_tx_err(pf))
1773 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1774 				 event, queue, pf_num, vf_num);
1775 		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1776 	}
1777 
1778 	reg = rd32(hw, GL_MDET_RX);
1779 	if (reg & GL_MDET_RX_VALID_M) {
1780 		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1781 				GL_MDET_RX_PF_NUM_S;
1782 		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1783 				GL_MDET_RX_VF_NUM_S;
1784 		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1785 				GL_MDET_RX_MAL_TYPE_S;
1786 		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1787 				GL_MDET_RX_QNUM_S);
1788 
1789 		if (netif_msg_rx_err(pf))
1790 			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1791 				 event, queue, pf_num, vf_num);
1792 		wr32(hw, GL_MDET_RX, 0xffffffff);
1793 	}
1794 
1795 	/* check to see if this PF caused an MDD event */
1796 	reg = rd32(hw, PF_MDET_TX_PQM);
1797 	if (reg & PF_MDET_TX_PQM_VALID_M) {
1798 		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1799 		if (netif_msg_tx_err(pf))
1800 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1801 	}
1802 
1803 	reg = rd32(hw, PF_MDET_TX_TCLAN);
1804 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1805 		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1806 		if (netif_msg_tx_err(pf))
1807 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1808 	}
1809 
1810 	reg = rd32(hw, PF_MDET_RX);
1811 	if (reg & PF_MDET_RX_VALID_M) {
1812 		wr32(hw, PF_MDET_RX, 0xFFFF);
1813 		if (netif_msg_rx_err(pf))
1814 			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1815 	}
1816 
1817 	/* Check to see if one of the VFs caused an MDD event, and then
1818 	 * increment counters and set print pending
1819 	 */
1820 	mutex_lock(&pf->vfs.table_lock);
1821 	ice_for_each_vf(pf, bkt, vf) {
1822 		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
1823 		if (reg & VP_MDET_TX_PQM_VALID_M) {
1824 			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
1825 			vf->mdd_tx_events.count++;
1826 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1827 			if (netif_msg_tx_err(pf))
1828 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1829 					 vf->vf_id);
1830 		}
1831 
1832 		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
1833 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1834 			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
1835 			vf->mdd_tx_events.count++;
1836 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1837 			if (netif_msg_tx_err(pf))
1838 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1839 					 vf->vf_id);
1840 		}
1841 
1842 		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
1843 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1844 			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
1845 			vf->mdd_tx_events.count++;
1846 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1847 			if (netif_msg_tx_err(pf))
1848 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1849 					 vf->vf_id);
1850 		}
1851 
1852 		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1853 		if (reg & VP_MDET_RX_VALID_M) {
1854 			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1855 			vf->mdd_rx_events.count++;
1856 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1857 			if (netif_msg_rx_err(pf))
1858 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1859 					 vf->vf_id);
1860 
1861 			/* Since the queue is disabled on VF Rx MDD events, the
1862 			 * PF can be configured to reset the VF through ethtool
1863 			 * private flag mdd-auto-reset-vf.
1864 			 */
1865 			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1866 				/* VF MDD event counters will be cleared by
1867 				 * reset, so print the event prior to reset.
1868 				 */
1869 				ice_print_vf_rx_mdd_event(vf);
1870 				ice_reset_vf(vf, ICE_VF_RESET_LOCK);
1871 			}
1872 		}
1873 	}
1874 	mutex_unlock(&pf->vfs.table_lock);
1875 
1876 	ice_print_vfs_mdd_events(pf);
1877 }
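
/* The open-coded mask-and-shift decodes above could equivalently be written
 * with FIELD_GET() from <linux/bitfield.h>, which derives the shift from the
 * constant mask; a sketch for the GL_MDET_TX_PQM case:
 *
 *	u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
 *	u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
 *	u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
 *	u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);
 */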
1878 
1879 /**
1880  * ice_force_phys_link_state - Force the physical link state
1881  * @vsi: VSI to force the physical link state to up/down
1882  * @link_up: true/false indicates to set the physical link to up/down
1883  *
1884  * Force the physical link state by getting the current PHY capabilities from
1885  * hardware and setting the PHY config based on the determined capabilities. If
1886  * the link changes, a link event will be triggered because both the Enable
1887  * Automatic Link Update and LESM Enable bits are set when setting the PHY capabilities.
1888  *
1889  * Returns 0 on success, negative on failure
1890  */
1891 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1892 {
1893 	struct ice_aqc_get_phy_caps_data *pcaps;
1894 	struct ice_aqc_set_phy_cfg_data *cfg;
1895 	struct ice_port_info *pi;
1896 	struct device *dev;
1897 	int retcode;
1898 
1899 	if (!vsi || !vsi->port_info || !vsi->back)
1900 		return -EINVAL;
1901 	if (vsi->type != ICE_VSI_PF)
1902 		return 0;
1903 
1904 	dev = ice_pf_to_dev(vsi->back);
1905 
1906 	pi = vsi->port_info;
1907 
1908 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1909 	if (!pcaps)
1910 		return -ENOMEM;
1911 
1912 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1913 				      NULL);
1914 	if (retcode) {
1915 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1916 			vsi->vsi_num, retcode);
1917 		retcode = -EIO;
1918 		goto out;
1919 	}
1920 
1921 	/* No change in link */
1922 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1923 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1924 		goto out;
1925 
1926 	/* Use the current user PHY configuration. The current user PHY
1927 	 * configuration is initialized during probe from the PHY capabilities
1928 	 * (software mode), and updated on each set PHY configuration.
1929 	 */
1930 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1931 	if (!cfg) {
1932 		retcode = -ENOMEM;
1933 		goto out;
1934 	}
1935 
1936 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1937 	if (link_up)
1938 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1939 	else
1940 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1941 
1942 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1943 	if (retcode) {
1944 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1945 			vsi->vsi_num, retcode);
1946 		retcode = -EIO;
1947 	}
1948 
1949 	kfree(cfg);
1950 out:
1951 	kfree(pcaps);
1952 	return retcode;
1953 }
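
/* Typical use (sketch): a close path honoring link-down-on-close would force
 * the physical link down, along these lines:
 *
 *	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
 *		int link_err = ice_force_phys_link_state(vsi, false);
 *
 *		if (link_err)
 *			netdev_err(vsi->netdev, "Failed to set link down: %d\n",
 *				   link_err);
 *	}
 */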
1954 
1955 /**
1956  * ice_init_nvm_phy_type - Initialize the NVM PHY type
1957  * @pi: port info structure
1958  *
1959  * Initialize nvm_phy_type_[low|high] for link lenient mode support
1960  */
1961 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1962 {
1963 	struct ice_aqc_get_phy_caps_data *pcaps;
1964 	struct ice_pf *pf = pi->hw->back;
1965 	int err;
1966 
1967 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1968 	if (!pcaps)
1969 		return -ENOMEM;
1970 
1971 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
1972 				  pcaps, NULL);
1973 
1974 	if (err) {
1975 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1976 		goto out;
1977 	}
1978 
1979 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
1980 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
1981 
1982 out:
1983 	kfree(pcaps);
1984 	return err;
1985 }
1986 
1987 /**
1988  * ice_init_link_dflt_override - Initialize link default override
1989  * @pi: port info structure
1990  *
1991  * Initialize link default override and PHY total port shutdown during probe
1992  */
1993 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1994 {
1995 	struct ice_link_default_override_tlv *ldo;
1996 	struct ice_pf *pf = pi->hw->back;
1997 
1998 	ldo = &pf->link_dflt_override;
1999 	if (ice_get_link_default_override(ldo, pi))
2000 		return;
2001 
2002 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
2003 		return;
2004 
2005 	/* Enable Total Port Shutdown (override/replace link-down-on-close
2006 	 * ethtool private flag) for ports with Port Disable bit set.
2007 	 */
2008 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
2009 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
2010 }
2011 
2012 /**
2013  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2014  * @pi: port info structure
2015  *
2016  * If default override is enabled, initialize the user PHY cfg speed and FEC
2017  * settings using the default override mask from the NVM.
2018  *
2019  * The PHY should only be configured with the default override settings the
2020  * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2021  * is used to indicate that the user PHY cfg default override is initialized
2022  * and the PHY has not been configured with the default override settings. The
2023  * state is set here, and cleared in ice_configure_phy the first time the PHY is
2024  * configured.
2025  *
2026  * This function should be called only if the FW doesn't support default
2027  * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2028  */
2029 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2030 {
2031 	struct ice_link_default_override_tlv *ldo;
2032 	struct ice_aqc_set_phy_cfg_data *cfg;
2033 	struct ice_phy_info *phy = &pi->phy;
2034 	struct ice_pf *pf = pi->hw->back;
2035 
2036 	ldo = &pf->link_dflt_override;
2037 
2038 	/* If link default override is enabled, use it to mask the NVM PHY
2039 	 * capabilities for the default speed and FEC configuration.
2040 	 */
2041 	cfg = &phy->curr_user_phy_cfg;
2042 
2043 	if (ldo->phy_type_low || ldo->phy_type_high) {
2044 		cfg->phy_type_low = pf->nvm_phy_type_lo &
2045 				    cpu_to_le64(ldo->phy_type_low);
2046 		cfg->phy_type_high = pf->nvm_phy_type_hi &
2047 				     cpu_to_le64(ldo->phy_type_high);
2048 	}
2049 	cfg->link_fec_opt = ldo->fec_options;
2050 	phy->curr_user_fec_req = ICE_FEC_AUTO;
2051 
2052 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2053 }
2054 
2055 /**
2056  * ice_init_phy_user_cfg - Initialize the PHY user configuration
2057  * @pi: port info structure
2058  *
2059  * Initialize the current user PHY configuration, and the requested speed,
2060  * FEC, and FC modes to their defaults. The PHY defaults come from the get
2061  * PHY capabilities response for the topology with media, so call this when
2062  * media first becomes available. An error is returned if called when media
2063  * is not available. The PHY initialization completed state is set here.
2064  *
2065  * These configurations are used when setting the PHY configuration; the
2066  * user PHY configuration is updated on each set PHY configuration.
2067  * Returns 0 on success, negative on failure
2068  */
2069 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2070 {
2071 	struct ice_aqc_get_phy_caps_data *pcaps;
2072 	struct ice_phy_info *phy = &pi->phy;
2073 	struct ice_pf *pf = pi->hw->back;
2074 	int err;
2075 
2076 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2077 		return -EIO;
2078 
2079 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2080 	if (!pcaps)
2081 		return -ENOMEM;
2082 
2083 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2084 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2085 					  pcaps, NULL);
2086 	else
2087 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2088 					  pcaps, NULL);
2089 	if (err) {
2090 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2091 		goto err_out;
2092 	}
2093 
2094 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2095 
2096 	/* check if lenient mode is supported and enabled */
2097 	if (ice_fw_supports_link_override(pi->hw) &&
2098 	    !(pcaps->module_compliance_enforcement &
2099 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2100 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2101 
2102 		/* if the FW supports default PHY configuration mode, then the driver
2103 		 * does not have to apply link override settings. If not,
2104 		 * initialize user PHY configuration with link override values
2105 		 */
2106 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2107 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2108 			ice_init_phy_cfg_dflt_override(pi);
2109 			goto out;
2110 		}
2111 	}
2112 
2113 	/* if link default override is not enabled, set user flow control and
2114 	 * FEC settings based on what get_phy_caps returned
2115 	 */
2116 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2117 						      pcaps->link_fec_options);
2118 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2119 
2120 out:
2121 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2122 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2123 err_out:
2124 	kfree(pcaps);
2125 	return err;
2126 }
2127 
2128 /**
2129  * ice_configure_phy - configure PHY
2130  * @vsi: VSI of PHY
2131  *
2132  * Set the PHY configuration. If the current PHY configuration is the same as
2133  * the curr_user_phy_cfg, then do nothing to avoid a link flap. Otherwise,
2134  * configure the PHY based on the get PHY capabilities for the topology with media.
2135  */
2136 static int ice_configure_phy(struct ice_vsi *vsi)
2137 {
2138 	struct device *dev = ice_pf_to_dev(vsi->back);
2139 	struct ice_port_info *pi = vsi->port_info;
2140 	struct ice_aqc_get_phy_caps_data *pcaps;
2141 	struct ice_aqc_set_phy_cfg_data *cfg;
2142 	struct ice_phy_info *phy = &pi->phy;
2143 	struct ice_pf *pf = vsi->back;
2144 	int err;
2145 
2146 	/* Ensure we have media as we cannot configure a medialess port */
2147 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2148 		return -EPERM;
2149 
2150 	ice_print_topo_conflict(vsi);
2151 
2152 	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2153 	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2154 		return -EPERM;
2155 
2156 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2157 		return ice_force_phys_link_state(vsi, true);
2158 
2159 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2160 	if (!pcaps)
2161 		return -ENOMEM;
2162 
2163 	/* Get current PHY config */
2164 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2165 				  NULL);
2166 	if (err) {
2167 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2168 			vsi->vsi_num, err);
2169 		goto done;
2170 	}
2171 
2172 	/* If PHY enable link is configured and configuration has not changed,
2173 	 * there's nothing to do
2174 	 */
2175 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2176 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2177 		goto done;
2178 
2179 	/* Use PHY topology as baseline for configuration */
2180 	memset(pcaps, 0, sizeof(*pcaps));
2181 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2182 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2183 					  pcaps, NULL);
2184 	else
2185 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2186 					  pcaps, NULL);
2187 	if (err) {
2188 		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2189 			vsi->vsi_num, err);
2190 		goto done;
2191 	}
2192 
2193 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2194 	if (!cfg) {
2195 		err = -ENOMEM;
2196 		goto done;
2197 	}
2198 
2199 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2200 
2201 	/* Speed - If default override pending, use curr_user_phy_cfg set in
2202 	 * ice_init_phy_cfg_dflt_override.
2203 	 */
2204 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2205 			       vsi->back->state)) {
2206 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2207 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2208 	} else {
2209 		u64 phy_low = 0, phy_high = 0;
2210 
2211 		ice_update_phy_type(&phy_low, &phy_high,
2212 				    pi->phy.curr_user_speed_req);
2213 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2214 		cfg->phy_type_high = pcaps->phy_type_high &
2215 				     cpu_to_le64(phy_high);
2216 	}
2217 
2218 	/* Can't provide what was requested; use PHY capabilities */
2219 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2220 		cfg->phy_type_low = pcaps->phy_type_low;
2221 		cfg->phy_type_high = pcaps->phy_type_high;
2222 	}
2223 
2224 	/* FEC */
2225 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2226 
2227 	/* Can't provide what was requested; use PHY capabilities */
2228 	if (cfg->link_fec_opt !=
2229 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2230 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2231 		cfg->link_fec_opt = pcaps->link_fec_options;
2232 	}
2233 
2234 	/* Flow Control - always supported; no need to check against
2235 	 * capabilities
2236 	 */
2237 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2238 
2239 	/* Enable link and link update */
2240 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2241 
2242 	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2243 	if (err)
2244 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2245 			vsi->vsi_num, err);
2246 
2247 	kfree(cfg);
2248 done:
2249 	kfree(pcaps);
2250 	return err;
2251 }
2252 
2253 /**
2254  * ice_check_media_subtask - Check for media
2255  * @pf: pointer to PF struct
2256  *
2257  * If media is available, initialize the PHY user configuration if that has
2258  * not been done yet, and configure the PHY if the interface is up.
2259  */
2260 static void ice_check_media_subtask(struct ice_pf *pf)
2261 {
2262 	struct ice_port_info *pi;
2263 	struct ice_vsi *vsi;
2264 	int err;
2265 
2266 	/* No need to check for media if it's already present */
2267 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2268 		return;
2269 
2270 	vsi = ice_get_main_vsi(pf);
2271 	if (!vsi)
2272 		return;
2273 
2274 	/* Refresh link info and check if media is present */
2275 	pi = vsi->port_info;
2276 	err = ice_update_link_info(pi);
2277 	if (err)
2278 		return;
2279 
2280 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2281 
2282 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2283 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2284 			ice_init_phy_user_cfg(pi);
2285 
2286 		/* PHY settings are reset on media insertion, reconfigure
2287 		 * PHY to preserve settings.
2288 		 */
2289 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2290 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2291 			return;
2292 
2293 		err = ice_configure_phy(vsi);
2294 		if (!err)
2295 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2296 
2297 		/* A Link Status Event will be generated; the event handler
2298 		 * will complete bringing the interface up
2299 		 */
2300 	}
2301 }
2302 
2303 /**
2304  * ice_service_task - manage and run subtasks
2305  * @work: pointer to work_struct contained by the PF struct
2306  */
2307 static void ice_service_task(struct work_struct *work)
2308 {
2309 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2310 	unsigned long start_time = jiffies;
2311 
2312 	/* subtasks */
2313 
2314 	/* process reset requests first */
2315 	ice_reset_subtask(pf);
2316 
2317 	/* bail if a reset/recovery cycle is pending or rebuild failed */
2318 	if (ice_is_reset_in_progress(pf->state) ||
2319 	    test_bit(ICE_SUSPENDED, pf->state) ||
2320 	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2321 		ice_service_task_complete(pf);
2322 		return;
2323 	}
2324 
2325 	if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2326 		struct iidc_event *event;
2327 
2328 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2329 		if (event) {
2330 			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2331 			/* report the entire OICR value to AUX driver */
2332 			swap(event->reg, pf->oicr_err_reg);
2333 			ice_send_event_to_aux(pf, event);
2334 			kfree(event);
2335 		}
2336 	}
2337 
2338 	/* Unplug the aux device on request; if an unplug request came in
2339 	 * while a plug request was being processed, this handles it
2340 	 */
2341 	if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2342 		ice_unplug_aux_dev(pf);
2343 
2344 	/* Plug aux device per request */
2345 	if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2346 		ice_plug_aux_dev(pf);
2347 
2348 	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2349 		struct iidc_event *event;
2350 
2351 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2352 		if (event) {
2353 			set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2354 			ice_send_event_to_aux(pf, event);
2355 			kfree(event);
2356 		}
2357 	}
2358 
2359 	ice_clean_adminq_subtask(pf);
2360 	ice_check_media_subtask(pf);
2361 	ice_check_for_hang_subtask(pf);
2362 	ice_sync_fltr_subtask(pf);
2363 	ice_handle_mdd_event(pf);
2364 	ice_watchdog_subtask(pf);
2365 
2366 	if (ice_is_safe_mode(pf)) {
2367 		ice_service_task_complete(pf);
2368 		return;
2369 	}
2370 
2371 	ice_process_vflr_event(pf);
2372 	ice_clean_mailboxq_subtask(pf);
2373 	ice_clean_sbq_subtask(pf);
2374 	ice_sync_arfs_fltrs(pf);
2375 	ice_flush_fdir_ctx(pf);
2376 
2377 	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2378 	ice_service_task_complete(pf);
2379 
2380 	/* If the tasks have taken longer than one service timer period
2381 	 * or there is more work to be done, reset the service timer to
2382 	 * schedule the service task now.
2383 	 */
2384 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2385 	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2386 	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2387 	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2388 	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2389 	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2390 	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2391 		mod_timer(&pf->serv_tmr, jiffies);
2392 }
2393 
2394 /**
2395  * ice_set_ctrlq_len - helper function to set controlq length
2396  * @hw: pointer to the HW instance
2397  */
2398 static void ice_set_ctrlq_len(struct ice_hw *hw)
2399 {
2400 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2401 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2402 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2403 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2404 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2405 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2406 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2407 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2408 	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2409 	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2410 	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2411 	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2412 }
2413 
2414 /**
2415  * ice_schedule_reset - schedule a reset
2416  * @pf: board private structure
2417  * @reset: reset being requested
2418  */
2419 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2420 {
2421 	struct device *dev = ice_pf_to_dev(pf);
2422 
2423 	/* bail out if earlier reset has failed */
2424 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2425 		dev_dbg(dev, "earlier reset has failed\n");
2426 		return -EIO;
2427 	}
2428 	/* bail if reset/recovery already in progress */
2429 	if (ice_is_reset_in_progress(pf->state)) {
2430 		dev_dbg(dev, "Reset already in progress\n");
2431 		return -EBUSY;
2432 	}
2433 
2434 	switch (reset) {
2435 	case ICE_RESET_PFR:
2436 		set_bit(ICE_PFR_REQ, pf->state);
2437 		break;
2438 	case ICE_RESET_CORER:
2439 		set_bit(ICE_CORER_REQ, pf->state);
2440 		break;
2441 	case ICE_RESET_GLOBR:
2442 		set_bit(ICE_GLOBR_REQ, pf->state);
2443 		break;
2444 	default:
2445 		return -EINVAL;
2446 	}
2447 
2448 	ice_service_task_schedule(pf);
2449 	return 0;
2450 }
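
/* Example (sketch): requesting a PF reset from a subtask while tolerating a
 * reset that is already pending:
 *
 *	int err = ice_schedule_reset(pf, ICE_RESET_PFR);
 *
 *	if (err && err != -EBUSY)
 *		dev_err(ice_pf_to_dev(pf), "failed to schedule PFR: %d\n", err);
 */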
2451 
2452 /**
2453  * ice_irq_affinity_notify - Callback for affinity changes
2454  * @notify: context as to what irq was changed
2455  * @mask: the new affinity mask
2456  *
2457  * This is a callback function used by the irq_set_affinity_notifier function
2458  * so that we may register to receive changes to the irq affinity masks.
2459  */
2460 static void
2461 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2462 			const cpumask_t *mask)
2463 {
2464 	struct ice_q_vector *q_vector =
2465 		container_of(notify, struct ice_q_vector, affinity_notify);
2466 
2467 	cpumask_copy(&q_vector->affinity_mask, mask);
2468 }
2469 
2470 /**
2471  * ice_irq_affinity_release - Callback for affinity notifier release
2472  * @ref: internal core kernel usage
2473  *
2474  * This is a callback function used by the irq_set_affinity_notifier function
2475  * to inform the current notification subscriber that they will no longer
2476  * receive notifications.
2477  */
2478 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2479 
2480 /**
2481  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2482  * @vsi: the VSI being configured
2483  */
2484 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2485 {
2486 	struct ice_hw *hw = &vsi->back->hw;
2487 	int i;
2488 
2489 	ice_for_each_q_vector(vsi, i)
2490 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2491 
2492 	ice_flush(hw);
2493 	return 0;
2494 }
2495 
2496 /**
2497  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2498  * @vsi: the VSI being configured
2499  * @basename: name for the vector
2500  */
2501 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2502 {
2503 	int q_vectors = vsi->num_q_vectors;
2504 	struct ice_pf *pf = vsi->back;
2505 	struct device *dev;
2506 	int rx_int_idx = 0;
2507 	int tx_int_idx = 0;
2508 	int vector, err;
2509 	int irq_num;
2510 
2511 	dev = ice_pf_to_dev(pf);
2512 	for (vector = 0; vector < q_vectors; vector++) {
2513 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2514 
2515 		irq_num = q_vector->irq.virq;
2516 
2517 		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2518 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2519 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2520 			tx_int_idx++;
2521 		} else if (q_vector->rx.rx_ring) {
2522 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2523 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2524 		} else if (q_vector->tx.tx_ring) {
2525 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2526 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2527 		} else {
2528 			/* skip this unused q_vector */
2529 			continue;
2530 		}
2531 		if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2532 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2533 					       IRQF_SHARED, q_vector->name,
2534 					       q_vector);
2535 		else
2536 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2537 					       0, q_vector->name, q_vector);
2538 		if (err) {
2539 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2540 				   err);
2541 			goto free_q_irqs;
2542 		}
2543 
2544 		/* register for affinity change notifications */
2545 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2546 			struct irq_affinity_notify *affinity_notify;
2547 
2548 			affinity_notify = &q_vector->affinity_notify;
2549 			affinity_notify->notify = ice_irq_affinity_notify;
2550 			affinity_notify->release = ice_irq_affinity_release;
2551 			irq_set_affinity_notifier(irq_num, affinity_notify);
2552 		}
2553 
2554 		/* assign the mask for this irq */
2555 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2556 	}
2557 
2558 	err = ice_set_cpu_rx_rmap(vsi);
2559 	if (err) {
2560 		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2561 			   vsi->vsi_num, ERR_PTR(err));
2562 		goto free_q_irqs;
2563 	}
2564 
2565 	vsi->irqs_ready = true;
2566 	return 0;
2567 
2568 free_q_irqs:
2569 	while (vector--) {
2570 		irq_num = vsi->q_vectors[vector]->irq.virq;
2571 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2572 			irq_set_affinity_notifier(irq_num, NULL);
2573 		irq_set_affinity_hint(irq_num, NULL);
2574 		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2575 	}
2576 	return err;
2577 }
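
/* Sketch of a caller: the basename is typically built from the driver and
 * netdev names so each vector reads naturally in /proc/interrupts (e.g.
 * "ice-eth0-TxRx-0"); the buffer size macro below is the one assumed to be
 * used elsewhere in the driver:
 *
 *	char int_name[ICE_INT_NAME_STR_LEN];
 *
 *	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
 *		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
 *	err = ice_vsi_req_irq_msix(vsi, int_name);
 */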
2578 
2579 /**
2580  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2581  * @vsi: VSI to setup Tx rings used by XDP
2582  *
2583  * Return 0 on success and negative value on error
2584  */
2585 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2586 {
2587 	struct device *dev = ice_pf_to_dev(vsi->back);
2588 	struct ice_tx_desc *tx_desc;
2589 	int i, j;
2590 
2591 	ice_for_each_xdp_txq(vsi, i) {
2592 		u16 xdp_q_idx = vsi->alloc_txq + i;
2593 		struct ice_ring_stats *ring_stats;
2594 		struct ice_tx_ring *xdp_ring;
2595 
2596 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2597 		if (!xdp_ring)
2598 			goto free_xdp_rings;
2599 
2600 		ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2601 		if (!ring_stats) {
2602 			ice_free_tx_ring(xdp_ring);
2603 			goto free_xdp_rings;
2604 		}
2605 
2606 		xdp_ring->ring_stats = ring_stats;
2607 		xdp_ring->q_index = xdp_q_idx;
2608 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2609 		xdp_ring->vsi = vsi;
2610 		xdp_ring->netdev = NULL;
2611 		xdp_ring->dev = dev;
2612 		xdp_ring->count = vsi->num_tx_desc;
2613 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2614 		if (ice_setup_tx_ring(xdp_ring))
2615 			goto free_xdp_rings;
2616 		ice_set_ring_xdp(xdp_ring);
2617 		spin_lock_init(&xdp_ring->tx_lock);
2618 		for (j = 0; j < xdp_ring->count; j++) {
2619 			tx_desc = ICE_TX_DESC(xdp_ring, j);
2620 			tx_desc->cmd_type_offset_bsz = 0;
2621 		}
2622 	}
2623 
2624 	return 0;
2625 
2626 free_xdp_rings:
2627 	for (; i >= 0; i--) {
2628 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2629 			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2630 			vsi->xdp_rings[i]->ring_stats = NULL;
2631 			ice_free_tx_ring(vsi->xdp_rings[i]);
2632 		}
2633 	}
2634 	return -ENOMEM;
2635 }
2636 
2637 /**
2638  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2639  * @vsi: VSI to set the bpf prog on
2640  * @prog: the bpf prog pointer
2641  */
2642 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2643 {
2644 	struct bpf_prog *old_prog;
2645 	int i;
2646 
2647 	old_prog = xchg(&vsi->xdp_prog, prog);
2648 	ice_for_each_rxq(vsi, i)
2649 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2650 
2651 	if (old_prog)
2652 		bpf_prog_put(old_prog);
2653 }
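
/* The xchg() above pairs with lockless readers on the hot path; a reader is
 * expected to sample the per-ring pointer once per poll, along these lines
 * (sketch):
 *
 *	struct bpf_prog *xdp_prog = READ_ONCE(rx_ring->xdp_prog);
 *
 *	if (xdp_prog)
 *		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 */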
2654 
2655 /**
2656  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2657  * @vsi: VSI to bring up Tx rings used by XDP
2658  * @prog: bpf program that will be assigned to VSI
2659  *
2660  * Return 0 on success and negative value on error
2661  */
2662 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2663 {
2664 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2665 	int xdp_rings_rem = vsi->num_xdp_txq;
2666 	struct ice_pf *pf = vsi->back;
2667 	struct ice_qs_cfg xdp_qs_cfg = {
2668 		.qs_mutex = &pf->avail_q_mutex,
2669 		.pf_map = pf->avail_txqs,
2670 		.pf_map_size = pf->max_pf_txqs,
2671 		.q_count = vsi->num_xdp_txq,
2672 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2673 		.vsi_map = vsi->txq_map,
2674 		.vsi_map_offset = vsi->alloc_txq,
2675 		.mapping_mode = ICE_VSI_MAP_CONTIG
2676 	};
2677 	struct device *dev;
2678 	int i, v_idx;
2679 	int status;
2680 
2681 	dev = ice_pf_to_dev(pf);
2682 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2683 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2684 	if (!vsi->xdp_rings)
2685 		return -ENOMEM;
2686 
2687 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2688 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2689 		goto err_map_xdp;
2690 
2691 	if (static_key_enabled(&ice_xdp_locking_key))
2692 		netdev_warn(vsi->netdev,
2693 			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2694 
2695 	if (ice_xdp_alloc_setup_rings(vsi))
2696 		goto clear_xdp_rings;
2697 
2698 	/* follow the logic from ice_vsi_map_rings_to_vectors */
2699 	ice_for_each_q_vector(vsi, v_idx) {
2700 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2701 		int xdp_rings_per_v, q_id, q_base;
2702 
2703 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2704 					       vsi->num_q_vectors - v_idx);
2705 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2706 
2707 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2708 			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2709 
2710 			xdp_ring->q_vector = q_vector;
2711 			xdp_ring->next = q_vector->tx.tx_ring;
2712 			q_vector->tx.tx_ring = xdp_ring;
2713 		}
2714 		xdp_rings_rem -= xdp_rings_per_v;
2715 	}
2716 
2717 	ice_for_each_rxq(vsi, i) {
2718 		if (static_key_enabled(&ice_xdp_locking_key)) {
2719 			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2720 		} else {
2721 			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
2722 			struct ice_tx_ring *ring;
2723 
2724 			ice_for_each_tx_ring(ring, q_vector->tx) {
2725 				if (ice_ring_is_xdp(ring)) {
2726 					vsi->rx_rings[i]->xdp_ring = ring;
2727 					break;
2728 				}
2729 			}
2730 		}
2731 		ice_tx_xsk_pool(vsi, i);
2732 	}
2733 
2734 	/* omit the scheduler update if in reset path; XDP queues will be
2735 	 * taken into account at the end of ice_vsi_rebuild, where
2736 	 * ice_cfg_vsi_lan is being called
2737 	 */
2738 	if (ice_is_reset_in_progress(pf->state))
2739 		return 0;
2740 
2741 	/* tell the Tx scheduler that right now we have
2742 	 * additional queues
2743 	 */
2744 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2745 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2746 
2747 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2748 				 max_txqs);
2749 	if (status) {
2750 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2751 			status);
2752 		goto clear_xdp_rings;
2753 	}
2754 
2755 	/* Assign the prog only when it's not already present on the VSI.
2756 	 * This flow is reached from both the ethtool -L and ndo_bpf paths.
2757 	 * A VSI rebuild under ethtool -L could otherwise expose us to
2758 	 * bpf_prog refcount issues: we would swap in the same bpf_prog
2759 	 * pointer already held in vsi->xdp_prog and then call bpf_prog_put
2760 	 * on it, since it would be treated as an 'old_prog'. For ndo_bpf
2761 	 * this is not harmful, as dev_xdp_install bumps the refcount
2762 	 * before calling the op exposed by the driver.
2763 	 */
2764 	if (!ice_is_xdp_ena_vsi(vsi))
2765 		ice_vsi_assign_bpf_prog(vsi, prog);
2766 
2767 	return 0;
2768 clear_xdp_rings:
2769 	ice_for_each_xdp_txq(vsi, i)
2770 		if (vsi->xdp_rings[i]) {
2771 			kfree_rcu(vsi->xdp_rings[i], rcu);
2772 			vsi->xdp_rings[i] = NULL;
2773 		}
2774 
2775 err_map_xdp:
2776 	mutex_lock(&pf->avail_q_mutex);
2777 	ice_for_each_xdp_txq(vsi, i) {
2778 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2779 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2780 	}
2781 	mutex_unlock(&pf->avail_q_mutex);
2782 
2783 	devm_kfree(dev, vsi->xdp_rings);
2784 	return -ENOMEM;
2785 }
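
/* Worked example for the ring-to-vector distribution above: with
 * num_xdp_txq = 8 and num_q_vectors = 3, DIV_ROUND_UP spreads the XDP rings
 * 3/3/2 across the vectors (xdp_rings_rem goes 8 -> 5 -> 2 -> 0).
 */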
2786 
2787 /**
2788  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2789  * @vsi: VSI to remove XDP rings
2790  *
2791  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2792  * resources
2793  */
2794 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2795 {
2796 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2797 	struct ice_pf *pf = vsi->back;
2798 	int i, v_idx;
2799 
2800 	/* q_vectors are freed in the reset path, so there's no point in
2801 	 * detaching rings. If a rebuild is triggered outside of a reset, the
2802 	 * reset bits in pf->state won't be set, so additionally check the
2803 	 * first q_vector against NULL
2804 	 */
2805 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2806 		goto free_qmap;
2807 
2808 	ice_for_each_q_vector(vsi, v_idx) {
2809 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2810 		struct ice_tx_ring *ring;
2811 
2812 		ice_for_each_tx_ring(ring, q_vector->tx)
2813 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2814 				break;
2815 
2816 		/* restore the value of the last node prior to XDP setup */
2817 		q_vector->tx.tx_ring = ring;
2818 	}
2819 
2820 free_qmap:
2821 	mutex_lock(&pf->avail_q_mutex);
2822 	ice_for_each_xdp_txq(vsi, i) {
2823 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2824 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2825 	}
2826 	mutex_unlock(&pf->avail_q_mutex);
2827 
2828 	ice_for_each_xdp_txq(vsi, i)
2829 		if (vsi->xdp_rings[i]) {
2830 			if (vsi->xdp_rings[i]->desc) {
2831 				synchronize_rcu();
2832 				ice_free_tx_ring(vsi->xdp_rings[i]);
2833 			}
2834 			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2835 			vsi->xdp_rings[i]->ring_stats = NULL;
2836 			kfree_rcu(vsi->xdp_rings[i], rcu);
2837 			vsi->xdp_rings[i] = NULL;
2838 		}
2839 
2840 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2841 	vsi->xdp_rings = NULL;
2842 
2843 	if (static_key_enabled(&ice_xdp_locking_key))
2844 		static_branch_dec(&ice_xdp_locking_key);
2845 
2846 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2847 		return 0;
2848 
2849 	ice_vsi_assign_bpf_prog(vsi, NULL);
2850 
2851 	/* notify Tx scheduler that we destroyed XDP queues and bring
2852 	 * back the old number of child nodes
2853 	 */
2854 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2855 		max_txqs[i] = vsi->num_txq;
2856 
2857 	/* change number of XDP Tx queues to 0 */
2858 	vsi->num_xdp_txq = 0;
2859 
2860 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2861 			       max_txqs);
2862 }
2863 
2864 /**
2865  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2866  * @vsi: VSI to schedule napi on
2867  */
2868 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2869 {
2870 	int i;
2871 
2872 	ice_for_each_rxq(vsi, i) {
2873 		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2874 
2875 		if (rx_ring->xsk_pool)
2876 			napi_schedule(&rx_ring->q_vector->napi);
2877 	}
2878 }
2879 
2880 /**
2881  * ice_vsi_determine_xdp_res - figure out how many Tx queues XDP can have
2882  * @vsi: VSI to determine the count of XDP Tx queues for
2883  *
2884  * Returns 0 if the available Tx queue count is at least half the CPU count,
2885  * -ENOMEM otherwise
2886  */
2887 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2888 {
2889 	u16 avail = ice_get_avail_txq_count(vsi->back);
2890 	u16 cpus = num_possible_cpus();
2891 
2892 	if (avail < cpus / 2)
2893 		return -ENOMEM;
2894 
2895 	vsi->num_xdp_txq = min_t(u16, avail, cpus);
2896 
2897 	if (vsi->num_xdp_txq < cpus)
2898 		static_branch_inc(&ice_xdp_locking_key);
2899 
2900 	return 0;
2901 }
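
/* Worked example: with 16 possible CPUs and 12 available Tx queues,
 * 12 >= 16 / 2 so the check passes and num_xdp_txq = min(12, 16) = 12;
 * since 12 < 16, the ice_xdp_locking_key static branch is enabled and XDP
 * rings are shared between CPUs under a per-ring lock.
 */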
2902 
2903 /**
2904  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2905  * @vsi: Pointer to VSI structure
2906  */
2907 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
2908 {
2909 	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
2910 		return ICE_RXBUF_1664;
2911 	else
2912 		return ICE_RXBUF_3072;
2913 }
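
/* ice_xdp_setup_prog() below compares vsi->netdev->mtu plus the Ethernet
 * header padding (ICE_ETH_PKT_HDR_PAD) against this limit, so e.g. an MTU
 * around 3000 bytes exceeds the 1664-byte legacy-rx limit and is then only
 * accepted for a program with xdp_has_frags set.
 */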
2914 
2915 /**
2916  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2917  * @vsi: VSI to setup XDP for
2918  * @prog: XDP program
2919  * @extack: netlink extended ack
2920  */
2921 static int
2922 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2923 		   struct netlink_ext_ack *extack)
2924 {
2925 	unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2926 	bool if_running = netif_running(vsi->netdev);
2927 	int ret = 0, xdp_ring_err = 0;
2928 
2929 	if (prog && !prog->aux->xdp_has_frags) {
2930 		if (frame_size > ice_max_xdp_frame_size(vsi)) {
2931 			NL_SET_ERR_MSG_MOD(extack,
2932 					   "MTU is too large for linear frames and XDP prog does not support frags");
2933 			return -EOPNOTSUPP;
2934 		}
2935 	}
2936 
2937 	/* hot swap progs and avoid toggling link */
2938 	if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
2939 		ice_vsi_assign_bpf_prog(vsi, prog);
2940 		return 0;
2941 	}
2942 
2943 	/* need to stop netdev while setting up the program for Rx rings */
2944 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2945 		ret = ice_down(vsi);
2946 		if (ret) {
2947 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2948 			return ret;
2949 		}
2950 	}
2951 
2952 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2953 		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2954 		if (xdp_ring_err) {
2955 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
2956 		} else {
2957 			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2958 			if (xdp_ring_err)
2959 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2960 		}
2961 		xdp_features_set_redirect_target(vsi->netdev, true);
2962 		/* reallocate Rx queues that are used for zero-copy */
2963 		xdp_ring_err = ice_realloc_zc_buf(vsi, true);
2964 		if (xdp_ring_err)
2965 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
2966 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2967 		xdp_features_clear_redirect_target(vsi->netdev);
2968 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2969 		if (xdp_ring_err)
2970 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2971 		/* reallocate Rx queues that were used for zero-copy */
2972 		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
2973 		if (xdp_ring_err)
2974 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
2975 	}
2976 
2977 	if (if_running)
2978 		ret = ice_up(vsi);
2979 
2980 	if (!ret && prog)
2981 		ice_vsi_rx_napi_schedule(vsi);
2982 
2983 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2984 }
2985 
2986 /**
2987  * ice_xdp_safe_mode - XDP handler for safe mode
2988  * @dev: netdevice
2989  * @xdp: XDP command
2990  */
2991 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2992 			     struct netdev_bpf *xdp)
2993 {
2994 	NL_SET_ERR_MSG_MOD(xdp->extack,
2995 			   "Please provide working DDP firmware package in order to use XDP\n"
2996 			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2997 	return -EOPNOTSUPP;
2998 }
2999 
3000 /**
3001  * ice_xdp - implements XDP handler
3002  * @dev: netdevice
3003  * @xdp: XDP command
3004  */
3005 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3006 {
3007 	struct ice_netdev_priv *np = netdev_priv(dev);
3008 	struct ice_vsi *vsi = np->vsi;
3009 
3010 	if (vsi->type != ICE_VSI_PF) {
3011 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
3012 		return -EINVAL;
3013 	}
3014 
3015 	switch (xdp->command) {
3016 	case XDP_SETUP_PROG:
3017 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3018 	case XDP_SETUP_XSK_POOL:
3019 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
3020 					  xdp->xsk.queue_id);
3021 	default:
3022 		return -EINVAL;
3023 	}
3024 }
3025 
3026 /**
3027  * ice_ena_misc_vector - enable the non-queue interrupts
3028  * @pf: board private structure
3029  */
3030 static void ice_ena_misc_vector(struct ice_pf *pf)
3031 {
3032 	struct ice_hw *hw = &pf->hw;
3033 	u32 val;
3034 
3035 	/* Disable anti-spoof detection interrupt to prevent spurious event
3036 	 * interrupts during a function reset. Anti-spoof functionality is
3037 	 * still supported.
3038 	 */
3039 	val = rd32(hw, GL_MDCK_TX_TDPU);
3040 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3041 	wr32(hw, GL_MDCK_TX_TDPU, val);
3042 
3043 	/* clear things first */
3044 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
3045 	rd32(hw, PFINT_OICR);		/* read to clear */
3046 
3047 	val = (PFINT_OICR_ECC_ERR_M |
3048 	       PFINT_OICR_MAL_DETECT_M |
3049 	       PFINT_OICR_GRST_M |
3050 	       PFINT_OICR_PCI_EXCEPTION_M |
3051 	       PFINT_OICR_VFLR_M |
3052 	       PFINT_OICR_HMC_ERR_M |
3053 	       PFINT_OICR_PE_PUSH_M |
3054 	       PFINT_OICR_PE_CRITERR_M);
3055 
3056 	wr32(hw, PFINT_OICR_ENA, val);
3057 
3058 	/* SW_ITR_IDX = 0, but don't change INTENA */
3059 	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3060 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3061 }
3062 
3063 /**
3064  * ice_misc_intr - misc interrupt handler
3065  * @irq: interrupt number
3066  * @data: pointer to the PF structure
3067  */
3068 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3069 {
3070 	struct ice_pf *pf = (struct ice_pf *)data;
3071 	struct ice_hw *hw = &pf->hw;
3072 	struct device *dev;
3073 	u32 oicr, ena_mask;
3074 
3075 	dev = ice_pf_to_dev(pf);
3076 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3077 	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3078 	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3079 
3080 	oicr = rd32(hw, PFINT_OICR);
3081 	ena_mask = rd32(hw, PFINT_OICR_ENA);
3082 
3083 	if (oicr & PFINT_OICR_SWINT_M) {
3084 		ena_mask &= ~PFINT_OICR_SWINT_M;
3085 		pf->sw_int_count++;
3086 	}
3087 
3088 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
3089 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3090 		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3091 	}
3092 	if (oicr & PFINT_OICR_VFLR_M) {
3093 		/* disable any further VFLR event notifications */
3094 		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3095 			u32 reg = rd32(hw, PFINT_OICR_ENA);
3096 
3097 			reg &= ~PFINT_OICR_VFLR_M;
3098 			wr32(hw, PFINT_OICR_ENA, reg);
3099 		} else {
3100 			ena_mask &= ~PFINT_OICR_VFLR_M;
3101 			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3102 		}
3103 	}
3104 
3105 	if (oicr & PFINT_OICR_GRST_M) {
3106 		u32 reset;
3107 
3108 		/* we have a reset warning */
3109 		ena_mask &= ~PFINT_OICR_GRST_M;
3110 		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
3111 			GLGEN_RSTAT_RESET_TYPE_S;
3112 
3113 		if (reset == ICE_RESET_CORER)
3114 			pf->corer_count++;
3115 		else if (reset == ICE_RESET_GLOBR)
3116 			pf->globr_count++;
3117 		else if (reset == ICE_RESET_EMPR)
3118 			pf->empr_count++;
3119 		else
3120 			dev_dbg(dev, "Invalid reset type %d\n", reset);
3121 
3122 		/* If a reset cycle isn't already in progress, we set a bit in
3123 		 * pf->state so that the service task can start a reset/rebuild.
3124 		 */
3125 		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3126 			if (reset == ICE_RESET_CORER)
3127 				set_bit(ICE_CORER_RECV, pf->state);
3128 			else if (reset == ICE_RESET_GLOBR)
3129 				set_bit(ICE_GLOBR_RECV, pf->state);
3130 			else
3131 				set_bit(ICE_EMPR_RECV, pf->state);
3132 
3133 			/* There are a couple of different bits at play here.
3134 			 * hw->reset_ongoing indicates whether the hardware is
3135 			 * in reset. This is set to true when a reset interrupt
3136 			 * is received and set back to false after the driver
3137 			 * has determined that the hardware is out of reset.
3138 			 *
3139 			 * ICE_RESET_OICR_RECV in pf->state indicates
3140 			 * that a post reset rebuild is required before the
3141 			 * driver is operational again. This is set above.
3142 			 *
3143 			 * As this is the start of the reset/rebuild cycle, set
3144 			 * both to indicate that.
3145 			 */
3146 			hw->reset_ongoing = true;
3147 		}
3148 	}
3149 
3150 	if (oicr & PFINT_OICR_TSYN_TX_M) {
3151 		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3152 		if (!hw->reset_ongoing && ice_ptp_pf_handles_tx_interrupt(pf))
3153 			set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
3154 	}
3155 
3156 	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3157 		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3158 		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3159 
3160 		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3161 
3162 		if (ice_pf_src_tmr_owned(pf)) {
3163 			/* Save EVENTs from GLTSYN register */
3164 			pf->ptp.ext_ts_irq |= gltsyn_stat &
3165 					      (GLTSYN_STAT_EVENT0_M |
3166 					       GLTSYN_STAT_EVENT1_M |
3167 					       GLTSYN_STAT_EVENT2_M);
3168 
3169 			set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread);
3170 		}
3171 	}
3172 
3173 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3174 	if (oicr & ICE_AUX_CRIT_ERR) {
3175 		pf->oicr_err_reg |= oicr;
3176 		set_bit(ICE_AUX_ERR_PENDING, pf->state);
3177 		ena_mask &= ~ICE_AUX_CRIT_ERR;
3178 	}
3179 
3180 	/* Report any remaining unexpected interrupts */
3181 	oicr &= ena_mask;
3182 	if (oicr) {
3183 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3184 		/* If a critical error is pending there is no choice but to
3185 		 * reset the device.
3186 		 */
3187 		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3188 			    PFINT_OICR_ECC_ERR_M)) {
3189 			set_bit(ICE_PFR_REQ, pf->state);
3190 		}
3191 	}
3192 
3193 	return IRQ_WAKE_THREAD;
3194 }
3195 
3196 /**
3197  * ice_misc_intr_thread_fn - misc interrupt thread function
3198  * @irq: interrupt number
3199  * @data: pointer to the PF structure
3200  */
3201 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3202 {
3203 	struct ice_pf *pf = data;
3204 	struct ice_hw *hw;
3205 
3206 	hw = &pf->hw;
3207 
3208 	if (ice_is_reset_in_progress(pf->state))
3209 		return IRQ_HANDLED;
3210 
3211 	ice_service_task_schedule(pf);
3212 
3213 	if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread))
3214 		ice_ptp_extts_event(pf);
3215 
3216 	if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
3217 		/* Process outstanding Tx timestamps. If there is more work,
3218 		 * re-arm the interrupt to trigger again.
3219 		 */
3220 		if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
3221 			wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
3222 			ice_flush(hw);
3223 		}
3224 	}
3225 
3226 	ice_irq_dynamic_ena(hw, NULL, NULL);
3227 
3228 	return IRQ_HANDLED;
3229 }
3230 
3231 /**
3232  * ice_dis_ctrlq_interrupts - disable control queue interrupts
3233  * @hw: pointer to HW structure
3234  */
3235 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3236 {
3237 	/* disable Admin queue Interrupt causes */
3238 	wr32(hw, PFINT_FW_CTL,
3239 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3240 
3241 	/* disable Mailbox queue Interrupt causes */
3242 	wr32(hw, PFINT_MBX_CTL,
3243 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3244 
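	/* disable Sideband queue Interrupt causes */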
3245 	wr32(hw, PFINT_SB_CTL,
3246 	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3247 
3248 	/* disable Control queue Interrupt causes */
3249 	wr32(hw, PFINT_OICR_CTL,
3250 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3251 
3252 	ice_flush(hw);
3253 }
3254 
3255 /**
3256  * ice_free_irq_msix_misc - Unroll misc vector setup
3257  * @pf: board private structure
3258  */
3259 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3260 {
3261 	int misc_irq_num = pf->oicr_irq.virq;
3262 	struct ice_hw *hw = &pf->hw;
3263 
3264 	ice_dis_ctrlq_interrupts(hw);
3265 
3266 	/* disable OICR interrupt */
3267 	wr32(hw, PFINT_OICR_ENA, 0);
3268 	ice_flush(hw);
3269 
3270 	synchronize_irq(misc_irq_num);
3271 	devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3272 
3273 	ice_free_irq(pf, pf->oicr_irq);
3274 }
3275 
3276 /**
3277  * ice_ena_ctrlq_interrupts - enable control queue interrupts
3278  * @hw: pointer to HW structure
3279  * @reg_idx: HW vector index to associate the control queue interrupts with
3280  */
3281 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3282 {
3283 	u32 val;
3284 
3285 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3286 	       PFINT_OICR_CTL_CAUSE_ENA_M);
3287 	wr32(hw, PFINT_OICR_CTL, val);
3288 
3289 	/* enable Admin queue Interrupt causes */
3290 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3291 	       PFINT_FW_CTL_CAUSE_ENA_M);
3292 	wr32(hw, PFINT_FW_CTL, val);
3293 
3294 	/* enable Mailbox queue Interrupt causes */
3295 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3296 	       PFINT_MBX_CTL_CAUSE_ENA_M);
3297 	wr32(hw, PFINT_MBX_CTL, val);
3298 
3299 	/* enable Sideband queue Interrupt causes */
3300 	val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3301 	       PFINT_SB_CTL_CAUSE_ENA_M);
3302 	wr32(hw, PFINT_SB_CTL, val);
3303 
3304 	ice_flush(hw);
3305 }
3306 
3307 /**
3308  * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3309  * @pf: board private structure
3310  *
3311  * This sets up the handler for MSIX 0, which is used to manage the
3312  * non-queue interrupts, e.g. AdminQ and errors. This is not used
3313  * when in MSI or Legacy interrupt mode.
3314  */
3315 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3316 {
3317 	struct device *dev = ice_pf_to_dev(pf);
3318 	struct ice_hw *hw = &pf->hw;
3319 	struct msi_map oicr_irq;
3320 	int err = 0;
3321 
3322 	if (!pf->int_name[0])
3323 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3324 			 dev_driver_string(dev), dev_name(dev));
3325 
3326 	/* Do not request IRQ but do enable OICR interrupt since settings are
3327 	 * lost during reset. Note that this function is called only during
3328 	 * rebuild path and not while reset is in progress.
3329 	 */
3330 	if (ice_is_reset_in_progress(pf->state))
3331 		goto skip_req_irq;
3332 
3333 	/* reserve one vector in irq_tracker for misc interrupts */
3334 	oicr_irq = ice_alloc_irq(pf, false);
3335 	if (oicr_irq.index < 0)
3336 		return oicr_irq.index;
3337 
3338 	pf->oicr_irq = oicr_irq;
3339 	err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3340 					ice_misc_intr_thread_fn, 0,
3341 					pf->int_name, pf);
3342 	if (err) {
3343 		dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3344 			pf->int_name, err);
3345 		ice_free_irq(pf, pf->oicr_irq);
3346 		return err;
3347 	}
3348 
3349 skip_req_irq:
3350 	ice_ena_misc_vector(pf);
3351 
3352 	ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3353 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3354 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3355 
3356 	ice_flush(hw);
3357 	ice_irq_dynamic_ena(hw, NULL, NULL);
3358 
3359 	return 0;
3360 }
3361 
3362 /**
3363  * ice_napi_add - register NAPI handler for the VSI
3364  * @vsi: VSI for which NAPI handler is to be registered
3365  *
3366  * This function is only called in the driver's load path. Registering the NAPI
3367  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3368  * reset/rebuild, etc.)
3369  */
3370 static void ice_napi_add(struct ice_vsi *vsi)
3371 {
3372 	int v_idx;
3373 
3374 	if (!vsi->netdev)
3375 		return;
3376 
3377 	ice_for_each_q_vector(vsi, v_idx)
3378 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3379 			       ice_napi_poll);
3380 }
3381 
3382 /**
3383  * ice_set_ops - set netdev and ethtool ops for the given netdev
3384  * @vsi: the VSI associated with the new netdev
3385  */
3386 static void ice_set_ops(struct ice_vsi *vsi)
3387 {
3388 	struct net_device *netdev = vsi->netdev;
3389 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3390 
3391 	if (ice_is_safe_mode(pf)) {
3392 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3393 		ice_set_ethtool_safe_mode_ops(netdev);
3394 		return;
3395 	}
3396 
3397 	netdev->netdev_ops = &ice_netdev_ops;
3398 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3399 	ice_set_ethtool_ops(netdev);
3400 
3401 	if (vsi->type != ICE_VSI_PF)
3402 		return;
3403 
3404 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
3405 			       NETDEV_XDP_ACT_XSK_ZEROCOPY |
3406 			       NETDEV_XDP_ACT_RX_SG;
3407 	netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
3408 }
3409 
3410 /**
3411  * ice_set_netdev_features - set features for the given netdev
3412  * @netdev: netdev instance
3413  */
3414 static void ice_set_netdev_features(struct net_device *netdev)
3415 {
3416 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3417 	bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3418 	netdev_features_t csumo_features;
3419 	netdev_features_t vlano_features;
3420 	netdev_features_t dflt_features;
3421 	netdev_features_t tso_features;
3422 
3423 	if (ice_is_safe_mode(pf)) {
3424 		/* safe mode */
3425 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3426 		netdev->hw_features = netdev->features;
3427 		return;
3428 	}
3429 
3430 	dflt_features = NETIF_F_SG	|
3431 			NETIF_F_HIGHDMA	|
3432 			NETIF_F_NTUPLE	|
3433 			NETIF_F_RXHASH;
3434 
3435 	csumo_features = NETIF_F_RXCSUM	  |
3436 			 NETIF_F_IP_CSUM  |
3437 			 NETIF_F_SCTP_CRC |
3438 			 NETIF_F_IPV6_CSUM;
3439 
3440 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3441 			 NETIF_F_HW_VLAN_CTAG_TX     |
3442 			 NETIF_F_HW_VLAN_CTAG_RX;
3443 
3444 	/* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3445 	if (is_dvm_ena)
3446 		vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3447 
3448 	tso_features = NETIF_F_TSO			|
3449 		       NETIF_F_TSO_ECN			|
3450 		       NETIF_F_TSO6			|
3451 		       NETIF_F_GSO_GRE			|
3452 		       NETIF_F_GSO_UDP_TUNNEL		|
3453 		       NETIF_F_GSO_GRE_CSUM		|
3454 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3455 		       NETIF_F_GSO_PARTIAL		|
3456 		       NETIF_F_GSO_IPXIP4		|
3457 		       NETIF_F_GSO_IPXIP6		|
3458 		       NETIF_F_GSO_UDP_L4;
3459 
3460 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3461 					NETIF_F_GSO_GRE_CSUM;
3462 	/* set features that user can change */
3463 	netdev->hw_features = dflt_features | csumo_features |
3464 			      vlano_features | tso_features;
3465 
3466 	/* add support for HW_CSUM on packets with MPLS header */
3467 	netdev->mpls_features =  NETIF_F_HW_CSUM |
3468 				 NETIF_F_TSO     |
3469 				 NETIF_F_TSO6;
3470 
3471 	/* enable features */
3472 	netdev->features |= netdev->hw_features;
3473 
3474 	netdev->hw_features |= NETIF_F_HW_TC;
3475 	netdev->hw_features |= NETIF_F_LOOPBACK;
3476 
3477 	/* encap and VLAN devices inherit default, csumo and tso features */
3478 	netdev->hw_enc_features |= dflt_features | csumo_features |
3479 				   tso_features;
3480 	netdev->vlan_features |= dflt_features | csumo_features |
3481 				 tso_features;
3482 
3483 	/* advertise support but don't enable by default since only one type of
3484 	 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3485 	 * type turns on the other has to be turned off. This is enforced by the
3486 	 * ice_fix_features() ndo callback.
3487 	 */
3488 	if (is_dvm_ena)
3489 		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3490 			NETIF_F_HW_VLAN_STAG_TX;
3491 
3492 	/* Leave CRC / FCS stripping enabled by default, but allow the value to
3493 	 * be changed at runtime
3494 	 */
3495 	netdev->hw_features |= NETIF_F_RXFCS;
3496 
3497 	netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
3498 }
3499 
3500 /**
3501  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3502  * @lut: Lookup table
3503  * @rss_table_size: Lookup table size
3504  * @rss_size: Range of queue number for hashing
3505  */
3506 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3507 {
3508 	u16 i;
3509 
3510 	for (i = 0; i < rss_table_size; i++)
3511 		lut[i] = i % rss_size;
3512 }
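
/* Editorial sketch (not part of the driver): the LUT simply round-robins
 * queue indices, so rss_table_size = 8 over rss_size = 3 yields
 * { 0, 1, 2, 0, 1, 2, 0, 1 }:
 *
 *	u8 lut[8];
 *
 *	ice_fill_rss_lut(lut, 8, 3);
 *	// each entry is now lut[i] == i % 3
 */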
3513 
3514 /**
3515  * ice_pf_vsi_setup - Set up a PF VSI
3516  * @pf: board private structure
3517  * @pi: pointer to the port_info instance
3518  *
3519  * Returns pointer to the successfully allocated VSI software struct
3520  * on success, otherwise returns NULL on failure.
3521  */
3522 static struct ice_vsi *
3523 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3524 {
3525 	struct ice_vsi_cfg_params params = {};
3526 
3527 	params.type = ICE_VSI_PF;
3528 	params.pi = pi;
3529 	params.flags = ICE_VSI_FLAG_INIT;
3530 
3531 	return ice_vsi_setup(pf, &params);
3532 }
3533 
3534 static struct ice_vsi *
3535 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3536 		   struct ice_channel *ch)
3537 {
3538 	struct ice_vsi_cfg_params params = {};
3539 
3540 	params.type = ICE_VSI_CHNL;
3541 	params.pi = pi;
3542 	params.ch = ch;
3543 	params.flags = ICE_VSI_FLAG_INIT;
3544 
3545 	return ice_vsi_setup(pf, &params);
3546 }
3547 
3548 /**
3549  * ice_ctrl_vsi_setup - Set up a control VSI
3550  * @pf: board private structure
3551  * @pi: pointer to the port_info instance
3552  *
3553  * Returns pointer to the successfully allocated VSI software struct
3554  * on success, otherwise returns NULL on failure.
3555  */
3556 static struct ice_vsi *
3557 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3558 {
3559 	struct ice_vsi_cfg_params params = {};
3560 
3561 	params.type = ICE_VSI_CTRL;
3562 	params.pi = pi;
3563 	params.flags = ICE_VSI_FLAG_INIT;
3564 
3565 	return ice_vsi_setup(pf, &params);
3566 }
3567 
3568 /**
3569  * ice_lb_vsi_setup - Set up a loopback VSI
3570  * @pf: board private structure
3571  * @pi: pointer to the port_info instance
3572  *
3573  * Returns pointer to the successfully allocated VSI software struct
3574  * on success, otherwise returns NULL on failure.
3575  */
3576 struct ice_vsi *
3577 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3578 {
3579 	struct ice_vsi_cfg_params params = {};
3580 
3581 	params.type = ICE_VSI_LB;
3582 	params.pi = pi;
3583 	params.flags = ICE_VSI_FLAG_INIT;
3584 
3585 	return ice_vsi_setup(pf, &params);
3586 }
3587 
3588 /**
3589  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3590  * @netdev: network interface to be adjusted
3591  * @proto: VLAN TPID
3592  * @vid: VLAN ID to be added
3593  *
3594  * net_device_ops implementation for adding VLAN IDs
3595  */
3596 static int
3597 ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3598 {
3599 	struct ice_netdev_priv *np = netdev_priv(netdev);
3600 	struct ice_vsi_vlan_ops *vlan_ops;
3601 	struct ice_vsi *vsi = np->vsi;
3602 	struct ice_vlan vlan;
3603 	int ret;
3604 
3605 	/* VLAN 0 is added by default during load/reset */
3606 	if (!vid)
3607 		return 0;
3608 
3609 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3610 		usleep_range(1000, 2000);
3611 
3612 	/* Add multicast promisc rule for the VLAN ID to be added if
3613 	 * all-multicast is currently enabled.
3614 	 */
3615 	if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3616 		ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3617 					       ICE_MCAST_VLAN_PROMISC_BITS,
3618 					       vid);
3619 		if (ret)
3620 			goto finish;
3621 	}
3622 
3623 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3624 
3625 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3626 	 * packets aren't pruned by the device's internal switch on Rx
3627 	 */
3628 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3629 	ret = vlan_ops->add_vlan(vsi, &vlan);
3630 	if (ret)
3631 		goto finish;
3632 
3633 	/* If all-multicast is currently enabled and this VLAN ID is the only
3634 	 * one besides VLAN-0, update the look-up type of the multicast promisc
3635 	 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3636 	 */
3637 	if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3638 	    ice_vsi_num_non_zero_vlans(vsi) == 1) {
3639 		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3640 					   ICE_MCAST_PROMISC_BITS, 0);
3641 		ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3642 					 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3643 	}
3644 
3645 finish:
3646 	clear_bit(ICE_CFG_BUSY, vsi->state);
3647 
3648 	return ret;
3649 }
3650 
3651 /**
3652  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3653  * @netdev: network interface to be adjusted
3654  * @proto: VLAN TPID
3655  * @vid: VLAN ID to be removed
3656  *
3657  * net_device_ops implementation for removing VLAN IDs
3658  */
3659 static int
3660 ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3661 {
3662 	struct ice_netdev_priv *np = netdev_priv(netdev);
3663 	struct ice_vsi_vlan_ops *vlan_ops;
3664 	struct ice_vsi *vsi = np->vsi;
3665 	struct ice_vlan vlan;
3666 	int ret;
3667 
3668 	/* don't allow removal of VLAN 0 */
3669 	if (!vid)
3670 		return 0;
3671 
3672 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3673 		usleep_range(1000, 2000);
3674 
3675 	ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3676 				    ICE_MCAST_VLAN_PROMISC_BITS, vid);
3677 	if (ret) {
3678 		netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3679 			   vsi->vsi_num);
3680 		vsi->current_netdev_flags |= IFF_ALLMULTI;
3681 	}
3682 
3683 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3684 
3685 	/* Make sure VLAN delete is successful before updating VLAN
3686 	 * information
3687 	 */
3688 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3689 	ret = vlan_ops->del_vlan(vsi, &vlan);
3690 	if (ret)
3691 		goto finish;
3692 
3693 	/* Remove multicast promisc rule for the removed VLAN ID if
3694 	 * all-multicast is enabled.
3695 	 */
3696 	if (vsi->current_netdev_flags & IFF_ALLMULTI)
3697 		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3698 					   ICE_MCAST_VLAN_PROMISC_BITS, vid);
3699 
3700 	if (!ice_vsi_has_non_zero_vlans(vsi)) {
3701 		/* Update look-up type of multicast promisc rule for VLAN 0
3702 		 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3703 		 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3704 		 */
3705 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3706 			ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3707 						   ICE_MCAST_VLAN_PROMISC_BITS,
3708 						   0);
3709 			ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3710 						 ICE_MCAST_PROMISC_BITS, 0);
3711 		}
3712 	}
3713 
3714 finish:
3715 	clear_bit(ICE_CFG_BUSY, vsi->state);
3716 
3717 	return ret;
3718 }
3719 
3720 /**
3721  * ice_rep_indr_tc_block_unbind - unlink and free indirect block private data
3722  * @cb_priv: indirection block private data
3723  */
3724 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3725 {
3726 	struct ice_indr_block_priv *indr_priv = cb_priv;
3727 
3728 	list_del(&indr_priv->list);
3729 	kfree(indr_priv);
3730 }
3731 
3732 /**
3733  * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3734  * @vsi: VSI struct which has the netdev
3735  */
3736 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3737 {
3738 	struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3739 
3740 	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3741 				 ice_rep_indr_tc_block_unbind);
3742 }
3743 
3744 /**
3745  * ice_tc_indir_block_register - Register TC indirect block notifications
3746  * @vsi: VSI struct which has the netdev
3747  *
3748  * Returns 0 on success, negative value on failure
3749  */
3750 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3751 {
3752 	struct ice_netdev_priv *np;
3753 
3754 	if (!vsi || !vsi->netdev)
3755 		return -EINVAL;
3756 
3757 	np = netdev_priv(vsi->netdev);
3758 
3759 	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3760 	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3761 }
3762 
3763 /**
3764  * ice_get_avail_q_count - Get count of queues available for use
3765  * @pf_qmap: bitmap to get queue use count from
3766  * @lock: pointer to a mutex that protects access to pf_qmap
3767  * @size: size of the bitmap
3768  */
3769 static u16
3770 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3771 {
3772 	unsigned long bit;
3773 	u16 count = 0;
3774 
3775 	mutex_lock(lock);
3776 	for_each_clear_bit(bit, pf_qmap, size)
3777 		count++;
3778 	mutex_unlock(lock);
3779 
3780 	return count;
3781 }
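
/* Editorial sketch (not part of the driver): set bits in pf_qmap mark
 * queues in use, so the helper counts the clear (available) bits. For a
 * 4-bit map with queues 0 and 2 taken and `lock` an initialized mutex:
 *
 *	DECLARE_BITMAP(qmap, 4) = { 0x5 };	// 0b0101
 *
 *	ice_get_avail_q_count(qmap, &lock, 4);	// returns 2 (queues 1 and 3)
 */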
3782 
3783 /**
3784  * ice_get_avail_txq_count - Get count of Tx queues available for use
3785  * @pf: pointer to an ice_pf instance
3786  */
3787 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3788 {
3789 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3790 				     pf->max_pf_txqs);
3791 }
3792 
3793 /**
3794  * ice_get_avail_rxq_count - Get count of Rx queues available for use
3795  * @pf: pointer to an ice_pf instance
3796  */
3797 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3798 {
3799 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3800 				     pf->max_pf_rxqs);
3801 }
3802 
3803 /**
3804  * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3805  * @pf: board private structure to de-initialize
3806  */
3807 static void ice_deinit_pf(struct ice_pf *pf)
3808 {
3809 	ice_service_task_stop(pf);
3810 	mutex_destroy(&pf->lag_mutex);
3811 	mutex_destroy(&pf->adev_mutex);
3812 	mutex_destroy(&pf->sw_mutex);
3813 	mutex_destroy(&pf->tc_mutex);
3814 	mutex_destroy(&pf->avail_q_mutex);
3815 	mutex_destroy(&pf->vfs.table_lock);
3816 
3817 	if (pf->avail_txqs) {
3818 		bitmap_free(pf->avail_txqs);
3819 		pf->avail_txqs = NULL;
3820 	}
3821 
3822 	if (pf->avail_rxqs) {
3823 		bitmap_free(pf->avail_rxqs);
3824 		pf->avail_rxqs = NULL;
3825 	}
3826 
3827 	if (pf->ptp.clock)
3828 		ptp_clock_unregister(pf->ptp.clock);
3829 }
3830 
3831 /**
3832  * ice_set_pf_caps - set PF's capability flags
3833  * @pf: pointer to the PF instance
3834  */
3835 static void ice_set_pf_caps(struct ice_pf *pf)
3836 {
3837 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3838 
3839 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3840 	if (func_caps->common_cap.rdma)
3841 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3842 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3843 	if (func_caps->common_cap.dcb)
3844 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3845 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3846 	if (func_caps->common_cap.sr_iov_1_1) {
3847 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3848 		pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3849 					      ICE_MAX_SRIOV_VFS);
3850 	}
3851 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3852 	if (func_caps->common_cap.rss_table_size)
3853 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3854 
3855 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3856 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3857 		u16 unused;
3858 
3859 		/* ctrl_vsi_idx will be set to a valid value when flow director
3860 		 * is setup by ice_init_fdir
3861 		 */
3862 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3863 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3864 		/* force guaranteed filter pool for PF */
3865 		ice_alloc_fd_guar_item(&pf->hw, &unused,
3866 				       func_caps->fd_fltr_guar);
3867 		/* force shared filter pool for PF */
3868 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3869 				       func_caps->fd_fltr_best_effort);
3870 	}
3871 
3872 	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3873 	if (func_caps->common_cap.ieee_1588)
3874 		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3875 
3876 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3877 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3878 }
3879 
3880 /**
3881  * ice_init_pf - Initialize general software structures (struct ice_pf)
3882  * @pf: board private structure to initialize
3883  */
3884 static int ice_init_pf(struct ice_pf *pf)
3885 {
3886 	ice_set_pf_caps(pf);
3887 
3888 	mutex_init(&pf->sw_mutex);
3889 	mutex_init(&pf->tc_mutex);
3890 	mutex_init(&pf->adev_mutex);
3891 	mutex_init(&pf->lag_mutex);
3892 
3893 	INIT_HLIST_HEAD(&pf->aq_wait_list);
3894 	spin_lock_init(&pf->aq_wait_lock);
3895 	init_waitqueue_head(&pf->aq_wait_queue);
3896 
3897 	init_waitqueue_head(&pf->reset_wait_queue);
3898 
3899 	/* setup service timer and periodic service task */
3900 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3901 	pf->serv_tmr_period = HZ;
3902 	INIT_WORK(&pf->serv_task, ice_service_task);
3903 	clear_bit(ICE_SERVICE_SCHED, pf->state);
3904 
3905 	mutex_init(&pf->avail_q_mutex);
3906 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3907 	if (!pf->avail_txqs)
3908 		return -ENOMEM;
3909 
3910 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3911 	if (!pf->avail_rxqs) {
3912 		bitmap_free(pf->avail_txqs);
3913 		pf->avail_txqs = NULL;
3914 		return -ENOMEM;
3915 	}
3916 
3917 	mutex_init(&pf->vfs.table_lock);
3918 	hash_init(pf->vfs.table);
3919 	ice_mbx_init_snapshot(&pf->hw);
3920 
3921 	return 0;
3922 }
3923 
3924 /**
3925  * ice_is_wol_supported - check if WoL is supported
3926  * @hw: pointer to hardware info
3927  *
3928  * Check if WoL is supported based on the HW configuration.
3929  * Returns true if NVM supports and enables WoL for this port, false otherwise
3930  */
3931 bool ice_is_wol_supported(struct ice_hw *hw)
3932 {
3933 	u16 wol_ctrl;
3934 
3935 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3936 	 * word) indicates WoL is not supported on the corresponding PF ID.
3937 	 */
3938 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3939 		return false;
3940 
3941 	return !(BIT(hw->port_info->lport) & wol_ctrl);
3942 }
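
/* Editorial sketch (not part of the driver): each bit of the WoL control
 * word maps to one logical port, and a set bit disables WoL there. If the
 * NVM word reads 0x0005 (bits 0 and 2 set), lports 0 and 2 report false
 * while lport 1 reports true:
 *
 *	wol_ctrl = 0x0005;
 *	(BIT(1) & wol_ctrl) == 0	->	WoL supported on lport 1
 */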
3943 
3944 /**
3945  * ice_vsi_recfg_qs - Change the number of queues on a VSI
3946  * @vsi: VSI being changed
3947  * @new_rx: new number of Rx queues
3948  * @new_tx: new number of Tx queues
3949  * @locked: is adev device_lock held
3950  *
3951  * Only change the number of queues if new_tx or new_rx is non-zero.
3952  *
3953  * Returns 0 on success.
3954  */
3955 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
3956 {
3957 	struct ice_pf *pf = vsi->back;
3958 	int err = 0, timeout = 50;
3959 
3960 	if (!new_rx && !new_tx)
3961 		return -EINVAL;
3962 
3963 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3964 		timeout--;
3965 		if (!timeout)
3966 			return -EBUSY;
3967 		usleep_range(1000, 2000);
3968 	}
3969 
3970 	if (new_tx)
3971 		vsi->req_txq = (u16)new_tx;
3972 	if (new_rx)
3973 		vsi->req_rxq = (u16)new_rx;
3974 
3975 	/* set for the next time the netdev is started */
3976 	if (!netif_running(vsi->netdev)) {
3977 		ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
3978 		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3979 		goto done;
3980 	}
3981 
3982 	ice_vsi_close(vsi);
3983 	ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
3984 	ice_pf_dcb_recfg(pf, locked);
3985 	ice_vsi_open(vsi);
3986 done:
3987 	clear_bit(ICE_CFG_BUSY, pf->state);
3988 	return err;
3989 }
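
/* Usage note (editorial, hedged): this is the path exercised by ethtool
 * channel reconfiguration, so "ethtool -L <iface> combined 8" should land
 * here roughly as:
 *
 *	err = ice_vsi_recfg_qs(vsi, 8, 8, locked);
 *
 * Passing 0 in new_rx or new_tx leaves that queue count unchanged.
 */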
3990 
3991 /**
3992  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3993  * @pf: PF to configure
3994  *
3995  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3996  * VSI can still Tx/Rx VLAN tagged packets.
3997  */
3998 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
3999 {
4000 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4001 	struct ice_vsi_ctx *ctxt;
4002 	struct ice_hw *hw;
4003 	int status;
4004 
4005 	if (!vsi)
4006 		return;
4007 
4008 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4009 	if (!ctxt)
4010 		return;
4011 
4012 	hw = &pf->hw;
4013 	ctxt->info = vsi->info;
4014 
4015 	ctxt->info.valid_sections =
4016 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4017 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
4018 			    ICE_AQ_VSI_PROP_SW_VALID);
4019 
4020 	/* disable VLAN anti-spoof */
4021 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4022 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4023 
4024 	/* disable VLAN pruning and keep all other settings */
4025 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4026 
4027 	/* allow all VLANs on Tx and don't strip on Rx */
4028 	ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4029 		ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4030 
4031 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4032 	if (status) {
4033 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4034 			status, ice_aq_str(hw->adminq.sq_last_status));
4035 	} else {
4036 		vsi->info.sec_flags = ctxt->info.sec_flags;
4037 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4038 		vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4039 	}
4040 
4041 	kfree(ctxt);
4042 }
4043 
4044 /**
4045  * ice_log_pkg_init - log result of DDP package load
4046  * @hw: pointer to hardware info
4047  * @state: state of package load
4048  */
4049 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4050 {
4051 	struct ice_pf *pf = hw->back;
4052 	struct device *dev;
4053 
4054 	dev = ice_pf_to_dev(pf);
4055 
4056 	switch (state) {
4057 	case ICE_DDP_PKG_SUCCESS:
4058 		dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4059 			 hw->active_pkg_name,
4060 			 hw->active_pkg_ver.major,
4061 			 hw->active_pkg_ver.minor,
4062 			 hw->active_pkg_ver.update,
4063 			 hw->active_pkg_ver.draft);
4064 		break;
4065 	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4066 		dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4067 			 hw->active_pkg_name,
4068 			 hw->active_pkg_ver.major,
4069 			 hw->active_pkg_ver.minor,
4070 			 hw->active_pkg_ver.update,
4071 			 hw->active_pkg_ver.draft);
4072 		break;
4073 	case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4074 		dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
4075 			hw->active_pkg_name,
4076 			hw->active_pkg_ver.major,
4077 			hw->active_pkg_ver.minor,
4078 			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4079 		break;
4080 	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4081 		dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4082 			 hw->active_pkg_name,
4083 			 hw->active_pkg_ver.major,
4084 			 hw->active_pkg_ver.minor,
4085 			 hw->active_pkg_ver.update,
4086 			 hw->active_pkg_ver.draft,
4087 			 hw->pkg_name,
4088 			 hw->pkg_ver.major,
4089 			 hw->pkg_ver.minor,
4090 			 hw->pkg_ver.update,
4091 			 hw->pkg_ver.draft);
4092 		break;
4093 	case ICE_DDP_PKG_FW_MISMATCH:
4094 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
4095 		break;
4096 	case ICE_DDP_PKG_INVALID_FILE:
4097 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4098 		break;
4099 	case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4100 		dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
4101 		break;
4102 	case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4103 		dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
4104 			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4105 		break;
4106 	case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4107 		dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
4108 		break;
4109 	case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4110 		dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
4111 		break;
4112 	case ICE_DDP_PKG_LOAD_ERROR:
4113 		dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
4114 		/* poll for reset to complete */
4115 		if (ice_check_reset(hw))
4116 			dev_err(dev, "Error resetting device. Please reload the driver\n");
4117 		break;
4118 	case ICE_DDP_PKG_ERR:
4119 	default:
4120 		dev_err(dev, "An unknown error occurred when loading the DDP package.  Entering Safe Mode.\n");
4121 		break;
4122 	}
4123 }
4124 
4125 /**
4126  * ice_load_pkg - load/reload the DDP Package file
4127  * @firmware: firmware structure when firmware was requested, or NULL for a reload
4128  * @pf: pointer to the PF instance
4129  *
4130  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4131  * initialize HW tables.
4132  */
4133 static void
4134 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4135 {
4136 	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4137 	struct device *dev = ice_pf_to_dev(pf);
4138 	struct ice_hw *hw = &pf->hw;
4139 
4140 	/* Load DDP Package */
4141 	if (firmware && !hw->pkg_copy) {
4142 		state = ice_copy_and_init_pkg(hw, firmware->data,
4143 					      firmware->size);
4144 		ice_log_pkg_init(hw, state);
4145 	} else if (!firmware && hw->pkg_copy) {
4146 		/* Reload package during rebuild after CORER/GLOBR reset */
4147 		state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4148 		ice_log_pkg_init(hw, state);
4149 	} else {
4150 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4151 	}
4152 
4153 	if (!ice_is_init_pkg_successful(state)) {
4154 		/* Safe Mode */
4155 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4156 		return;
4157 	}
4158 
4159 	/* Successful download package is the precondition for advanced
4160 	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4161 	 */
4162 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4163 }
4164 
4165 /**
4166  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4167  * @pf: pointer to the PF structure
4168  *
4169  * There is no error returned here because the driver should be able to handle
4170  * 128 Byte cache lines, so we only print a warning in case issues are seen,
4171  * specifically with Tx.
4172  */
4173 static void ice_verify_cacheline_size(struct ice_pf *pf)
4174 {
4175 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4176 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4177 			 ICE_CACHE_LINE_BYTES);
4178 }
4179 
4180 /**
4181  * ice_send_version - update firmware with driver version
4182  * @pf: PF struct
4183  *
4184  * Returns 0 on success, else error code
4185  */
4186 static int ice_send_version(struct ice_pf *pf)
4187 {
4188 	struct ice_driver_ver dv;
4189 
4190 	dv.major_ver = 0xff;
4191 	dv.minor_ver = 0xff;
4192 	dv.build_ver = 0xff;
4193 	dv.subbuild_ver = 0;
4194 	strscpy((char *)dv.driver_string, UTS_RELEASE,
4195 		sizeof(dv.driver_string));
4196 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4197 }
4198 
4199 /**
4200  * ice_init_fdir - Initialize flow director VSI and configuration
4201  * @pf: pointer to the PF instance
4202  *
4203  * returns 0 on success, negative on error
4204  * Returns 0 on success, negative on error
4205 static int ice_init_fdir(struct ice_pf *pf)
4206 {
4207 	struct device *dev = ice_pf_to_dev(pf);
4208 	struct ice_vsi *ctrl_vsi;
4209 	int err;
4210 
4211 	/* Side Band Flow Director needs to have a control VSI.
4212 	 * Allocate it and store it in the PF.
4213 	 */
4214 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4215 	if (!ctrl_vsi) {
4216 		dev_dbg(dev, "could not create control VSI\n");
4217 		return -ENOMEM;
4218 	}
4219 
4220 	err = ice_vsi_open_ctrl(ctrl_vsi);
4221 	if (err) {
4222 		dev_dbg(dev, "could not open control VSI\n");
4223 		goto err_vsi_open;
4224 	}
4225 
4226 	mutex_init(&pf->hw.fdir_fltr_lock);
4227 
4228 	err = ice_fdir_create_dflt_rules(pf);
4229 	if (err)
4230 		goto err_fdir_rule;
4231 
4232 	return 0;
4233 
4234 err_fdir_rule:
4235 	ice_fdir_release_flows(&pf->hw);
4236 	ice_vsi_close(ctrl_vsi);
4237 err_vsi_open:
4238 	ice_vsi_release(ctrl_vsi);
4239 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4240 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4241 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4242 	}
4243 	return err;
4244 }
4245 
4246 static void ice_deinit_fdir(struct ice_pf *pf)
4247 {
4248 	struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4249 
4250 	if (!vsi)
4251 		return;
4252 
4253 	ice_vsi_manage_fdir(vsi, false);
4254 	ice_vsi_release(vsi);
4255 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4256 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4257 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4258 	}
4259 
4260 	mutex_destroy(&pf->hw.fdir_fltr_lock);
4261 }
4262 
4263 /**
4264  * ice_get_opt_fw_name - return optional firmware file name or NULL
4265  * @pf: pointer to the PF instance
4266  */
4267 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4268 {
4269 	/* Optional firmware name is the same as the default, with an additional
4270 	 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
4271 	 */
4272 	struct pci_dev *pdev = pf->pdev;
4273 	char *opt_fw_filename;
4274 	u64 dsn;
4275 
4276 	/* Determine the name of the optional file using the DSN (two
4277 	 * dwords following the start of the DSN Capability).
4278 	 */
4279 	dsn = pci_get_dsn(pdev);
4280 	if (!dsn)
4281 		return NULL;
4282 
4283 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4284 	if (!opt_fw_filename)
4285 		return NULL;
4286 
4287 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4288 		 ICE_DDP_PKG_PATH, dsn);
4289 
4290 	return opt_fw_filename;
4291 }
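
/* Editorial sketch (not part of the driver): for a device whose DSN reads
 * 0x0123456789abcdef, the helper above produces
 *
 *	"intel/ice/ddp/ice-0123456789abcdef.pkg"
 *
 * which ice_request_fw() tries before falling back to ICE_DDP_PKG_FILE.
 */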
4292 
4293 /**
4294  * ice_request_fw - request and load the DDP package file
4295  * @pf: pointer to the PF instance
4296  */
4297 static void ice_request_fw(struct ice_pf *pf)
4298 {
4299 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4300 	const struct firmware *firmware = NULL;
4301 	struct device *dev = ice_pf_to_dev(pf);
4302 	int err = 0;
4303 
4304 	/* optional device-specific DDP (if present) overrides the default DDP
4305 	 * package file. The kernel logs a debug message if the file doesn't
4306 	 * exist, and warning messages for other errors.
4307 	 */
4308 	if (opt_fw_filename) {
4309 		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4310 		if (err) {
4311 			kfree(opt_fw_filename);
4312 			goto dflt_pkg_load;
4313 		}
4314 
4315 		/* request for firmware was successful. Download to device */
4316 		ice_load_pkg(firmware, pf);
4317 		kfree(opt_fw_filename);
4318 		release_firmware(firmware);
4319 		return;
4320 	}
4321 
4322 dflt_pkg_load:
4323 	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4324 	if (err) {
4325 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4326 		return;
4327 	}
4328 
4329 	/* request for firmware was successful. Download to device */
4330 	ice_load_pkg(firmware, pf);
4331 	release_firmware(firmware);
4332 }
4333 
4334 /**
4335  * ice_print_wake_reason - show the wake-up cause in the log
4336  * @pf: pointer to the PF struct
4337  */
4338 static void ice_print_wake_reason(struct ice_pf *pf)
4339 {
4340 	u32 wus = pf->wakeup_reason;
4341 	const char *wake_str;
4342 
4343 	/* if no wake event, nothing to print */
4344 	if (!wus)
4345 		return;
4346 
4347 	if (wus & PFPM_WUS_LNKC_M)
4348 		wake_str = "Link\n";
4349 	else if (wus & PFPM_WUS_MAG_M)
4350 		wake_str = "Magic Packet\n";
4351 	else if (wus & PFPM_WUS_MNG_M)
4352 		wake_str = "Management\n";
4353 	else if (wus & PFPM_WUS_FW_RST_WK_M)
4354 		wake_str = "Firmware Reset\n";
4355 	else
4356 		wake_str = "Unknown\n";
4357 
4358 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4359 }
4360 
4361 /**
4362  * ice_register_netdev - register netdev
4363  * @vsi: pointer to the VSI struct
4364  */
4365 static int ice_register_netdev(struct ice_vsi *vsi)
4366 {
4367 	int err;
4368 
4369 	if (!vsi || !vsi->netdev)
4370 		return -EIO;
4371 
4372 	err = register_netdev(vsi->netdev);
4373 	if (err)
4374 		return err;
4375 
4376 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4377 	netif_carrier_off(vsi->netdev);
4378 	netif_tx_stop_all_queues(vsi->netdev);
4379 
4380 	return 0;
4381 }
4382 
4383 static void ice_unregister_netdev(struct ice_vsi *vsi)
4384 {
4385 	if (!vsi || !vsi->netdev)
4386 		return;
4387 
4388 	unregister_netdev(vsi->netdev);
4389 	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4390 }
4391 
4392 /**
4393  * ice_cfg_netdev - Allocate, configure and register a netdev
4394  * @vsi: the VSI associated with the new netdev
4395  *
4396  * Returns 0 on success, negative value on failure
4397  */
4398 static int ice_cfg_netdev(struct ice_vsi *vsi)
4399 {
4400 	struct ice_netdev_priv *np;
4401 	struct net_device *netdev;
4402 	u8 mac_addr[ETH_ALEN];
4403 
4404 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4405 				    vsi->alloc_rxq);
4406 	if (!netdev)
4407 		return -ENOMEM;
4408 
4409 	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4410 	vsi->netdev = netdev;
4411 	np = netdev_priv(netdev);
4412 	np->vsi = vsi;
4413 
4414 	ice_set_netdev_features(netdev);
4415 	ice_set_ops(vsi);
4416 
4417 	if (vsi->type == ICE_VSI_PF) {
4418 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4419 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4420 		eth_hw_addr_set(netdev, mac_addr);
4421 	}
4422 
4423 	netdev->priv_flags |= IFF_UNICAST_FLT;
4424 
4425 	/* Setup netdev TC information */
4426 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4427 
4428 	netdev->max_mtu = ICE_MAX_MTU;
4429 
4430 	return 0;
4431 }
4432 
4433 static void ice_decfg_netdev(struct ice_vsi *vsi)
4434 {
4435 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4436 	free_netdev(vsi->netdev);
4437 	vsi->netdev = NULL;
4438 }
4439 
4440 static int ice_start_eth(struct ice_vsi *vsi)
4441 {
4442 	int err;
4443 
4444 	err = ice_init_mac_fltr(vsi->back);
4445 	if (err)
4446 		return err;
4447 
4448 	err = ice_vsi_open(vsi);
4449 	if (err)
4450 		ice_fltr_remove_all(vsi);
4451 
4452 	return err;
4453 }
4454 
4455 static void ice_stop_eth(struct ice_vsi *vsi)
4456 {
4457 	ice_fltr_remove_all(vsi);
4458 	ice_vsi_close(vsi);
4459 }
4460 
4461 static int ice_init_eth(struct ice_pf *pf)
4462 {
4463 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4464 	int err;
4465 
4466 	if (!vsi)
4467 		return -EINVAL;
4468 
4469 	/* init channel list */
4470 	INIT_LIST_HEAD(&vsi->ch_list);
4471 
4472 	err = ice_cfg_netdev(vsi);
4473 	if (err)
4474 		return err;
4475 	/* Setup DCB netlink interface */
4476 	ice_dcbnl_setup(vsi);
4477 
4478 	err = ice_init_mac_fltr(pf);
4479 	if (err)
4480 		goto err_init_mac_fltr;
4481 
4482 	err = ice_devlink_create_pf_port(pf);
4483 	if (err)
4484 		goto err_devlink_create_pf_port;
4485 
4486 	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
4487 
4488 	err = ice_register_netdev(vsi);
4489 	if (err)
4490 		goto err_register_netdev;
4491 
4492 	err = ice_tc_indir_block_register(vsi);
4493 	if (err)
4494 		goto err_tc_indir_block_register;
4495 
4496 	ice_napi_add(vsi);
4497 
4498 	return 0;
4499 
4500 err_tc_indir_block_register:
4501 	ice_unregister_netdev(vsi);
4502 err_register_netdev:
4503 	ice_devlink_destroy_pf_port(pf);
4504 err_devlink_create_pf_port:
4505 err_init_mac_fltr:
4506 	ice_decfg_netdev(vsi);
4507 	return err;
4508 }
4509 
4510 static void ice_deinit_eth(struct ice_pf *pf)
4511 {
4512 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4513 
4514 	if (!vsi)
4515 		return;
4516 
4517 	ice_vsi_close(vsi);
4518 	ice_unregister_netdev(vsi);
4519 	ice_devlink_destroy_pf_port(pf);
4520 	ice_tc_indir_block_unregister(vsi);
4521 	ice_decfg_netdev(vsi);
4522 }
4523 
4524 /**
4525  * ice_wait_for_fw - wait for full FW readiness
4526  * @hw: pointer to the hardware structure
4527  * @timeout: milliseconds that can elapse before timing out
4528  */
4529 static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
4530 {
4531 	int fw_loading;
4532 	u32 elapsed = 0;
4533 
4534 	while (elapsed <= timeout) {
4535 		fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
4536 
4537 		/* firmware was not yet loaded, we have to wait more */
4538 		if (fw_loading) {
4539 			elapsed += 100;
4540 			msleep(100);
4541 			continue;
4542 		}
4543 		return 0;
4544 	}
4545 
4546 	return -ETIMEDOUT;
4547 }
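
/* Editorial note: the loop above polls GL_MNG_FWSM every 100 ms, so the
 * 30000 ms timeout used for C827 devices below allows roughly 300 polls
 * before giving up:
 *
 *	err = ice_wait_for_fw(hw, 30000);	// up to ~30 s of waiting
 */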
4548 
4549 static int ice_init_dev(struct ice_pf *pf)
4550 {
4551 	struct device *dev = ice_pf_to_dev(pf);
4552 	struct ice_hw *hw = &pf->hw;
4553 	int err;
4554 
4555 	err = ice_init_hw(hw);
4556 	if (err) {
4557 		dev_err(dev, "ice_init_hw failed: %d\n", err);
4558 		return err;
4559 	}
4560 
4561 	/* Some cards require longer initialization times
4562 	 * due to the necessity of loading FW from an external source.
4563 	 * This can take up to half a minute.
4564 	 */
4565 	if (ice_is_pf_c827(hw)) {
4566 		err = ice_wait_for_fw(hw, 30000);
4567 		if (err) {
4568 			dev_err(dev, "ice_wait_for_fw timed out\n");
4569 			return err;
4570 		}
4571 	}
4572 
4573 	ice_init_feature_support(pf);
4574 
4575 	ice_request_fw(pf);
4576 
4577 	/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
4578 	 * set in pf->flags, which will cause ice_is_safe_mode to return
4579 	 * true
4580 	 */
4581 	if (ice_is_safe_mode(pf)) {
4582 		/* we already got function/device capabilities but these don't
4583 		 * reflect what the driver needs to do in safe mode. Instead of
4584 		 * adding conditional logic everywhere to ignore these
4585 		 * device/function capabilities, override them.
4586 		 */
4587 		ice_set_safe_mode_caps(hw);
4588 	}
4589 
4590 	err = ice_init_pf(pf);
4591 	if (err) {
4592 		dev_err(dev, "ice_init_pf failed: %d\n", err);
4593 		goto err_init_pf;
4594 	}
4595 
4596 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4597 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4598 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4599 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4600 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4601 		pf->hw.udp_tunnel_nic.tables[0].n_entries =
4602 			pf->hw.tnl.valid_count[TNL_VXLAN];
4603 		pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
4604 			UDP_TUNNEL_TYPE_VXLAN;
4605 	}
4606 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4607 		pf->hw.udp_tunnel_nic.tables[1].n_entries =
4608 			pf->hw.tnl.valid_count[TNL_GENEVE];
4609 		pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
4610 			UDP_TUNNEL_TYPE_GENEVE;
4611 	}
4612 
4613 	err = ice_init_interrupt_scheme(pf);
4614 	if (err) {
4615 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4616 		err = -EIO;
4617 		goto err_init_interrupt_scheme;
4618 	}
4619 
4620 	/* In case of MSIX we are going to setup the misc vector right here
4621 	 * to handle admin queue events etc. In case of legacy and MSI
4622 	 * the misc functionality and queue processing is combined in
4623 	 * the same vector and that gets setup at open.
4624 	 */
4625 	err = ice_req_irq_msix_misc(pf);
4626 	if (err) {
4627 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4628 		goto err_req_irq_msix_misc;
4629 	}
4630 
4631 	return 0;
4632 
4633 err_req_irq_msix_misc:
4634 	ice_clear_interrupt_scheme(pf);
4635 err_init_interrupt_scheme:
4636 	ice_deinit_pf(pf);
4637 err_init_pf:
4638 	ice_deinit_hw(hw);
4639 	return err;
4640 }
4641 
4642 static void ice_deinit_dev(struct ice_pf *pf)
4643 {
4644 	ice_free_irq_msix_misc(pf);
4645 	ice_deinit_pf(pf);
4646 	ice_deinit_hw(&pf->hw);
4647 
4648 	/* Service task is already stopped, so call reset directly. */
4649 	ice_reset(&pf->hw, ICE_RESET_PFR);
4650 	pci_wait_for_pending_transaction(pf->pdev);
4651 	ice_clear_interrupt_scheme(pf);
4652 }
4653 
4654 static void ice_init_features(struct ice_pf *pf)
4655 {
4656 	struct device *dev = ice_pf_to_dev(pf);
4657 
4658 	if (ice_is_safe_mode(pf))
4659 		return;
4660 
4661 	/* initialize DDP driven features */
4662 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4663 		ice_ptp_init(pf);
4664 
4665 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4666 		ice_gnss_init(pf);
4667 
4668 	if (ice_is_feature_supported(pf, ICE_F_CGU) ||
4669 	    ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
4670 		ice_dpll_init(pf);
4671 
4672 	/* Note: Flow director init failure is non-fatal to load */
4673 	if (ice_init_fdir(pf))
4674 		dev_err(dev, "could not initialize flow director\n");
4675 
4676 	/* Note: DCB init failure is non-fatal to load */
4677 	if (ice_init_pf_dcb(pf, false)) {
4678 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4679 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4680 	} else {
4681 		ice_cfg_lldp_mib_change(&pf->hw, true);
4682 	}
4683 
4684 	if (ice_init_lag(pf))
4685 		dev_warn(dev, "Failed to init link aggregation support\n");
4686 }
4687 
4688 static void ice_deinit_features(struct ice_pf *pf)
4689 {
4690 	ice_deinit_lag(pf);
4691 	if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4692 		ice_cfg_lldp_mib_change(&pf->hw, false);
4693 	ice_deinit_fdir(pf);
4694 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4695 		ice_gnss_exit(pf);
4696 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4697 		ice_ptp_release(pf);
4698 	if (test_bit(ICE_FLAG_DPLL, pf->flags))
4699 		ice_dpll_deinit(pf);
4700 }
4701 
4702 static void ice_init_wakeup(struct ice_pf *pf)
4703 {
4704 	/* Save wakeup reason register for later use */
4705 	pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4706 
4707 	/* check for a power management event */
4708 	ice_print_wake_reason(pf);
4709 
4710 	/* clear wake status, all bits */
4711 	wr32(&pf->hw, PFPM_WUS, U32_MAX);
4712 
4713 	/* Disable WoL at init, wait for user to enable */
4714 	device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4715 }
4716 
4717 static int ice_init_link(struct ice_pf *pf)
4718 {
4719 	struct device *dev = ice_pf_to_dev(pf);
4720 	int err;
4721 
4722 	err = ice_init_link_events(pf->hw.port_info);
4723 	if (err) {
4724 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4725 		return err;
4726 	}
4727 
4728 	/* not a fatal error if this fails */
4729 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4730 	if (err)
4731 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4732 
4733 	/* not a fatal error if this fails */
4734 	err = ice_update_link_info(pf->hw.port_info);
4735 	if (err)
4736 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4737 
4738 	ice_init_link_dflt_override(pf->hw.port_info);
4739 
4740 	ice_check_link_cfg_err(pf,
4741 			       pf->hw.port_info->phy.link_info.link_cfg_err);
4742 
4743 	/* if media available, initialize PHY settings */
4744 	if (pf->hw.port_info->phy.link_info.link_info &
4745 	    ICE_AQ_MEDIA_AVAILABLE) {
4746 		/* not a fatal error if this fails */
4747 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4748 		if (err)
4749 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4750 
4751 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4752 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4753 
4754 			if (vsi)
4755 				ice_configure_phy(vsi);
4756 		}
4757 	} else {
4758 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4759 	}
4760 
4761 	return err;
4762 }
4763 
4764 static int ice_init_pf_sw(struct ice_pf *pf)
4765 {
4766 	bool dvm = ice_is_dvm_ena(&pf->hw);
4767 	struct ice_vsi *vsi;
4768 	int err;
4769 
4770 	/* create switch struct for the switch element created by FW on boot */
4771 	pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
4772 	if (!pf->first_sw)
4773 		return -ENOMEM;
4774 
4775 	if (pf->hw.evb_veb)
4776 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4777 	else
4778 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4779 
4780 	pf->first_sw->pf = pf;
4781 
4782 	/* record the sw_id available for later use */
4783 	pf->first_sw->sw_id = pf->hw.port_info->sw_id;
4784 
4785 	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
4786 	if (err)
4787 		goto err_aq_set_port_params;
4788 
4789 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
4790 	if (!vsi) {
4791 		err = -ENOMEM;
4792 		goto err_pf_vsi_setup;
4793 	}
4794 
4795 	return 0;
4796 
4797 err_pf_vsi_setup:
4798 err_aq_set_port_params:
4799 	kfree(pf->first_sw);
4800 	return err;
4801 }
4802 
4803 static void ice_deinit_pf_sw(struct ice_pf *pf)
4804 {
4805 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4806 
4807 	if (!vsi)
4808 		return;
4809 
4810 	ice_vsi_release(vsi);
4811 	kfree(pf->first_sw);
4812 }
4813 
4814 static int ice_alloc_vsis(struct ice_pf *pf)
4815 {
4816 	struct device *dev = ice_pf_to_dev(pf);
4817 
4818 	pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
4819 	if (!pf->num_alloc_vsi)
4820 		return -EIO;
4821 
4822 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4823 		dev_warn(dev,
4824 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4825 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4826 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4827 	}
4828 
4829 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4830 			       GFP_KERNEL);
4831 	if (!pf->vsi)
4832 		return -ENOMEM;
4833 
4834 	pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
4835 				     sizeof(*pf->vsi_stats), GFP_KERNEL);
4836 	if (!pf->vsi_stats) {
4837 		devm_kfree(dev, pf->vsi);
4838 		return -ENOMEM;
4839 	}
4840 
4841 	return 0;
4842 }
4843 
4844 static void ice_dealloc_vsis(struct ice_pf *pf)
4845 {
4846 	devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
4847 	pf->vsi_stats = NULL;
4848 
4849 	pf->num_alloc_vsi = 0;
4850 	devm_kfree(ice_pf_to_dev(pf), pf->vsi);
4851 	pf->vsi = NULL;
4852 }
4853 
4854 static int ice_init_devlink(struct ice_pf *pf)
4855 {
4856 	int err;
4857 
4858 	err = ice_devlink_register_params(pf);
4859 	if (err)
4860 		return err;
4861 
4862 	ice_devlink_init_regions(pf);
4863 	ice_devlink_register(pf);
4864 
4865 	return 0;
4866 }
4867 
4868 static void ice_deinit_devlink(struct ice_pf *pf)
4869 {
4870 	ice_devlink_unregister(pf);
4871 	ice_devlink_destroy_regions(pf);
4872 	ice_devlink_unregister_params(pf);
4873 }
4874 
4875 static int ice_init(struct ice_pf *pf)
4876 {
4877 	int err;
4878 
4879 	err = ice_init_dev(pf);
4880 	if (err)
4881 		return err;
4882 
4883 	err = ice_alloc_vsis(pf);
4884 	if (err)
4885 		goto err_alloc_vsis;
4886 
4887 	err = ice_init_pf_sw(pf);
4888 	if (err)
4889 		goto err_init_pf_sw;
4890 
4891 	ice_init_wakeup(pf);
4892 
4893 	err = ice_init_link(pf);
4894 	if (err)
4895 		goto err_init_link;
4896 
4897 	err = ice_send_version(pf);
4898 	if (err)
4899 		goto err_init_link;
4900 
4901 	ice_verify_cacheline_size(pf);
4902 
4903 	if (ice_is_safe_mode(pf))
4904 		ice_set_safe_mode_vlan_cfg(pf);
4905 	else
4906 		/* print PCI link speed and width */
4907 		pcie_print_link_status(pf->pdev);
4908 
4909 	/* ready to go, so clear down state bit */
4910 	clear_bit(ICE_DOWN, pf->state);
4911 	clear_bit(ICE_SERVICE_DIS, pf->state);
4912 
4913 	/* since everything is good, start the service timer */
4914 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4915 
4916 	return 0;
4917 
4918 err_init_link:
4919 	ice_deinit_pf_sw(pf);
4920 err_init_pf_sw:
4921 	ice_dealloc_vsis(pf);
4922 err_alloc_vsis:
4923 	ice_deinit_dev(pf);
4924 	return err;
4925 }
4926 
4927 static void ice_deinit(struct ice_pf *pf)
4928 {
4929 	set_bit(ICE_SERVICE_DIS, pf->state);
4930 	set_bit(ICE_DOWN, pf->state);
4931 
4932 	ice_deinit_pf_sw(pf);
4933 	ice_dealloc_vsis(pf);
4934 	ice_deinit_dev(pf);
4935 }
4936 
4937 /**
4938  * ice_load - load PF by initializing HW and starting VSI
4939  * @pf: pointer to the pf instance
4940  */
4941 int ice_load(struct ice_pf *pf)
4942 {
4943 	struct ice_vsi_cfg_params params = {};
4944 	struct ice_vsi *vsi;
4945 	int err;
4946 
4947 	err = ice_init_dev(pf);
4948 	if (err)
4949 		return err;
4950 
4951 	vsi = ice_get_main_vsi(pf);
4952 
4953 	params = ice_vsi_to_params(vsi);
4954 	params.flags = ICE_VSI_FLAG_INIT;
4955 
4956 	rtnl_lock();
4957 	err = ice_vsi_cfg(vsi, &params);
4958 	if (err)
4959 		goto err_vsi_cfg;
4960 
4961 	err = ice_start_eth(ice_get_main_vsi(pf));
4962 	if (err)
4963 		goto err_start_eth;
4964 	rtnl_unlock();
4965 
4966 	err = ice_init_rdma(pf);
4967 	if (err)
4968 		goto err_init_rdma;
4969 
4970 	ice_init_features(pf);
4971 	ice_service_task_restart(pf);
4972 
4973 	clear_bit(ICE_DOWN, pf->state);
4974 
4975 	return 0;
4976 
4977 err_init_rdma:
4978 	ice_vsi_close(ice_get_main_vsi(pf));
4979 	rtnl_lock();
4980 err_start_eth:
4981 	ice_vsi_decfg(ice_get_main_vsi(pf));
4982 err_vsi_cfg:
4983 	rtnl_unlock();
4984 	ice_deinit_dev(pf);
4985 	return err;
4986 }
4987 
4988 /**
4989  * ice_unload - unload PF by stopping VSI and deinitializing HW
4990  * @pf: pointer to the pf instance
4991  */
4992 void ice_unload(struct ice_pf *pf)
4993 {
4994 	ice_deinit_features(pf);
4995 	ice_deinit_rdma(pf);
4996 	rtnl_lock();
4997 	ice_stop_eth(ice_get_main_vsi(pf));
4998 	ice_vsi_decfg(ice_get_main_vsi(pf));
4999 	rtnl_unlock();
5000 	ice_deinit_dev(pf);
5001 }
5002 
5003 /**
5004  * ice_probe - Device initialization routine
5005  * @pdev: PCI device information struct
5006  * @ent: entry in ice_pci_tbl
5007  *
5008  * Returns 0 on success, negative on failure
5009  */
5010 static int
5011 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
5012 {
5013 	struct device *dev = &pdev->dev;
5014 	struct ice_pf *pf;
5015 	struct ice_hw *hw;
5016 	int err;
5017 
5018 	if (pdev->is_virtfn) {
5019 		dev_err(dev, "can't probe a virtual function\n");
5020 		return -EINVAL;
5021 	}
5022 
5023 	/* this driver uses devres, see
5024 	 * Documentation/driver-api/driver-model/devres.rst
5025 	 */
5026 	err = pcim_enable_device(pdev);
5027 	if (err)
5028 		return err;
5029 
5030 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
5031 	if (err) {
5032 		dev_err(dev, "BAR0 I/O map error %d\n", err);
5033 		return err;
5034 	}
5035 
5036 	pf = ice_allocate_pf(dev);
5037 	if (!pf)
5038 		return -ENOMEM;
5039 
5040 	/* initialize Auxiliary index to invalid value */
5041 	pf->aux_idx = -1;
5042 
5043 	/* set up for high or low DMA */
5044 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
5045 	if (err) {
5046 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
5047 		return err;
5048 	}
5049 
5050 	pci_set_master(pdev);
5051 
5052 	pf->pdev = pdev;
5053 	pci_set_drvdata(pdev, pf);
5054 	set_bit(ICE_DOWN, pf->state);
5055 	/* Disable service task until DOWN bit is cleared */
5056 	set_bit(ICE_SERVICE_DIS, pf->state);
5057 
5058 	hw = &pf->hw;
5059 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
5060 	pci_save_state(pdev);
5061 
5062 	hw->back = pf;
5063 	hw->port_info = NULL;
5064 	hw->vendor_id = pdev->vendor;
5065 	hw->device_id = pdev->device;
5066 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
5067 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
5068 	hw->subsystem_device_id = pdev->subsystem_device;
5069 	hw->bus.device = PCI_SLOT(pdev->devfn);
5070 	hw->bus.func = PCI_FUNC(pdev->devfn);
5071 	ice_set_ctrlq_len(hw);
5072 
5073 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
5074 
5075 #ifndef CONFIG_DYNAMIC_DEBUG
5076 	if (debug < -1)
5077 		hw->debug_mask = debug;
5078 #endif
5079 
5080 	err = ice_init(pf);
5081 	if (err)
5082 		goto err_init;
5083 
5084 	err = ice_init_eth(pf);
5085 	if (err)
5086 		goto err_init_eth;
5087 
5088 	err = ice_init_rdma(pf);
5089 	if (err)
5090 		goto err_init_rdma;
5091 
5092 	err = ice_init_devlink(pf);
5093 	if (err)
5094 		goto err_init_devlink;
5095 
5096 	ice_init_features(pf);
5097 
5098 	return 0;
5099 
5100 err_init_devlink:
5101 	ice_deinit_rdma(pf);
5102 err_init_rdma:
5103 	ice_deinit_eth(pf);
5104 err_init_eth:
5105 	ice_deinit(pf);
5106 err_init:
5107 	pci_disable_device(pdev);
5108 	return err;
5109 }
5110 
5111 /**
5112  * ice_set_wake - enable or disable Wake on LAN
5113  * @pf: pointer to the PF struct
5114  *
5115  * Simple helper for WoL control
5116  */
5117 static void ice_set_wake(struct ice_pf *pf)
5118 {
5119 	struct ice_hw *hw = &pf->hw;
5120 	bool wol = pf->wol_ena;
5121 
5122 	/* clear wake state, otherwise new wake events won't fire */
5123 	wr32(hw, PFPM_WUS, U32_MAX);
5124 
5125 	/* enable / disable APM wake up, no RMW needed */
5126 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5127 
5128 	/* set magic packet filter enabled */
5129 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5130 }
5131 
5132 /**
5133  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5134  * @pf: pointer to the PF struct
5135  *
5136  * Issue firmware command to enable multicast magic wake, making
5137  * sure that any locally administered address (LAA) is used for
5138  * wake, and that PF reset doesn't undo the LAA.
5139  */
5140 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5141 {
5142 	struct device *dev = ice_pf_to_dev(pf);
5143 	struct ice_hw *hw = &pf->hw;
5144 	u8 mac_addr[ETH_ALEN];
5145 	struct ice_vsi *vsi;
5146 	int status;
5147 	u8 flags;
5148 
5149 	if (!pf->wol_ena)
5150 		return;
5151 
5152 	vsi = ice_get_main_vsi(pf);
5153 	if (!vsi)
5154 		return;
5155 
5156 	/* Get current MAC address in case it's an LAA */
5157 	if (vsi->netdev)
5158 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5159 	else
5160 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5161 
5162 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5163 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5164 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5165 
5166 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5167 	if (status)
5168 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5169 			status, ice_aq_str(hw->adminq.sq_last_status));
5170 }
5171 
5172 /**
5173  * ice_remove - Device removal routine
5174  * @pdev: PCI device information struct
5175  */
5176 static void ice_remove(struct pci_dev *pdev)
5177 {
5178 	struct ice_pf *pf = pci_get_drvdata(pdev);
5179 	int i;
5180 
5181 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5182 		if (!ice_is_reset_in_progress(pf->state))
5183 			break;
5184 		msleep(100);
5185 	}
5186 
5187 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5188 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5189 		ice_free_vfs(pf);
5190 	}
5191 
5192 	ice_service_task_stop(pf);
5193 	ice_aq_cancel_waiting_tasks(pf);
5194 	set_bit(ICE_DOWN, pf->state);
5195 
5196 	if (!ice_is_safe_mode(pf))
5197 		ice_remove_arfs(pf);
5198 	ice_deinit_features(pf);
5199 	ice_deinit_devlink(pf);
5200 	ice_deinit_rdma(pf);
5201 	ice_deinit_eth(pf);
5202 	ice_deinit(pf);
5203 
5204 	ice_vsi_release_all(pf);
5205 
5206 	ice_setup_mc_magic_wake(pf);
5207 	ice_set_wake(pf);
5208 
5209 	pci_disable_device(pdev);
5210 }
5211 
5212 /**
5213  * ice_shutdown - PCI callback for shutting down device
5214  * @pdev: PCI device information struct
5215  */
5216 static void ice_shutdown(struct pci_dev *pdev)
5217 {
5218 	struct ice_pf *pf = pci_get_drvdata(pdev);
5219 
5220 	ice_remove(pdev);
5221 
5222 	if (system_state == SYSTEM_POWER_OFF) {
5223 		pci_wake_from_d3(pdev, pf->wol_ena);
5224 		pci_set_power_state(pdev, PCI_D3hot);
5225 	}
5226 }
5227 
5228 #ifdef CONFIG_PM
5229 /**
5230  * ice_prepare_for_shutdown - prep for PCI shutdown
5231  * @pf: board private structure
5232  *
5233  * Inform or close all dependent features in prep for PCI device shutdown
5234  */
5235 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5236 {
5237 	struct ice_hw *hw = &pf->hw;
5238 	u32 v;
5239 
5240 	/* Notify VFs of impending reset */
5241 	if (ice_check_sq_alive(hw, &hw->mailboxq))
5242 		ice_vc_notify_reset(pf);
5243 
5244 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5245 
5246 	/* disable the VSIs and their queues that are not already DOWN */
5247 	ice_pf_dis_all_vsi(pf, false);
5248 
5249 	ice_for_each_vsi(pf, v)
5250 		if (pf->vsi[v])
5251 			pf->vsi[v]->vsi_num = 0;
5252 
5253 	ice_shutdown_all_ctrlq(hw);
5254 }
5255 
5256 /**
5257  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5258  * @pf: board private structure to reinitialize
5259  *
5260  * This routine reinitializes the interrupt scheme that was cleared during
5261  * the power management suspend callback.
5262  *
5263  * This should be called during resume routine to re-allocate the q_vectors
5264  * and reacquire interrupts.
5265  */
5266 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5267 {
5268 	struct device *dev = ice_pf_to_dev(pf);
5269 	int ret, v;
5270 
5271 	/* Since we clear the MSI-X flag during suspend, we need to
5272 	 * set it back during resume...
5273 	 */
5274 
5275 	ret = ice_init_interrupt_scheme(pf);
5276 	if (ret) {
5277 		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5278 		return ret;
5279 	}
5280 
5281 	/* Remap vectors and rings after successfully re-initializing interrupts */
5282 	ice_for_each_vsi(pf, v) {
5283 		if (!pf->vsi[v])
5284 			continue;
5285 
5286 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5287 		if (ret)
5288 			goto err_reinit;
5289 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5290 	}
5291 
5292 	ret = ice_req_irq_msix_misc(pf);
5293 	if (ret) {
5294 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5295 			ret);
5296 		goto err_reinit;
5297 	}
5298 
5299 	return 0;
5300 
5301 err_reinit:
5302 	while (v--)
5303 		if (pf->vsi[v])
5304 			ice_vsi_free_q_vectors(pf->vsi[v]);
5305 
5306 	return ret;
5307 }
5308 
5309 /**
5310  * ice_suspend
5311  * ice_suspend - PM callback for suspending the device
5312  *
5313  * Power Management callback to quiesce the device and prepare
5314  * for D3 transition.
5315  */
5316 static int __maybe_unused ice_suspend(struct device *dev)
5317 {
5318 	struct pci_dev *pdev = to_pci_dev(dev);
5319 	struct ice_pf *pf;
5320 	int disabled, v;
5321 
5322 	pf = pci_get_drvdata(pdev);
5323 
5324 	if (!ice_pf_state_is_nominal(pf)) {
5325 		dev_err(dev, "Device is not ready, no need to suspend it\n");
5326 		return -EBUSY;
5327 	}
5328 
5329 	/* Stop watchdog tasks until resume completion.
5330 	 * Even though it is most likely that the service task is
5331 	 * disabled if the device is suspended or down, the service task's
5332 	 * state is controlled by a different state bit, and we should
5333 	 * store and honor whatever state that bit is in at this point.
5334 	 */
5335 	disabled = ice_service_task_stop(pf);
5336 
5337 	ice_unplug_aux_dev(pf);
5338 
5339 	/* Already suspended? Then there is nothing to do */
5340 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5341 		if (!disabled)
5342 			ice_service_task_restart(pf);
5343 		return 0;
5344 	}
5345 
5346 	if (test_bit(ICE_DOWN, pf->state) ||
5347 	    ice_is_reset_in_progress(pf->state)) {
5348 		dev_err(dev, "can't suspend device in reset or already down\n");
5349 		if (!disabled)
5350 			ice_service_task_restart(pf);
5351 		return 0;
5352 	}
5353 
5354 	ice_setup_mc_magic_wake(pf);
5355 
5356 	ice_prepare_for_shutdown(pf);
5357 
5358 	ice_set_wake(pf);
5359 
5360 	/* Free vectors, clear the interrupt scheme and release IRQs
5361 	 * for proper hibernation, especially with large number of CPUs.
5362 	 * Otherwise hibernation might fail when mapping all the vectors back
5363 	 * to CPU0.
5364 	 */
5365 	ice_free_irq_msix_misc(pf);
5366 	ice_for_each_vsi(pf, v) {
5367 		if (!pf->vsi[v])
5368 			continue;
5369 		ice_vsi_free_q_vectors(pf->vsi[v]);
5370 	}
5371 	ice_clear_interrupt_scheme(pf);
5372 
5373 	pci_save_state(pdev);
5374 	pci_wake_from_d3(pdev, pf->wol_ena);
5375 	pci_set_power_state(pdev, PCI_D3hot);
5376 	return 0;
5377 }
5378 
5379 /**
5380  * ice_resume - PM callback for waking up from D3
5381  * @dev: generic device information structure
5382  */
5383 static int __maybe_unused ice_resume(struct device *dev)
5384 {
5385 	struct pci_dev *pdev = to_pci_dev(dev);
5386 	enum ice_reset_req reset_type;
5387 	struct ice_pf *pf;
5388 	struct ice_hw *hw;
5389 	int ret;
5390 
5391 	pci_set_power_state(pdev, PCI_D0);
5392 	pci_restore_state(pdev);
5393 	pci_save_state(pdev);
5394 
5395 	if (!pci_device_is_present(pdev))
5396 		return -ENODEV;
5397 
5398 	ret = pci_enable_device_mem(pdev);
5399 	if (ret) {
5400 		dev_err(dev, "Cannot enable device after suspend\n");
5401 		return ret;
5402 	}
5403 
5404 	pf = pci_get_drvdata(pdev);
5405 	hw = &pf->hw;
5406 
5407 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
5408 	ice_print_wake_reason(pf);
5409 
5410 	/* We cleared the interrupt scheme when we suspended, so we need to
5411 	 * restore it now to resume device functionality.
5412 	 */
5413 	ret = ice_reinit_interrupt_scheme(pf);
5414 	if (ret)
5415 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5416 
5417 	clear_bit(ICE_DOWN, pf->state);
5418 	/* Now perform PF reset and rebuild */
5419 	reset_type = ICE_RESET_PFR;
5420 	/* re-enable service task for reset, but allow reset to schedule it */
5421 	clear_bit(ICE_SERVICE_DIS, pf->state);
5422 
5423 	if (ice_schedule_reset(pf, reset_type))
5424 		dev_err(dev, "Reset during resume failed.\n");
5425 
5426 	clear_bit(ICE_SUSPENDED, pf->state);
5427 	ice_service_task_restart(pf);
5428 
5429 	/* Restart the service task */
5430 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5431 
5432 	return 0;
5433 }
5434 #endif /* CONFIG_PM */
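
/* One hedged way to exercise the ice_suspend()/ice_resume() pair above from
 * userspace is a suspend-to-RAM cycle (the interface name "eth0" below is an
 * assumption, not part of the driver):
 *
 *	# make sure the interface is up and in a nominal state first
 *	ip link show eth0
 *	# enter S3 and wake again after 10 seconds
 *	rtcwake -m mem -s 10
 *
 * On wake, ice_resume() reads PFPM_WUS and ice_print_wake_reason() logs why
 * the device woke.
 */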
5435 
5436 /**
5437  * ice_pci_err_detected - warning that PCI error has been detected
5438  * @pdev: PCI device information struct
5439  * @err: the type of PCI error
5440  *
5441  * Called to warn that something happened on the PCI bus and the error handling
5442  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
5443  */
5444 static pci_ers_result_t
5445 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5446 {
5447 	struct ice_pf *pf = pci_get_drvdata(pdev);
5448 
5449 	if (!pf) {
5450 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5451 			__func__, err);
5452 		return PCI_ERS_RESULT_DISCONNECT;
5453 	}
5454 
5455 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5456 		ice_service_task_stop(pf);
5457 
5458 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5459 			set_bit(ICE_PFR_REQ, pf->state);
5460 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5461 		}
5462 	}
5463 
5464 	return PCI_ERS_RESULT_NEED_RESET;
5465 }
5466 
5467 /**
5468  * ice_pci_err_slot_reset - a PCI slot reset has just happened
5469  * @pdev: PCI device information struct
5470  *
5471  * Called after a PCI slot reset has occurred; uses a register read to
5472  * determine whether the device is recoverable.
5473  */
5474 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5475 {
5476 	struct ice_pf *pf = pci_get_drvdata(pdev);
5477 	pci_ers_result_t result;
5478 	int err;
5479 	u32 reg;
5480 
5481 	err = pci_enable_device_mem(pdev);
5482 	if (err) {
5483 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5484 			err);
5485 		result = PCI_ERS_RESULT_DISCONNECT;
5486 	} else {
5487 		pci_set_master(pdev);
5488 		pci_restore_state(pdev);
5489 		pci_save_state(pdev);
5490 		pci_wake_from_d3(pdev, false);
5491 
5492 		/* Check for life */
5493 		reg = rd32(&pf->hw, GLGEN_RTRIG);
5494 		if (!reg)
5495 			result = PCI_ERS_RESULT_RECOVERED;
5496 		else
5497 			result = PCI_ERS_RESULT_DISCONNECT;
5498 	}
5499 
5500 	return result;
5501 }
5502 
5503 /**
5504  * ice_pci_err_resume - restart operations after PCI error recovery
5505  * @pdev: PCI device information struct
5506  *
5507  * Called to allow the driver to bring things back up after PCI error and/or
5508  * reset recovery have finished
5509  */
5510 static void ice_pci_err_resume(struct pci_dev *pdev)
5511 {
5512 	struct ice_pf *pf = pci_get_drvdata(pdev);
5513 
5514 	if (!pf) {
5515 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5516 			__func__);
5517 		return;
5518 	}
5519 
5520 	if (test_bit(ICE_SUSPENDED, pf->state)) {
5521 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5522 			__func__);
5523 		return;
5524 	}
5525 
5526 	ice_restore_all_vfs_msi_state(pdev);
5527 
5528 	ice_do_reset(pf, ICE_RESET_PFR);
5529 	ice_service_task_restart(pf);
5530 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5531 }
5532 
5533 /**
5534  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5535  * @pdev: PCI device information struct
5536  */
5537 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5538 {
5539 	struct ice_pf *pf = pci_get_drvdata(pdev);
5540 
5541 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5542 		ice_service_task_stop(pf);
5543 
5544 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5545 			set_bit(ICE_PFR_REQ, pf->state);
5546 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5547 		}
5548 	}
5549 }
5550 
5551 /**
5552  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5553  * @pdev: PCI device information struct
5554  */
5555 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5556 {
5557 	ice_pci_err_resume(pdev);
5558 }
5559 
5560 /* ice_pci_tbl - PCI Device ID Table
5561  *
5562  * Wildcard entries (PCI_ANY_ID) should come last
5563  * Last entry must be all 0s
5564  *
5565  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5566  *   Class, Class Mask, private data (not used) }
5567  */
5568 static const struct pci_device_id ice_pci_tbl[] = {
5569 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5570 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5571 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5572 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5573 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5574 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5575 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5576 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5577 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5578 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5579 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5580 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5581 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5582 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5583 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5584 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5585 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5586 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5587 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5588 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5589 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5590 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5591 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5592 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5593 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5594 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
5595 	/* required last entry */
5596 	{ 0, }
5597 };
5598 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5599 
5600 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5601 
5602 static const struct pci_error_handlers ice_pci_err_handler = {
5603 	.error_detected = ice_pci_err_detected,
5604 	.slot_reset = ice_pci_err_slot_reset,
5605 	.reset_prepare = ice_pci_err_reset_prepare,
5606 	.reset_done = ice_pci_err_reset_done,
5607 	.resume = ice_pci_err_resume
5608 };
5609 
5610 static struct pci_driver ice_driver = {
5611 	.name = KBUILD_MODNAME,
5612 	.id_table = ice_pci_tbl,
5613 	.probe = ice_probe,
5614 	.remove = ice_remove,
5615 #ifdef CONFIG_PM
5616 	.driver.pm = &ice_pm_ops,
5617 #endif /* CONFIG_PM */
5618 	.shutdown = ice_shutdown,
5619 	.sriov_configure = ice_sriov_configure,
5620 	.err_handler = &ice_pci_err_handler
5621 };
5622 
5623 /**
5624  * ice_module_init - Driver registration routine
5625  *
5626  * ice_module_init is the first routine called when the driver is
5627  * loaded. It allocates the driver workqueues and registers with the PCI subsystem.
5628  */
5629 static int __init ice_module_init(void)
5630 {
5631 	int status = -ENOMEM;
5632 
5633 	pr_info("%s\n", ice_driver_string);
5634 	pr_info("%s\n", ice_copyright);
5635 
5636 	ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
5637 	if (!ice_wq) {
5638 		pr_err("Failed to create workqueue\n");
5639 		return status;
5640 	}
5641 
5642 	ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
5643 	if (!ice_lag_wq) {
5644 		pr_err("Failed to create LAG workqueue\n");
5645 		goto err_dest_wq;
5646 	}
5647 
5648 	status = pci_register_driver(&ice_driver);
5649 	if (status) {
5650 		pr_err("failed to register PCI driver, err %d\n", status);
5651 		goto err_dest_lag_wq;
5652 	}
5653 
5654 	return 0;
5655 
5656 err_dest_lag_wq:
5657 	destroy_workqueue(ice_lag_wq);
5658 err_dest_wq:
5659 	destroy_workqueue(ice_wq);
5660 	return status;
5661 }
5662 module_init(ice_module_init);
5663 
5664 /**
5665  * ice_module_exit - Driver exit cleanup routine
5666  *
5667  * ice_module_exit is called just before the driver is removed
5668  * from memory.
5669  */
5670 static void __exit ice_module_exit(void)
5671 {
5672 	pci_unregister_driver(&ice_driver);
5673 	destroy_workqueue(ice_wq);
5674 	destroy_workqueue(ice_lag_wq);
5675 	pr_info("module unloaded\n");
5676 }
5677 module_exit(ice_module_exit);
5678 
5679 /**
5680  * ice_set_mac_address - NDO callback to set MAC address
5681  * @netdev: network interface device structure
5682  * @pi: pointer to an address structure
5683  *
5684  * Returns 0 on success, negative on failure
5685  */
5686 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5687 {
5688 	struct ice_netdev_priv *np = netdev_priv(netdev);
5689 	struct ice_vsi *vsi = np->vsi;
5690 	struct ice_pf *pf = vsi->back;
5691 	struct ice_hw *hw = &pf->hw;
5692 	struct sockaddr *addr = pi;
5693 	u8 old_mac[ETH_ALEN];
5694 	u8 flags = 0;
5695 	u8 *mac;
5696 	int err;
5697 
5698 	mac = (u8 *)addr->sa_data;
5699 
5700 	if (!is_valid_ether_addr(mac))
5701 		return -EADDRNOTAVAIL;
5702 
5703 	if (test_bit(ICE_DOWN, pf->state) ||
5704 	    ice_is_reset_in_progress(pf->state)) {
5705 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
5706 			   mac);
5707 		return -EBUSY;
5708 	}
5709 
5710 	if (ice_chnl_dmac_fltr_cnt(pf)) {
5711 		netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5712 			   mac);
5713 		return -EAGAIN;
5714 	}
5715 
5716 	netif_addr_lock_bh(netdev);
5717 	ether_addr_copy(old_mac, netdev->dev_addr);
5718 	/* change the netdev's MAC address */
5719 	eth_hw_addr_set(netdev, mac);
5720 	netif_addr_unlock_bh(netdev);
5721 
5722 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
5723 	err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5724 	if (err && err != -ENOENT) {
5725 		err = -EADDRNOTAVAIL;
5726 		goto err_update_filters;
5727 	}
5728 
5729 	/* Add filter for new MAC. If filter exists, return success */
5730 	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5731 	if (err == -EEXIST) {
5732 		/* Although this MAC filter is already present in hardware it's
5733 		 * possible in some cases (e.g. bonding) that dev_addr was
5734 		 * modified outside of the driver and needs to be restored back
5735 		 * to this value.
5736 		 */
5737 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5738 
5739 		return 0;
5740 	} else if (err) {
5741 		/* error if the new filter addition failed */
5742 		err = -EADDRNOTAVAIL;
5743 	}
5744 
5745 err_update_filters:
5746 	if (err) {
5747 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5748 			   mac);
5749 		netif_addr_lock_bh(netdev);
5750 		eth_hw_addr_set(netdev, old_mac);
5751 		netif_addr_unlock_bh(netdev);
5752 		return err;
5753 	}
5754 
5755 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5756 		   netdev->dev_addr);
5757 
5758 	/* write new MAC address to the firmware */
5759 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5760 	err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5761 	if (err) {
5762 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
5763 			   mac, err);
5764 	}
5765 	return 0;
5766 }
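
/* ice_set_mac_address() above is reached through the ndo_set_mac_address
 * hook, e.g. from iproute2 (a hedged usage sketch; the interface name and
 * address are assumptions):
 *
 *	ip link set dev eth0 address 00:11:22:33:44:55
 *
 * The old unicast filter is removed, a filter for the new address is added,
 * and the new (locally administered) address is written to firmware with the
 * WoL-preserving LAA flag.
 */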
5767 
5768 /**
5769  * ice_set_rx_mode - NDO callback to set the netdev filters
5770  * @netdev: network interface device structure
5771  */
5772 static void ice_set_rx_mode(struct net_device *netdev)
5773 {
5774 	struct ice_netdev_priv *np = netdev_priv(netdev);
5775 	struct ice_vsi *vsi = np->vsi;
5776 
5777 	if (!vsi || ice_is_switchdev_running(vsi->back))
5778 		return;
5779 
5780 	/* Set the flags to synchronize filters
5781 	 * ndo_set_rx_mode may be triggered even without a change in netdev
5782 	 * flags
5783 	 */
5784 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5785 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5786 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5787 
5788 	/* schedule our worker thread which will take care of
5789 	 * applying the new filter changes
5790 	 */
5791 	ice_service_task_schedule(vsi->back);
5792 }
5793 
5794 /**
5795  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5796  * @netdev: network interface device structure
5797  * @queue_index: Queue ID
5798  * @maxrate: maximum bandwidth in Mbps
5799  */
5800 static int
5801 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5802 {
5803 	struct ice_netdev_priv *np = netdev_priv(netdev);
5804 	struct ice_vsi *vsi = np->vsi;
5805 	u16 q_handle;
5806 	int status;
5807 	u8 tc;
5808 
5809 	/* Validate maxrate requested is within permitted range */
5810 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5811 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5812 			   maxrate, queue_index);
5813 		return -EINVAL;
5814 	}
5815 
5816 	q_handle = vsi->tx_rings[queue_index]->q_handle;
5817 	tc = ice_dcb_get_tc(vsi, queue_index);
5818 
5819 	vsi = ice_locate_vsi_using_queue(vsi, queue_index);
5820 	if (!vsi) {
5821 		netdev_err(netdev, "Invalid VSI for given queue %d\n",
5822 			   queue_index);
5823 		return -EINVAL;
5824 	}
5825 
5826 	/* Set BW back to default when the user sets maxrate to 0 */
5827 	if (!maxrate)
5828 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5829 					       q_handle, ICE_MAX_BW);
5830 	else
5831 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5832 					  q_handle, ICE_MAX_BW, maxrate * 1000);
5833 	if (status)
5834 		netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
5835 			   status);
5836 
5837 	return status;
5838 }
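
/* ice_set_tx_maxrate() above is the ndo_set_tx_maxrate hook, driven by the
 * per-queue sysfs attribute (hedged example; the interface name is an
 * assumption):
 *
 *	# cap Tx queue 0 at 500 Mbps
 *	echo 500 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *	# restore the default, unlimited rate
 *	echo 0 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * Note that the Mbps value from the stack is converted to Kbps
 * (maxrate * 1000) before being handed to ice_cfg_q_bw_lmt().
 */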
5839 
5840 /**
5841  * ice_fdb_add - add an entry to the hardware database
5842  * @ndm: the input from the stack
5843  * @tb: pointer to array of nladdr (unused)
5844  * @dev: the net device pointer
5845  * @addr: the MAC address entry being added
5846  * @vid: VLAN ID
5847  * @flags: instructions from stack about fdb operation
5848  * @extack: netlink extended ack
5849  */
5850 static int
5851 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5852 	    struct net_device *dev, const unsigned char *addr, u16 vid,
5853 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5854 {
5855 	int err;
5856 
5857 	if (vid) {
5858 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5859 		return -EINVAL;
5860 	}
5861 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5862 		netdev_err(dev, "FDB only supports static addresses\n");
5863 		return -EINVAL;
5864 	}
5865 
5866 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5867 		err = dev_uc_add_excl(dev, addr);
5868 	else if (is_multicast_ether_addr(addr))
5869 		err = dev_mc_add_excl(dev, addr);
5870 	else
5871 		err = -EINVAL;
5872 
5873 	/* Only return duplicate errors if NLM_F_EXCL is set */
5874 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5875 		err = 0;
5876 
5877 	return err;
5878 }
5879 
5880 /**
5881  * ice_fdb_del - delete an entry from the hardware database
5882  * @ndm: the input from the stack
5883  * @tb: pointer to array of nladdr (unused)
5884  * @dev: the net device pointer
5885  * @addr: the MAC address entry being removed
5886  * @vid: VLAN ID
5887  * @extack: netlink extended ack
5888  */
5889 static int
5890 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5891 	    struct net_device *dev, const unsigned char *addr,
5892 	    __always_unused u16 vid, struct netlink_ext_ack *extack)
5893 {
5894 	int err;
5895 
5896 	if (ndm->ndm_state & NUD_PERMANENT) {
5897 		netdev_err(dev, "FDB only supports static addresses\n");
5898 		return -EINVAL;
5899 	}
5900 
5901 	if (is_unicast_ether_addr(addr))
5902 		err = dev_uc_del(dev, addr);
5903 	else if (is_multicast_ether_addr(addr))
5904 		err = dev_mc_del(dev, addr);
5905 	else
5906 		err = -EINVAL;
5907 
5908 	return err;
5909 }
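
/* ice_fdb_add()/ice_fdb_del() above back the ndo_fdb_add/ndo_fdb_del hooks
 * used by the bridge tooling (a hedged sketch; the device name and MAC are
 * assumptions):
 *
 *	bridge fdb add 01:00:5e:00:00:42 dev eth0
 *	bridge fdb del 01:00:5e:00:00:42 dev eth0
 *
 * VLAN-qualified entries ("vid 10") are rejected with -EINVAL, and duplicate
 * adds only fail when NLM_F_EXCL is set in the netlink request.
 */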
5910 
5911 #define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
5912 					 NETIF_F_HW_VLAN_CTAG_TX | \
5913 					 NETIF_F_HW_VLAN_STAG_RX | \
5914 					 NETIF_F_HW_VLAN_STAG_TX)
5915 
5916 #define NETIF_VLAN_STRIPPING_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
5917 					 NETIF_F_HW_VLAN_STAG_RX)
5918 
5919 #define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | \
5920 					 NETIF_F_HW_VLAN_STAG_FILTER)
5921 
5922 /**
5923  * ice_fix_features - fix the netdev features flags based on device limitations
5924  * @netdev: ptr to the netdev that flags are being fixed on
5925  * @features: features that need to be checked and possibly fixed
5926  *
5927  * Make sure any fixups are made to features in this callback. This enables the
5928  * driver to avoid checking for unsupported configurations throughout the
5929  * rest of the code, because that's the responsibility of this callback.
5930  *
5931  * Single VLAN Mode (SVM) Supported Features:
5932  *	NETIF_F_HW_VLAN_CTAG_FILTER
5933  *	NETIF_F_HW_VLAN_CTAG_RX
5934  *	NETIF_F_HW_VLAN_CTAG_TX
5935  *
5936  * Double VLAN Mode (DVM) Supported Features:
5937  *	NETIF_F_HW_VLAN_CTAG_FILTER
5938  *	NETIF_F_HW_VLAN_CTAG_RX
5939  *	NETIF_F_HW_VLAN_CTAG_TX
5940  *
5941  *	NETIF_F_HW_VLAN_STAG_FILTER
5942  *	NETIF_F_HW_VLAN_STAG_RX
5943  *	NETIF_F_HW_VLAN_STAG_TX
5944  *
5945  * Features that need fixing:
5946  *	Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
5947  *	These are mutually exclusive as the VSI context cannot support multiple
5948  *	VLAN ethertypes simultaneously for stripping and/or insertion. If this
5949  *	is not done, then default to clearing the requested STAG offload
5950  *	settings.
5951  *
5952  *	All supported filtering has to be enabled or disabled together. For
5953  *	example, in DVM, CTAG and STAG filtering have to be enabled and disabled
5954  *	together. If this is not done, then default to VLAN filtering disabled.
5955  *	These are mutually exclusive as there is currently no way to
5956  *	enable/disable VLAN filtering based on VLAN ethertype when using VLAN
5957  *	prune rules.
5958  */
5959 static netdev_features_t
5960 ice_fix_features(struct net_device *netdev, netdev_features_t features)
5961 {
5962 	struct ice_netdev_priv *np = netdev_priv(netdev);
5963 	netdev_features_t req_vlan_fltr, cur_vlan_fltr;
5964 	bool cur_ctag, cur_stag, req_ctag, req_stag;
5965 
5966 	cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
5967 	cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5968 	cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5969 
5970 	req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
5971 	req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5972 	req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5973 
5974 	if (req_vlan_fltr != cur_vlan_fltr) {
5975 		if (ice_is_dvm_ena(&np->vsi->back->hw)) {
5976 			if (req_ctag && req_stag) {
5977 				features |= NETIF_VLAN_FILTERING_FEATURES;
5978 			} else if (!req_ctag && !req_stag) {
5979 				features &= ~NETIF_VLAN_FILTERING_FEATURES;
5980 			} else if ((!cur_ctag && req_ctag && !cur_stag) ||
5981 				   (!cur_stag && req_stag && !cur_ctag)) {
5982 				features |= NETIF_VLAN_FILTERING_FEATURES;
5983 				netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
5984 			} else if ((cur_ctag && !req_ctag && cur_stag) ||
5985 				   (cur_stag && !req_stag && cur_ctag)) {
5986 				features &= ~NETIF_VLAN_FILTERING_FEATURES;
5987 				netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
5988 			}
5989 		} else {
5990 			if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
5991 				netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
5992 
5993 			if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
5994 				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5995 		}
5996 	}
5997 
5998 	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
5999 	    (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
6000 		netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
6001 		features &= ~(NETIF_F_HW_VLAN_STAG_RX |
6002 			      NETIF_F_HW_VLAN_STAG_TX);
6003 	}
6004 
6005 	if (!(netdev->features & NETIF_F_RXFCS) &&
6006 	    (features & NETIF_F_RXFCS) &&
6007 	    (features & NETIF_VLAN_STRIPPING_FEATURES) &&
6008 	    !ice_vsi_has_non_zero_vlans(np->vsi)) {
6009 		netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
6010 		features &= ~NETIF_VLAN_STRIPPING_FEATURES;
6011 	}
6012 
6013 	return features;
6014 }
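
/* The fixup rules above are visible through ethtool (hedged example; the
 * interface name is an assumption). In DVM, requesting only one of the two
 * filtering features drags the other one along:
 *
 *	ethtool -K eth0 rx-vlan-filter on
 *
 * ice_fix_features() then also sets NETIF_F_HW_VLAN_STAG_FILTER and logs
 * the "802.1Q and 802.1ad VLAN filtering must be either both on or both
 * off" warning shown above.
 */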
6015 
6016 /**
6017  * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6018  * @vsi: PF's VSI
6019  * @features: features used to determine VLAN offload settings
6020  *
6021  * First, determine the vlan_ethertype based on the VLAN offload bits in
6022  * features. Then determine if stripping and insertion should be enabled or
6023  * disabled. Finally enable or disable VLAN stripping and insertion.
6024  */
6025 static int
6026 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6027 {
6028 	bool enable_stripping = true, enable_insertion = true;
6029 	struct ice_vsi_vlan_ops *vlan_ops;
6030 	int strip_err = 0, insert_err = 0;
6031 	u16 vlan_ethertype = 0;
6032 
6033 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6034 
6035 	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
6036 		vlan_ethertype = ETH_P_8021AD;
6037 	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
6038 		vlan_ethertype = ETH_P_8021Q;
6039 
6040 	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
6041 		enable_stripping = false;
6042 	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
6043 		enable_insertion = false;
6044 
6045 	if (enable_stripping)
6046 		strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6047 	else
6048 		strip_err = vlan_ops->dis_stripping(vsi);
6049 
6050 	if (enable_insertion)
6051 		insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6052 	else
6053 		insert_err = vlan_ops->dis_insertion(vsi);
6054 
6055 	if (strip_err || insert_err)
6056 		return -EIO;
6057 
6058 	return 0;
6059 }
6060 
6061 /**
6062  * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6063  * @vsi: PF's VSI
6064  * @features: features used to determine VLAN filtering settings
6065  *
6066  * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
6067  * features.
6068  */
6069 static int
6070 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6071 {
6072 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6073 	int err = 0;
6074 
6075 	/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
6076 	 * if either bit is set
6077 	 */
6078 	if (features &
6079 	    (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
6080 		err = vlan_ops->ena_rx_filtering(vsi);
6081 	else
6082 		err = vlan_ops->dis_rx_filtering(vsi);
6083 
6084 	return err;
6085 }
6086 
6087 /**
6088  * ice_set_vlan_features - set VLAN settings based on suggested feature set
6089  * @netdev: ptr to the netdev being adjusted
6090  * @features: the feature set that the stack is suggesting
6091  *
6092  * Only update VLAN settings if the requested_vlan_features are different than
6093  * the current_vlan_features.
6094  */
6095 static int
6096 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6097 {
6098 	netdev_features_t current_vlan_features, requested_vlan_features;
6099 	struct ice_netdev_priv *np = netdev_priv(netdev);
6100 	struct ice_vsi *vsi = np->vsi;
6101 	int err;
6102 
6103 	current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6104 	requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6105 	if (current_vlan_features ^ requested_vlan_features) {
6106 		if ((features & NETIF_F_RXFCS) &&
6107 		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6108 			dev_err(ice_pf_to_dev(vsi->back),
6109 				"To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6110 			return -EIO;
6111 		}
6112 
6113 		err = ice_set_vlan_offload_features(vsi, features);
6114 		if (err)
6115 			return err;
6116 	}
6117 
6118 	current_vlan_features = netdev->features &
6119 		NETIF_VLAN_FILTERING_FEATURES;
6120 	requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6121 	if (current_vlan_features ^ requested_vlan_features) {
6122 		err = ice_set_vlan_filtering_features(vsi, features);
6123 		if (err)
6124 			return err;
6125 	}
6126 
6127 	return 0;
6128 }
6129 
6130 /**
6131  * ice_set_loopback - turn on/off loopback mode on underlying PF
6132  * @vsi: ptr to VSI
6133  * @ena: flag to indicate the on/off setting
6134  */
6135 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6136 {
6137 	bool if_running = netif_running(vsi->netdev);
6138 	int ret;
6139 
6140 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6141 		ret = ice_down(vsi);
6142 		if (ret) {
6143 			netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6144 			return ret;
6145 		}
6146 	}
6147 	ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6148 	if (ret)
6149 		netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6150 	if (if_running)
6151 		ret = ice_up(vsi);
6152 
6153 	return ret;
6154 }
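
/* ice_set_loopback() above is reached from ice_set_features() when the
 * NETIF_F_LOOPBACK flag flips, i.e. (hedged; the interface name is an
 * assumption):
 *
 *	ethtool -K eth0 loopback on
 *
 * The VSI is brought down around the admin queue call when it was running,
 * so a brief traffic interruption is expected while toggling.
 */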
6155 
6156 /**
6157  * ice_set_features - set the netdev feature flags
6158  * @netdev: ptr to the netdev being adjusted
6159  * @features: the feature set that the stack is suggesting
6160  */
6161 static int
6162 ice_set_features(struct net_device *netdev, netdev_features_t features)
6163 {
6164 	netdev_features_t changed = netdev->features ^ features;
6165 	struct ice_netdev_priv *np = netdev_priv(netdev);
6166 	struct ice_vsi *vsi = np->vsi;
6167 	struct ice_pf *pf = vsi->back;
6168 	int ret = 0;
6169 
6170 	/* Don't set any netdev advanced features with device in Safe Mode */
6171 	if (ice_is_safe_mode(pf)) {
6172 		dev_err(ice_pf_to_dev(pf),
6173 			"Device is in Safe Mode - not enabling advanced netdev features\n");
6174 		return ret;
6175 	}
6176 
6177 	/* Do not change setting during reset */
6178 	if (ice_is_reset_in_progress(pf->state)) {
6179 		dev_err(ice_pf_to_dev(pf),
6180 			"Device is resetting; changing advanced netdev features is temporarily unavailable.\n");
6181 		return -EBUSY;
6182 	}
6183 
6184 	/* Multiple features can be changed in one call so keep features in
6185 	 * separate if/else statements to guarantee each feature is checked
6186 	 */
6187 	if (changed & NETIF_F_RXHASH)
6188 		ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6189 
6190 	ret = ice_set_vlan_features(netdev, features);
6191 	if (ret)
6192 		return ret;
6193 
6194 	/* Turn on receive of FCS aka CRC, and after setting this
6195 	 * flag the packet data will have the 4 byte CRC appended
6196 	 */
6197 	if (changed & NETIF_F_RXFCS) {
6198 		if ((features & NETIF_F_RXFCS) &&
6199 		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6200 			dev_err(ice_pf_to_dev(vsi->back),
6201 				"To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6202 			return -EIO;
6203 		}
6204 
6205 		ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6206 		ret = ice_down_up(vsi);
6207 		if (ret)
6208 			return ret;
6209 	}
6210 
6211 	if (changed & NETIF_F_NTUPLE) {
6212 		bool ena = !!(features & NETIF_F_NTUPLE);
6213 
6214 		ice_vsi_manage_fdir(vsi, ena);
6215 		ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6216 	}
6217 
6218 	/* don't turn off hw_tc_offload when ADQ is already enabled */
6219 	if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6220 		dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6221 		return -EACCES;
6222 	}
6223 
6224 	if (changed & NETIF_F_HW_TC) {
6225 		bool ena = !!(features & NETIF_F_HW_TC);
6226 
6227 		ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6228 		      clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6229 	}
6230 
6231 	if (changed & NETIF_F_LOOPBACK)
6232 		ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6233 
6234 	return ret;
6235 }
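
/* Several branches of ice_set_features() map directly onto standard ethtool
 * feature toggles (a hedged sketch; the interface name is an assumption):
 *
 *	ethtool -K eth0 rxhash off	(NETIF_F_RXHASH)
 *	ethtool -K eth0 ntuple on	(NETIF_F_NTUPLE, Flow Director/aRFS)
 *	ethtool -K eth0 rx-fcs on	(NETIF_F_RXFCS)
 *
 * Enabling rx-fcs while VLAN stripping is still active fails with -EIO per
 * the check above, unless ice_fix_features() already dropped VLAN stripping
 * because no VLANs are configured.
 */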
6236 
6237 /**
6238  * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6239  * @vsi: VSI to setup VLAN properties for
6240  */
6241 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6242 {
6243 	int err;
6244 
6245 	err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6246 	if (err)
6247 		return err;
6248 
6249 	err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6250 	if (err)
6251 		return err;
6252 
6253 	return ice_vsi_add_vlan_zero(vsi);
6254 }
6255 
6256 /**
6257  * ice_vsi_cfg_lan - Set up the VSI's LAN-related config
6258  * @vsi: the VSI being configured
6259  *
6260  * Return 0 on success and negative value on error
6261  */
6262 int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6263 {
6264 	int err;
6265 
6266 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6267 		ice_set_rx_mode(vsi->netdev);
6268 
6269 		err = ice_vsi_vlan_setup(vsi);
6270 		if (err)
6271 			return err;
6272 	}
6273 	ice_vsi_cfg_dcb_rings(vsi);
6274 
6275 	err = ice_vsi_cfg_lan_txqs(vsi);
6276 	if (!err && ice_is_xdp_ena_vsi(vsi))
6277 		err = ice_vsi_cfg_xdp_txqs(vsi);
6278 	if (!err)
6279 		err = ice_vsi_cfg_rxqs(vsi);
6280 
6281 	return err;
6282 }
6283 
6284 /* THEORY OF MODERATION:
6285  * The ice driver hardware works differently than the hardware that DIMLIB was
6286  * originally made for. ice hardware doesn't have packet count limits that
6287  * can trigger an interrupt, but it *does* have interrupt rate limit support,
6288  * which is hard-coded to a limit of 250,000 ints/second.
6289  * If not using dynamic moderation, the INTRL value can be modified
6290  * by ethtool rx-usecs-high.
6291  */
6292 struct ice_dim {
6293 	/* the throttle rate for interrupts, basically worst case delay before
6294 	 * an initial interrupt fires, value is stored in microseconds.
6295 	 */
6296 	u16 itr;
6297 };
6298 
6299 /* Make a different profile for Rx that doesn't allow quite so aggressive
6300  * moderation at the high end (it maxes out at 126us or about 8k interrupts a
6301  * second.
6302  */
6303 static const struct ice_dim rx_profile[] = {
6304 	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6305 	{8},    /* 125,000 ints/s */
6306 	{16},   /*  62,500 ints/s */
6307 	{62},   /*  16,129 ints/s */
6308 	{126}   /*   7,936 ints/s */
6309 };
6310 
6311 /* The transmit profile has the same sorts of values
6312  * as the previous struct.
6313  */
6314 static const struct ice_dim tx_profile[] = {
6315 	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6316 	{8},    /* 125,000 ints/s */
6317 	{40},   /*  25,000 ints/s */
6318 	{128},  /*   7,812 ints/s */
6319 	{256}   /*   3,906 ints/s */
6320 };
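
/* The ints/s figures in the two tables above follow directly from the ITR
 * interval: rate = USEC_PER_SEC / itr_us, clamped by the 250K INTRL cap. A
 * hedged, illustrative helper (ice_itr_to_ints_per_sec() is hypothetical,
 * not part of the driver):
 *
 *	static u32 ice_itr_to_ints_per_sec(u16 itr_us)
 *	{
 *		u32 rate = USEC_PER_SEC / max_t(u16, itr_us, 1);
 *
 *		return min_t(u32, rate, 250000);
 *	}
 *
 * e.g. itr_us = 8 yields 125,000 ints/s, matching the second entry of both
 * tables.
 */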
6321 
6322 static void ice_tx_dim_work(struct work_struct *work)
6323 {
6324 	struct ice_ring_container *rc;
6325 	struct dim *dim;
6326 	u16 itr;
6327 
6328 	dim = container_of(work, struct dim, work);
6329 	rc = dim->priv;
6330 
6331 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6332 
6333 	/* look up the values in our local table */
6334 	itr = tx_profile[dim->profile_ix].itr;
6335 
6336 	ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6337 	ice_write_itr(rc, itr);
6338 
6339 	dim->state = DIM_START_MEASURE;
6340 }
6341 
6342 static void ice_rx_dim_work(struct work_struct *work)
6343 {
6344 	struct ice_ring_container *rc;
6345 	struct dim *dim;
6346 	u16 itr;
6347 
6348 	dim = container_of(work, struct dim, work);
6349 	rc = dim->priv;
6350 
6351 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6352 
6353 	/* look up the values in our local table */
6354 	itr = rx_profile[dim->profile_ix].itr;
6355 
6356 	ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6357 	ice_write_itr(rc, itr);
6358 
6359 	dim->state = DIM_START_MEASURE;
6360 }
6361 
6362 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6363 
6364 /**
6365  * ice_init_moderation - set up interrupt moderation
6366  * @q_vector: the vector containing rings to be configured
6367  *
6368  * Set up interrupt moderation registers, with the intent to do the right thing
6369  * when called from reset or from probe, whether or not dynamic moderation
6370  * is enabled. Take special care to write all the registers in both the
6371  * dynamic and non-dynamic moderation modes, in order to make sure the
6372  * hardware is in a known state.
6373  */
6374 static void ice_init_moderation(struct ice_q_vector *q_vector)
6375 {
6376 	struct ice_ring_container *rc;
6377 	bool tx_dynamic, rx_dynamic;
6378 
6379 	rc = &q_vector->tx;
6380 	INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6381 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6382 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6383 	rc->dim.priv = rc;
6384 	tx_dynamic = ITR_IS_DYNAMIC(rc);
6385 
6386 	/* set the initial TX ITR to match the above */
6387 	ice_write_itr(rc, tx_dynamic ?
6388 		      tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6389 
6390 	rc = &q_vector->rx;
6391 	INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6392 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6393 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6394 	rc->dim.priv = rc;
6395 	rx_dynamic = ITR_IS_DYNAMIC(rc);
6396 
6397 	/* set the initial RX ITR to match the above */
6398 	ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6399 				       rc->itr_setting);
6400 
6401 	ice_set_q_vector_intrl(q_vector);
6402 }
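
/* Whether the DIM profiles above are consulted at all is governed by
 * ITR_IS_DYNAMIC(), i.e. ethtool's adaptive coalescing (hedged example; the
 * interface name is an assumption):
 *
 *	ethtool -C eth0 adaptive-rx on adaptive-tx on
 *	# or pin a static ITR and bypass DIM entirely:
 *	ethtool -C eth0 adaptive-rx off rx-usecs 50
 */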
6403 
6404 /**
6405  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6406  * @vsi: the VSI being configured
6407  */
6408 static void ice_napi_enable_all(struct ice_vsi *vsi)
6409 {
6410 	int q_idx;
6411 
6412 	if (!vsi->netdev)
6413 		return;
6414 
6415 	ice_for_each_q_vector(vsi, q_idx) {
6416 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6417 
6418 		ice_init_moderation(q_vector);
6419 
6420 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6421 			napi_enable(&q_vector->napi);
6422 	}
6423 }
6424 
6425 /**
6426  * ice_up_complete - Finish the last steps of bringing up a connection
6427  * @vsi: The VSI being configured
6428  *
6429  * Return 0 on success and negative value on error
6430  */
6431 static int ice_up_complete(struct ice_vsi *vsi)
6432 {
6433 	struct ice_pf *pf = vsi->back;
6434 	int err;
6435 
6436 	ice_vsi_cfg_msix(vsi);
6437 
6438 	/* Enable only Rx rings; Tx rings were enabled by the FW when the
6439 	 * Tx queue group list was configured and the context bits were
6440 	 * programmed using ice_vsi_cfg_txqs
6441 	 */
6442 	err = ice_vsi_start_all_rx_rings(vsi);
6443 	if (err)
6444 		return err;
6445 
6446 	clear_bit(ICE_VSI_DOWN, vsi->state);
6447 	ice_napi_enable_all(vsi);
6448 	ice_vsi_ena_irq(vsi);
6449 
6450 	if (vsi->port_info &&
6451 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6452 	    vsi->netdev && vsi->type == ICE_VSI_PF) {
6453 		ice_print_link_msg(vsi, true);
6454 		netif_tx_start_all_queues(vsi->netdev);
6455 		netif_carrier_on(vsi->netdev);
6456 		ice_ptp_link_change(pf, pf->hw.pf_id, true);
6457 	}
6458 
6459 	/* Perform an initial read of the statistics registers now to
6460 	 * set the baseline so counters are ready when interface is up
6461 	 */
6462 	ice_update_eth_stats(vsi);
6463 
6464 	if (vsi->type == ICE_VSI_PF)
6465 		ice_service_task_schedule(pf);
6466 
6467 	return 0;
6468 }
6469 
6470 /**
6471  * ice_up - Bring the connection back up after being down
6472  * @vsi: VSI being configured
6473  */
6474 int ice_up(struct ice_vsi *vsi)
6475 {
6476 	int err;
6477 
6478 	err = ice_vsi_cfg_lan(vsi);
6479 	if (!err)
6480 		err = ice_up_complete(vsi);
6481 
6482 	return err;
6483 }
6484 
6485 /**
6486  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6487  * @syncp: pointer to u64_stats_sync
6488  * @stats: stats that the packet and byte counts will be taken from
6489  * @pkts: packets stats counter
6490  * @bytes: bytes stats counter
6491  *
6492  * This function fetches stats from the ring considering the atomic operations
6493  * that need to be performed to read u64 values on a 32-bit machine.
6494  */
6495 void
6496 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6497 			     struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6498 {
6499 	unsigned int start;
6500 
6501 	do {
6502 		start = u64_stats_fetch_begin(syncp);
6503 		*pkts = stats.pkts;
6504 		*bytes = stats.bytes;
6505 	} while (u64_stats_fetch_retry(syncp, start));
6506 }
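
/* A hedged usage sketch for the helper above, mirroring how the ring-stat
 * walkers below consume it ("ring" stands for any Tx/Rx ring with valid
 * ring_stats):
 *
 *	u64 pkts = 0, bytes = 0;
 *
 *	ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
 *				     ring->ring_stats->stats,
 *				     &pkts, &bytes);
 *
 * On 64-bit kernels the seqcount in @syncp is effectively a no-op; on 32-bit
 * it retries until both halves of each u64 are read consistently.
 */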
6507 
6508 /**
6509  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6510  * @vsi: the VSI to be updated
6511  * @vsi_stats: the stats struct to be updated
6512  * @rings: rings to work on
6513  * @count: number of rings
6514  */
6515 static void
6516 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6517 			     struct rtnl_link_stats64 *vsi_stats,
6518 			     struct ice_tx_ring **rings, u16 count)
6519 {
6520 	u16 i;
6521 
6522 	for (i = 0; i < count; i++) {
6523 		struct ice_tx_ring *ring;
6524 		u64 pkts = 0, bytes = 0;
6525 
6526 		ring = READ_ONCE(rings[i]);
6527 		if (!ring || !ring->ring_stats)
6528 			continue;
6529 		ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6530 					     ring->ring_stats->stats, &pkts,
6531 					     &bytes);
6532 		vsi_stats->tx_packets += pkts;
6533 		vsi_stats->tx_bytes += bytes;
6534 		vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6535 		vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6536 		vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6537 	}
6538 }
6539 
6540 /**
6541  * ice_update_vsi_ring_stats - Update VSI stats counters
6542  * @vsi: the VSI to be updated
6543  */
6544 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6545 {
6546 	struct rtnl_link_stats64 *net_stats, *stats_prev;
6547 	struct rtnl_link_stats64 *vsi_stats;
6548 	u64 pkts, bytes;
6549 	int i;
6550 
6551 	vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6552 	if (!vsi_stats)
6553 		return;
6554 
6555 	/* reset non-netdev (extended) stats */
6556 	vsi->tx_restart = 0;
6557 	vsi->tx_busy = 0;
6558 	vsi->tx_linearize = 0;
6559 	vsi->rx_buf_failed = 0;
6560 	vsi->rx_page_failed = 0;
6561 
6562 	rcu_read_lock();
6563 
6564 	/* update Tx rings counters */
6565 	ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6566 				     vsi->num_txq);
6567 
6568 	/* update Rx rings counters */
6569 	ice_for_each_rxq(vsi, i) {
6570 		struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6571 		struct ice_ring_stats *ring_stats;
6572 
6573 		ring_stats = ring->ring_stats;
6574 		ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6575 					     ring_stats->stats, &pkts,
6576 					     &bytes);
6577 		vsi_stats->rx_packets += pkts;
6578 		vsi_stats->rx_bytes += bytes;
6579 		vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6580 		vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6581 	}
6582 
6583 	/* update XDP Tx rings counters */
6584 	if (ice_is_xdp_ena_vsi(vsi))
6585 		ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6586 					     vsi->num_xdp_txq);
6587 
6588 	rcu_read_unlock();
6589 
6590 	net_stats = &vsi->net_stats;
6591 	stats_prev = &vsi->net_stats_prev;
6592 
6593 	/* clear prev counters after reset */
6594 	if (vsi_stats->tx_packets < stats_prev->tx_packets ||
6595 	    vsi_stats->rx_packets < stats_prev->rx_packets) {
6596 		stats_prev->tx_packets = 0;
6597 		stats_prev->tx_bytes = 0;
6598 		stats_prev->rx_packets = 0;
6599 		stats_prev->rx_bytes = 0;
6600 	}
6601 
6602 	/* update netdev counters */
6603 	net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6604 	net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6605 	net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6606 	net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6607 
6608 	stats_prev->tx_packets = vsi_stats->tx_packets;
6609 	stats_prev->tx_bytes = vsi_stats->tx_bytes;
6610 	stats_prev->rx_packets = vsi_stats->rx_packets;
6611 	stats_prev->rx_bytes = vsi_stats->rx_bytes;
6612 
6613 	kfree(vsi_stats);
6614 }
6615 
6616 /**
6617  * ice_update_vsi_stats - Update VSI stats counters
6618  * @vsi: the VSI to be updated
6619  */
6620 void ice_update_vsi_stats(struct ice_vsi *vsi)
6621 {
6622 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6623 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
6624 	struct ice_pf *pf = vsi->back;
6625 
6626 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6627 	    test_bit(ICE_CFG_BUSY, pf->state))
6628 		return;
6629 
6630 	/* get stats as recorded by Tx/Rx rings */
6631 	ice_update_vsi_ring_stats(vsi);
6632 
6633 	/* get VSI stats as recorded by the hardware */
6634 	ice_update_eth_stats(vsi);
6635 
6636 	cur_ns->tx_errors = cur_es->tx_errors;
6637 	cur_ns->rx_dropped = cur_es->rx_discards;
6638 	cur_ns->tx_dropped = cur_es->tx_discards;
6639 	cur_ns->multicast = cur_es->rx_multicast;
6640 
6641 	/* update some more netdev stats if this is main VSI */
6642 	if (vsi->type == ICE_VSI_PF) {
6643 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
6644 		cur_ns->rx_errors = pf->stats.crc_errors +
6645 				    pf->stats.illegal_bytes +
6646 				    pf->stats.rx_len_errors +
6647 				    pf->stats.rx_undersize +
6648 				    pf->hw_csum_rx_error +
6649 				    pf->stats.rx_jabber +
6650 				    pf->stats.rx_fragments +
6651 				    pf->stats.rx_oversize;
6652 		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
6653 		/* record drops from the port level */
6654 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6655 	}
6656 }
6657 
6658 /**
6659  * ice_update_pf_stats - Update PF port stats counters
6660  * @pf: PF whose stats need to be updated
6661  */
6662 void ice_update_pf_stats(struct ice_pf *pf)
6663 {
6664 	struct ice_hw_port_stats *prev_ps, *cur_ps;
6665 	struct ice_hw *hw = &pf->hw;
6666 	u16 fd_ctr_base;
6667 	u8 port;
6668 
6669 	port = hw->port_info->lport;
6670 	prev_ps = &pf->stats_prev;
6671 	cur_ps = &pf->stats;
6672 
6673 	if (ice_is_reset_in_progress(pf->state))
6674 		pf->stat_prev_loaded = false;
6675 
6676 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6677 			  &prev_ps->eth.rx_bytes,
6678 			  &cur_ps->eth.rx_bytes);
6679 
6680 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6681 			  &prev_ps->eth.rx_unicast,
6682 			  &cur_ps->eth.rx_unicast);
6683 
6684 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6685 			  &prev_ps->eth.rx_multicast,
6686 			  &cur_ps->eth.rx_multicast);
6687 
6688 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6689 			  &prev_ps->eth.rx_broadcast,
6690 			  &cur_ps->eth.rx_broadcast);
6691 
6692 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6693 			  &prev_ps->eth.rx_discards,
6694 			  &cur_ps->eth.rx_discards);
6695 
6696 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6697 			  &prev_ps->eth.tx_bytes,
6698 			  &cur_ps->eth.tx_bytes);
6699 
6700 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6701 			  &prev_ps->eth.tx_unicast,
6702 			  &cur_ps->eth.tx_unicast);
6703 
6704 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6705 			  &prev_ps->eth.tx_multicast,
6706 			  &cur_ps->eth.tx_multicast);
6707 
6708 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6709 			  &prev_ps->eth.tx_broadcast,
6710 			  &cur_ps->eth.tx_broadcast);
6711 
6712 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6713 			  &prev_ps->tx_dropped_link_down,
6714 			  &cur_ps->tx_dropped_link_down);
6715 
6716 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6717 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
6718 
6719 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6720 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
6721 
6722 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6723 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
6724 
6725 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6726 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
6727 
6728 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
6729 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
6730 
6731 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
6732 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
6733 
6734 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
6735 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
6736 
6737 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
6738 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
6739 
6740 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
6741 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
6742 
6743 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
6744 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
6745 
6746 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
6747 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
6748 
6749 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
6750 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
6751 
6752 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
6753 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
6754 
6755 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
6756 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
6757 
6758 	fd_ctr_base = hw->fd_ctr_base;
6759 
6760 	ice_stat_update40(hw,
6761 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
6762 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6763 			  &cur_ps->fd_sb_match);
6764 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
6765 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
6766 
6767 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
6768 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
6769 
6770 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
6771 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
6772 
6773 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
6774 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
6775 
6776 	ice_update_dcb_stats(pf);
6777 
6778 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
6779 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
6780 
6781 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
6782 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
6783 
6784 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
6785 			  &prev_ps->mac_local_faults,
6786 			  &cur_ps->mac_local_faults);
6787 
6788 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
6789 			  &prev_ps->mac_remote_faults,
6790 			  &cur_ps->mac_remote_faults);
6791 
6792 	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
6793 			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
6794 
6795 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
6796 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
6797 
6798 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
6799 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
6800 
6801 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
6802 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
6803 
6804 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
6805 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
6806 
6807 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6808 
6809 	pf->stat_prev_loaded = true;
6810 }
6811 
6812 /**
6813  * ice_get_stats64 - get statistics for network device structure
6814  * @netdev: network interface device structure
6815  * @stats: main device statistics structure
6816  */
6817 static
6818 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6819 {
6820 	struct ice_netdev_priv *np = netdev_priv(netdev);
6821 	struct rtnl_link_stats64 *vsi_stats;
6822 	struct ice_vsi *vsi = np->vsi;
6823 
6824 	vsi_stats = &vsi->net_stats;
6825 
6826 	if (!vsi->num_txq || !vsi->num_rxq)
6827 		return;
6828 
6829 	/* netdev packet/byte stats come from ring counters. These are obtained
6830 	 * by summing up the ring counters (done by ice_update_vsi_ring_stats).
6831 	 * But only call the update routine and read the registers if the VSI
6832 	 * is not down.
6833 	 */
6834 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
6835 		ice_update_vsi_ring_stats(vsi);
6836 	stats->tx_packets = vsi_stats->tx_packets;
6837 	stats->tx_bytes = vsi_stats->tx_bytes;
6838 	stats->rx_packets = vsi_stats->rx_packets;
6839 	stats->rx_bytes = vsi_stats->rx_bytes;
6840 
6841 	/* The rest of the stats could be read from the hardware, but instead
6842 	 * we just return the values that the watchdog task has already
6843 	 * obtained from it.
6844 	 */
6845 	stats->multicast = vsi_stats->multicast;
6846 	stats->tx_errors = vsi_stats->tx_errors;
6847 	stats->tx_dropped = vsi_stats->tx_dropped;
6848 	stats->rx_errors = vsi_stats->rx_errors;
6849 	stats->rx_dropped = vsi_stats->rx_dropped;
6850 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6851 	stats->rx_length_errors = vsi_stats->rx_length_errors;
6852 }
6853 
6854 /**
6855  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6856  * @vsi: VSI having NAPI disabled
6857  */
6858 static void ice_napi_disable_all(struct ice_vsi *vsi)
6859 {
6860 	int q_idx;
6861 
6862 	if (!vsi->netdev)
6863 		return;
6864 
6865 	ice_for_each_q_vector(vsi, q_idx) {
6866 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6867 
6868 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6869 			napi_disable(&q_vector->napi);
6870 
6871 		cancel_work_sync(&q_vector->tx.dim.work);
6872 		cancel_work_sync(&q_vector->rx.dim.work);
6873 	}
6874 }
6875 
6876 /**
6877  * ice_down - Shutdown the connection
6878  * @vsi: The VSI being stopped
6879  *
6880  * The caller of this function is expected to set the ICE_DOWN bit in vsi->state
6881  */
6882 int ice_down(struct ice_vsi *vsi)
6883 {
6884 	int i, tx_err, rx_err, vlan_err = 0;
6885 
6886 	WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
6887 
6888 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6889 		vlan_err = ice_vsi_del_vlan_zero(vsi);
6890 		ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
6891 		netif_carrier_off(vsi->netdev);
6892 		netif_tx_disable(vsi->netdev);
6893 	} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6894 		ice_eswitch_stop_all_tx_queues(vsi->back);
6895 	}
6896 
6897 	ice_vsi_dis_irq(vsi);
6898 
6899 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6900 	if (tx_err)
6901 		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6902 			   vsi->vsi_num, tx_err);
6903 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6904 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6905 		if (tx_err)
6906 			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6907 				   vsi->vsi_num, tx_err);
6908 	}
6909 
6910 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
6911 	if (rx_err)
6912 		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
6913 			   vsi->vsi_num, rx_err);
6914 
6915 	ice_napi_disable_all(vsi);
6916 
6917 	ice_for_each_txq(vsi, i)
6918 		ice_clean_tx_ring(vsi->tx_rings[i]);
6919 
6920 	if (ice_is_xdp_ena_vsi(vsi))
6921 		ice_for_each_xdp_txq(vsi, i)
6922 			ice_clean_tx_ring(vsi->xdp_rings[i]);
6923 
6924 	ice_for_each_rxq(vsi, i)
6925 		ice_clean_rx_ring(vsi->rx_rings[i]);
6926 
6927 	if (tx_err || rx_err || vlan_err) {
6928 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6929 			   vsi->vsi_num, vsi->vsw->sw_id);
6930 		return -EIO;
6931 	}
6932 
6933 	return 0;
6934 }
6935 
6936 /**
6937  * ice_down_up - shutdown the VSI connection and bring it up
6938  * @vsi: the VSI to be reconnected
6939  */
6940 int ice_down_up(struct ice_vsi *vsi)
6941 {
6942 	int ret;
6943 
6944 	/* if DOWN already set, nothing to do */
6945 	if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
6946 		return 0;
6947 
6948 	ret = ice_down(vsi);
6949 	if (ret)
6950 		return ret;
6951 
6952 	ret = ice_up(vsi);
6953 	if (ret) {
6954 		netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
6955 		return ret;
6956 	}
6957 
6958 	return 0;
6959 }
6960 
6961 /**
6962  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6963  * @vsi: VSI having resources allocated
6964  *
6965  * Return 0 on success, negative on failure
6966  */
6967 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6968 {
6969 	int i, err = 0;
6970 
6971 	if (!vsi->num_txq) {
6972 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6973 			vsi->vsi_num);
6974 		return -EINVAL;
6975 	}
6976 
6977 	ice_for_each_txq(vsi, i) {
6978 		struct ice_tx_ring *ring = vsi->tx_rings[i];
6979 
6980 		if (!ring)
6981 			return -EINVAL;
6982 
6983 		if (vsi->netdev)
6984 			ring->netdev = vsi->netdev;
6985 		err = ice_setup_tx_ring(ring);
6986 		if (err)
6987 			break;
6988 	}
6989 
6990 	return err;
6991 }
6992 
6993 /**
6994  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6995  * @vsi: VSI having resources allocated
6996  *
6997  * Return 0 on success, negative on failure
6998  */
6999 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
7000 {
7001 	int i, err = 0;
7002 
7003 	if (!vsi->num_rxq) {
7004 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
7005 			vsi->vsi_num);
7006 		return -EINVAL;
7007 	}
7008 
7009 	ice_for_each_rxq(vsi, i) {
7010 		struct ice_rx_ring *ring = vsi->rx_rings[i];
7011 
7012 		if (!ring)
7013 			return -EINVAL;
7014 
7015 		if (vsi->netdev)
7016 			ring->netdev = vsi->netdev;
7017 		err = ice_setup_rx_ring(ring);
7018 		if (err)
7019 			break;
7020 	}
7021 
7022 	return err;
7023 }
7024 
7025 /**
7026  * ice_vsi_open_ctrl - open control VSI for use
7027  * @vsi: the VSI to open
7028  *
7029  * Initialization of the Control VSI
7030  *
7031  * Returns 0 on success, negative value on error
7032  */
7033 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
7034 {
7035 	char int_name[ICE_INT_NAME_STR_LEN];
7036 	struct ice_pf *pf = vsi->back;
7037 	struct device *dev;
7038 	int err;
7039 
7040 	dev = ice_pf_to_dev(pf);
7041 	/* allocate descriptors */
7042 	err = ice_vsi_setup_tx_rings(vsi);
7043 	if (err)
7044 		goto err_setup_tx;
7045 
7046 	err = ice_vsi_setup_rx_rings(vsi);
7047 	if (err)
7048 		goto err_setup_rx;
7049 
7050 	err = ice_vsi_cfg_lan(vsi);
7051 	if (err)
7052 		goto err_setup_rx;
7053 
7054 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
7055 		 dev_driver_string(dev), dev_name(dev));
7056 	err = ice_vsi_req_irq_msix(vsi, int_name);
7057 	if (err)
7058 		goto err_setup_rx;
7059 
7060 	ice_vsi_cfg_msix(vsi);
7061 
7062 	err = ice_vsi_start_all_rx_rings(vsi);
7063 	if (err)
7064 		goto err_up_complete;
7065 
7066 	clear_bit(ICE_VSI_DOWN, vsi->state);
7067 	ice_vsi_ena_irq(vsi);
7068 
7069 	return 0;
7070 
7071 err_up_complete:
7072 	ice_down(vsi);
7073 err_setup_rx:
7074 	ice_vsi_free_rx_rings(vsi);
7075 err_setup_tx:
7076 	ice_vsi_free_tx_rings(vsi);
7077 
7078 	return err;
7079 }
7080 
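/* The control VSI has no netdev of its own; in this driver it carries the
 * Flow Director filter queues. That is why ice_vsi_open_ctrl() builds
 * int_name from the device name and skips the netif_set_real_num_*_queues()
 * and ice_up_complete() steps that ice_vsi_open() below performs for the
 * PF VSI.
 */
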
7081 /**
7082  * ice_vsi_open - Called when a network interface is made active
7083  * @vsi: the VSI to open
7084  *
7085  * Initialization of the VSI
7086  *
7087  * Returns 0 on success, negative value on error
7088  */
7089 int ice_vsi_open(struct ice_vsi *vsi)
7090 {
7091 	char int_name[ICE_INT_NAME_STR_LEN];
7092 	struct ice_pf *pf = vsi->back;
7093 	int err;
7094 
7095 	/* allocate descriptors */
7096 	err = ice_vsi_setup_tx_rings(vsi);
7097 	if (err)
7098 		goto err_setup_tx;
7099 
7100 	err = ice_vsi_setup_rx_rings(vsi);
7101 	if (err)
7102 		goto err_setup_rx;
7103 
7104 	err = ice_vsi_cfg_lan(vsi);
7105 	if (err)
7106 		goto err_setup_rx;
7107 
7108 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7109 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7110 	err = ice_vsi_req_irq_msix(vsi, int_name);
7111 	if (err)
7112 		goto err_setup_rx;
7113 
7114 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7115 
7116 	if (vsi->type == ICE_VSI_PF) {
7117 		/* Notify the stack of the actual queue counts. */
7118 		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7119 		if (err)
7120 			goto err_set_qs;
7121 
7122 		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7123 		if (err)
7124 			goto err_set_qs;
7125 	}
7126 
7127 	err = ice_up_complete(vsi);
7128 	if (err)
7129 		goto err_up_complete;
7130 
7131 	return 0;
7132 
7133 err_up_complete:
7134 	ice_down(vsi);
7135 err_set_qs:
7136 	ice_vsi_free_irq(vsi);
7137 err_setup_rx:
7138 	ice_vsi_free_rx_rings(vsi);
7139 err_setup_tx:
7140 	ice_vsi_free_tx_rings(vsi);
7141 
7142 	return err;
7143 }
7144 
7145 /**
7146  * ice_vsi_release_all - Delete all VSIs
7147  * @pf: PF from which all VSIs are being removed
7148  */
7149 static void ice_vsi_release_all(struct ice_pf *pf)
7150 {
7151 	int err, i;
7152 
7153 	if (!pf->vsi)
7154 		return;
7155 
7156 	ice_for_each_vsi(pf, i) {
7157 		if (!pf->vsi[i])
7158 			continue;
7159 
7160 		if (pf->vsi[i]->type == ICE_VSI_CHNL)
7161 			continue;
7162 
7163 		err = ice_vsi_release(pf->vsi[i]);
7164 		if (err)
7165 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7166 				i, err, pf->vsi[i]->vsi_num);
7167 	}
7168 }
7169 
7170 /**
7171  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7172  * @pf: pointer to the PF instance
7173  * @type: VSI type to rebuild
7174  *
7175  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7176  */
7177 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7178 {
7179 	struct device *dev = ice_pf_to_dev(pf);
7180 	int i, err;
7181 
7182 	ice_for_each_vsi(pf, i) {
7183 		struct ice_vsi *vsi = pf->vsi[i];
7184 
7185 		if (!vsi || vsi->type != type)
7186 			continue;
7187 
7188 		/* rebuild the VSI */
7189 		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7190 		if (err) {
7191 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7192 				err, vsi->idx, ice_vsi_type_str(type));
7193 			return err;
7194 		}
7195 
7196 		/* replay filters for the VSI */
7197 		err = ice_replay_vsi(&pf->hw, vsi->idx);
7198 		if (err) {
7199 			dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7200 				err, vsi->idx, ice_vsi_type_str(type));
7201 			return err;
7202 		}
7203 
7204 		/* Re-map HW VSI number, using VSI handle that has been
7205 		 * previously validated in ice_replay_vsi() call above
7206 		 */
7207 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7208 
7209 		/* enable the VSI */
7210 		err = ice_ena_vsi(vsi, false);
7211 		if (err) {
7212 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7213 				err, vsi->idx, ice_vsi_type_str(type));
7214 			return err;
7215 		}
7216 
7217 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7218 			 ice_vsi_type_str(type));
7219 	}
7220 
7221 	return 0;
7222 }
7223 
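/* ice_rebuild() below calls this helper once per VSI type, in order: the
 * PF VSI, then the switchdev control VSI, and - when Flow Director is
 * active - the control VSI.
 */
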
7224 /**
7225  * ice_update_pf_netdev_link - Update PF netdev link status
7226  * @pf: pointer to the PF instance
7227  */
7228 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7229 {
7230 	bool link_up;
7231 	int i;
7232 
7233 	ice_for_each_vsi(pf, i) {
7234 		struct ice_vsi *vsi = pf->vsi[i];
7235 
7236 		if (!vsi || vsi->type != ICE_VSI_PF)
7237 			return;
7238 
7239 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7240 		if (link_up) {
7241 			netif_carrier_on(pf->vsi[i]->netdev);
7242 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7243 		} else {
7244 			netif_carrier_off(pf->vsi[i]->netdev);
7245 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7246 		}
7247 	}
7248 }
7249 
7250 /**
7251  * ice_rebuild - rebuild after reset
7252  * @pf: PF to rebuild
7253  * @reset_type: type of reset
7254  *
7255  * Do not rebuild VF VSIs in this flow because that is already handled via
7256  * ice_reset_all_vfs(). The requirements for resetting a VF after a
7257  * PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we don't
7258  * want to reset/rebuild all the VF VSIs twice.
7259  */
7260 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7261 {
7262 	struct device *dev = ice_pf_to_dev(pf);
7263 	struct ice_hw *hw = &pf->hw;
7264 	bool dvm;
7265 	int err;
7266 
7267 	if (test_bit(ICE_DOWN, pf->state))
7268 		goto clear_recovery;
7269 
7270 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7271 
7272 #define ICE_EMP_RESET_SLEEP_MS 5000
7273 	if (reset_type == ICE_RESET_EMPR) {
7274 		/* If an EMP reset has occurred, any previously pending flash
7275 		 * update will have completed. We no longer know whether or
7276 		 * not the NVM update EMP reset is restricted.
7277 		 */
7278 		pf->fw_emp_reset_disabled = false;
7279 
7280 		msleep(ICE_EMP_RESET_SLEEP_MS);
7281 	}
7282 
7283 	err = ice_init_all_ctrlq(hw);
7284 	if (err) {
7285 		dev_err(dev, "control queues init failed %d\n", err);
7286 		goto err_init_ctrlq;
7287 	}
7288 
7289 	/* if DDP was previously loaded successfully */
7290 	if (!ice_is_safe_mode(pf)) {
7291 		/* reload the SW DB of filter tables */
7292 		if (reset_type == ICE_RESET_PFR)
7293 			ice_fill_blk_tbls(hw);
7294 		else
7295 			/* Reload DDP Package after CORER/GLOBR reset */
7296 			ice_load_pkg(NULL, pf);
7297 	}
7298 
7299 	err = ice_clear_pf_cfg(hw);
7300 	if (err) {
7301 		dev_err(dev, "clear PF configuration failed %d\n", err);
7302 		goto err_init_ctrlq;
7303 	}
7304 
7305 	ice_clear_pxe_mode(hw);
7306 
7307 	err = ice_init_nvm(hw);
7308 	if (err) {
7309 		dev_err(dev, "ice_init_nvm failed %d\n", err);
7310 		goto err_init_ctrlq;
7311 	}
7312 
7313 	err = ice_get_caps(hw);
7314 	if (err) {
7315 		dev_err(dev, "ice_get_caps failed %d\n", err);
7316 		goto err_init_ctrlq;
7317 	}
7318 
7319 	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7320 	if (err) {
7321 		dev_err(dev, "set_mac_cfg failed %d\n", err);
7322 		goto err_init_ctrlq;
7323 	}
7324 
7325 	dvm = ice_is_dvm_ena(hw);
7326 
7327 	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7328 	if (err)
7329 		goto err_init_ctrlq;
7330 
7331 	err = ice_sched_init_port(hw->port_info);
7332 	if (err)
7333 		goto err_sched_init_port;
7334 
7335 	/* start misc vector */
7336 	err = ice_req_irq_msix_misc(pf);
7337 	if (err) {
7338 		dev_err(dev, "misc vector setup failed: %d\n", err);
7339 		goto err_sched_init_port;
7340 	}
7341 
7342 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7343 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7344 		if (!rd32(hw, PFQF_FD_SIZE)) {
7345 			u16 unused, guar, b_effort;
7346 
7347 			guar = hw->func_caps.fd_fltr_guar;
7348 			b_effort = hw->func_caps.fd_fltr_best_effort;
7349 
7350 			/* force guaranteed filter pool for PF */
7351 			ice_alloc_fd_guar_item(hw, &unused, guar);
7352 			/* force shared filter pool for PF */
7353 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7354 		}
7355 	}
7356 
7357 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7358 		ice_dcb_rebuild(pf);
7359 
7360 	/* If the PF previously had PTP enabled, PTP init needs to happen
7361 	 * before the VSI rebuild; otherwise the PTP link status events will
7362 	 * fail.
7363 	 */
7364 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7365 		ice_ptp_reset(pf);
7366 
7367 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
7368 		ice_gnss_init(pf);
7369 
7370 	/* rebuild PF VSI */
7371 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7372 	if (err) {
7373 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7374 		goto err_vsi_rebuild;
7375 	}
7376 
7377 	/* configure PTP timestamping after VSI rebuild */
7378 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) {
7379 		if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF)
7380 			ice_ptp_cfg_timestamp(pf, false);
7381 		else if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL)
7382 			/* the E82x PHC owner always needs interrupts enabled */
7383 			ice_ptp_cfg_timestamp(pf, true);
7384 	}
7385 
7386 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
7387 	if (err) {
7388 		dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
7389 		goto err_vsi_rebuild;
7390 	}
7391 
7392 	if (reset_type == ICE_RESET_PFR) {
7393 		err = ice_rebuild_channels(pf);
7394 		if (err) {
7395 			dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7396 				err);
7397 			goto err_vsi_rebuild;
7398 		}
7399 	}
7400 
7401 	/* If Flow Director is active */
7402 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7403 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7404 		if (err) {
7405 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
7406 			goto err_vsi_rebuild;
7407 		}
7408 
7409 		/* replay HW Flow Director recipes */
7410 		if (hw->fdir_prof)
7411 			ice_fdir_replay_flows(hw);
7412 
7413 		/* replay Flow Director filters */
7414 		ice_fdir_replay_fltrs(pf);
7415 
7416 		ice_rebuild_arfs(pf);
7417 	}
7418 
7419 	ice_update_pf_netdev_link(pf);
7420 
7421 	/* tell the firmware we are up */
7422 	err = ice_send_version(pf);
7423 	if (err) {
7424 		dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7425 			err);
7426 		goto err_vsi_rebuild;
7427 	}
7428 
7429 	ice_replay_post(hw);
7430 
7431 	/* if we get here, reset flow is successful */
7432 	clear_bit(ICE_RESET_FAILED, pf->state);
7433 
7434 	ice_plug_aux_dev(pf);
7435 	if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
7436 		ice_lag_rebuild(pf);
7437 	return;
7438 
7439 err_vsi_rebuild:
7440 err_sched_init_port:
7441 	ice_sched_cleanup_all(hw);
7442 err_init_ctrlq:
7443 	ice_shutdown_all_ctrlq(hw);
7444 	set_bit(ICE_RESET_FAILED, pf->state);
7445 clear_recovery:
7446 	/* set this bit in PF state to control service task scheduling */
7447 	set_bit(ICE_NEEDS_RESTART, pf->state);
7448 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
7449 }
7450 
7451 /**
7452  * ice_change_mtu - NDO callback to change the MTU
7453  * @netdev: network interface device structure
7454  * @new_mtu: new value for maximum frame size
7455  *
7456  * Returns 0 on success, negative on failure
7457  */
7458 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7459 {
7460 	struct ice_netdev_priv *np = netdev_priv(netdev);
7461 	struct ice_vsi *vsi = np->vsi;
7462 	struct ice_pf *pf = vsi->back;
7463 	struct bpf_prog *prog;
7464 	u8 count = 0;
7465 	int err = 0;
7466 
7467 	if (new_mtu == (int)netdev->mtu) {
7468 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7469 		return 0;
7470 	}
7471 
7472 	prog = vsi->xdp_prog;
7473 	if (prog && !prog->aux->xdp_has_frags) {
7474 		int frame_size = ice_max_xdp_frame_size(vsi);
7475 
7476 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7477 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
7478 				   frame_size - ICE_ETH_PKT_HDR_PAD);
7479 			return -EINVAL;
7480 		}
7481 	} else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7482 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
7483 			netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
7484 				   ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
7485 			return -EINVAL;
7486 		}
7487 	}
7488 
7489 	/* if a reset is in progress, wait for some time for it to complete */
7490 	while (ice_is_reset_in_progress(pf->state) && count < 100) {
7491 		count++;
7492 		usleep_range(1000, 2000);
7493 	}
7499 
7500 	if (count == 100) {
7501 		netdev_err(netdev, "can't change MTU. Device is busy\n");
7502 		return -EBUSY;
7503 	}
7504 
7505 	netdev->mtu = (unsigned int)new_mtu;
7506 	err = ice_down_up(vsi);
7507 	if (err)
7508 		return err;
7509 
7510 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7511 	set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7512 
7513 	return err;
7514 }
7515 
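/* A minimal usage sketch (values illustrative): ice_change_mtu() is reached
 * through ndo_change_mtu, e.g. from userspace via
 *
 *	ip link set dev eth0 mtu 3000
 *
 * With an XDP program loaded that lacks xdp_has_frags, the request is
 * rejected above unless new_mtu + ICE_ETH_PKT_HDR_PAD fits in one Rx buffer.
 */
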
7516 /**
7517  * ice_eth_ioctl - Access the hwtstamp interface
7518  * @netdev: network interface device structure
7519  * @ifr: interface request data
7520  * @cmd: ioctl command
7521  */
7522 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7523 {
7524 	struct ice_netdev_priv *np = netdev_priv(netdev);
7525 	struct ice_pf *pf = np->vsi->back;
7526 
7527 	switch (cmd) {
7528 	case SIOCGHWTSTAMP:
7529 		return ice_ptp_get_ts_config(pf, ifr);
7530 	case SIOCSHWTSTAMP:
7531 		return ice_ptp_set_ts_config(pf, ifr);
7532 	default:
7533 		return -EOPNOTSUPP;
7534 	}
7535 }
7536 
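/* Illustrative userspace counterpart (a sketch, not driver code, assuming
 * <linux/net_tstamp.h> and <sys/ioctl.h>): enabling hardware timestamping
 * lands in ice_eth_ioctl() via SIOCSHWTSTAMP with a struct hwtstamp_config
 * passed through ifr_data:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	// placeholder name
 *	ioctl(sockfd, SIOCSHWTSTAMP, &ifr);		// sockfd: any socket
 */
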
7537 /**
7538  * ice_aq_str - convert AQ err code to a string
7539  * @aq_err: the AQ error code to convert
7540  */
7541 const char *ice_aq_str(enum ice_aq_err aq_err)
7542 {
7543 	switch (aq_err) {
7544 	case ICE_AQ_RC_OK:
7545 		return "OK";
7546 	case ICE_AQ_RC_EPERM:
7547 		return "ICE_AQ_RC_EPERM";
7548 	case ICE_AQ_RC_ENOENT:
7549 		return "ICE_AQ_RC_ENOENT";
7550 	case ICE_AQ_RC_ENOMEM:
7551 		return "ICE_AQ_RC_ENOMEM";
7552 	case ICE_AQ_RC_EBUSY:
7553 		return "ICE_AQ_RC_EBUSY";
7554 	case ICE_AQ_RC_EEXIST:
7555 		return "ICE_AQ_RC_EEXIST";
7556 	case ICE_AQ_RC_EINVAL:
7557 		return "ICE_AQ_RC_EINVAL";
7558 	case ICE_AQ_RC_ENOSPC:
7559 		return "ICE_AQ_RC_ENOSPC";
7560 	case ICE_AQ_RC_ENOSYS:
7561 		return "ICE_AQ_RC_ENOSYS";
7562 	case ICE_AQ_RC_EMODE:
7563 		return "ICE_AQ_RC_EMODE";
7564 	case ICE_AQ_RC_ENOSEC:
7565 		return "ICE_AQ_RC_ENOSEC";
7566 	case ICE_AQ_RC_EBADSIG:
7567 		return "ICE_AQ_RC_EBADSIG";
7568 	case ICE_AQ_RC_ESVN:
7569 		return "ICE_AQ_RC_ESVN";
7570 	case ICE_AQ_RC_EBADMAN:
7571 		return "ICE_AQ_RC_EBADMAN";
7572 	case ICE_AQ_RC_EBADBUF:
7573 		return "ICE_AQ_RC_EBADBUF";
7574 	}
7575 
7576 	return "ICE_AQ_RC_UNKNOWN";
7577 }
7578 
7579 /**
7580  * ice_set_rss_lut - Set RSS LUT
7581  * @vsi: Pointer to VSI structure
7582  * @lut: Lookup table
7583  * @lut_size: Lookup table size
7584  *
7585  * Returns 0 on success, negative on failure
7586  */
7587 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7588 {
7589 	struct ice_aq_get_set_rss_lut_params params = {};
7590 	struct ice_hw *hw = &vsi->back->hw;
7591 	int status;
7592 
7593 	if (!lut)
7594 		return -EINVAL;
7595 
7596 	params.vsi_handle = vsi->idx;
7597 	params.lut_size = lut_size;
7598 	params.lut_type = vsi->rss_lut_type;
7599 	params.lut = lut;
7600 
7601 	status = ice_aq_set_rss_lut(hw, &params);
7602 	if (status)
7603 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7604 			status, ice_aq_str(hw->adminq.sq_last_status));
7605 
7606 	return status;
7607 }
7608 
7609 /**
7610  * ice_set_rss_key - Set RSS key
7611  * @vsi: Pointer to the VSI structure
7612  * @seed: RSS hash seed
7613  *
7614  * Returns 0 on success, negative on failure
7615  */
7616 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7617 {
7618 	struct ice_hw *hw = &vsi->back->hw;
7619 	int status;
7620 
7621 	if (!seed)
7622 		return -EINVAL;
7623 
7624 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7625 	if (status)
7626 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7627 			status, ice_aq_str(hw->adminq.sq_last_status));
7628 
7629 	return status;
7630 }
7631 
7632 /**
7633  * ice_get_rss_lut - Get RSS LUT
7634  * @vsi: Pointer to VSI structure
7635  * @lut: Buffer to store the lookup table entries
7636  * @lut_size: Size of buffer to store the lookup table entries
7637  *
7638  * Returns 0 on success, negative on failure
7639  */
7640 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7641 {
7642 	struct ice_aq_get_set_rss_lut_params params = {};
7643 	struct ice_hw *hw = &vsi->back->hw;
7644 	int status;
7645 
7646 	if (!lut)
7647 		return -EINVAL;
7648 
7649 	params.vsi_handle = vsi->idx;
7650 	params.lut_size = lut_size;
7651 	params.lut_type = vsi->rss_lut_type;
7652 	params.lut = lut;
7653 
7654 	status = ice_aq_get_rss_lut(hw, &params);
7655 	if (status)
7656 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7657 			status, ice_aq_str(hw->adminq.sq_last_status));
7658 
7659 	return status;
7660 }
7661 
7662 /**
7663  * ice_get_rss_key - Get RSS key
7664  * @vsi: Pointer to VSI structure
7665  * @seed: Buffer to store the key in
7666  *
7667  * Returns 0 on success, negative on failure
7668  */
7669 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7670 {
7671 	struct ice_hw *hw = &vsi->back->hw;
7672 	int status;
7673 
7674 	if (!seed)
7675 		return -EINVAL;
7676 
7677 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7678 	if (status)
7679 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7680 			status, ice_aq_str(hw->adminq.sq_last_status));
7681 
7682 	return status;
7683 }
7684 
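/* A minimal sketch (not in the driver) of how the RSS helpers above pair
 * up, e.g. to spread flows evenly across vsi->rss_size queues; the buffer
 * sizing follows what the ethtool rxfh path would pass in:
 *
 *	u8 *lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
 *	int i, err;
 *
 *	if (!lut)
 *		return -ENOMEM;
 *	for (i = 0; i < vsi->rss_table_size; i++)
 *		lut[i] = i % vsi->rss_size;
 *	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
 *	kfree(lut);
 *	return err;
 */
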
7685 /**
7686  * ice_bridge_getlink - Get the hardware bridge mode
7687  * @skb: skb buff
7688  * @pid: process ID
7689  * @seq: RTNL message seq
7690  * @dev: the netdev being configured
7691  * @filter_mask: filter mask passed in
7692  * @nlflags: netlink flags passed in
7693  *
7694  * Return the bridge mode (VEB/VEPA)
7695  */
7696 static int
7697 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7698 		   struct net_device *dev, u32 filter_mask, int nlflags)
7699 {
7700 	struct ice_netdev_priv *np = netdev_priv(dev);
7701 	struct ice_vsi *vsi = np->vsi;
7702 	struct ice_pf *pf = vsi->back;
7703 	u16 bmode;
7704 
7705 	bmode = pf->first_sw->bridge_mode;
7706 
7707 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7708 				       filter_mask, NULL);
7709 }
7710 
7711 /**
7712  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7713  * @vsi: Pointer to VSI structure
7714  * @bmode: Hardware bridge mode (VEB/VEPA)
7715  *
7716  * Returns 0 on success, negative on failure
7717  */
7718 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7719 {
7720 	struct ice_aqc_vsi_props *vsi_props;
7721 	struct ice_hw *hw = &vsi->back->hw;
7722 	struct ice_vsi_ctx *ctxt;
7723 	int ret;
7724 
7725 	vsi_props = &vsi->info;
7726 
7727 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7728 	if (!ctxt)
7729 		return -ENOMEM;
7730 
7731 	ctxt->info = vsi->info;
7732 
7733 	if (bmode == BRIDGE_MODE_VEB)
7734 		/* change from VEPA to VEB mode */
7735 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7736 	else
7737 		/* change from VEB to VEPA mode */
7738 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7739 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
7740 
7741 	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7742 	if (ret) {
7743 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7744 			bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7745 		goto out;
7746 	}
7747 	/* Update sw flags for book keeping */
7748 	vsi_props->sw_flags = ctxt->info.sw_flags;
7749 
7750 out:
7751 	kfree(ctxt);
7752 	return ret;
7753 }
7754 
7755 /**
7756  * ice_bridge_setlink - Set the hardware bridge mode
7757  * @dev: the netdev being configured
7758  * @nlh: RTNL message
7759  * @flags: bridge setlink flags
7760  * @extack: netlink extended ack
7761  *
7762  * Sets the bridge mode (VEB/VEPA) of the switch the netdev (VSI) is hooked
7763  * up to. Iterates through the PF VSI list and sets the loopback mode (if
7764  * not already set) for all VSIs connected to this switch, and also updates
7765  * the unicast switch filter rules for the corresponding switch of the netdev.
7766  */
7767 static int
7768 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7769 		   u16 __always_unused flags,
7770 		   struct netlink_ext_ack __always_unused *extack)
7771 {
7772 	struct ice_netdev_priv *np = netdev_priv(dev);
7773 	struct ice_pf *pf = np->vsi->back;
7774 	struct nlattr *attr, *br_spec;
7775 	struct ice_hw *hw = &pf->hw;
7776 	struct ice_sw *pf_sw;
7777 	int rem, v, err = 0;
7778 
7779 	pf_sw = pf->first_sw;
7780 	/* find the attribute in the netlink message */
7781 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7782 
7783 	nla_for_each_nested(attr, br_spec, rem) {
7784 		__u16 mode;
7785 
7786 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
7787 			continue;
7788 		mode = nla_get_u16(attr);
7789 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7790 			return -EINVAL;
7791 		/* Continue if bridge mode is not being flipped */
7792 		if (mode == pf_sw->bridge_mode)
7793 			continue;
7794 		/* Iterates through the PF VSI list and update the loopback
7795 		 * mode of the VSI
7796 		 */
7797 		ice_for_each_vsi(pf, v) {
7798 			if (!pf->vsi[v])
7799 				continue;
7800 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7801 			if (err)
7802 				return err;
7803 		}
7804 
7805 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7806 		/* Update the unicast switch filter rules for the corresponding
7807 		 * switch of the netdev
7808 		 */
7809 		err = ice_update_sw_rule_bridge_mode(hw);
7810 		if (err) {
7811 			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
7812 				   mode, err,
7813 				   ice_aq_str(hw->adminq.sq_last_status));
7814 			/* revert hw->evb_veb */
7815 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7816 			return err;
7817 		}
7818 
7819 		pf_sw->bridge_mode = mode;
7820 	}
7821 
7822 	return 0;
7823 }
7824 
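/* From userspace, the VEB/VEPA flip handled above is typically driven via
 * iproute2 ("eth0" is a placeholder):
 *
 *	bridge link set dev eth0 hwmode vepa
 *	bridge link set dev eth0 hwmode veb
 *
 * and the currently programmed mode is reported back through
 * ice_bridge_getlink() with "bridge link show dev eth0".
 */
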
7825 /**
7826  * ice_tx_timeout - Respond to a Tx Hang
7827  * @netdev: network interface device structure
7828  * @txqueue: Tx queue
7829  */
7830 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
7831 {
7832 	struct ice_netdev_priv *np = netdev_priv(netdev);
7833 	struct ice_tx_ring *tx_ring = NULL;
7834 	struct ice_vsi *vsi = np->vsi;
7835 	struct ice_pf *pf = vsi->back;
7836 	u32 i;
7837 
7838 	pf->tx_timeout_count++;
7839 
7840 	/* Check if PFC is enabled for the TC to which the queue belongs.
7841 	 * If so, the Tx timeout is not caused by a hung queue and there is
7842 	 * no need to reset and rebuild
7843 	 */
7844 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7845 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7846 			 txqueue);
7847 		return;
7848 	}
7849 
7850 	/* now that we have an index, find the tx_ring struct */
7851 	ice_for_each_txq(vsi, i)
7852 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7853 			if (txqueue == vsi->tx_rings[i]->q_index) {
7854 				tx_ring = vsi->tx_rings[i];
7855 				break;
7856 			}
7857 
7858 	/* Reset recovery level if enough time has elapsed after last timeout.
7859 	 * Also ensure no new reset action happens before next timeout period.
7860 	 */
7861 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7862 		pf->tx_timeout_recovery_level = 1;
7863 	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7864 				       netdev->watchdog_timeo)))
7865 		return;
7866 
7867 	if (tx_ring) {
7868 		struct ice_hw *hw = &pf->hw;
7869 		u32 head, val = 0;
7870 
7871 		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7872 			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7873 		/* Read interrupt register */
7874 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7875 
7876 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7877 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7878 			    head, tx_ring->next_to_use, val);
7879 	}
7880 
7881 	pf->tx_timeout_last_recovery = jiffies;
7882 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7883 		    pf->tx_timeout_recovery_level, txqueue);
7884 
7885 	switch (pf->tx_timeout_recovery_level) {
7886 	case 1:
7887 		set_bit(ICE_PFR_REQ, pf->state);
7888 		break;
7889 	case 2:
7890 		set_bit(ICE_CORER_REQ, pf->state);
7891 		break;
7892 	case 3:
7893 		set_bit(ICE_GLOBR_REQ, pf->state);
7894 		break;
7895 	default:
7896 		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7897 		set_bit(ICE_DOWN, pf->state);
7898 		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7899 		set_bit(ICE_SERVICE_DIS, pf->state);
7900 		break;
7901 	}
7902 
7903 	ice_service_task_schedule(pf);
7904 	pf->tx_timeout_recovery_level++;
7905 }
7906 
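/* Recovery escalation at a glance: level 1 requests a PF reset (PFR),
 * level 2 a core reset (CORER), level 3 a global reset (GLOBR); anything
 * beyond that leaves the device down. The requested resets themselves are
 * serviced asynchronously by the service task scheduled above.
 */
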
7907 /**
7908  * ice_setup_tc_cls_flower - flower classifier offloads
7909  * @np: net device to configure
7910  * @filter_dev: device on which filter is added
7911  * @cls_flower: offload data
7912  */
7913 static int
7914 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
7915 			struct net_device *filter_dev,
7916 			struct flow_cls_offload *cls_flower)
7917 {
7918 	struct ice_vsi *vsi = np->vsi;
7919 
7920 	if (cls_flower->common.chain_index)
7921 		return -EOPNOTSUPP;
7922 
7923 	switch (cls_flower->command) {
7924 	case FLOW_CLS_REPLACE:
7925 		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
7926 	case FLOW_CLS_DESTROY:
7927 		return ice_del_cls_flower(vsi, cls_flower);
7928 	default:
7929 		return -EINVAL;
7930 	}
7931 }
7932 
7933 /**
7934  * ice_setup_tc_block_cb - callback handler registered for TC block
7935  * @type: TC SETUP type
7936  * @type_data: TC flower offload data that contains user input
7937  * @cb_priv: netdev private data
7938  */
7939 static int
7940 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
7941 {
7942 	struct ice_netdev_priv *np = cb_priv;
7943 
7944 	switch (type) {
7945 	case TC_SETUP_CLSFLOWER:
7946 		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
7947 					       type_data);
7948 	default:
7949 		return -EOPNOTSUPP;
7950 	}
7951 }
7952 
7953 /**
7954  * ice_validate_mqprio_qopt - Validate TCF input parameters
7955  * @vsi: Pointer to VSI
7956  * @mqprio_qopt: input parameters for mqprio queue configuration
7957  *
7958  * This function validates the MQPRIO params, such as qcount (power of 2
7959  * wherever needed), and makes sure the user doesn't specify qcounts and BW
7960  * rate limits for more TCs than "num_tc"
7961  */
7962 static int
7963 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
7964 			 struct tc_mqprio_qopt_offload *mqprio_qopt)
7965 {
7966 	int non_power_of_2_qcount = 0;
7967 	struct ice_pf *pf = vsi->back;
7968 	int max_rss_q_cnt = 0;
7969 	u64 sum_min_rate = 0;
7970 	struct device *dev;
7971 	int i, speed;
7972 	u8 num_tc;
7973 
7974 	if (vsi->type != ICE_VSI_PF)
7975 		return -EINVAL;
7976 
7977 	if (mqprio_qopt->qopt.offset[0] != 0 ||
7978 	    mqprio_qopt->qopt.num_tc < 1 ||
7979 	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
7980 		return -EINVAL;
7981 
7982 	dev = ice_pf_to_dev(pf);
7983 	vsi->ch_rss_size = 0;
7984 	num_tc = mqprio_qopt->qopt.num_tc;
7985 	speed = ice_get_link_speed_kbps(vsi);
7986 
7987 	for (i = 0; num_tc; i++) {
7988 		int qcount = mqprio_qopt->qopt.count[i];
7989 		u64 max_rate, min_rate, rem;
7990 
7991 		if (!qcount)
7992 			return -EINVAL;
7993 
7994 		if (is_power_of_2(qcount)) {
7995 			if (non_power_of_2_qcount &&
7996 			    qcount > non_power_of_2_qcount) {
7997 				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
7998 					qcount, non_power_of_2_qcount);
7999 				return -EINVAL;
8000 			}
8001 			if (qcount > max_rss_q_cnt)
8002 				max_rss_q_cnt = qcount;
8003 		} else {
8004 			if (non_power_of_2_qcount &&
8005 			    qcount != non_power_of_2_qcount) {
8006 				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
8007 					qcount, non_power_of_2_qcount);
8008 				return -EINVAL;
8009 			}
8010 			if (qcount < max_rss_q_cnt) {
8011 				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
8012 					qcount, max_rss_q_cnt);
8013 				return -EINVAL;
8014 			}
8015 			max_rss_q_cnt = qcount;
8016 			non_power_of_2_qcount = qcount;
8017 		}
8018 
8019 		/* The tc command takes input in K/M/Gbps or K/M/Gbit etc. but
8020 		 * converts the bandwidth rate limit into Bytes/s when
8021 		 * passing it down to the driver. So convert input bandwidth
8022 		 * from Bytes/s to Kbps
8023 		 */
8024 		max_rate = mqprio_qopt->max_rate[i];
8025 		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
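		/* Worked example (illustrative, assuming ICE_BW_KBPS_DIVISOR
		 * is 125, i.e. 125 Bytes/s per Kbps): "max_rate 1Gbit" from
		 * tc arrives here as 125000000 Bytes/s, and
		 * div_u64(125000000, 125) yields 1000000 Kbps.
		 */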
8026 
8027 		/* min_rate is minimum guaranteed rate and it can't be zero */
8028 		min_rate = mqprio_qopt->min_rate[i];
8029 		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
8030 		sum_min_rate += min_rate;
8031 
8032 		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
8033 			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
8034 				min_rate, ICE_MIN_BW_LIMIT);
8035 			return -EINVAL;
8036 		}
8037 
8038 		if (max_rate && max_rate > speed) {
8039 			dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
8040 				i, max_rate, speed);
8041 			return -EINVAL;
8042 		}
8043 
8044 		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
8045 		if (rem) {
8046 			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
8047 				i, ICE_MIN_BW_LIMIT);
8048 			return -EINVAL;
8049 		}
8050 
8051 		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
8052 		if (rem) {
8053 			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
8054 				i, ICE_MIN_BW_LIMIT);
8055 			return -EINVAL;
8056 		}
8057 
8058 		/* min_rate can't be more than max_rate, except when max_rate
8059 		 * is zero (implies max_rate sought is max line rate). In such
8060 		 * a case min_rate can be more than max.
8061 		 */
8062 		if (max_rate && min_rate > max_rate) {
8063 			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
8064 				min_rate, max_rate);
8065 			return -EINVAL;
8066 		}
8067 
8068 		if (i >= mqprio_qopt->qopt.num_tc - 1)
8069 			break;
8070 		if (mqprio_qopt->qopt.offset[i + 1] !=
8071 		    (mqprio_qopt->qopt.offset[i] + qcount))
8072 			return -EINVAL;
8073 	}
8074 	if (vsi->num_rxq <
8075 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8076 		return -EINVAL;
8077 	if (vsi->num_txq <
8078 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8079 		return -EINVAL;
8080 
8081 	if (sum_min_rate && sum_min_rate > (u64)speed) {
8082 		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
8083 			sum_min_rate, speed);
8084 		return -EINVAL;
8085 	}
8086 
8087 	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8088 	vsi->ch_rss_size = max_rss_q_cnt;
8089 
8090 	return 0;
8091 }
8092 
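/* The qopt validated above typically comes from a tc mqprio request in
 * channel mode, e.g. (a sketch; device name, map, and rates are examples):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel \
 *		shaper bw_rlimit min_rate 1Gbit 2Gbit max_rate 4Gbit 5Gbit
 *
 * which asks for two TCs of four queues each, with per-TC min/max rates
 * that the loop above converts to Kbps and range-checks.
 */
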
8093 /**
8094  * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8095  * @pf: ptr to PF device
8096  * @vsi: ptr to VSI
8097  */
8098 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8099 {
8100 	struct device *dev = ice_pf_to_dev(pf);
8101 	bool added = false;
8102 	struct ice_hw *hw;
8103 	int flow;
8104 
8105 	if (!(vsi->num_gfltr || vsi->num_bfltr))
8106 		return -EINVAL;
8107 
8108 	hw = &pf->hw;
8109 	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8110 		struct ice_fd_hw_prof *prof;
8111 		int tun, status;
8112 		u64 entry_h;
8113 
8114 		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8115 		      hw->fdir_prof[flow]->cnt))
8116 			continue;
8117 
8118 		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8119 			enum ice_flow_priority prio;
8120 			u64 prof_id;
8121 
8122 			/* add this VSI to FDir profile for this flow */
8123 			prio = ICE_FLOW_PRIO_NORMAL;
8124 			prof = hw->fdir_prof[flow];
8125 			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
8126 			status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
8127 						    prof->vsi_h[0], vsi->idx,
8128 						    prio, prof->fdir_seg[tun],
8129 						    &entry_h);
8130 			if (status) {
8131 				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8132 					vsi->idx, flow);
8133 				continue;
8134 			}
8135 
8136 			prof->entry_h[prof->cnt][tun] = entry_h;
8137 		}
8138 
8139 		/* store VSI for filter replay and delete */
8140 		prof->vsi_h[prof->cnt] = vsi->idx;
8141 		prof->cnt++;
8142 
8143 		added = true;
8144 		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8145 			flow);
8146 	}
8147 
8148 	if (!added)
8149 		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8150 
8151 	return 0;
8152 }
8153 
8154 /**
8155  * ice_add_channel - add a channel by adding VSI
8156  * @pf: ptr to PF device
8157  * @sw_id: underlying HW switching element ID
8158  * @ch: ptr to channel structure
8159  *
8160  * Add a channel (VSI) using add_vsi and queue_map
8161  */
8162 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8163 {
8164 	struct device *dev = ice_pf_to_dev(pf);
8165 	struct ice_vsi *vsi;
8166 
8167 	if (ch->type != ICE_VSI_CHNL) {
8168 		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8169 		return -EINVAL;
8170 	}
8171 
8172 	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8173 	if (!vsi || vsi->type != ICE_VSI_CHNL) {
8174 		dev_err(dev, "create chnl VSI failure\n");
8175 		return -EINVAL;
8176 	}
8177 
8178 	ice_add_vsi_to_fdir(pf, vsi);
8179 
8180 	ch->sw_id = sw_id;
8181 	ch->vsi_num = vsi->vsi_num;
8182 	ch->info.mapping_flags = vsi->info.mapping_flags;
8183 	ch->ch_vsi = vsi;
8184 	/* set the back pointer of channel for newly created VSI */
8185 	vsi->ch = ch;
8186 
8187 	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8188 	       sizeof(vsi->info.q_mapping));
8189 	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8190 	       sizeof(vsi->info.tc_mapping));
8191 
8192 	return 0;
8193 }
8194 
8195 /**
8196  * ice_chnl_cfg_res
8197  * @vsi: the VSI being setup
8198  * @ch: ptr to channel structure
8199  *
8200  * Configure channel specific resources such as rings and vectors.
8201  */
8202 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8203 {
8204 	int i;
8205 
8206 	for (i = 0; i < ch->num_txq; i++) {
8207 		struct ice_q_vector *tx_q_vector, *rx_q_vector;
8208 		struct ice_ring_container *rc;
8209 		struct ice_tx_ring *tx_ring;
8210 		struct ice_rx_ring *rx_ring;
8211 
8212 		tx_ring = vsi->tx_rings[ch->base_q + i];
8213 		rx_ring = vsi->rx_rings[ch->base_q + i];
8214 		if (!tx_ring || !rx_ring)
8215 			continue;
8216 
8217 		/* mark the rings as channel enabled */
8218 		tx_ring->ch = ch;
8219 		rx_ring->ch = ch;
8220 
8221 		/* following code block sets up vector specific attributes */
8222 		tx_q_vector = tx_ring->q_vector;
8223 		rx_q_vector = rx_ring->q_vector;
8224 		if (!tx_q_vector && !rx_q_vector)
8225 			continue;
8226 
8227 		if (tx_q_vector) {
8228 			tx_q_vector->ch = ch;
8229 			/* setup Tx and Rx ITR setting if DIM is off */
8230 			rc = &tx_q_vector->tx;
8231 			if (!ITR_IS_DYNAMIC(rc))
8232 				ice_write_itr(rc, rc->itr_setting);
8233 		}
8234 		if (rx_q_vector) {
8235 			rx_q_vector->ch = ch;
8236 			/* setup Tx and Rx ITR setting if DIM is off */
8237 			rc = &rx_q_vector->rx;
8238 			if (!ITR_IS_DYNAMIC(rc))
8239 				ice_write_itr(rc, rc->itr_setting);
8240 		}
8241 	}
8242 
8243 	/* It is safe to assume that, if the channel has a non-zero num_txq or
8244 	 * num_rxq, the GLINT_ITR register will have been written to perform an
8245 	 * in-context update, hence perform a flush
8246 	 */
8247 	if (ch->num_txq || ch->num_rxq)
8248 		ice_flush(&vsi->back->hw);
8249 }
8250 
8251 /**
8252  * ice_cfg_chnl_all_res - configure channel resources
8253  * @vsi: ptr to the main VSI
8254  * @ch: ptr to channel structure
8255  *
8256  * This function configures channel specific resources such as flow-director
8257  * counter index, and other resources such as queues, vectors, ITR settings
8258  */
8259 static void
8260 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8261 {
8262 	/* configure channel (aka ADQ) resources such as queues, vectors,
8263 	 * ITR settings for channel specific vectors and anything else
8264 	 */
8265 	ice_chnl_cfg_res(vsi, ch);
8266 }
8267 
8268 /**
8269  * ice_setup_hw_channel - setup new channel
8270  * @pf: ptr to PF device
8271  * @vsi: the VSI being setup
8272  * @ch: ptr to channel structure
8273  * @sw_id: underlying HW switching element ID
8274  * @type: type of channel to be created (VMDq2/VF)
8275  *
8276  * Sets up a new channel (VSI) based on the specified type (VMDq2/VF)
8277  * and configures Tx rings accordingly
8278  */
8279 static int
8280 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8281 		     struct ice_channel *ch, u16 sw_id, u8 type)
8282 {
8283 	struct device *dev = ice_pf_to_dev(pf);
8284 	int ret;
8285 
8286 	ch->base_q = vsi->next_base_q;
8287 	ch->type = type;
8288 
8289 	ret = ice_add_channel(pf, sw_id, ch);
8290 	if (ret) {
8291 		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8292 		return ret;
8293 	}
8294 
8295 	/* configure/setup ADQ specific resources */
8296 	ice_cfg_chnl_all_res(vsi, ch);
8297 
8298 	/* make sure to update next_base_q so that subsequent channels'
8299 	 * (aka ADQ) VSI queue maps are correct
8300 	 */
8301 	vsi->next_base_q += ch->num_rxq;
8302 	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8303 		ch->num_rxq);
8304 
8305 	return 0;
8306 }
8307 
8308 /**
8309  * ice_setup_channel - setup new channel using uplink element
8310  * @pf: ptr to PF device
8311  * @vsi: the VSI being setup
8312  * @ch: ptr to channel structure
8313  *
8314  * Sets up a new channel (VSI) based on the specified type (VMDq2/VF)
8315  * and the uplink switching element
8316  */
8317 static bool
8318 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8319 		  struct ice_channel *ch)
8320 {
8321 	struct device *dev = ice_pf_to_dev(pf);
8322 	u16 sw_id;
8323 	int ret;
8324 
8325 	if (vsi->type != ICE_VSI_PF) {
8326 		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8327 		return false;
8328 	}
8329 
8330 	sw_id = pf->first_sw->sw_id;
8331 
8332 	/* create channel (VSI) */
8333 	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8334 	if (ret) {
8335 		dev_err(dev, "failed to setup hw_channel\n");
8336 		return false;
8337 	}
8338 	dev_dbg(dev, "successfully created channel()\n");
8339 
8340 	return ch->ch_vsi;
8341 }
8342 
8343 /**
8344  * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8345  * @vsi: VSI to be configured
8346  * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8347  * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8348  */
8349 static int
8350 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8351 {
8352 	int err;
8353 
8354 	err = ice_set_min_bw_limit(vsi, min_tx_rate);
8355 	if (err)
8356 		return err;
8357 
8358 	return ice_set_max_bw_limit(vsi, max_tx_rate);
8359 }
8360 
8361 /**
8362  * ice_create_q_channel - function to create channel
8363  * @vsi: VSI to be configured
8364  * @ch: ptr to channel (it contains channel specific params)
8365  *
8366  * This function creates a channel (VSI) using the num_queues specified by
8367  * the user and reconfigures RSS if needed.
8368  */
8369 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8370 {
8371 	struct ice_pf *pf = vsi->back;
8372 	struct device *dev;
8373 
8374 	if (!ch)
8375 		return -EINVAL;
8376 
8377 	dev = ice_pf_to_dev(pf);
8378 	if (!ch->num_txq || !ch->num_rxq) {
8379 		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8380 		return -EINVAL;
8381 	}
8382 
8383 	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8384 		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8385 			vsi->cnt_q_avail, ch->num_txq);
8386 		return -EINVAL;
8387 	}
8388 
8389 	if (!ice_setup_channel(pf, vsi, ch)) {
8390 		dev_info(dev, "Failed to setup channel\n");
8391 		return -EINVAL;
8392 	}
8393 	/* configure BW rate limit */
8394 	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8395 		int ret;
8396 
8397 		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8398 				       ch->min_tx_rate);
8399 		if (ret)
8400 			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8401 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8402 		else
8403 			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8404 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8405 	}
8406 
8407 	vsi->cnt_q_avail -= ch->num_txq;
8408 
8409 	return 0;
8410 }
8411 
8412 /**
8413  * ice_rem_all_chnl_fltrs - removes all channel filters
8414  * @pf: ptr to PF, TC-flower based filter are tracked at PF level
8415  *
8416  * Remove all advanced switch filters only if they are channel specific
8417  * tc-flower based filter
8418  */
8419 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8420 {
8421 	struct ice_tc_flower_fltr *fltr;
8422 	struct hlist_node *node;
8423 
8424 	/* to remove all channel filters, iterate an ordered list of filters */
8425 	hlist_for_each_entry_safe(fltr, node,
8426 				  &pf->tc_flower_fltr_list,
8427 				  tc_flower_node) {
8428 		struct ice_rule_query_data rule;
8429 		int status;
8430 
8431 		/* for now process only channel specific filters */
8432 		if (!ice_is_chnl_fltr(fltr))
8433 			continue;
8434 
8435 		rule.rid = fltr->rid;
8436 		rule.rule_id = fltr->rule_id;
8437 		rule.vsi_handle = fltr->dest_vsi_handle;
8438 		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8439 		if (status) {
8440 			if (status == -ENOENT)
8441 				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8442 					rule.rule_id);
8443 			else
8444 				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8445 					status);
8446 		} else if (fltr->dest_vsi) {
8447 			/* update advanced switch filter count */
8448 			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8449 				u32 flags = fltr->flags;
8450 
8451 				fltr->dest_vsi->num_chnl_fltr--;
8452 				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8453 					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8454 					pf->num_dmac_chnl_fltrs--;
8455 			}
8456 		}
8457 
8458 		hlist_del(&fltr->tc_flower_node);
8459 		kfree(fltr);
8460 	}
8461 }
8462 
8463 /**
8464  * ice_remove_q_channels - Remove queue channels for the TCs
8465  * @vsi: VSI to be configured
8466  * @rem_fltr: delete advanced switch filter or not
8467  *
8468  * Remove queue channels for the TCs
8469  */
8470 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8471 {
8472 	struct ice_channel *ch, *ch_tmp;
8473 	struct ice_pf *pf = vsi->back;
8474 	int i;
8475 
8476 	/* remove all tc-flower based filters if they are channel filters only */
8477 	if (rem_fltr)
8478 		ice_rem_all_chnl_fltrs(pf);
8479 
8480 	/* remove ntuple filters since queue configuration is being changed */
8481 	if (vsi->netdev->features & NETIF_F_NTUPLE) {
8482 		struct ice_hw *hw = &pf->hw;
8483 
8484 		mutex_lock(&hw->fdir_fltr_lock);
8485 		ice_fdir_del_all_fltrs(vsi);
8486 		mutex_unlock(&hw->fdir_fltr_lock);
8487 	}
8488 
8489 	/* perform cleanup for channels if they exist */
8490 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8491 		struct ice_vsi *ch_vsi;
8492 
8493 		list_del(&ch->list);
8494 		ch_vsi = ch->ch_vsi;
8495 		if (!ch_vsi) {
8496 			kfree(ch);
8497 			continue;
8498 		}
8499 
8500 		/* Reset queue contexts */
8501 		for (i = 0; i < ch->num_rxq; i++) {
8502 			struct ice_tx_ring *tx_ring;
8503 			struct ice_rx_ring *rx_ring;
8504 
8505 			tx_ring = vsi->tx_rings[ch->base_q + i];
8506 			rx_ring = vsi->rx_rings[ch->base_q + i];
8507 			if (tx_ring) {
8508 				tx_ring->ch = NULL;
8509 				if (tx_ring->q_vector)
8510 					tx_ring->q_vector->ch = NULL;
8511 			}
8512 			if (rx_ring) {
8513 				rx_ring->ch = NULL;
8514 				if (rx_ring->q_vector)
8515 					rx_ring->q_vector->ch = NULL;
8516 			}
8517 		}
8518 
8519 		/* Release FD resources for the channel VSI */
8520 		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8521 
8522 		/* clear the VSI from scheduler tree */
8523 		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8524 
8525 		/* Delete VSI from FW, PF and HW VSI arrays */
8526 		ice_vsi_delete(ch->ch_vsi);
8527 
8528 		/* free the channel */
8529 		kfree(ch);
8530 	}
8531 
8532 	/* clear the channel VSI map which is stored in main VSI */
8533 	ice_for_each_chnl_tc(i)
8534 		vsi->tc_map_vsi[i] = NULL;
8535 
8536 	/* reset main VSI's all TC information */
8537 	vsi->all_enatc = 0;
8538 	vsi->all_numtc = 0;
8539 }
8540 
8541 /**
8542  * ice_rebuild_channels - rebuild channel
8543  * @pf: ptr to PF
8544  *
8545  * Recreate channel VSIs and replay filters
8546  */
8547 static int ice_rebuild_channels(struct ice_pf *pf)
8548 {
8549 	struct device *dev = ice_pf_to_dev(pf);
8550 	struct ice_vsi *main_vsi;
8551 	bool rem_adv_fltr = true;
8552 	struct ice_channel *ch;
8553 	struct ice_vsi *vsi;
8554 	int tc_idx = 1;
8555 	int i, err;
8556 
8557 	main_vsi = ice_get_main_vsi(pf);
8558 	if (!main_vsi)
8559 		return 0;
8560 
8561 	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8562 	    main_vsi->old_numtc == 1)
8563 		return 0; /* nothing to be done */
8564 
8565 	/* reconfigure main VSI based on old value of TC and cached values
8566 	 * for MQPRIO opts
8567 	 */
8568 	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8569 	if (err) {
8570 		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8571 			main_vsi->old_ena_tc, main_vsi->vsi_num);
8572 		return err;
8573 	}
8574 
8575 	/* rebuild ADQ VSIs */
8576 	ice_for_each_vsi(pf, i) {
8577 		enum ice_vsi_type type;
8578 
8579 		vsi = pf->vsi[i];
8580 		if (!vsi || vsi->type != ICE_VSI_CHNL)
8581 			continue;
8582 
8583 		type = vsi->type;
8584 
8585 		/* rebuild ADQ VSI */
8586 		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
8587 		if (err) {
8588 			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8589 				ice_vsi_type_str(type), vsi->idx, err);
8590 			goto cleanup;
8591 		}
8592 
8593 		/* Re-map the HW VSI number, using the VSI handle that will be
8594 		 * validated by the ice_replay_vsi() call below
8595 		 */
8596 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8597 
8598 		/* replay filters for the VSI */
8599 		err = ice_replay_vsi(&pf->hw, vsi->idx);
8600 		if (err) {
8601 			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8602 				ice_vsi_type_str(type), err, vsi->idx);
8603 			rem_adv_fltr = false;
8604 			goto cleanup;
8605 		}
8606 		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8607 			 ice_vsi_type_str(type), vsi->idx);
8608 
8609 		/* store ADQ VSI at correct TC index in main VSI's
8610 		 * map of TC to VSI
8611 		 */
8612 		main_vsi->tc_map_vsi[tc_idx++] = vsi;
8613 	}
8614 
8615 	/* ADQ VSI(s) have been rebuilt successfully, so set up the
8616 	 * channels for the main VSI's Tx and Rx rings
8617 	 */
8618 	list_for_each_entry(ch, &main_vsi->ch_list, list) {
8619 		struct ice_vsi *ch_vsi;
8620 
8621 		ch_vsi = ch->ch_vsi;
8622 		if (!ch_vsi)
8623 			continue;
8624 
8625 		/* reconfig channel resources */
8626 		ice_cfg_chnl_all_res(main_vsi, ch);
8627 
8628 		/* replay BW rate limit if it is non-zero */
8629 		if (!ch->max_tx_rate && !ch->min_tx_rate)
8630 			continue;
8631 
8632 		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8633 				       ch->min_tx_rate);
8634 		if (err)
8635 			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8636 				err, ch->max_tx_rate, ch->min_tx_rate,
8637 				ch_vsi->vsi_num);
8638 		else
8639 			dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8640 				ch->max_tx_rate, ch->min_tx_rate,
8641 				ch_vsi->vsi_num);
8642 	}
8643 
8644 	/* reconfig RSS for main VSI */
8645 	if (main_vsi->ch_rss_size)
8646 		ice_vsi_cfg_rss_lut_key(main_vsi);
8647 
8648 	return 0;
8649 
8650 cleanup:
8651 	ice_remove_q_channels(main_vsi, rem_adv_fltr);
8652 	return err;
8653 }
8654 
8655 /**
8656  * ice_create_q_channels - Add queue channel for the given TCs
8657  * @vsi: VSI to be configured
8658  *
8659  * Configures queue channel mapping to the given TCs
8660  */
8661 static int ice_create_q_channels(struct ice_vsi *vsi)
8662 {
8663 	struct ice_pf *pf = vsi->back;
8664 	struct ice_channel *ch;
8665 	int ret = 0, i;
8666 
8667 	ice_for_each_chnl_tc(i) {
8668 		if (!(vsi->all_enatc & BIT(i)))
8669 			continue;
8670 
8671 		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8672 		if (!ch) {
8673 			ret = -ENOMEM;
8674 			goto err_free;
8675 		}
8676 		INIT_LIST_HEAD(&ch->list);
8677 		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8678 		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8679 		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8680 		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8681 		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8682 
8683 		/* convert from Bytes/s to Kbps */
8684 		if (ch->max_tx_rate)
8685 			ch->max_tx_rate = div_u64(ch->max_tx_rate,
8686 						  ICE_BW_KBPS_DIVISOR);
8687 		if (ch->min_tx_rate)
8688 			ch->min_tx_rate = div_u64(ch->min_tx_rate,
8689 						  ICE_BW_KBPS_DIVISOR);
8690 
8691 		ret = ice_create_q_channel(vsi, ch);
8692 		if (ret) {
8693 			dev_err(ice_pf_to_dev(pf),
8694 				"failed creating channel TC:%d\n", i);
8695 			kfree(ch);
8696 			goto err_free;
8697 		}
8698 		list_add_tail(&ch->list, &vsi->ch_list);
8699 		vsi->tc_map_vsi[i] = ch->ch_vsi;
8700 		dev_dbg(ice_pf_to_dev(pf),
8701 			"successfully created channel: VSI %pK\n", ch->ch_vsi);
8702 	}
8703 	return 0;
8704 
8705 err_free:
8706 	ice_remove_q_channels(vsi, false);
8707 
8708 	return ret;
8709 }
8710 
8711 /**
8712  * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
8713  * @netdev: net device to configure
8714  * @type_data: TC offload data
8715  */
8716 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8717 {
8718 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8719 	struct ice_netdev_priv *np = netdev_priv(netdev);
8720 	struct ice_vsi *vsi = np->vsi;
8721 	struct ice_pf *pf = vsi->back;
8722 	u16 mode, ena_tc_qdisc = 0;
8723 	int cur_txq, cur_rxq;
8724 	u8 hw = 0, num_tcf;
8725 	struct device *dev;
8726 	int ret, i;
8727 
8728 	dev = ice_pf_to_dev(pf);
8729 	num_tcf = mqprio_qopt->qopt.num_tc;
8730 	hw = mqprio_qopt->qopt.hw;
8731 	mode = mqprio_qopt->mode;
8732 	if (!hw) {
8733 		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8734 		vsi->ch_rss_size = 0;
8735 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8736 		goto config_tcf;
8737 	}
8738 
8739 	/* Generate queue region map for number of TCF requested */
8740 	for (i = 0; i < num_tcf; i++)
8741 		ena_tc_qdisc |= BIT(i);
8742 
8743 	switch (mode) {
8744 	case TC_MQPRIO_MODE_CHANNEL:
8746 		if (pf->hw.port_info->is_custom_tx_enabled) {
8747 			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
8748 			return -EBUSY;
8749 		}
8750 		ice_tear_down_devlink_rate_tree(pf);
8751 
8752 		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8753 		if (ret) {
8754 			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8755 				   ret);
8756 			return ret;
8757 		}
8758 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8759 		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8760 		/* don't assume the state of hw_tc_offload during driver load;
8761 		 * set the flag for TC flower filters if hw_tc_offload is
8762 		 * already ON
8763 		 */
8764 		if (vsi->netdev->features & NETIF_F_HW_TC)
8765 			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8766 		break;
8767 	default:
8768 		return -EINVAL;
8769 	}
8770 
8771 config_tcf:
8772 
8773 	/* Requesting same TCF configuration as already enabled */
8774 	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8775 	    mode != TC_MQPRIO_MODE_CHANNEL)
8776 		return 0;
8777 
8778 	/* Pause VSI queues */
8779 	ice_dis_vsi(vsi, true);
8780 
8781 	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8782 		ice_remove_q_channels(vsi, true);
8783 		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
8784 				     num_online_cpus());
8785 		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
8786 				     num_online_cpus());
8787 	} else {
8790 		/* logic to rebuild VSI, same as ethtool -L */
8791 		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
8792 
8793 		for (i = 0; i < num_tcf; i++) {
8794 			if (!(ena_tc_qdisc & BIT(i)))
8795 				continue;
8796 
8797 			offset = vsi->mqprio_qopt.qopt.offset[i];
8798 			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
8799 			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
8800 		}
8801 		vsi->req_txq = offset + qcount_tx;
8802 		vsi->req_rxq = offset + qcount_rx;
8803 
8804 		/* store away the original rss_size info, so that it gets reused
8805 		 * from ice_vsi_rebuild during the tc-qdisc delete stage - to
8806 		 * determine what the rss_size for the main VSI should be
8807 		 */
8808 		vsi->orig_rss_size = vsi->rss_size;
8809 	}
8810 
8811 	/* save current values of Tx and Rx queues before calling VSI rebuild
8812 	 * for fallback option
8813 	 */
8814 	cur_txq = vsi->num_txq;
8815 	cur_rxq = vsi->num_rxq;
8816 
8817 	/* proceed with rebuilding the main VSI using the correct number of queues */
8818 	ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
8819 	if (ret) {
8820 		/* fallback to current number of queues */
8821 		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
8822 		vsi->req_txq = cur_txq;
8823 		vsi->req_rxq = cur_rxq;
8824 		clear_bit(ICE_RESET_FAILED, pf->state);
8825 		if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
8826 			dev_err(dev, "Rebuild of main VSI failed again\n");
8827 			return ret;
8828 		}
8829 	}
8830 
8831 	vsi->all_numtc = num_tcf;
8832 	vsi->all_enatc = ena_tc_qdisc;
8833 	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
8834 	if (ret) {
8835 		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
8836 			   vsi->vsi_num);
8837 		goto exit;
8838 	}
8839 
8840 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8841 		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8842 		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
8843 
8844 		/* set TC0 rate limit if specified */
8845 		if (max_tx_rate || min_tx_rate) {
8846 			/* convert from Bytes/s to Kbps */
8847 			if (max_tx_rate)
8848 				max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
8849 			if (min_tx_rate)
8850 				min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
8851 
8852 			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
8853 			if (!ret) {
8854 				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
8855 					max_tx_rate, min_tx_rate, vsi->vsi_num);
8856 			} else {
8857 				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
8858 					max_tx_rate, min_tx_rate, vsi->vsi_num);
8859 				goto exit;
8860 			}
8861 		}
8862 		ret = ice_create_q_channels(vsi);
8863 		if (ret) {
8864 			netdev_err(netdev, "failed configuring queue channels\n");
8865 			goto exit;
8866 		} else {
8867 			netdev_dbg(netdev, "successfully configured channels\n");
8868 		}
8869 	}
8870 
8871 	if (vsi->ch_rss_size)
8872 		ice_vsi_cfg_rss_lut_key(vsi);
8873 
8874 exit:
8875 	/* if error, reset the all_numtc and all_enatc */
8876 	if (ret) {
8877 		vsi->all_numtc = 0;
8878 		vsi->all_enatc = 0;
8879 	}
8880 	/* resume VSI */
8881 	ice_ena_vsi(vsi, true);
8882 
8883 	return ret;
8884 }
8885 
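/* A usage sketch for the teardown direction (illustrative device name):
 *
 *	tc qdisc del dev eth0 root
 *
 * arrives here with qopt.hw == 0, which takes the !hw path above: the
 * ICE_FLAG_TC_MQPRIO flag is cleared, the channels are removed, and the
 * main VSI is rebuilt with a CPU-bounded queue count.
 */
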
8886 static LIST_HEAD(ice_block_cb_list);
8887 
8888 static int
8889 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8890 	     void *type_data)
8891 {
8892 	struct ice_netdev_priv *np = netdev_priv(netdev);
8893 	struct ice_pf *pf = np->vsi->back;
8894 	bool locked = false;
8895 	int err;
8896 
8897 	switch (type) {
8898 	case TC_SETUP_BLOCK:
8899 		return flow_block_cb_setup_simple(type_data,
8900 						  &ice_block_cb_list,
8901 						  ice_setup_tc_block_cb,
8902 						  np, np, true);
8903 	case TC_SETUP_QDISC_MQPRIO:
8904 		if (ice_is_eswitch_mode_switchdev(pf)) {
8905 			netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
8906 			return -EOPNOTSUPP;
8907 		}
8908 
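		/* if an auxiliary (RDMA) device is attached, hold adev_mutex and
		 * the device lock while checking dev.driver, so the aux driver
		 * cannot bind or unbind concurrently; changing the qdisc is not
		 * supported while RDMA is active
		 */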
8909 		if (pf->adev) {
8910 			mutex_lock(&pf->adev_mutex);
8911 			device_lock(&pf->adev->dev);
8912 			locked = true;
8913 			if (pf->adev->dev.driver) {
8914 				netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
8915 				err = -EBUSY;
8916 				goto adev_unlock;
8917 			}
8918 		}
8919 
8920 		/* configure the mqprio qdisc (traffic classes and queue channels) */
8921 		mutex_lock(&pf->tc_mutex);
8922 		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
8923 		mutex_unlock(&pf->tc_mutex);
8924 
8925 adev_unlock:
8926 		if (locked) {
8927 			device_unlock(&pf->adev->dev);
8928 			mutex_unlock(&pf->adev_mutex);
8929 		}
8930 		return err;
8931 	default:
8932 		return -EOPNOTSUPP;
8933 	}
8934 	return -EOPNOTSUPP;
8935 }
8936 
8937 static struct ice_indr_block_priv *
8938 ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
8939 			   struct net_device *netdev)
8940 {
8941 	struct ice_indr_block_priv *cb_priv;
8942 
8943 	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
8944 		if (!cb_priv->netdev)
8945 			return NULL;
8946 		if (cb_priv->netdev == netdev)
8947 			return cb_priv;
8948 	}
8949 	return NULL;
8950 }
8951 
8952 static int
8953 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
8954 			void *indr_priv)
8955 {
8956 	struct ice_indr_block_priv *priv = indr_priv;
8957 	struct ice_netdev_priv *np = priv->np;
8958 
8959 	switch (type) {
8960 	case TC_SETUP_CLSFLOWER:
8961 		return ice_setup_tc_cls_flower(np, priv->netdev,
8962 					       (struct flow_cls_offload *)
8963 					       type_data);
8964 	default:
8965 		return -EOPNOTSUPP;
8966 	}
8967 }
8968 
8969 static int
8970 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
8971 			struct ice_netdev_priv *np,
8972 			struct flow_block_offload *f, void *data,
8973 			void (*cleanup)(struct flow_block_cb *block_cb))
8974 {
8975 	struct ice_indr_block_priv *indr_priv;
8976 	struct flow_block_cb *block_cb;
8977 
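	/* indirect offload is only offered for netdevs we can classify on:
	 * tunnel devices (e.g. VXLAN/GENEVE) and VLAN devices stacked on
	 * this PF's own uplink netdev
	 */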
8978 	if (!ice_is_tunnel_supported(netdev) &&
8979 	    !(is_vlan_dev(netdev) &&
8980 	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
8981 		return -EOPNOTSUPP;
8982 
8983 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
8984 		return -EOPNOTSUPP;
8985 
8986 	switch (f->command) {
8987 	case FLOW_BLOCK_BIND:
8988 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
8989 		if (indr_priv)
8990 			return -EEXIST;
8991 
8992 		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
8993 		if (!indr_priv)
8994 			return -ENOMEM;
8995 
8996 		indr_priv->netdev = netdev;
8997 		indr_priv->np = np;
8998 		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
8999 
9000 		block_cb =
9001 			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
9002 						 indr_priv, indr_priv,
9003 						 ice_rep_indr_tc_block_unbind,
9004 						 f, netdev, sch, data, np,
9005 						 cleanup);
9006 
9007 		if (IS_ERR(block_cb)) {
9008 			list_del(&indr_priv->list);
9009 			kfree(indr_priv);
9010 			return PTR_ERR(block_cb);
9011 		}
9012 		flow_block_cb_add(block_cb, f);
9013 		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
9014 		break;
9015 	case FLOW_BLOCK_UNBIND:
9016 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
9017 		if (!indr_priv)
9018 			return -ENOENT;
9019 
9020 		block_cb = flow_block_cb_lookup(f->block,
9021 						ice_indr_setup_block_cb,
9022 						indr_priv);
9023 		if (!block_cb)
9024 			return -ENOENT;
9025 
9026 		flow_indr_block_cb_remove(block_cb, f);
9027 
9028 		list_del(&block_cb->driver_list);
9029 		break;
9030 	default:
9031 		return -EOPNOTSUPP;
9032 	}
9033 	return 0;
9034 }
9035 
9036 static int
9037 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
9038 		     void *cb_priv, enum tc_setup_type type, void *type_data,
9039 		     void *data,
9040 		     void (*cleanup)(struct flow_block_cb *block_cb))
9041 {
9042 	switch (type) {
9043 	case TC_SETUP_BLOCK:
9044 		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
9045 					       data, cleanup);
9046 
9047 	default:
9048 		return -EOPNOTSUPP;
9049 	}
9050 }
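
/* ice_indr_setup_tc_cb() is the indirect block callback, registered
 * elsewhere in the driver via flow_indr_dev_register() so that TC
 * blocks created on foreign netdevs (tunnels, VLAN uppers) can be
 * bound to this PF.
 */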
9051 
9052 /**
9053  * ice_open - Called when a network interface becomes active
9054  * @netdev: network interface device structure
9055  *
9056  * The open entry point is called when a network interface is made
9057  * active by the system (IFF_UP). At this point all resources needed
9058  * for transmit and receive operations are allocated, the interrupt
9059  * handler is registered with the OS, the netdev watchdog is enabled,
9060  * and the stack is notified that the interface is ready.
9061  *
9062  * Returns 0 on success, negative value on failure
9063  */
9064 int ice_open(struct net_device *netdev)
9065 {
9066 	struct ice_netdev_priv *np = netdev_priv(netdev);
9067 	struct ice_pf *pf = np->vsi->back;
9068 
9069 	if (ice_is_reset_in_progress(pf->state)) {
9070 		netdev_err(netdev, "can't open net device while reset is in progress\n");
9071 		return -EBUSY;
9072 	}
9073 
9074 	return ice_open_internal(netdev);
9075 }
9076 
9077 /**
9078  * ice_open_internal - Called when a network interface becomes active
9079  * @netdev: network interface device structure
9080  *
9081  * Internal ice_open implementation. Should not be called directly except
9082  * by ice_open and the reset handling routines.
9083  *
9084  * Returns 0 on success, negative value on failure
9085  */
9086 int ice_open_internal(struct net_device *netdev)
9087 {
9088 	struct ice_netdev_priv *np = netdev_priv(netdev);
9089 	struct ice_vsi *vsi = np->vsi;
9090 	struct ice_pf *pf = vsi->back;
9091 	struct ice_port_info *pi;
9092 	int err;
9093 
9094 	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
9095 		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
9096 		return -EIO;
9097 	}
9098 
9099 	netif_carrier_off(netdev);
9100 
9101 	pi = vsi->port_info;
9102 	err = ice_update_link_info(pi);
9103 	if (err) {
9104 		netdev_err(netdev, "Failed to get link info, error %d\n", err);
9105 		return err;
9106 	}
9107 
9108 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
9109 
9110 	/* Set the PHY if there is media, otherwise turn the PHY off */
9111 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
9112 		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9113 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
9114 			err = ice_init_phy_user_cfg(pi);
9115 			if (err) {
9116 				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
9117 					   err);
9118 				return err;
9119 			}
9120 		}
9121 
9122 		err = ice_configure_phy(vsi);
9123 		if (err) {
9124 			netdev_err(netdev, "Failed to set physical link up, error %d\n",
9125 				   err);
9126 			return err;
9127 		}
9128 	} else {
9129 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9130 		ice_set_link(vsi, false);
9131 	}
9132 
9133 	err = ice_vsi_open(vsi);
9134 	if (err)
9135 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
9136 			   vsi->vsi_num, vsi->vsw->sw_id);
9137 
9138 	/* Update existing tunnels information */
9139 	/* Update existing tunnel information */
9140 
9141 	return err;
9142 }
9143 
9144 /**
9145  * ice_stop - Disables a network interface
9146  * @netdev: network interface device structure
9147  *
9148  * The stop entry point is called when an interface is de-activated by the OS,
9149  * and the netdevice enters the DOWN state. The hardware is still under the
9150  * driver's control, but the netdev interface is disabled.
9151  *
9152  * Returns success only - not allowed to fail
9153  */
9154 int ice_stop(struct net_device *netdev)
9155 {
9156 	struct ice_netdev_priv *np = netdev_priv(netdev);
9157 	struct ice_vsi *vsi = np->vsi;
9158 	struct ice_pf *pf = vsi->back;
9159 
9160 	if (ice_is_reset_in_progress(pf->state)) {
9161 		netdev_err(netdev, "can't stop net device while reset is in progress\n");
9162 		return -EBUSY;
9163 	}
9164 
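	/* the link-down-on-close private flag (settable via
	 * ethtool --set-priv-flags) requests that the physical link be forced
	 * down when the interface is closed, instead of leaving the PHY up
	 */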
9165 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
9166 		int link_err = ice_force_phys_link_state(vsi, false);
9167 
9168 		if (link_err) {
9169 			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
9170 				   vsi->vsi_num, link_err);
9171 			return -EIO;
9172 		}
9173 	}
9174 
9175 	ice_vsi_close(vsi);
9176 
9177 	return 0;
9178 }
9179 
9180 /**
9181  * ice_features_check - Validate encapsulated packet conforms to limits
9182  * @skb: skb buffer
9183  * @netdev: This port's netdev
9184  * @features: Offload features that the stack believes apply
9185  */
9186 static netdev_features_t
9187 ice_features_check(struct sk_buff *skb,
9188 		   struct net_device __always_unused *netdev,
9189 		   netdev_features_t features)
9190 {
9191 	bool gso = skb_is_gso(skb);
9192 	size_t len;
9193 
9194 	/* No point in doing any of this if neither checksum nor GSO are
9195 	 * being requested for this frame. We can rule out both by just
9196 	 * checking for CHECKSUM_PARTIAL
9197 	 */
9198 	if (skb->ip_summed != CHECKSUM_PARTIAL)
9199 		return features;
9200 
9201 	/* We cannot support GSO if the MSS is going to be less than
9202 	 * 64 bytes; if it is, drop the GSO feature for this frame.
9203 	 */
9204 	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
9205 		features &= ~NETIF_F_GSO_MASK;
9206 
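	/* the length checks below guard the Tx descriptor header-length
	 * fields; the hardware encodes these lengths in 2-byte words, which
	 * is presumably why odd values (len & 0x1) are also rejected
	 */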
9207 	len = skb_network_offset(skb);
9208 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
9209 		goto out_rm_features;
9210 
9211 	len = skb_network_header_len(skb);
9212 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9213 		goto out_rm_features;
9214 
9215 	if (skb->encapsulation) {
9216 		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
9217 		 * the case of IPIP frames, the transport header pointer is
9218 		 * after the inner header! So check to make sure that this
9219 		 * is a GRE or UDP_TUNNEL frame before doing that math.
9220 		 */
9221 		if (gso && (skb_shinfo(skb)->gso_type &
9222 			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
9223 			len = skb_inner_network_header(skb) -
9224 			      skb_transport_header(skb);
9225 			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
9226 				goto out_rm_features;
9227 		}
9228 
9229 		len = skb_inner_network_header_len(skb);
9230 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9231 			goto out_rm_features;
9232 	}
9233 
9234 	return features;
9235 out_rm_features:
9236 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9237 }
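
/* When ice_features_check() clears NETIF_F_CSUM_MASK and NETIF_F_GSO_MASK
 * for a frame, the stack falls back to software checksumming and GSO
 * segmentation for that skb.
 */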
9238 
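/* Minimal netdev ops used when the driver runs in safe mode (e.g. when
 * the DDP package could not be loaded): advanced features such as TC
 * offloads and VF configuration are absent, and ice_xdp_safe_mode is
 * expected to reject XDP setup requests.
 */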
9239 static const struct net_device_ops ice_netdev_safe_mode_ops = {
9240 	.ndo_open = ice_open,
9241 	.ndo_stop = ice_stop,
9242 	.ndo_start_xmit = ice_start_xmit,
9243 	.ndo_set_mac_address = ice_set_mac_address,
9244 	.ndo_validate_addr = eth_validate_addr,
9245 	.ndo_change_mtu = ice_change_mtu,
9246 	.ndo_get_stats64 = ice_get_stats64,
9247 	.ndo_tx_timeout = ice_tx_timeout,
9248 	.ndo_bpf = ice_xdp_safe_mode,
9249 };
9250 
9251 static const struct net_device_ops ice_netdev_ops = {
9252 	.ndo_open = ice_open,
9253 	.ndo_stop = ice_stop,
9254 	.ndo_start_xmit = ice_start_xmit,
9255 	.ndo_select_queue = ice_select_queue,
9256 	.ndo_features_check = ice_features_check,
9257 	.ndo_fix_features = ice_fix_features,
9258 	.ndo_set_rx_mode = ice_set_rx_mode,
9259 	.ndo_set_mac_address = ice_set_mac_address,
9260 	.ndo_validate_addr = eth_validate_addr,
9261 	.ndo_change_mtu = ice_change_mtu,
9262 	.ndo_get_stats64 = ice_get_stats64,
9263 	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
9264 	.ndo_eth_ioctl = ice_eth_ioctl,
9265 	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
9266 	.ndo_set_vf_mac = ice_set_vf_mac,
9267 	.ndo_get_vf_config = ice_get_vf_cfg,
9268 	.ndo_set_vf_trust = ice_set_vf_trust,
9269 	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
9270 	.ndo_set_vf_link_state = ice_set_vf_link_state,
9271 	.ndo_get_vf_stats = ice_get_vf_stats,
9272 	.ndo_set_vf_rate = ice_set_vf_bw,
9273 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
9274 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
9275 	.ndo_setup_tc = ice_setup_tc,
9276 	.ndo_set_features = ice_set_features,
9277 	.ndo_bridge_getlink = ice_bridge_getlink,
9278 	.ndo_bridge_setlink = ice_bridge_setlink,
9279 	.ndo_fdb_add = ice_fdb_add,
9280 	.ndo_fdb_del = ice_fdb_del,
9281 #ifdef CONFIG_RFS_ACCEL
9282 	.ndo_rx_flow_steer = ice_rx_flow_steer,
9283 #endif
9284 	.ndo_tx_timeout = ice_tx_timeout,
9285 	.ndo_bpf = ice_xdp,
9286 	.ndo_xdp_xmit = ice_xdp_xmit,
9287 	.ndo_xsk_wakeup = ice_xsk_wakeup,
9288 };
9289