1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <generated/utsrelease.h>
9 #include "ice.h"
10 #include "ice_base.h"
11 #include "ice_lib.h"
12 #include "ice_fltr.h"
13 #include "ice_dcb_lib.h"
14 #include "ice_dcb_nl.h"
15 #include "ice_devlink.h"
16 
17 #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
18 static const char ice_driver_string[] = DRV_SUMMARY;
19 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
20 
21 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
22 #define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
23 #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
24 
25 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
26 MODULE_DESCRIPTION(DRV_SUMMARY);
27 MODULE_LICENSE("GPL v2");
28 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
29 
30 static int debug = -1;
31 module_param(debug, int, 0644);
32 #ifndef CONFIG_DYNAMIC_DEBUG
33 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
34 #else
35 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
36 #endif /* !CONFIG_DYNAMIC_DEBUG */
37 
38 static DEFINE_IDA(ice_aux_ida);
39 
40 static struct workqueue_struct *ice_wq;
41 static const struct net_device_ops ice_netdev_safe_mode_ops;
42 static const struct net_device_ops ice_netdev_ops;
43 static int ice_vsi_open(struct ice_vsi *vsi);
44 
45 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
46 
47 static void ice_vsi_release_all(struct ice_pf *pf);
48 
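/**
 * netif_is_ice - check if the given netdev is driven by the ice driver
 * @dev: net device to check
 */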
49 bool netif_is_ice(struct net_device *dev)
50 {
51 	return dev && (dev->netdev_ops == &ice_netdev_ops);
52 }
53 
54 /**
55  * ice_get_tx_pending - returns number of Tx descriptors not processed
56  * @ring: the ring of descriptors
57  */
58 static u16 ice_get_tx_pending(struct ice_ring *ring)
59 {
60 	u16 head, tail;
61 
62 	head = ring->next_to_clean;
63 	tail = ring->next_to_use;
64 
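	/* pending work is the distance from next_to_clean to next_to_use;
	 * add ring->count to account for wrap-around of the ring
	 */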
65 	if (head != tail)
66 		return (head < tail) ?
67 			tail - head : (tail + ring->count - head);
68 	return 0;
69 }
70 
71 /**
72  * ice_check_for_hang_subtask - check for and recover hung queues
73  * @pf: pointer to PF struct
74  */
75 static void ice_check_for_hang_subtask(struct ice_pf *pf)
76 {
77 	struct ice_vsi *vsi = NULL;
78 	struct ice_hw *hw;
79 	unsigned int i;
80 	int packets;
81 	u32 v;
82 
83 	ice_for_each_vsi(pf, v)
84 		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
85 			vsi = pf->vsi[v];
86 			break;
87 		}
88 
89 	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
90 		return;
91 
92 	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
93 		return;
94 
95 	hw = &vsi->back->hw;
96 
97 	for (i = 0; i < vsi->num_txq; i++) {
98 		struct ice_ring *tx_ring = vsi->tx_rings[i];
99 
100 		if (tx_ring && tx_ring->desc) {
101 			/* If the packet counter has not changed, the queue is
102 			 * likely stalled, so force an interrupt for this
103 			 * queue.
104 			 *
105 			 * prev_pkt would be negative if there was no
106 			 * pending work.
107 			 */
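			/* masking with INT_MAX keeps the count non-negative
			 * so it can never collide with the -1 sentinel above
			 */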
108 			packets = tx_ring->stats.pkts & INT_MAX;
109 			if (tx_ring->tx_stats.prev_pkt == packets) {
110 				/* Trigger sw interrupt to revive the queue */
111 				ice_trigger_sw_intr(hw, tx_ring->q_vector);
112 				continue;
113 			}
114 
115 			/* Memory barrier between read of packet count and call
116 			 * to ice_get_tx_pending()
117 			 */
118 			smp_rmb();
119 			tx_ring->tx_stats.prev_pkt =
120 			    ice_get_tx_pending(tx_ring) ? packets : -1;
121 		}
122 	}
123 }
124 
125 /**
126  * ice_init_mac_fltr - Set initial MAC filters
127  * @pf: board private structure
128  *
129  * Set initial set of MAC filters for PF VSI; configure filters for permanent
130  * address and broadcast address. If an error is encountered, netdevice will be
131  * unregistered.
132  */
133 static int ice_init_mac_fltr(struct ice_pf *pf)
134 {
135 	enum ice_status status;
136 	struct ice_vsi *vsi;
137 	u8 *perm_addr;
138 
139 	vsi = ice_get_main_vsi(pf);
140 	if (!vsi)
141 		return -EINVAL;
142 
143 	perm_addr = vsi->port_info->mac.perm_addr;
144 	status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
145 	if (status)
146 		return -EIO;
147 
148 	return 0;
149 }
150 
151 /**
152  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
153  * @netdev: the net device on which the sync is happening
154  * @addr: MAC address to sync
155  *
156  * This is a callback function which is called by the in-kernel device sync
157  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
158  * populates the tmp_sync_list, which is later used by ice_add_mac to add the
159  * MAC filters to the hardware.
160  */
161 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
162 {
163 	struct ice_netdev_priv *np = netdev_priv(netdev);
164 	struct ice_vsi *vsi = np->vsi;
165 
166 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
167 				     ICE_FWD_TO_VSI))
168 		return -EINVAL;
169 
170 	return 0;
171 }
172 
173 /**
174  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
175  * @netdev: the net device on which the unsync is happening
176  * @addr: MAC address to unsync
177  *
178  * This is a callback function which is called by the in-kernel device unsync
179  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
180  * populates the tmp_unsync_list, which is later used by ice_remove_mac to
181  * delete the MAC filters from the hardware.
182  */
183 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
184 {
185 	struct ice_netdev_priv *np = netdev_priv(netdev);
186 	struct ice_vsi *vsi = np->vsi;
187 
188 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
189 				     ICE_FWD_TO_VSI))
190 		return -EINVAL;
191 
192 	return 0;
193 }
194 
195 /**
196  * ice_vsi_fltr_changed - check if filter state changed
197  * @vsi: VSI to be checked
198  *
199  * returns true if filter state has changed, false otherwise.
200  */
201 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
202 {
203 	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
204 	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
205 	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
206 }
207 
208 /**
209  * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
210  * @vsi: the VSI being configured
211  * @promisc_m: mask of promiscuous config bits
212  * @set_promisc: enable or disable promisc flag request
213  */
215 static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
216 {
217 	struct ice_hw *hw = &vsi->back->hw;
218 	enum ice_status status = 0;
219 
220 	if (vsi->type != ICE_VSI_PF)
221 		return 0;
222 
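	/* a VSI with multiple VLANs needs the promiscuous rule applied per
	 * VLAN, so use the VLAN-aware variant
	 */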
223 	if (vsi->num_vlan > 1) {
224 		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
225 						  set_promisc);
226 	} else {
227 		if (set_promisc)
228 			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
229 						     0);
230 		else
231 			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
232 						       0);
233 	}
234 
235 	if (status)
236 		return -EIO;
237 
238 	return 0;
239 }
240 
241 /**
242  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
243  * @vsi: ptr to the VSI
244  *
245  * Push any outstanding VSI filter changes through the AdminQ.
246  */
247 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
248 {
249 	struct device *dev = ice_pf_to_dev(vsi->back);
250 	struct net_device *netdev = vsi->netdev;
251 	bool promisc_forced_on = false;
252 	struct ice_pf *pf = vsi->back;
253 	struct ice_hw *hw = &pf->hw;
254 	enum ice_status status = 0;
255 	u32 changed_flags = 0;
256 	u8 promisc_m;
257 	int err = 0;
258 
259 	if (!vsi->netdev)
260 		return -EINVAL;
261 
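	/* serialize with other callers; only one filter sync may run on a
	 * given VSI at a time
	 */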
262 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
263 		usleep_range(1000, 2000);
264 
265 	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
266 	vsi->current_netdev_flags = vsi->netdev->flags;
267 
268 	INIT_LIST_HEAD(&vsi->tmp_sync_list);
269 	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
270 
271 	if (ice_vsi_fltr_changed(vsi)) {
272 		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
273 		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
274 		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
275 
276 		/* grab the netdev's addr_list_lock */
277 		netif_addr_lock_bh(netdev);
278 		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
279 			      ice_add_mac_to_unsync_list);
280 		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
281 			      ice_add_mac_to_unsync_list);
282 		/* our temp lists are populated. release lock */
283 		netif_addr_unlock_bh(netdev);
284 	}
285 
286 	/* Remove MAC addresses in the unsync list */
287 	status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
288 	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
289 	if (status) {
290 		netdev_err(netdev, "Failed to delete MAC filters\n");
291 		/* if we failed because of alloc failures, just bail */
292 		if (status == ICE_ERR_NO_MEMORY) {
293 			err = -ENOMEM;
294 			goto out;
295 		}
296 	}
297 
298 	/* Add MAC addresses in the sync list */
299 	status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
300 	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
301 	/* If the filter was added successfully or already exists, do not
302 	 * treat it as an error; instead continue processing the rest of
303 	 * the function.
304 	 */
305 	if (status && status != ICE_ERR_ALREADY_EXISTS) {
306 		netdev_err(netdev, "Failed to add MAC filters\n");
307 		/* If there is no more space for new umac filters, VSI
308 		 * should go into promiscuous mode. There should be some
309 		 * space reserved for promiscuous filters.
310 		 */
311 		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
312 		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
313 				      vsi->state)) {
314 			promisc_forced_on = true;
315 			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
316 				    vsi->vsi_num);
317 		} else {
318 			err = -EIO;
319 			goto out;
320 		}
321 	}
322 	/* check for changes in promiscuous modes */
323 	if (changed_flags & IFF_ALLMULTI) {
324 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
325 			if (vsi->num_vlan > 1)
326 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
327 			else
328 				promisc_m = ICE_MCAST_PROMISC_BITS;
329 
330 			err = ice_cfg_promisc(vsi, promisc_m, true);
331 			if (err) {
332 				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
333 					   vsi->vsi_num);
334 				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
335 				goto out_promisc;
336 			}
337 		} else {
338 			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
339 			if (vsi->num_vlan > 1)
340 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
341 			else
342 				promisc_m = ICE_MCAST_PROMISC_BITS;
343 
344 			err = ice_cfg_promisc(vsi, promisc_m, false);
345 			if (err) {
346 				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
347 					   vsi->vsi_num);
348 				vsi->current_netdev_flags |= IFF_ALLMULTI;
349 				goto out_promisc;
350 			}
351 		}
352 	}
353 
354 	if ((changed_flags & IFF_PROMISC) || promisc_forced_on ||
355 	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
356 		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
357 		if (vsi->current_netdev_flags & IFF_PROMISC) {
358 			/* Apply Rx filter rule to get traffic from wire */
359 			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
360 				err = ice_set_dflt_vsi(pf->first_sw, vsi);
361 				if (err && err != -EEXIST) {
362 					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
363 						   err, vsi->vsi_num);
364 					vsi->current_netdev_flags &=
365 						~IFF_PROMISC;
366 					goto out_promisc;
367 				}
368 				ice_cfg_vlan_pruning(vsi, false, false);
369 			}
370 		} else {
371 			/* Clear Rx filter to remove traffic from wire */
372 			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
373 				err = ice_clear_dflt_vsi(pf->first_sw);
374 				if (err) {
375 					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
376 						   err, vsi->vsi_num);
377 					vsi->current_netdev_flags |=
378 						IFF_PROMISC;
379 					goto out_promisc;
380 				}
381 				if (vsi->num_vlan > 1)
382 					ice_cfg_vlan_pruning(vsi, true, false);
383 			}
384 		}
385 	}
386 	goto exit;
387 
388 out_promisc:
389 	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
390 	goto exit;
391 out:
392 	/* if something went wrong then set the changed flag so we try again */
393 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
394 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
395 exit:
396 	clear_bit(ICE_CFG_BUSY, vsi->state);
397 	return err;
398 }
399 
400 /**
401  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
402  * @pf: board private structure
403  */
404 static void ice_sync_fltr_subtask(struct ice_pf *pf)
405 {
406 	int v;
407 
408 	if (!pf || !test_bit(ICE_FLAG_FLTR_SYNC, pf->flags))
409 		return;
410 
411 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
412 
413 	ice_for_each_vsi(pf, v)
414 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
415 		    ice_vsi_sync_fltr(pf->vsi[v])) {
416 			/* come back and try again later */
417 			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
418 			break;
419 		}
420 }
421 
422 /**
423  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
424  * @pf: the PF
425  * @locked: is the rtnl_lock already held
426  */
427 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
428 {
429 	int node;
430 	int v;
431 
432 	ice_for_each_vsi(pf, v)
433 		if (pf->vsi[v])
434 			ice_dis_vsi(pf->vsi[v], locked);
435 
436 	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
437 		pf->pf_agg_node[node].num_vsis = 0;
438 
439 	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
440 		pf->vf_agg_node[node].num_vsis = 0;
441 }
442 
443 /**
444  * ice_prepare_for_reset - prep for the core to reset
445  * @pf: board private structure
446  *
447  * Inform or close all dependent features in prep for reset.
448  */
449 static void
450 ice_prepare_for_reset(struct ice_pf *pf)
451 {
452 	struct ice_hw *hw = &pf->hw;
453 	unsigned int i;
454 
455 	/* already prepared for reset */
456 	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
457 		return;
458 
459 	ice_unplug_aux_dev(pf);
460 
461 	/* Notify VFs of impending reset */
462 	if (ice_check_sq_alive(hw, &hw->mailboxq))
463 		ice_vc_notify_reset(pf);
464 
465 	/* Disable VFs until reset is completed */
466 	ice_for_each_vf(pf, i)
467 		ice_set_vf_state_qs_dis(&pf->vf[i]);
468 
469 	/* clear SW filtering DB */
470 	ice_clear_hw_tbls(hw);
471 	/* disable the VSIs and their queues that are not already DOWN */
472 	ice_pf_dis_all_vsi(pf, false);
473 
474 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
475 		ice_ptp_release(pf);
476 
477 	if (hw->port_info)
478 		ice_sched_clear_port(hw->port_info);
479 
480 	ice_shutdown_all_ctrlq(hw);
481 
482 	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
483 }
484 
485 /**
486  * ice_do_reset - Initiate one of many types of resets
487  * @pf: board private structure
488  * @reset_type: reset type requested (set by the caller before this
489  * function was called)
490  */
491 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
492 {
493 	struct device *dev = ice_pf_to_dev(pf);
494 	struct ice_hw *hw = &pf->hw;
495 
496 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
497 
498 	ice_prepare_for_reset(pf);
499 
500 	/* trigger the reset */
501 	if (ice_reset(hw, reset_type)) {
502 		dev_err(dev, "reset %d failed\n", reset_type);
503 		set_bit(ICE_RESET_FAILED, pf->state);
504 		clear_bit(ICE_RESET_OICR_RECV, pf->state);
505 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
506 		clear_bit(ICE_PFR_REQ, pf->state);
507 		clear_bit(ICE_CORER_REQ, pf->state);
508 		clear_bit(ICE_GLOBR_REQ, pf->state);
509 		wake_up(&pf->reset_wait_queue);
510 		return;
511 	}
512 
513 	/* PFR is a bit of a special case because it doesn't result in an OICR
514 	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
515 	 * associated state bits.
516 	 */
517 	if (reset_type == ICE_RESET_PFR) {
518 		pf->pfr_count++;
519 		ice_rebuild(pf, reset_type);
520 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
521 		clear_bit(ICE_PFR_REQ, pf->state);
522 		wake_up(&pf->reset_wait_queue);
523 		ice_reset_all_vfs(pf, true);
524 	}
525 }
526 
527 /**
528  * ice_reset_subtask - Set up for resetting the device and driver
529  * @pf: board private structure
530  */
531 static void ice_reset_subtask(struct ice_pf *pf)
532 {
533 	enum ice_reset_req reset_type = ICE_RESET_INVAL;
534 
535 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
536 	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
537 	 * of reset is pending and sets bits in pf->state indicating the reset
538 	 * type and ICE_RESET_OICR_RECV. If the latter bit is set, prepare for
539 	 * the pending reset if not already prepared (for PF software-initiated
540 	 * global resets the software should already be prepared, as indicated
541 	 * by ICE_PREPARED_FOR_RESET; for global resets initiated by firmware
542 	 * or by software on other PFs that bit is not set, so prepare for the
543 	 * reset now), then poll for reset completion, rebuild and return.
544 	 */
545 	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
546 		/* Perform the largest reset requested */
547 		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
548 			reset_type = ICE_RESET_CORER;
549 		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
550 			reset_type = ICE_RESET_GLOBR;
551 		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
552 			reset_type = ICE_RESET_EMPR;
553 		/* return if no valid reset type requested */
554 		if (reset_type == ICE_RESET_INVAL)
555 			return;
556 		ice_prepare_for_reset(pf);
557 
558 		/* make sure we are ready to rebuild */
559 		if (ice_check_reset(&pf->hw)) {
560 			set_bit(ICE_RESET_FAILED, pf->state);
561 		} else {
562 			/* done with reset. start rebuild */
563 			pf->hw.reset_ongoing = false;
564 			ice_rebuild(pf, reset_type);
565 			/* clear bit to resume normal operations, but
566 			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
567 			 */
568 			clear_bit(ICE_RESET_OICR_RECV, pf->state);
569 			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
570 			clear_bit(ICE_PFR_REQ, pf->state);
571 			clear_bit(ICE_CORER_REQ, pf->state);
572 			clear_bit(ICE_GLOBR_REQ, pf->state);
573 			wake_up(&pf->reset_wait_queue);
574 			ice_reset_all_vfs(pf, true);
575 		}
576 
577 		return;
578 	}
579 
580 	/* No pending resets to finish processing. Check for new resets */
581 	if (test_bit(ICE_PFR_REQ, pf->state))
582 		reset_type = ICE_RESET_PFR;
583 	if (test_bit(ICE_CORER_REQ, pf->state))
584 		reset_type = ICE_RESET_CORER;
585 	if (test_bit(ICE_GLOBR_REQ, pf->state))
586 		reset_type = ICE_RESET_GLOBR;
587 	/* If no valid reset type requested just return */
588 	if (reset_type == ICE_RESET_INVAL)
589 		return;
590 
591 	/* reset if not already down or busy */
592 	if (!test_bit(ICE_DOWN, pf->state) &&
593 	    !test_bit(ICE_CFG_BUSY, pf->state))
594 		ice_do_reset(pf, reset_type);
596 }
597 
598 /**
599  * ice_print_topo_conflict - print topology conflict message
600  * @vsi: the VSI whose topology status is being checked
601  */
602 static void ice_print_topo_conflict(struct ice_vsi *vsi)
603 {
604 	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
605 	case ICE_AQ_LINK_TOPO_CONFLICT:
606 	case ICE_AQ_LINK_MEDIA_CONFLICT:
607 	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
608 	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
609 	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
610 		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
611 		break;
612 	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
613 		netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
614 		break;
615 	default:
616 		break;
617 	}
618 }
619 
620 /**
621  * ice_print_link_msg - print link up or down message
622  * @vsi: the VSI whose link status is being queried
623  * @isup: boolean indicating whether the link is now up or down
624  */
625 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
626 {
627 	struct ice_aqc_get_phy_caps_data *caps;
628 	const char *an_advertised;
629 	enum ice_status status;
630 	const char *fec_req;
631 	const char *speed;
632 	const char *fec;
633 	const char *fc;
634 	const char *an;
635 
636 	if (!vsi)
637 		return;
638 
639 	if (vsi->current_isup == isup)
640 		return;
641 
642 	vsi->current_isup = isup;
643 
644 	if (!isup) {
645 		netdev_info(vsi->netdev, "NIC Link is Down\n");
646 		return;
647 	}
648 
649 	switch (vsi->port_info->phy.link_info.link_speed) {
650 	case ICE_AQ_LINK_SPEED_100GB:
651 		speed = "100 G";
652 		break;
653 	case ICE_AQ_LINK_SPEED_50GB:
654 		speed = "50 G";
655 		break;
656 	case ICE_AQ_LINK_SPEED_40GB:
657 		speed = "40 G";
658 		break;
659 	case ICE_AQ_LINK_SPEED_25GB:
660 		speed = "25 G";
661 		break;
662 	case ICE_AQ_LINK_SPEED_20GB:
663 		speed = "20 G";
664 		break;
665 	case ICE_AQ_LINK_SPEED_10GB:
666 		speed = "10 G";
667 		break;
668 	case ICE_AQ_LINK_SPEED_5GB:
669 		speed = "5 G";
670 		break;
671 	case ICE_AQ_LINK_SPEED_2500MB:
672 		speed = "2.5 G";
673 		break;
674 	case ICE_AQ_LINK_SPEED_1000MB:
675 		speed = "1 G";
676 		break;
677 	case ICE_AQ_LINK_SPEED_100MB:
678 		speed = "100 M";
679 		break;
680 	default:
681 		speed = "Unknown ";
682 		break;
683 	}
684 
685 	switch (vsi->port_info->fc.current_mode) {
686 	case ICE_FC_FULL:
687 		fc = "Rx/Tx";
688 		break;
689 	case ICE_FC_TX_PAUSE:
690 		fc = "Tx";
691 		break;
692 	case ICE_FC_RX_PAUSE:
693 		fc = "Rx";
694 		break;
695 	case ICE_FC_NONE:
696 		fc = "None";
697 		break;
698 	default:
699 		fc = "Unknown";
700 		break;
701 	}
702 
703 	/* Get FEC mode based on negotiated link info */
704 	switch (vsi->port_info->phy.link_info.fec_info) {
705 	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
706 	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
707 		fec = "RS-FEC";
708 		break;
709 	case ICE_AQ_LINK_25G_KR_FEC_EN:
710 		fec = "FC-FEC/BASE-R";
711 		break;
712 	default:
713 		fec = "NONE";
714 		break;
715 	}
716 
717 	/* check if autoneg completed; may be false if autoneg is not supported */
718 	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
719 		an = "True";
720 	else
721 		an = "False";
722 
723 	/* Get FEC mode requested based on PHY caps last SW configuration */
724 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
725 	if (!caps) {
726 		fec_req = "Unknown";
727 		an_advertised = "Unknown";
728 		goto done;
729 	}
730 
731 	status = ice_aq_get_phy_caps(vsi->port_info, false,
732 				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
733 	if (status)
734 		netdev_info(vsi->netdev, "Get phy capability failed.\n");
735 
736 	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
737 
738 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
739 	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
740 		fec_req = "RS-FEC";
741 	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
742 		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
743 		fec_req = "FC-FEC/BASE-R";
744 	else
745 		fec_req = "NONE";
746 
747 	kfree(caps);
748 
749 done:
750 	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
751 		    speed, fec_req, fec, an_advertised, an, fc);
752 	ice_print_topo_conflict(vsi);
753 }
754 
755 /**
756  * ice_vsi_link_event - update the VSI's netdev
757  * @vsi: the VSI on which the link event occurred
758  * @link_up: true if the link is up, false if it is down
759  */
760 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
761 {
762 	if (!vsi)
763 		return;
764 
765 	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
766 		return;
767 
768 	if (vsi->type == ICE_VSI_PF) {
769 		if (link_up == netif_carrier_ok(vsi->netdev))
770 			return;
771 
772 		if (link_up) {
773 			netif_carrier_on(vsi->netdev);
774 			netif_tx_wake_all_queues(vsi->netdev);
775 		} else {
776 			netif_carrier_off(vsi->netdev);
777 			netif_tx_stop_all_queues(vsi->netdev);
778 		}
779 	}
780 }
781 
782 /**
783  * ice_set_dflt_mib - send a default config MIB to the FW
784  * @pf: private PF struct
785  *
786  * This function sends a default configuration MIB to the FW.
787  *
788  * If this function errors out at any point, the driver is still able to
789  * function. The main impact is that LFC may not operate as expected.
790  * Therefore, an error in this function should be logged with a debug
791  * message, and the driver rebuild/re-enable should continue.
792  */
793 static void ice_set_dflt_mib(struct ice_pf *pf)
794 {
795 	struct device *dev = ice_pf_to_dev(pf);
796 	u8 mib_type, *buf, *lldpmib = NULL;
797 	u16 len, typelen, offset = 0;
798 	struct ice_lldp_org_tlv *tlv;
799 	struct ice_hw *hw = &pf->hw;
800 	u32 ouisubtype;
801 
802 	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
803 	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
804 	if (!lldpmib) {
805 		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
806 			__func__);
807 		return;
808 	}
809 
810 	/* Add ETS CFG TLV */
811 	tlv = (struct ice_lldp_org_tlv *)lldpmib;
812 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
813 		   ICE_IEEE_ETS_TLV_LEN);
814 	tlv->typelen = htons(typelen);
815 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
816 		      ICE_IEEE_SUBTYPE_ETS_CFG);
817 	tlv->ouisubtype = htonl(ouisubtype);
818 
819 	buf = tlv->tlvinfo;
820 	buf[0] = 0;
821 
822 	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
823 	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
824 	 * Octets 13 - 20 are TSA values - leave as zeros
825 	 */
826 	buf[5] = 0x64;
827 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
828 	offset += len + 2;
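	/* advance to the next TLV: skip the 2-byte type/length header plus
	 * the payload that was just written
	 */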
829 	tlv = (struct ice_lldp_org_tlv *)
830 		((char *)tlv + sizeof(tlv->typelen) + len);
831 
832 	/* Add ETS REC TLV */
833 	buf = tlv->tlvinfo;
834 	tlv->typelen = htons(typelen);
835 
836 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
837 		      ICE_IEEE_SUBTYPE_ETS_REC);
838 	tlv->ouisubtype = htonl(ouisubtype);
839 
840 	/* First octet of buf is reserved
841 	 * Octets 1 - 4 map UP to TC - all UPs map to zero
842 	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
843 	 * Octets 13 - 20 are TSA value - leave as zeros
844 	 */
845 	buf[5] = 0x64;
846 	offset += len + 2;
847 	tlv = (struct ice_lldp_org_tlv *)
848 		((char *)tlv + sizeof(tlv->typelen) + len);
849 
850 	/* Add PFC CFG TLV */
851 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
852 		   ICE_IEEE_PFC_TLV_LEN);
853 	tlv->typelen = htons(typelen);
854 
855 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
856 		      ICE_IEEE_SUBTYPE_PFC_CFG);
857 	tlv->ouisubtype = htonl(ouisubtype);
858 
859 	/* octet 0 sets the PFC capability to 8 TCs; octet 1 (the PFC enable bitmap) is left as all zeros - PFC disabled */
860 	buf[0] = 0x08;
861 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
862 	offset += len + 2;
863 
864 	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
865 		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
866 
867 	kfree(lldpmib);
868 }
869 
870 /**
871  * ice_check_module_power - check module power level
872  * @pf: pointer to PF struct
873  * @link_cfg_err: bitmap from the link info structure
874  *
875  * check module power level returned by a previous call to aq_get_link_info
876  * and print error messages if module power level is not supported
877  */
878 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
879 {
880 	/* if module power level is supported, clear the flag */
881 	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
882 			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
883 		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
884 		return;
885 	}
886 
887 	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
888 	 * above block didn't clear this bit, there's nothing to do
889 	 */
890 	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
891 		return;
892 
893 	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
894 		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
895 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
896 	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
897 		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
898 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
899 	}
900 }
901 
902 /**
903  * ice_link_event - process the link event
904  * @pf: PF that the link event is associated with
905  * @pi: port_info for the port that the link event is associated with
906  * @link_up: true if the physical link is up and false if it is down
907  * @link_speed: current link speed received from the link event
908  *
909  * Returns 0 on success and negative on failure
910  */
911 static int
912 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
913 	       u16 link_speed)
914 {
915 	struct device *dev = ice_pf_to_dev(pf);
916 	struct ice_phy_info *phy_info;
917 	enum ice_status status;
918 	struct ice_vsi *vsi;
919 	u16 old_link_speed;
920 	bool old_link;
921 
922 	phy_info = &pi->phy;
923 	phy_info->link_info_old = phy_info->link_info;
924 
925 	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
926 	old_link_speed = phy_info->link_info_old.link_speed;
927 
928 	/* update the link info structures and re-enable link events,
929 	 * don't bail on failure since other bookkeeping is still needed
930 	 */
931 	status = ice_update_link_info(pi);
932 	if (status)
933 		dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n",
934 			pi->lport, ice_stat_str(status),
935 			ice_aq_str(pi->hw->adminq.sq_last_status));
936 
937 	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
938 
939 	/* Check if the link state is up after updating link info, and treat
940 	 * this event as an UP event since the link is actually UP now.
941 	 */
942 	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
943 		link_up = true;
944 
945 	vsi = ice_get_main_vsi(pf);
946 	if (!vsi || !vsi->port_info)
947 		return -EINVAL;
948 
949 	/* turn off PHY if media was removed */
950 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
951 	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
952 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
953 		ice_set_link(vsi, false);
954 	}
955 
956 	/* if the old link up/down and speed is the same as the new */
957 	if (link_up == old_link && link_speed == old_link_speed)
958 		return 0;
959 
960 	if (ice_is_dcb_active(pf)) {
961 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
962 			ice_dcb_rebuild(pf);
963 	} else {
964 		if (link_up)
965 			ice_set_dflt_mib(pf);
966 	}
967 	ice_vsi_link_event(vsi, link_up);
968 	ice_print_link_msg(vsi, link_up);
969 
970 	ice_vc_notify_link_state(pf);
971 
972 	return 0;
973 }
974 
975 /**
976  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
977  * @pf: board private structure
978  */
979 static void ice_watchdog_subtask(struct ice_pf *pf)
980 {
981 	int i;
982 
983 	/* if interface is down do nothing */
984 	if (test_bit(ICE_DOWN, pf->state) ||
985 	    test_bit(ICE_CFG_BUSY, pf->state))
986 		return;
987 
988 	/* make sure we don't do these things too often */
989 	if (time_before(jiffies,
990 			pf->serv_tmr_prev + pf->serv_tmr_period))
991 		return;
992 
993 	pf->serv_tmr_prev = jiffies;
994 
995 	/* Update the stats for active netdevs so the network stack
996 	 * can look at updated numbers whenever it cares to
997 	 */
998 	ice_update_pf_stats(pf);
999 	ice_for_each_vsi(pf, i)
1000 		if (pf->vsi[i] && pf->vsi[i]->netdev)
1001 			ice_update_vsi_stats(pf->vsi[i]);
1002 }
1003 
1004 /**
1005  * ice_init_link_events - enable/initialize link events
1006  * @pi: pointer to the port_info instance
1007  *
1008  * Returns -EIO on failure, 0 on success
1009  */
1010 static int ice_init_link_events(struct ice_port_info *pi)
1011 {
1012 	u16 mask;
1013 
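	/* a set bit in the event mask disables that event, so invert the set
	 * of events that should remain enabled
	 */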
1014 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1015 		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
1016 
1017 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1018 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1019 			pi->lport);
1020 		return -EIO;
1021 	}
1022 
1023 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1024 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1025 			pi->lport);
1026 		return -EIO;
1027 	}
1028 
1029 	return 0;
1030 }
1031 
1032 /**
1033  * ice_handle_link_event - handle link event via ARQ
1034  * @pf: PF that the link event is associated with
1035  * @event: event structure containing link status info
1036  */
1037 static int
1038 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1039 {
1040 	struct ice_aqc_get_link_status_data *link_data;
1041 	struct ice_port_info *port_info;
1042 	int status;
1043 
1044 	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1045 	port_info = pf->hw.port_info;
1046 	if (!port_info)
1047 		return -EINVAL;
1048 
1049 	status = ice_link_event(pf, port_info,
1050 				!!(link_data->link_info & ICE_AQ_LINK_UP),
1051 				le16_to_cpu(link_data->link_speed));
1052 	if (status)
1053 		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1054 			status);
1055 
1056 	return status;
1057 }
1058 
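/* States of a thread waiting on an AdminQ event; any value other than
 * ICE_AQ_TASK_WAITING wakes the waiter in ice_aq_wait_for_event()
 */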
1059 enum ice_aq_task_state {
1060 	ICE_AQ_TASK_WAITING = 0,
1061 	ICE_AQ_TASK_COMPLETE,
1062 	ICE_AQ_TASK_CANCELED,
1063 };
1064 
1065 struct ice_aq_task {
1066 	struct hlist_node entry;
1067 
1068 	u16 opcode;
1069 	struct ice_rq_event_info *event;
1070 	enum ice_aq_task_state state;
1071 };
1072 
1073 /**
1074  * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1075  * @pf: pointer to the PF private structure
1076  * @opcode: the opcode to wait for
1077  * @timeout: how long to wait, in jiffies
1078  * @event: storage for the event info
1079  *
1080  * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1081  * current thread will be put to sleep until the specified event occurs or
1082  * until the given timeout is reached.
1083  *
1084  * To obtain only the descriptor contents, pass an event without an allocated
1085  * msg_buf. If the complete data buffer is desired, allocate the
1086  * event->msg_buf with enough space ahead of time.
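 *
 * A minimal usage sketch (hypothetical opcode and timeout; error handling
 * elided):
 *
 *	struct ice_rq_event_info event = { };
 *	int err;
 *
 *	event.buf_len = ICE_AQ_MAX_BUF_LEN;
 *	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *	err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, HZ, &event);
 *	kfree(event.msg_buf);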
1087  *
1088  * Returns: zero on success, or a negative error code on failure.
1089  */
1090 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1091 			  struct ice_rq_event_info *event)
1092 {
1093 	struct device *dev = ice_pf_to_dev(pf);
1094 	struct ice_aq_task *task;
1095 	unsigned long start;
1096 	long ret;
1097 	int err;
1098 
1099 	task = kzalloc(sizeof(*task), GFP_KERNEL);
1100 	if (!task)
1101 		return -ENOMEM;
1102 
1103 	INIT_HLIST_NODE(&task->entry);
1104 	task->opcode = opcode;
1105 	task->event = event;
1106 	task->state = ICE_AQ_TASK_WAITING;
1107 
1108 	spin_lock_bh(&pf->aq_wait_lock);
1109 	hlist_add_head(&task->entry, &pf->aq_wait_list);
1110 	spin_unlock_bh(&pf->aq_wait_lock);
1111 
1112 	start = jiffies;
1113 
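	/* sleep until task->state leaves ICE_AQ_TASK_WAITING (any nonzero
	 * value means complete or canceled) or the timeout expires
	 */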
1114 	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
1115 					       timeout);
1116 	switch (task->state) {
1117 	case ICE_AQ_TASK_WAITING:
1118 		err = ret < 0 ? ret : -ETIMEDOUT;
1119 		break;
1120 	case ICE_AQ_TASK_CANCELED:
1121 		err = ret < 0 ? ret : -ECANCELED;
1122 		break;
1123 	case ICE_AQ_TASK_COMPLETE:
1124 		err = ret < 0 ? ret : 0;
1125 		break;
1126 	default:
1127 		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1128 		err = -EINVAL;
1129 		break;
1130 	}
1131 
1132 	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1133 		jiffies_to_msecs(jiffies - start),
1134 		jiffies_to_msecs(timeout),
1135 		opcode);
1136 
1137 	spin_lock_bh(&pf->aq_wait_lock);
1138 	hlist_del(&task->entry);
1139 	spin_unlock_bh(&pf->aq_wait_lock);
1140 	kfree(task);
1141 
1142 	return err;
1143 }
1144 
1145 /**
1146  * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1147  * @pf: pointer to the PF private structure
1148  * @opcode: the opcode of the event
1149  * @event: the event to check
1150  *
1151  * Loops over the current list of pending threads waiting for an AdminQ event.
1152  * For each matching task, copy the contents of the event into the task
1153  * structure and wake up the thread.
1154  *
1155  * If multiple threads wait for the same opcode, they will all be woken up.
1156  *
1157  * Note that event->msg_buf will only be duplicated if the event has a buffer
1158  * with enough space already allocated. Otherwise, only the descriptor and
1159  * message length will be copied.
1162  */
1163 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1164 				struct ice_rq_event_info *event)
1165 {
1166 	struct ice_aq_task *task;
1167 	bool found = false;
1168 
1169 	spin_lock_bh(&pf->aq_wait_lock);
1170 	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1171 		if (task->state || task->opcode != opcode)
1172 			continue;
1173 
1174 		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
1175 		task->event->msg_len = event->msg_len;
1176 
1177 		/* Only copy the data buffer if a destination was set */
1178 		if (task->event->msg_buf &&
1179 		    task->event->buf_len > event->buf_len) {
1180 			memcpy(task->event->msg_buf, event->msg_buf,
1181 			       event->buf_len);
1182 			task->event->buf_len = event->buf_len;
1183 		}
1184 
1185 		task->state = ICE_AQ_TASK_COMPLETE;
1186 		found = true;
1187 	}
1188 	spin_unlock_bh(&pf->aq_wait_lock);
1189 
1190 	if (found)
1191 		wake_up(&pf->aq_wait_queue);
1192 }
1193 
1194 /**
1195  * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1196  * @pf: the PF private structure
1197  *
1198  * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1199  * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1200  */
1201 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1202 {
1203 	struct ice_aq_task *task;
1204 
1205 	spin_lock_bh(&pf->aq_wait_lock);
1206 	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1207 		task->state = ICE_AQ_TASK_CANCELED;
1208 	spin_unlock_bh(&pf->aq_wait_lock);
1209 
1210 	wake_up(&pf->aq_wait_queue);
1211 }
1212 
1213 /**
1214  * __ice_clean_ctrlq - helper function to clean controlq rings
1215  * @pf: ptr to struct ice_pf
1216  * @q_type: specific Control queue type
1217  */
1218 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1219 {
1220 	struct device *dev = ice_pf_to_dev(pf);
1221 	struct ice_rq_event_info event;
1222 	struct ice_hw *hw = &pf->hw;
1223 	struct ice_ctl_q_info *cq;
1224 	u16 pending, i = 0;
1225 	const char *qtype;
1226 	u32 oldval, val;
1227 
1228 	/* Do not clean control queue if/when PF reset fails */
1229 	if (test_bit(ICE_RESET_FAILED, pf->state))
1230 		return 0;
1231 
1232 	switch (q_type) {
1233 	case ICE_CTL_Q_ADMIN:
1234 		cq = &hw->adminq;
1235 		qtype = "Admin";
1236 		break;
1237 	case ICE_CTL_Q_SB:
1238 		cq = &hw->sbq;
1239 		qtype = "Sideband";
1240 		break;
1241 	case ICE_CTL_Q_MAILBOX:
1242 		cq = &hw->mailboxq;
1243 		qtype = "Mailbox";
1244 		/* we are going to try to detect a malicious VF, so set the
1245 		 * state to begin detection
1246 		 */
1247 		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1248 		break;
1249 	default:
1250 		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1251 		return 0;
1252 	}
1253 
1254 	/* check for error indications - PF_xx_AxQLEN register layout for
1255 	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1256 	 */
1257 	val = rd32(hw, cq->rq.len);
1258 	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1259 		   PF_FW_ARQLEN_ARQCRIT_M)) {
1260 		oldval = val;
1261 		if (val & PF_FW_ARQLEN_ARQVFE_M)
1262 			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1263 				qtype);
1264 		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1265 			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1266 				qtype);
1267 		}
1268 		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1269 			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1270 				qtype);
1271 		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1272 			 PF_FW_ARQLEN_ARQCRIT_M);
1273 		if (oldval != val)
1274 			wr32(hw, cq->rq.len, val);
1275 	}
1276 
1277 	val = rd32(hw, cq->sq.len);
1278 	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1279 		   PF_FW_ATQLEN_ATQCRIT_M)) {
1280 		oldval = val;
1281 		if (val & PF_FW_ATQLEN_ATQVFE_M)
1282 			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1283 				qtype);
1284 		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1285 			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1286 				qtype);
1287 		}
1288 		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1289 			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1290 				qtype);
1291 		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1292 			 PF_FW_ATQLEN_ATQCRIT_M);
1293 		if (oldval != val)
1294 			wr32(hw, cq->sq.len, val);
1295 	}
1296 
1297 	event.buf_len = cq->rq_buf_size;
1298 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1299 	if (!event.msg_buf)
1300 		return 0;
1301 
1302 	do {
1303 		enum ice_status ret;
1304 		u16 opcode;
1305 
1306 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1307 		if (ret == ICE_ERR_AQ_NO_WORK)
1308 			break;
1309 		if (ret) {
1310 			dev_err(dev, "%s Receive Queue event error %s\n", qtype,
1311 				ice_stat_str(ret));
1312 			break;
1313 		}
1314 
1315 		opcode = le16_to_cpu(event.desc.opcode);
1316 
1317 		/* Notify any thread that might be waiting for this event */
1318 		ice_aq_check_events(pf, opcode, &event);
1319 
1320 		switch (opcode) {
1321 		case ice_aqc_opc_get_link_status:
1322 			if (ice_handle_link_event(pf, &event))
1323 				dev_err(dev, "Could not handle link event\n");
1324 			break;
1325 		case ice_aqc_opc_event_lan_overflow:
1326 			ice_vf_lan_overflow_event(pf, &event);
1327 			break;
1328 		case ice_mbx_opc_send_msg_to_pf:
1329 			if (!ice_is_malicious_vf(pf, &event, i, pending))
1330 				ice_vc_process_vf_msg(pf, &event);
1331 			break;
1332 		case ice_aqc_opc_fw_logging:
1333 			ice_output_fw_log(hw, &event.desc, event.msg_buf);
1334 			break;
1335 		case ice_aqc_opc_lldp_set_mib_change:
1336 			ice_dcb_process_lldp_set_mib_change(pf, &event);
1337 			break;
1338 		default:
1339 			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1340 				qtype, opcode);
1341 			break;
1342 		}
1343 	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1344 
1345 	kfree(event.msg_buf);
1346 
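	/* a nonzero return means the work limit was reached with messages
	 * still pending, signaling the caller to schedule another pass
	 */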
1347 	return pending && (i == ICE_DFLT_IRQ_WORK);
1348 }
1349 
1350 /**
1351  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1352  * @hw: pointer to hardware info
1353  * @cq: control queue information
1354  *
1355  * returns true if there are pending messages in a queue, false if there aren't
1356  */
1357 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1358 {
1359 	u16 ntu;
1360 
1361 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1362 	return cq->rq.next_to_clean != ntu;
1363 }
1364 
1365 /**
1366  * ice_clean_adminq_subtask - clean the AdminQ rings
1367  * @pf: board private structure
1368  */
1369 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1370 {
1371 	struct ice_hw *hw = &pf->hw;
1372 
1373 	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1374 		return;
1375 
1376 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1377 		return;
1378 
1379 	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1380 
1381 	/* There might be a situation where new messages arrive to a control
1382 	 * queue between processing the last message and clearing the
1383 	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1384 	 * ice_ctrlq_pending) and process new messages if any.
1385 	 */
1386 	if (ice_ctrlq_pending(hw, &hw->adminq))
1387 		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1388 
1389 	ice_flush(hw);
1390 }
1391 
1392 /**
1393  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1394  * @pf: board private structure
1395  */
1396 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1397 {
1398 	struct ice_hw *hw = &pf->hw;
1399 
1400 	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1401 		return;
1402 
1403 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1404 		return;
1405 
1406 	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1407 
1408 	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1409 		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1410 
1411 	ice_flush(hw);
1412 }
1413 
1414 /**
1415  * ice_clean_sbq_subtask - clean the Sideband Queue rings
1416  * @pf: board private structure
1417  */
1418 static void ice_clean_sbq_subtask(struct ice_pf *pf)
1419 {
1420 	struct ice_hw *hw = &pf->hw;
1421 
1422 	/* Nothing to do here if sideband queue is not supported */
1423 	if (!ice_is_sbq_supported(hw)) {
1424 		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1425 		return;
1426 	}
1427 
1428 	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1429 		return;
1430 
1431 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1432 		return;
1433 
1434 	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1435 
1436 	if (ice_ctrlq_pending(hw, &hw->sbq))
1437 		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1438 
1439 	ice_flush(hw);
1440 }
1441 
1442 /**
1443  * ice_service_task_schedule - schedule the service task to wake up
1444  * @pf: board private structure
1445  *
1446  * If not already scheduled, this puts the task into the work queue.
1447  */
1448 void ice_service_task_schedule(struct ice_pf *pf)
1449 {
1450 	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1451 	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1452 	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1453 		queue_work(ice_wq, &pf->serv_task);
1454 }
1455 
1456 /**
1457  * ice_service_task_complete - finish up the service task
1458  * @pf: board private structure
1459  */
1460 static void ice_service_task_complete(struct ice_pf *pf)
1461 {
1462 	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1463 
1464 	/* force memory (pf->state) to sync before next service task */
1465 	smp_mb__before_atomic();
1466 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1467 }
1468 
1469 /**
1470  * ice_service_task_stop - stop service task and cancel works
1471  * @pf: board private structure
1472  *
1473  * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1474  * 1 otherwise.
1475  */
1476 static int ice_service_task_stop(struct ice_pf *pf)
1477 {
1478 	int ret;
1479 
1480 	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1481 
1482 	if (pf->serv_tmr.function)
1483 		del_timer_sync(&pf->serv_tmr);
1484 	if (pf->serv_task.func)
1485 		cancel_work_sync(&pf->serv_task);
1486 
1487 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1488 	return ret;
1489 }
1490 
1491 /**
1492  * ice_service_task_restart - restart service task and schedule works
1493  * @pf: board private structure
1494  *
1495  * This function is needed for suspend and resume flows (e.g. the WoL scenario)
1496  */
1497 static void ice_service_task_restart(struct ice_pf *pf)
1498 {
1499 	clear_bit(ICE_SERVICE_DIS, pf->state);
1500 	ice_service_task_schedule(pf);
1501 }
1502 
1503 /**
1504  * ice_service_timer - timer callback to schedule service task
1505  * @t: pointer to timer_list
1506  */
1507 static void ice_service_timer(struct timer_list *t)
1508 {
1509 	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1510 
1511 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1512 	ice_service_task_schedule(pf);
1513 }
1514 
1515 /**
1516  * ice_handle_mdd_event - handle malicious driver detect event
1517  * @pf: pointer to the PF structure
1518  *
1519  * Called from service task. OICR interrupt handler indicates MDD event.
1520  * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1521  * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1522  * disable the queue, the PF can be configured to reset the VF using ethtool
1523  * private flag mdd-auto-reset-vf.
1524  */
1525 static void ice_handle_mdd_event(struct ice_pf *pf)
1526 {
1527 	struct device *dev = ice_pf_to_dev(pf);
1528 	struct ice_hw *hw = &pf->hw;
1529 	unsigned int i;
1530 	u32 reg;
1531 
1532 	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1533 		/* Since the VF MDD event logging is rate limited, check if
1534 		 * there are pending MDD events.
1535 		 */
1536 		ice_print_vfs_mdd_events(pf);
1537 		return;
1538 	}
1539 
1540 	/* find what triggered an MDD event */
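	/* each GL_MDET_* register latches one event until cleared by writing
	 * all ones back
	 */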
1541 	reg = rd32(hw, GL_MDET_TX_PQM);
1542 	if (reg & GL_MDET_TX_PQM_VALID_M) {
1543 		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1544 				GL_MDET_TX_PQM_PF_NUM_S;
1545 		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1546 				GL_MDET_TX_PQM_VF_NUM_S;
1547 		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1548 				GL_MDET_TX_PQM_MAL_TYPE_S;
1549 		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1550 				GL_MDET_TX_PQM_QNUM_S);
1551 
1552 		if (netif_msg_tx_err(pf))
1553 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1554 				 event, queue, pf_num, vf_num);
1555 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1556 	}
1557 
1558 	reg = rd32(hw, GL_MDET_TX_TCLAN);
1559 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1560 		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1561 				GL_MDET_TX_TCLAN_PF_NUM_S;
1562 		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1563 				GL_MDET_TX_TCLAN_VF_NUM_S;
1564 		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1565 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1566 		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1567 				GL_MDET_TX_TCLAN_QNUM_S);
1568 
1569 		if (netif_msg_tx_err(pf))
1570 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1571 				 event, queue, pf_num, vf_num);
1572 		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1573 	}
1574 
1575 	reg = rd32(hw, GL_MDET_RX);
1576 	if (reg & GL_MDET_RX_VALID_M) {
1577 		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1578 				GL_MDET_RX_PF_NUM_S;
1579 		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1580 				GL_MDET_RX_VF_NUM_S;
1581 		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1582 				GL_MDET_RX_MAL_TYPE_S;
1583 		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1584 				GL_MDET_RX_QNUM_S);
1585 
1586 		if (netif_msg_rx_err(pf))
1587 			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1588 				 event, queue, pf_num, vf_num);
1589 		wr32(hw, GL_MDET_RX, 0xffffffff);
1590 	}
1591 
1592 	/* check to see if this PF caused an MDD event */
1593 	reg = rd32(hw, PF_MDET_TX_PQM);
1594 	if (reg & PF_MDET_TX_PQM_VALID_M) {
1595 		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1596 		if (netif_msg_tx_err(pf))
1597 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1598 	}
1599 
1600 	reg = rd32(hw, PF_MDET_TX_TCLAN);
1601 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1602 		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1603 		if (netif_msg_tx_err(pf))
1604 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1605 	}
1606 
1607 	reg = rd32(hw, PF_MDET_RX);
1608 	if (reg & PF_MDET_RX_VALID_M) {
1609 		wr32(hw, PF_MDET_RX, 0xFFFF);
1610 		if (netif_msg_rx_err(pf))
1611 			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1612 	}
1613 
1614 	/* Check to see if one of the VFs caused an MDD event, and then
1615 	 * increment counters and set print pending
1616 	 */
1617 	ice_for_each_vf(pf, i) {
1618 		struct ice_vf *vf = &pf->vf[i];
1619 
1620 		reg = rd32(hw, VP_MDET_TX_PQM(i));
1621 		if (reg & VP_MDET_TX_PQM_VALID_M) {
1622 			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
1623 			vf->mdd_tx_events.count++;
1624 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1625 			if (netif_msg_tx_err(pf))
1626 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1627 					 i);
1628 		}
1629 
1630 		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
1631 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1632 			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
1633 			vf->mdd_tx_events.count++;
1634 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1635 			if (netif_msg_tx_err(pf))
1636 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1637 					 i);
1638 		}
1639 
1640 		reg = rd32(hw, VP_MDET_TX_TDPU(i));
1641 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1642 			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
1643 			vf->mdd_tx_events.count++;
1644 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1645 			if (netif_msg_tx_err(pf))
1646 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1647 					 i);
1648 		}
1649 
1650 		reg = rd32(hw, VP_MDET_RX(i));
1651 		if (reg & VP_MDET_RX_VALID_M) {
1652 			wr32(hw, VP_MDET_RX(i), 0xFFFF);
1653 			vf->mdd_rx_events.count++;
1654 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1655 			if (netif_msg_rx_err(pf))
1656 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1657 					 i);
1658 
1659 			/* Since the queue is disabled on VF Rx MDD events, the
1660 			 * PF can be configured to reset the VF through ethtool
1661 			 * private flag mdd-auto-reset-vf.
1662 			 */
1663 			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1664 				/* VF MDD event counters will be cleared by
1665 				 * reset, so print the event prior to reset.
1666 				 */
1667 				ice_print_vf_rx_mdd_event(vf);
1668 				ice_reset_vf(&pf->vf[i], false);
1669 			}
1670 		}
1671 	}
1672 
1673 	ice_print_vfs_mdd_events(pf);
1674 }
1675 
1676 /**
1677  * ice_force_phys_link_state - Force the physical link state
1678  * @vsi: VSI to force the physical link state to up/down
1679  * @link_up: true/false indicates to set the physical link to up/down
1680  *
1681  * Force the physical link state by getting the current PHY capabilities from
1682  * hardware and setting the PHY config based on the determined capabilities. If
1683  * link changes a link event will be triggered because both the Enable Automatic
1684  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1685  *
1686  * Returns 0 on success, negative on failure
1687  */
1688 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1689 {
1690 	struct ice_aqc_get_phy_caps_data *pcaps;
1691 	struct ice_aqc_set_phy_cfg_data *cfg;
1692 	struct ice_port_info *pi;
1693 	struct device *dev;
1694 	int retcode;
1695 
1696 	if (!vsi || !vsi->port_info || !vsi->back)
1697 		return -EINVAL;
1698 	if (vsi->type != ICE_VSI_PF)
1699 		return 0;
1700 
1701 	dev = ice_pf_to_dev(vsi->back);
1702 
1703 	pi = vsi->port_info;
1704 
1705 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1706 	if (!pcaps)
1707 		return -ENOMEM;
1708 
1709 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1710 				      NULL);
1711 	if (retcode) {
1712 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1713 			vsi->vsi_num, retcode);
1714 		retcode = -EIO;
1715 		goto out;
1716 	}
1717 
1718 	/* No change in link */
1719 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1720 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1721 		goto out;
1722 
1723 	/* Use the current user PHY configuration. It is initialized during
1724 	 * probe from the PHY capabilities (software mode) and updated each
1725 	 * time the PHY configuration is set.
1726 	 */
1727 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1728 	if (!cfg) {
1729 		retcode = -ENOMEM;
1730 		goto out;
1731 	}
1732 
1733 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1734 	if (link_up)
1735 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1736 	else
1737 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1738 
1739 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1740 	if (retcode) {
1741 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1742 			vsi->vsi_num, retcode);
1743 		retcode = -EIO;
1744 	}
1745 
1746 	kfree(cfg);
1747 out:
1748 	kfree(pcaps);
1749 	return retcode;
1750 }
1751 
1752 /**
1753  * ice_init_nvm_phy_type - Initialize the NVM PHY type
1754  * @pi: port info structure
1755  *
1756  * Initialize nvm_phy_type_[low|high] for link lenient mode support
1757  */
1758 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1759 {
1760 	struct ice_aqc_get_phy_caps_data *pcaps;
1761 	struct ice_pf *pf = pi->hw->back;
1762 	enum ice_status status;
1763 	int err = 0;
1764 
1765 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1766 	if (!pcaps)
1767 		return -ENOMEM;
1768 
1769 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
1770 				     NULL);
1771 
1772 	if (status) {
1773 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1774 		err = -EIO;
1775 		goto out;
1776 	}
1777 
1778 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
1779 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
1780 
1781 out:
1782 	kfree(pcaps);
1783 	return err;
1784 }
1785 
1786 /**
1787  * ice_init_link_dflt_override - Initialize link default override
1788  * @pi: port info structure
1789  *
1790  * Initialize link default override and PHY total port shutdown during probe
1791  */
1792 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1793 {
1794 	struct ice_link_default_override_tlv *ldo;
1795 	struct ice_pf *pf = pi->hw->back;
1796 
1797 	ldo = &pf->link_dflt_override;
1798 	if (ice_get_link_default_override(ldo, pi))
1799 		return;
1800 
1801 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1802 		return;
1803 
1804 	/* Enable Total Port Shutdown (override/replace link-down-on-close
1805 	 * ethtool private flag) for ports with Port Disable bit set.
1806 	 */
1807 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1808 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1809 }
1810 
1811 /**
1812  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1813  * @pi: port info structure
1814  *
1815  * If default override is enabled, initialize the user PHY cfg speed and FEC
1816  * settings using the default override mask from the NVM.
1817  *
1818  * The PHY should only be configured with the default override settings the
1819  * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
1820  * is used to indicate that the user PHY cfg default override is initialized
1821  * and the PHY has not been configured with the default override settings. The
1822  * state is set here, and cleared in ice_configure_phy the first time the PHY is
1823  * configured.
1824  *
1825  * This function should be called only if the FW doesn't support default
1826  * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
1827  */
1828 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1829 {
1830 	struct ice_link_default_override_tlv *ldo;
1831 	struct ice_aqc_set_phy_cfg_data *cfg;
1832 	struct ice_phy_info *phy = &pi->phy;
1833 	struct ice_pf *pf = pi->hw->back;
1834 
1835 	ldo = &pf->link_dflt_override;
1836 
	/* If link default override is enabled, use it to mask the NVM PHY
	 * capabilities for the speed and FEC default configuration.
1839 	 */
1840 	cfg = &phy->curr_user_phy_cfg;
1841 
1842 	if (ldo->phy_type_low || ldo->phy_type_high) {
1843 		cfg->phy_type_low = pf->nvm_phy_type_lo &
1844 				    cpu_to_le64(ldo->phy_type_low);
1845 		cfg->phy_type_high = pf->nvm_phy_type_hi &
1846 				     cpu_to_le64(ldo->phy_type_high);
1847 	}
1848 	cfg->link_fec_opt = ldo->fec_options;
1849 	phy->curr_user_fec_req = ICE_FEC_AUTO;
1850 
1851 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
1852 }
1853 
1854 /**
1855  * ice_init_phy_user_cfg - Initialize the PHY user configuration
1856  * @pi: port info structure
1857  *
 * Initialize the current user PHY configuration, speed, FEC, and FC requested
 * mode to their defaults. The PHY defaults are taken from the get PHY
 * capabilities reply for the topology with media, so call this when media is
 * first available; an error is returned if media is not available. The PHY
 * initialization completed state is set here.
 *
 * These configurations are used when setting the PHY configuration, and the
 * user PHY configuration is updated on each set PHY configuration.
 * Returns 0 on success, negative on failure
1867  */
1868 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
1869 {
1870 	struct ice_aqc_get_phy_caps_data *pcaps;
1871 	struct ice_phy_info *phy = &pi->phy;
1872 	struct ice_pf *pf = pi->hw->back;
1873 	enum ice_status status;
1874 	int err = 0;
1875 
1876 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1877 		return -EIO;
1878 
1879 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1880 	if (!pcaps)
1881 		return -ENOMEM;
1882 
1883 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
1884 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
1885 					     pcaps, NULL);
1886 	else
1887 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
1888 					     pcaps, NULL);
1889 	if (status) {
1890 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1891 		err = -EIO;
1892 		goto err_out;
1893 	}
1894 
1895 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
1896 
1897 	/* check if lenient mode is supported and enabled */
1898 	if (ice_fw_supports_link_override(pi->hw) &&
1899 	    !(pcaps->module_compliance_enforcement &
1900 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
1901 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
1902 
1903 		/* if the FW supports default PHY configuration mode, then the driver
1904 		 * does not have to apply link override settings. If not,
1905 		 * initialize user PHY configuration with link override values
1906 		 */
1907 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
1908 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
1909 			ice_init_phy_cfg_dflt_override(pi);
1910 			goto out;
1911 		}
1912 	}
1913 
1914 	/* if link default override is not enabled, set user flow control and
1915 	 * FEC settings based on what get_phy_caps returned
1916 	 */
1917 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
1918 						      pcaps->link_fec_options);
1919 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
1920 
1921 out:
1922 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
1923 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
1924 err_out:
1925 	kfree(pcaps);
1926 	return err;
1927 }
1928 
1929 /**
1930  * ice_configure_phy - configure PHY
1931  * @vsi: VSI of PHY
1932  *
1933  * Set the PHY configuration. If the current PHY configuration is the same as
 * the curr_user_phy_cfg, then do nothing to avoid a link flap. Otherwise,
 * configure the PHY based on the get PHY capabilities for the topology with
 * media.
1936  */
1937 static int ice_configure_phy(struct ice_vsi *vsi)
1938 {
1939 	struct device *dev = ice_pf_to_dev(vsi->back);
1940 	struct ice_port_info *pi = vsi->port_info;
1941 	struct ice_aqc_get_phy_caps_data *pcaps;
1942 	struct ice_aqc_set_phy_cfg_data *cfg;
1943 	struct ice_phy_info *phy = &pi->phy;
1944 	struct ice_pf *pf = vsi->back;
1945 	enum ice_status status;
1946 	int err = 0;
1947 
1948 	/* Ensure we have media as we cannot configure a medialess port */
1949 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1950 		return -EPERM;
1951 
1952 	ice_print_topo_conflict(vsi);
1953 
1954 	if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
1955 		return -EPERM;
1956 
1957 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
1958 		return ice_force_phys_link_state(vsi, true);
1959 
1960 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1961 	if (!pcaps)
1962 		return -ENOMEM;
1963 
1964 	/* Get current PHY config */
1965 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1966 				     NULL);
1967 	if (status) {
1968 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
1969 			vsi->vsi_num, ice_stat_str(status));
1970 		err = -EIO;
1971 		goto done;
1972 	}
1973 
1974 	/* If PHY enable link is configured and configuration has not changed,
1975 	 * there's nothing to do
1976 	 */
1977 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
1978 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
1979 		goto done;
1980 
1981 	/* Use PHY topology as baseline for configuration */
1982 	memset(pcaps, 0, sizeof(*pcaps));
1983 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
1984 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
1985 					     pcaps, NULL);
1986 	else
1987 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
1988 					     pcaps, NULL);
1989 	if (status) {
1990 		dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n",
1991 			vsi->vsi_num, ice_stat_str(status));
1992 		err = -EIO;
1993 		goto done;
1994 	}
1995 
1996 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1997 	if (!cfg) {
1998 		err = -ENOMEM;
1999 		goto done;
2000 	}
2001 
2002 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2003 
2004 	/* Speed - If default override pending, use curr_user_phy_cfg set in
	 * ice_init_phy_cfg_dflt_override.
2006 	 */
2007 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2008 			       vsi->back->state)) {
2009 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2010 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2011 	} else {
2012 		u64 phy_low = 0, phy_high = 0;
2013 
2014 		ice_update_phy_type(&phy_low, &phy_high,
2015 				    pi->phy.curr_user_speed_req);
2016 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2017 		cfg->phy_type_high = pcaps->phy_type_high &
2018 				     cpu_to_le64(phy_high);
2019 	}
2020 
2021 	/* Can't provide what was requested; use PHY capabilities */
2022 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2023 		cfg->phy_type_low = pcaps->phy_type_low;
2024 		cfg->phy_type_high = pcaps->phy_type_high;
2025 	}
2026 
2027 	/* FEC */
2028 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2029 
2030 	/* Can't provide what was requested; use PHY capabilities */
2031 	if (cfg->link_fec_opt !=
2032 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2033 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2034 		cfg->link_fec_opt = pcaps->link_fec_options;
2035 	}
2036 
2037 	/* Flow Control - always supported; no need to check against
2038 	 * capabilities
2039 	 */
2040 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2041 
2042 	/* Enable link and link update */
2043 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2044 
2045 	status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2046 	if (status) {
2047 		dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
2048 			vsi->vsi_num, ice_stat_str(status));
2049 		err = -EIO;
2050 	}
2051 
2052 	kfree(cfg);
2053 done:
2054 	kfree(pcaps);
2055 	return err;
2056 }
2057 
2058 /**
2059  * ice_check_media_subtask - Check for media
2060  * @pf: pointer to PF struct
2061  *
 * If media is available, then initialize the PHY user configuration if it has
 * not been done already, and configure the PHY if the interface is up.
2064  */
2065 static void ice_check_media_subtask(struct ice_pf *pf)
2066 {
2067 	struct ice_port_info *pi;
2068 	struct ice_vsi *vsi;
2069 	int err;
2070 
2071 	/* No need to check for media if it's already present */
2072 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2073 		return;
2074 
2075 	vsi = ice_get_main_vsi(pf);
2076 	if (!vsi)
2077 		return;
2078 
2079 	/* Refresh link info and check if media is present */
2080 	pi = vsi->port_info;
2081 	err = ice_update_link_info(pi);
2082 	if (err)
2083 		return;
2084 
2085 	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
2086 
2087 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2088 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2089 			ice_init_phy_user_cfg(pi);
2090 
2091 		/* PHY settings are reset on media insertion, reconfigure
2092 		 * PHY to preserve settings.
2093 		 */
2094 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2095 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2096 			return;
2097 
2098 		err = ice_configure_phy(vsi);
2099 		if (!err)
2100 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2101 
2102 		/* A Link Status Event will be generated; the event handler
2103 		 * will complete bringing the interface up
2104 		 */
2105 	}
2106 }
2107 
2108 /**
2109  * ice_service_task - manage and run subtasks
2110  * @work: pointer to work_struct contained by the PF struct
2111  */
2112 static void ice_service_task(struct work_struct *work)
2113 {
2114 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2115 	unsigned long start_time = jiffies;
2116 
2117 	/* subtasks */
2118 
2119 	/* process reset requests first */
2120 	ice_reset_subtask(pf);
2121 
2122 	/* bail if a reset/recovery cycle is pending or rebuild failed */
2123 	if (ice_is_reset_in_progress(pf->state) ||
2124 	    test_bit(ICE_SUSPENDED, pf->state) ||
2125 	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2126 		ice_service_task_complete(pf);
2127 		return;
2128 	}
2129 
2130 	ice_clean_adminq_subtask(pf);
2131 	ice_check_media_subtask(pf);
2132 	ice_check_for_hang_subtask(pf);
2133 	ice_sync_fltr_subtask(pf);
2134 	ice_handle_mdd_event(pf);
2135 	ice_watchdog_subtask(pf);
2136 
2137 	if (ice_is_safe_mode(pf)) {
2138 		ice_service_task_complete(pf);
2139 		return;
2140 	}
2141 
2142 	ice_process_vflr_event(pf);
2143 	ice_clean_mailboxq_subtask(pf);
2144 	ice_clean_sbq_subtask(pf);
2145 	ice_sync_arfs_fltrs(pf);
2146 	ice_flush_fdir_ctx(pf);
2147 
2148 	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2149 	ice_service_task_complete(pf);
2150 
2151 	/* If the tasks have taken longer than one service timer period
2152 	 * or there is more work to be done, reset the service timer to
2153 	 * schedule the service task now.
2154 	 */
2155 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2156 	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2157 	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2158 	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2159 	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2160 	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2161 	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2162 		mod_timer(&pf->serv_tmr, jiffies);
2163 }
2164 
2165 /**
2166  * ice_set_ctrlq_len - helper function to set controlq length
2167  * @hw: pointer to the HW instance
2168  */
2169 static void ice_set_ctrlq_len(struct ice_hw *hw)
2170 {
2171 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2172 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2173 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2174 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
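	/* size the mailbox receive queue to the maximum the ARQLEN register
	 * field can hold so the PF can queue as many VF messages as possible
	 */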
2175 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2176 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2177 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2178 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2179 	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2180 	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2181 	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2182 	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2183 }
2184 
2185 /**
2186  * ice_schedule_reset - schedule a reset
2187  * @pf: board private structure
2188  * @reset: reset being requested
2189  */
2190 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2191 {
2192 	struct device *dev = ice_pf_to_dev(pf);
2193 
2194 	/* bail out if earlier reset has failed */
2195 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2196 		dev_dbg(dev, "earlier reset has failed\n");
2197 		return -EIO;
2198 	}
2199 	/* bail if reset/recovery already in progress */
2200 	if (ice_is_reset_in_progress(pf->state)) {
2201 		dev_dbg(dev, "Reset already in progress\n");
2202 		return -EBUSY;
2203 	}
2204 
2205 	ice_unplug_aux_dev(pf);
2206 
2207 	switch (reset) {
2208 	case ICE_RESET_PFR:
2209 		set_bit(ICE_PFR_REQ, pf->state);
2210 		break;
2211 	case ICE_RESET_CORER:
2212 		set_bit(ICE_CORER_REQ, pf->state);
2213 		break;
2214 	case ICE_RESET_GLOBR:
2215 		set_bit(ICE_GLOBR_REQ, pf->state);
2216 		break;
2217 	default:
2218 		return -EINVAL;
2219 	}
2220 
2221 	ice_service_task_schedule(pf);
2222 	return 0;
2223 }
2224 
2225 /**
2226  * ice_irq_affinity_notify - Callback for affinity changes
2227  * @notify: context as to what irq was changed
2228  * @mask: the new affinity mask
2229  *
2230  * This is a callback function used by the irq_set_affinity_notifier function
2231  * so that we may register to receive changes to the irq affinity masks.
2232  */
2233 static void
2234 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2235 			const cpumask_t *mask)
2236 {
2237 	struct ice_q_vector *q_vector =
2238 		container_of(notify, struct ice_q_vector, affinity_notify);
2239 
2240 	cpumask_copy(&q_vector->affinity_mask, mask);
2241 }
2242 
2243 /**
2244  * ice_irq_affinity_release - Callback for affinity notifier release
2245  * @ref: internal core kernel usage
2246  *
2247  * This is a callback function used by the irq_set_affinity_notifier function
2248  * to inform the current notification subscriber that they will no longer
2249  * receive notifications.
2250  */
2251 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2252 
2253 /**
2254  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2255  * @vsi: the VSI being configured
2256  */
2257 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2258 {
2259 	struct ice_hw *hw = &vsi->back->hw;
2260 	int i;
2261 
2262 	ice_for_each_q_vector(vsi, i)
2263 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2264 
2265 	ice_flush(hw);
2266 	return 0;
2267 }
2268 
2269 /**
2270  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2271  * @vsi: the VSI being configured
2272  * @basename: name for the vector
2273  */
2274 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2275 {
2276 	int q_vectors = vsi->num_q_vectors;
2277 	struct ice_pf *pf = vsi->back;
2278 	int base = vsi->base_vector;
2279 	struct device *dev;
2280 	int rx_int_idx = 0;
2281 	int tx_int_idx = 0;
2282 	int vector, err;
2283 	int irq_num;
2284 
2285 	dev = ice_pf_to_dev(pf);
2286 	for (vector = 0; vector < q_vectors; vector++) {
2287 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2288 
2289 		irq_num = pf->msix_entries[base + vector].vector;
2290 
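		/* name each vector after its role and index, e.g. a combined
		 * Tx/Rx vector may end up as "<basename>-TxRx-3"
		 */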
2291 		if (q_vector->tx.ring && q_vector->rx.ring) {
2292 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2293 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2294 			tx_int_idx++;
2295 		} else if (q_vector->rx.ring) {
2296 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2297 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2298 		} else if (q_vector->tx.ring) {
2299 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2300 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2301 		} else {
2302 			/* skip this unused q_vector */
2303 			continue;
2304 		}
2305 		if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
2306 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2307 					       IRQF_SHARED, q_vector->name,
2308 					       q_vector);
2309 		else
2310 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2311 					       0, q_vector->name, q_vector);
2312 		if (err) {
2313 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2314 				   err);
2315 			goto free_q_irqs;
2316 		}
2317 
2318 		/* register for affinity change notifications */
2319 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2320 			struct irq_affinity_notify *affinity_notify;
2321 
2322 			affinity_notify = &q_vector->affinity_notify;
2323 			affinity_notify->notify = ice_irq_affinity_notify;
2324 			affinity_notify->release = ice_irq_affinity_release;
2325 			irq_set_affinity_notifier(irq_num, affinity_notify);
2326 		}
2327 
2328 		/* assign the mask for this irq */
2329 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2330 	}
2331 
2332 	vsi->irqs_ready = true;
2333 	return 0;
2334 
2335 free_q_irqs:
2336 	while (vector) {
2337 		vector--;
2338 		irq_num = pf->msix_entries[base + vector].vector;
2339 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2340 			irq_set_affinity_notifier(irq_num, NULL);
2341 		irq_set_affinity_hint(irq_num, NULL);
		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2343 	}
2344 	return err;
2345 }
2346 
2347 /**
2348  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2349  * @vsi: VSI to setup Tx rings used by XDP
2350  *
2351  * Return 0 on success and negative value on error
2352  */
2353 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2354 {
2355 	struct device *dev = ice_pf_to_dev(vsi->back);
2356 	int i;
2357 
2358 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2359 		u16 xdp_q_idx = vsi->alloc_txq + i;
2360 		struct ice_ring *xdp_ring;
2361 
2362 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2363 
2364 		if (!xdp_ring)
2365 			goto free_xdp_rings;
2366 
2367 		xdp_ring->q_index = xdp_q_idx;
2368 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2369 		xdp_ring->ring_active = false;
2370 		xdp_ring->vsi = vsi;
2371 		xdp_ring->netdev = NULL;
2372 		xdp_ring->dev = dev;
2373 		xdp_ring->count = vsi->num_tx_desc;
2374 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2375 		if (ice_setup_tx_ring(xdp_ring))
2376 			goto free_xdp_rings;
2377 		ice_set_ring_xdp(xdp_ring);
2378 		xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
2379 	}
2380 
2381 	return 0;
2382 
2383 free_xdp_rings:
2384 	for (; i >= 0; i--)
2385 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2386 			ice_free_tx_ring(vsi->xdp_rings[i]);
2387 	return -ENOMEM;
2388 }
2389 
2390 /**
2391  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2392  * @vsi: VSI to set the bpf prog on
2393  * @prog: the bpf prog pointer
2394  */
2395 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2396 {
2397 	struct bpf_prog *old_prog;
2398 	int i;
2399 
2400 	old_prog = xchg(&vsi->xdp_prog, prog);
2401 	if (old_prog)
2402 		bpf_prog_put(old_prog);
2403 
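	/* propagate the new program pointer to every Rx ring; WRITE_ONCE
	 * keeps the update tear-free for readers in the hot path
	 */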
2404 	ice_for_each_rxq(vsi, i)
2405 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2406 }
2407 
2408 /**
2409  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2410  * @vsi: VSI to bring up Tx rings used by XDP
2411  * @prog: bpf program that will be assigned to VSI
2412  *
2413  * Return 0 on success and negative value on error
2414  */
2415 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2416 {
2417 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2418 	int xdp_rings_rem = vsi->num_xdp_txq;
2419 	struct ice_pf *pf = vsi->back;
2420 	struct ice_qs_cfg xdp_qs_cfg = {
2421 		.qs_mutex = &pf->avail_q_mutex,
2422 		.pf_map = pf->avail_txqs,
2423 		.pf_map_size = pf->max_pf_txqs,
2424 		.q_count = vsi->num_xdp_txq,
2425 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2426 		.vsi_map = vsi->txq_map,
2427 		.vsi_map_offset = vsi->alloc_txq,
2428 		.mapping_mode = ICE_VSI_MAP_CONTIG
2429 	};
2430 	enum ice_status status;
2431 	struct device *dev;
2432 	int i, v_idx;
2433 
2434 	dev = ice_pf_to_dev(pf);
2435 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2436 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2437 	if (!vsi->xdp_rings)
2438 		return -ENOMEM;
2439 
2440 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2441 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2442 		goto err_map_xdp;
2443 
2444 	if (ice_xdp_alloc_setup_rings(vsi))
2445 		goto clear_xdp_rings;
2446 
2447 	/* follow the logic from ice_vsi_map_rings_to_vectors */
2448 	ice_for_each_q_vector(vsi, v_idx) {
2449 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2450 		int xdp_rings_per_v, q_id, q_base;
2451 
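		/* spread the remaining XDP rings as evenly as possible over
		 * the remaining vectors, e.g. 10 rings over 4 vectors map as
		 * 3/3/2/2
		 */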
2452 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2453 					       vsi->num_q_vectors - v_idx);
2454 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2455 
2456 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2457 			struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
2458 
2459 			xdp_ring->q_vector = q_vector;
2460 			xdp_ring->next = q_vector->tx.ring;
2461 			q_vector->tx.ring = xdp_ring;
2462 		}
2463 		xdp_rings_rem -= xdp_rings_per_v;
2464 	}
2465 
2466 	/* omit the scheduler update if in reset path; XDP queues will be
2467 	 * taken into account at the end of ice_vsi_rebuild, where
2468 	 * ice_cfg_vsi_lan is being called
2469 	 */
2470 	if (ice_is_reset_in_progress(pf->state))
2471 		return 0;
2472 
2473 	/* tell the Tx scheduler that right now we have
2474 	 * additional queues
2475 	 */
2476 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2477 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2478 
2479 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2480 				 max_txqs);
2481 	if (status) {
2482 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
2483 			ice_stat_str(status));
2484 		goto clear_xdp_rings;
2485 	}
2486 	ice_vsi_assign_bpf_prog(vsi, prog);
2487 
2488 	return 0;
2489 clear_xdp_rings:
2490 	for (i = 0; i < vsi->num_xdp_txq; i++)
2491 		if (vsi->xdp_rings[i]) {
2492 			kfree_rcu(vsi->xdp_rings[i], rcu);
2493 			vsi->xdp_rings[i] = NULL;
2494 		}
2495 
2496 err_map_xdp:
2497 	mutex_lock(&pf->avail_q_mutex);
2498 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2499 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2500 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2501 	}
2502 	mutex_unlock(&pf->avail_q_mutex);
2503 
2504 	devm_kfree(dev, vsi->xdp_rings);
2505 	return -ENOMEM;
2506 }
2507 
2508 /**
2509  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2510  * @vsi: VSI to remove XDP rings
2511  *
2512  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2513  * resources
2514  */
2515 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2516 {
2517 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2518 	struct ice_pf *pf = vsi->back;
2519 	int i, v_idx;
2520 
	/* q_vectors are freed in the reset path, so there's no point in
	 * detaching rings. If the rebuild was triggered by something other
	 * than a reset, the reset bits in pf->state won't be set, so
	 * additionally check the first q_vector against NULL.
	 */
2526 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2527 		goto free_qmap;
2528 
2529 	ice_for_each_q_vector(vsi, v_idx) {
2530 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2531 		struct ice_ring *ring;
2532 
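		/* XDP rings were prepended to the q_vector's Tx ring list, so
		 * walk past them to find the original (non-XDP) list head
		 */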
2533 		ice_for_each_ring(ring, q_vector->tx)
2534 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2535 				break;
2536 
		/* restore the q_vector's Tx ring list head to its pre-XDP value */
2538 		q_vector->tx.ring = ring;
2539 	}
2540 
2541 free_qmap:
2542 	mutex_lock(&pf->avail_q_mutex);
2543 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2544 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2545 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2546 	}
2547 	mutex_unlock(&pf->avail_q_mutex);
2548 
2549 	for (i = 0; i < vsi->num_xdp_txq; i++)
2550 		if (vsi->xdp_rings[i]) {
2551 			if (vsi->xdp_rings[i]->desc)
2552 				ice_free_tx_ring(vsi->xdp_rings[i]);
2553 			kfree_rcu(vsi->xdp_rings[i], rcu);
2554 			vsi->xdp_rings[i] = NULL;
2555 		}
2556 
2557 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2558 	vsi->xdp_rings = NULL;
2559 
2560 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2561 		return 0;
2562 
2563 	ice_vsi_assign_bpf_prog(vsi, NULL);
2564 
2565 	/* notify Tx scheduler that we destroyed XDP queues and bring
2566 	 * back the old number of child nodes
2567 	 */
2568 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2569 		max_txqs[i] = vsi->num_txq;
2570 
2571 	/* change number of XDP Tx queues to 0 */
2572 	vsi->num_xdp_txq = 0;
2573 
2574 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2575 			       max_txqs);
2576 }
2577 
2578 /**
2579  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
 * ice_vsi_rx_napi_schedule - Schedule NAPI on Rx queues from VSI
2581  */
2582 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2583 {
2584 	int i;
2585 
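	/* kick NAPI only on queues backed by an AF_XDP pool so that any
	 * buffers already queued to the pool get processed
	 */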
2586 	ice_for_each_rxq(vsi, i) {
2587 		struct ice_ring *rx_ring = vsi->rx_rings[i];
2588 
2589 		if (rx_ring->xsk_pool)
2590 			napi_schedule(&rx_ring->q_vector->napi);
2591 	}
2592 }
2593 
2594 /**
2595  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2596  * @vsi: VSI to setup XDP for
2597  * @prog: XDP program
2598  * @extack: netlink extended ack
2599  */
2600 static int
2601 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2602 		   struct netlink_ext_ack *extack)
2603 {
2604 	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2605 	bool if_running = netif_running(vsi->netdev);
2606 	int ret = 0, xdp_ring_err = 0;
2607 
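	/* XDP requires the whole frame to fit in a single Rx buffer; reject
	 * MTUs that would exceed the configured Rx buffer length
	 */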
2608 	if (frame_size > vsi->rx_buf_len) {
2609 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2610 		return -EOPNOTSUPP;
2611 	}
2612 
2613 	/* need to stop netdev while setting up the program for Rx rings */
2614 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2615 		ret = ice_down(vsi);
2616 		if (ret) {
2617 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2618 			return ret;
2619 		}
2620 	}
2621 
2622 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2623 		vsi->num_xdp_txq = vsi->alloc_rxq;
2624 		xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2625 		if (xdp_ring_err)
2626 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2627 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2628 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2629 		if (xdp_ring_err)
2630 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2631 	} else {
2632 		ice_vsi_assign_bpf_prog(vsi, prog);
2633 	}
2634 
2635 	if (if_running)
2636 		ret = ice_up(vsi);
2637 
2638 	if (!ret && prog)
2639 		ice_vsi_rx_napi_schedule(vsi);
2640 
2641 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2642 }
2643 
2644 /**
2645  * ice_xdp_safe_mode - XDP handler for safe mode
2646  * @dev: netdevice
2647  * @xdp: XDP command
2648  */
2649 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2650 			     struct netdev_bpf *xdp)
2651 {
2652 	NL_SET_ERR_MSG_MOD(xdp->extack,
2653 			   "Please provide working DDP firmware package in order to use XDP\n"
2654 			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2655 	return -EOPNOTSUPP;
2656 }
2657 
2658 /**
2659  * ice_xdp - implements XDP handler
2660  * @dev: netdevice
2661  * @xdp: XDP command
2662  */
2663 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2664 {
2665 	struct ice_netdev_priv *np = netdev_priv(dev);
2666 	struct ice_vsi *vsi = np->vsi;
2667 
2668 	if (vsi->type != ICE_VSI_PF) {
2669 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2670 		return -EINVAL;
2671 	}
2672 
2673 	switch (xdp->command) {
2674 	case XDP_SETUP_PROG:
2675 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2676 	case XDP_SETUP_XSK_POOL:
2677 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2678 					  xdp->xsk.queue_id);
2679 	default:
2680 		return -EINVAL;
2681 	}
2682 }
2683 
2684 /**
2685  * ice_ena_misc_vector - enable the non-queue interrupts
2686  * @pf: board private structure
2687  */
2688 static void ice_ena_misc_vector(struct ice_pf *pf)
2689 {
2690 	struct ice_hw *hw = &pf->hw;
2691 	u32 val;
2692 
2693 	/* Disable anti-spoof detection interrupt to prevent spurious event
	 * interrupts during a function reset. Anti-spoof functionality is
2695 	 * still supported.
2696 	 */
2697 	val = rd32(hw, GL_MDCK_TX_TDPU);
2698 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2699 	wr32(hw, GL_MDCK_TX_TDPU, val);
2700 
2701 	/* clear things first */
2702 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
2703 	rd32(hw, PFINT_OICR);		/* read to clear */
2704 
2705 	val = (PFINT_OICR_ECC_ERR_M |
2706 	       PFINT_OICR_MAL_DETECT_M |
2707 	       PFINT_OICR_GRST_M |
2708 	       PFINT_OICR_PCI_EXCEPTION_M |
2709 	       PFINT_OICR_VFLR_M |
2710 	       PFINT_OICR_HMC_ERR_M |
2711 	       PFINT_OICR_PE_PUSH_M |
2712 	       PFINT_OICR_PE_CRITERR_M);
2713 
2714 	wr32(hw, PFINT_OICR_ENA, val);
2715 
2716 	/* SW_ITR_IDX = 0, but don't change INTENA */
2717 	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
2718 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2719 }
2720 
2721 /**
2722  * ice_misc_intr - misc interrupt handler
2723  * @irq: interrupt number
 * @data: pointer to the PF structure
2725  */
2726 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
2727 {
2728 	struct ice_pf *pf = (struct ice_pf *)data;
2729 	struct ice_hw *hw = &pf->hw;
2730 	irqreturn_t ret = IRQ_NONE;
2731 	struct device *dev;
2732 	u32 oicr, ena_mask;
2733 
2734 	dev = ice_pf_to_dev(pf);
2735 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
2736 	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
2737 	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
2738 
2739 	oicr = rd32(hw, PFINT_OICR);
2740 	ena_mask = rd32(hw, PFINT_OICR_ENA);
2741 
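	/* OICR is read-to-clear; each cause handled below is stripped from
	 * ena_mask so leftover bits can be flagged as unexpected at the end
	 */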
2742 	if (oicr & PFINT_OICR_SWINT_M) {
2743 		ena_mask &= ~PFINT_OICR_SWINT_M;
2744 		pf->sw_int_count++;
2745 	}
2746 
2747 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
2748 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
2749 		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
2750 	}
2751 	if (oicr & PFINT_OICR_VFLR_M) {
2752 		/* disable any further VFLR event notifications */
2753 		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
2754 			u32 reg = rd32(hw, PFINT_OICR_ENA);
2755 
2756 			reg &= ~PFINT_OICR_VFLR_M;
2757 			wr32(hw, PFINT_OICR_ENA, reg);
2758 		} else {
2759 			ena_mask &= ~PFINT_OICR_VFLR_M;
2760 			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
2761 		}
2762 	}
2763 
2764 	if (oicr & PFINT_OICR_GRST_M) {
2765 		u32 reset;
2766 
2767 		/* we have a reset warning */
2768 		ena_mask &= ~PFINT_OICR_GRST_M;
2769 		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
2770 			GLGEN_RSTAT_RESET_TYPE_S;
2771 
2772 		if (reset == ICE_RESET_CORER)
2773 			pf->corer_count++;
2774 		else if (reset == ICE_RESET_GLOBR)
2775 			pf->globr_count++;
2776 		else if (reset == ICE_RESET_EMPR)
2777 			pf->empr_count++;
2778 		else
2779 			dev_dbg(dev, "Invalid reset type %d\n", reset);
2780 
2781 		/* If a reset cycle isn't already in progress, we set a bit in
2782 		 * pf->state so that the service task can start a reset/rebuild.
2783 		 */
2784 		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
2785 			if (reset == ICE_RESET_CORER)
2786 				set_bit(ICE_CORER_RECV, pf->state);
2787 			else if (reset == ICE_RESET_GLOBR)
2788 				set_bit(ICE_GLOBR_RECV, pf->state);
2789 			else
2790 				set_bit(ICE_EMPR_RECV, pf->state);
2791 
			/* There are a couple of different bits at play here.
2793 			 * hw->reset_ongoing indicates whether the hardware is
2794 			 * in reset. This is set to true when a reset interrupt
2795 			 * is received and set back to false after the driver
2796 			 * has determined that the hardware is out of reset.
2797 			 *
2798 			 * ICE_RESET_OICR_RECV in pf->state indicates
2799 			 * that a post reset rebuild is required before the
2800 			 * driver is operational again. This is set above.
2801 			 *
2802 			 * As this is the start of the reset/rebuild cycle, set
2803 			 * both to indicate that.
2804 			 */
2805 			hw->reset_ongoing = true;
2806 		}
2807 	}
2808 
2809 	if (oicr & PFINT_OICR_TSYN_TX_M) {
2810 		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
2811 		ice_ptp_process_ts(pf);
2812 	}
2813 
2814 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
2815 	if (oicr & ICE_AUX_CRIT_ERR) {
2816 		struct iidc_event *event;
2817 
2818 		ena_mask &= ~ICE_AUX_CRIT_ERR;
2819 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2820 		if (event) {
2821 			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2822 			/* report the entire OICR value to AUX driver */
2823 			event->reg = oicr;
2824 			ice_send_event_to_aux(pf, event);
2825 			kfree(event);
2826 		}
2827 	}
2828 
2829 	/* Report any remaining unexpected interrupts */
2830 	oicr &= ena_mask;
2831 	if (oicr) {
2832 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
2833 		/* If a critical error is pending there is no choice but to
2834 		 * reset the device.
2835 		 */
2836 		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
2837 			    PFINT_OICR_ECC_ERR_M)) {
2838 			set_bit(ICE_PFR_REQ, pf->state);
2839 			ice_service_task_schedule(pf);
2840 		}
2841 	}
2842 	ret = IRQ_HANDLED;
2843 
2844 	ice_service_task_schedule(pf);
2845 	ice_irq_dynamic_ena(hw, NULL, NULL);
2846 
2847 	return ret;
2848 }
2849 
2850 /**
2851  * ice_dis_ctrlq_interrupts - disable control queue interrupts
2852  * @hw: pointer to HW structure
2853  */
2854 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
2855 {
2856 	/* disable Admin queue Interrupt causes */
2857 	wr32(hw, PFINT_FW_CTL,
2858 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
2859 
2860 	/* disable Mailbox queue Interrupt causes */
2861 	wr32(hw, PFINT_MBX_CTL,
2862 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
2863 
2864 	wr32(hw, PFINT_SB_CTL,
2865 	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
2866 
2867 	/* disable Control queue Interrupt causes */
2868 	wr32(hw, PFINT_OICR_CTL,
2869 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
2870 
2871 	ice_flush(hw);
2872 }
2873 
2874 /**
2875  * ice_free_irq_msix_misc - Unroll misc vector setup
2876  * @pf: board private structure
2877  */
2878 static void ice_free_irq_msix_misc(struct ice_pf *pf)
2879 {
2880 	struct ice_hw *hw = &pf->hw;
2881 
2882 	ice_dis_ctrlq_interrupts(hw);
2883 
2884 	/* disable OICR interrupt */
2885 	wr32(hw, PFINT_OICR_ENA, 0);
2886 	ice_flush(hw);
2887 
2888 	if (pf->msix_entries) {
2889 		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
2890 		devm_free_irq(ice_pf_to_dev(pf),
2891 			      pf->msix_entries[pf->oicr_idx].vector, pf);
2892 	}
2893 
2894 	pf->num_avail_sw_msix += 1;
2895 	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2896 }
2897 
2898 /**
2899  * ice_ena_ctrlq_interrupts - enable control queue interrupts
2900  * @hw: pointer to HW structure
2901  * @reg_idx: HW vector index to associate the control queue interrupts with
2902  */
2903 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
2904 {
2905 	u32 val;
2906 
2907 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2908 	       PFINT_OICR_CTL_CAUSE_ENA_M);
2909 	wr32(hw, PFINT_OICR_CTL, val);
2910 
2911 	/* enable Admin queue Interrupt causes */
2912 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2913 	       PFINT_FW_CTL_CAUSE_ENA_M);
2914 	wr32(hw, PFINT_FW_CTL, val);
2915 
2916 	/* enable Mailbox queue Interrupt causes */
2917 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
2918 	       PFINT_MBX_CTL_CAUSE_ENA_M);
2919 	wr32(hw, PFINT_MBX_CTL, val);
2920 
	/* enable Sideband queue Interrupt causes */
2922 	val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
2923 	       PFINT_SB_CTL_CAUSE_ENA_M);
2924 	wr32(hw, PFINT_SB_CTL, val);
2925 
2926 	ice_flush(hw);
2927 }
2928 
2929 /**
2930  * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
2931  * @pf: board private structure
2932  *
 * This sets up the handler for MSI-X vector 0, which is used to manage the
2934  * non-queue interrupts, e.g. AdminQ and errors. This is not used
2935  * when in MSI or Legacy interrupt mode.
2936  */
2937 static int ice_req_irq_msix_misc(struct ice_pf *pf)
2938 {
2939 	struct device *dev = ice_pf_to_dev(pf);
2940 	struct ice_hw *hw = &pf->hw;
2941 	int oicr_idx, err = 0;
2942 
2943 	if (!pf->int_name[0])
2944 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
2945 			 dev_driver_string(dev), dev_name(dev));
2946 
2947 	/* Do not request IRQ but do enable OICR interrupt since settings are
	 * lost during reset. Note that this function is called only during the
	 * rebuild path and not while a reset is in progress.
2950 	 */
2951 	if (ice_is_reset_in_progress(pf->state))
2952 		goto skip_req_irq;
2953 
2954 	/* reserve one vector in irq_tracker for misc interrupts */
2955 	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2956 	if (oicr_idx < 0)
2957 		return oicr_idx;
2958 
2959 	pf->num_avail_sw_msix -= 1;
2960 	pf->oicr_idx = (u16)oicr_idx;
2961 
2962 	err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
2963 			       ice_misc_intr, 0, pf->int_name, pf);
2964 	if (err) {
2965 		dev_err(dev, "devm_request_irq for %s failed: %d\n",
2966 			pf->int_name, err);
		ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2968 		pf->num_avail_sw_msix += 1;
2969 		return err;
2970 	}
2971 
2972 skip_req_irq:
2973 	ice_ena_misc_vector(pf);
2974 
2975 	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
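	/* throttle the misc vector to roughly 8K interrupts/sec (ICE_ITR_8K) */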
2976 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
2977 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
2978 
2979 	ice_flush(hw);
2980 	ice_irq_dynamic_ena(hw, NULL, NULL);
2981 
2982 	return 0;
2983 }
2984 
2985 /**
2986  * ice_napi_add - register NAPI handler for the VSI
2987  * @vsi: VSI for which NAPI handler is to be registered
2988  *
2989  * This function is only called in the driver's load path. Registering the NAPI
 * handler is done in ice_vsi_alloc_q_vector() for all other cases (e.g. resume,
2991  * reset/rebuild, etc.)
2992  */
2993 static void ice_napi_add(struct ice_vsi *vsi)
2994 {
2995 	int v_idx;
2996 
2997 	if (!vsi->netdev)
2998 		return;
2999 
3000 	ice_for_each_q_vector(vsi, v_idx)
3001 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3002 			       ice_napi_poll, NAPI_POLL_WEIGHT);
3003 }
3004 
3005 /**
 * ice_set_ops - set netdev and ethtool ops for the given netdev
3007  * @netdev: netdev instance
3008  */
3009 static void ice_set_ops(struct net_device *netdev)
3010 {
3011 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3012 
3013 	if (ice_is_safe_mode(pf)) {
3014 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3015 		ice_set_ethtool_safe_mode_ops(netdev);
3016 		return;
3017 	}
3018 
3019 	netdev->netdev_ops = &ice_netdev_ops;
3020 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3021 	ice_set_ethtool_ops(netdev);
3022 }
3023 
3024 /**
3025  * ice_set_netdev_features - set features for the given netdev
3026  * @netdev: netdev instance
3027  */
3028 static void ice_set_netdev_features(struct net_device *netdev)
3029 {
3030 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3031 	netdev_features_t csumo_features;
3032 	netdev_features_t vlano_features;
3033 	netdev_features_t dflt_features;
3034 	netdev_features_t tso_features;
3035 
3036 	if (ice_is_safe_mode(pf)) {
3037 		/* safe mode */
3038 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3039 		netdev->hw_features = netdev->features;
3040 		return;
3041 	}
3042 
3043 	dflt_features = NETIF_F_SG	|
3044 			NETIF_F_HIGHDMA	|
3045 			NETIF_F_NTUPLE	|
3046 			NETIF_F_RXHASH;
3047 
3048 	csumo_features = NETIF_F_RXCSUM	  |
3049 			 NETIF_F_IP_CSUM  |
3050 			 NETIF_F_SCTP_CRC |
3051 			 NETIF_F_IPV6_CSUM;
3052 
3053 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3054 			 NETIF_F_HW_VLAN_CTAG_TX     |
3055 			 NETIF_F_HW_VLAN_CTAG_RX;
3056 
3057 	tso_features = NETIF_F_TSO			|
3058 		       NETIF_F_TSO_ECN			|
3059 		       NETIF_F_TSO6			|
3060 		       NETIF_F_GSO_GRE			|
3061 		       NETIF_F_GSO_UDP_TUNNEL		|
3062 		       NETIF_F_GSO_GRE_CSUM		|
3063 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3064 		       NETIF_F_GSO_PARTIAL		|
3065 		       NETIF_F_GSO_IPXIP4		|
3066 		       NETIF_F_GSO_IPXIP6		|
3067 		       NETIF_F_GSO_UDP_L4;
3068 
3069 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3070 					NETIF_F_GSO_GRE_CSUM;
3071 	/* set features that user can change */
3072 	netdev->hw_features = dflt_features | csumo_features |
3073 			      vlano_features | tso_features;
3074 
3075 	/* add support for HW_CSUM on packets with MPLS header */
	netdev->mpls_features = NETIF_F_HW_CSUM;
3077 
3078 	/* enable features */
3079 	netdev->features |= netdev->hw_features;
3080 	/* encap and VLAN devices inherit default, csumo and tso features */
3081 	netdev->hw_enc_features |= dflt_features | csumo_features |
3082 				   tso_features;
3083 	netdev->vlan_features |= dflt_features | csumo_features |
3084 				 tso_features;
3085 }
3086 
3087 /**
3088  * ice_cfg_netdev - Allocate, configure and register a netdev
3089  * @vsi: the VSI associated with the new netdev
3090  *
3091  * Returns 0 on success, negative value on failure
3092  */
3093 static int ice_cfg_netdev(struct ice_vsi *vsi)
3094 {
3095 	struct ice_netdev_priv *np;
3096 	struct net_device *netdev;
3097 	u8 mac_addr[ETH_ALEN];
3098 
3099 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
3100 				    vsi->alloc_rxq);
3101 	if (!netdev)
3102 		return -ENOMEM;
3103 
3104 	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3105 	vsi->netdev = netdev;
3106 	np = netdev_priv(netdev);
3107 	np->vsi = vsi;
3108 
3109 	ice_set_netdev_features(netdev);
3110 
3111 	ice_set_ops(netdev);
3112 
3113 	if (vsi->type == ICE_VSI_PF) {
3114 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
3115 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3116 		ether_addr_copy(netdev->dev_addr, mac_addr);
3117 		ether_addr_copy(netdev->perm_addr, mac_addr);
3118 	}
3119 
3120 	netdev->priv_flags |= IFF_UNICAST_FLT;
3121 
3122 	/* Setup netdev TC information */
3123 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3124 
3125 	/* setup watchdog timeout value to be 5 second */
	/* set the watchdog timeout value to 5 seconds */
3127 
3128 	netdev->min_mtu = ETH_MIN_MTU;
3129 	netdev->max_mtu = ICE_MAX_MTU;
3130 
3131 	return 0;
3132 }
3133 
3134 /**
3135  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3136  * @lut: Lookup table
3137  * @rss_table_size: Lookup table size
3138  * @rss_size: Range of queue number for hashing
3139  */
3140 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3141 {
3142 	u16 i;
3143 
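	/* e.g. rss_table_size = 8 and rss_size = 3 fills the LUT with
	 * 0 1 2 0 1 2 0 1
	 */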
3144 	for (i = 0; i < rss_table_size; i++)
3145 		lut[i] = i % rss_size;
3146 }
3147 
3148 /**
3149  * ice_pf_vsi_setup - Set up a PF VSI
3150  * @pf: board private structure
3151  * @pi: pointer to the port_info instance
3152  *
3153  * Returns pointer to the successfully allocated VSI software struct
3154  * on success, otherwise returns NULL on failure.
3155  */
3156 static struct ice_vsi *
3157 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3158 {
3159 	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
3160 }
3161 
3162 /**
3163  * ice_ctrl_vsi_setup - Set up a control VSI
3164  * @pf: board private structure
3165  * @pi: pointer to the port_info instance
3166  *
3167  * Returns pointer to the successfully allocated VSI software struct
3168  * on success, otherwise returns NULL on failure.
3169  */
3170 static struct ice_vsi *
3171 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3172 {
3173 	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
3174 }
3175 
3176 /**
3177  * ice_lb_vsi_setup - Set up a loopback VSI
3178  * @pf: board private structure
3179  * @pi: pointer to the port_info instance
3180  *
3181  * Returns pointer to the successfully allocated VSI software struct
3182  * on success, otherwise returns NULL on failure.
3183  */
3184 struct ice_vsi *
3185 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3186 {
3187 	return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
3188 }
3189 
3190 /**
3191  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3192  * @netdev: network interface to be adjusted
3193  * @proto: unused protocol
3194  * @vid: VLAN ID to be added
3195  *
3196  * net_device_ops implementation for adding VLAN IDs
3197  */
3198 static int
3199 ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
3200 		    u16 vid)
3201 {
3202 	struct ice_netdev_priv *np = netdev_priv(netdev);
3203 	struct ice_vsi *vsi = np->vsi;
3204 	int ret;
3205 
3206 	/* VLAN 0 is added by default during load/reset */
3207 	if (!vid)
3208 		return 0;
3209 
3210 	/* Enable VLAN pruning when a VLAN other than 0 is added */
3211 	if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3212 		ret = ice_cfg_vlan_pruning(vsi, true, false);
3213 		if (ret)
3214 			return ret;
3215 	}
3216 
3217 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3218 	 * packets aren't pruned by the device's internal switch on Rx
3219 	 */
3220 	ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3221 	if (!ret)
3222 		set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3223 
3224 	return ret;
3225 }
3226 
3227 /**
3228  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3229  * @netdev: network interface to be adjusted
3230  * @proto: unused protocol
3231  * @vid: VLAN ID to be removed
3232  *
3233  * net_device_ops implementation for removing VLAN IDs
3234  */
3235 static int
3236 ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
3237 		     u16 vid)
3238 {
3239 	struct ice_netdev_priv *np = netdev_priv(netdev);
3240 	struct ice_vsi *vsi = np->vsi;
3241 	int ret;
3242 
3243 	/* don't allow removal of VLAN 0 */
3244 	if (!vid)
3245 		return 0;
3246 
3247 	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
3248 	 * information
3249 	 */
3250 	ret = ice_vsi_kill_vlan(vsi, vid);
3251 	if (ret)
3252 		return ret;
3253 
3254 	/* Disable pruning when VLAN 0 is the only VLAN rule */
3255 	if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3256 		ret = ice_cfg_vlan_pruning(vsi, false, false);
3257 
3258 	set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3259 	return ret;
3260 }
3261 
3262 /**
3263  * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3264  * @pf: board private structure
3265  *
3266  * Returns 0 on success, negative value on failure
3267  */
3268 static int ice_setup_pf_sw(struct ice_pf *pf)
3269 {
3270 	struct ice_vsi *vsi;
3271 	int status = 0;
3272 
3273 	if (ice_is_reset_in_progress(pf->state))
3274 		return -EBUSY;
3275 
3276 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3277 	if (!vsi)
3278 		return -ENOMEM;
3279 
3280 	status = ice_cfg_netdev(vsi);
3281 	if (status) {
3282 		status = -ENODEV;
3283 		goto unroll_vsi_setup;
3284 	}
3285 	/* netdev has to be configured before setting frame size */
3286 	ice_vsi_cfg_frame_size(vsi);
3287 
3288 	/* Setup DCB netlink interface */
3289 	ice_dcbnl_setup(vsi);
3290 
3291 	/* registering the NAPI handler requires both the queues and
3292 	 * netdev to be created, which are done in ice_pf_vsi_setup()
3293 	 * and ice_cfg_netdev() respectively
3294 	 */
3295 	ice_napi_add(vsi);
3296 
3297 	status = ice_set_cpu_rx_rmap(vsi);
3298 	if (status) {
3299 		dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
3300 			vsi->vsi_num, status);
3301 		status = -EINVAL;
3302 		goto unroll_napi_add;
3303 	}
3304 	status = ice_init_mac_fltr(pf);
3305 	if (status)
3306 		goto free_cpu_rx_map;
3307 
3308 	return status;
3309 
3310 free_cpu_rx_map:
3311 	ice_free_cpu_rx_rmap(vsi);
3312 
3313 unroll_napi_add:
3314 	if (vsi) {
3315 		ice_napi_del(vsi);
3316 		if (vsi->netdev) {
3317 			clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3318 			free_netdev(vsi->netdev);
3319 			vsi->netdev = NULL;
3320 		}
3321 	}
3322 
3323 unroll_vsi_setup:
3324 	ice_vsi_release(vsi);
3325 	return status;
3326 }
3327 
3328 /**
3329  * ice_get_avail_q_count - Get count of queues in use
 * ice_get_avail_q_count - Get count of available (not in use) queues
 * @pf_qmap: bitmap from which to count available queues
3332  * @size: size of the bitmap
3333  */
3334 static u16
3335 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3336 {
3337 	unsigned long bit;
3338 	u16 count = 0;
3339 
3340 	mutex_lock(lock);
3341 	for_each_clear_bit(bit, pf_qmap, size)
3342 		count++;
3343 	mutex_unlock(lock);
3344 
3345 	return count;
3346 }
3347 
3348 /**
3349  * ice_get_avail_txq_count - Get count of Tx queues in use
 * ice_get_avail_txq_count - Get count of available Tx queues
3351  */
3352 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3353 {
3354 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3355 				     pf->max_pf_txqs);
3356 }
3357 
3358 /**
3359  * ice_get_avail_rxq_count - Get count of Rx queues in use
 * ice_get_avail_rxq_count - Get count of available Rx queues
3361  */
3362 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3363 {
3364 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3365 				     pf->max_pf_rxqs);
3366 }
3367 
3368 /**
 * ice_deinit_pf - Unroll initializations done by ice_init_pf
3370  * @pf: board private structure to initialize
3371  */
3372 static void ice_deinit_pf(struct ice_pf *pf)
3373 {
3374 	ice_service_task_stop(pf);
3375 	mutex_destroy(&pf->sw_mutex);
3376 	mutex_destroy(&pf->tc_mutex);
3377 	mutex_destroy(&pf->avail_q_mutex);
3378 
3379 	if (pf->avail_txqs) {
3380 		bitmap_free(pf->avail_txqs);
3381 		pf->avail_txqs = NULL;
3382 	}
3383 
3384 	if (pf->avail_rxqs) {
3385 		bitmap_free(pf->avail_rxqs);
3386 		pf->avail_rxqs = NULL;
3387 	}
3388 
3389 	if (pf->ptp.clock)
3390 		ptp_clock_unregister(pf->ptp.clock);
3391 }
3392 
3393 /**
3394  * ice_set_pf_caps - set PFs capability flags
 * ice_set_pf_caps - set PF's capability flags
3396  */
3397 static void ice_set_pf_caps(struct ice_pf *pf)
3398 {
3399 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3400 
3401 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3402 	clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
3403 	if (func_caps->common_cap.rdma) {
3404 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3405 		set_bit(ICE_FLAG_AUX_ENA, pf->flags);
3406 	}
3407 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3408 	if (func_caps->common_cap.dcb)
3409 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3410 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3411 	if (func_caps->common_cap.sr_iov_1_1) {
3412 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3413 		pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
3414 					      ICE_MAX_VF_COUNT);
3415 	}
3416 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3417 	if (func_caps->common_cap.rss_table_size)
3418 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3419 
3420 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3421 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3422 		u16 unused;
3423 
3424 		/* ctrl_vsi_idx will be set to a valid value when flow director
		 * is set up by ice_init_fdir
3426 		 */
3427 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3428 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3429 		/* force guaranteed filter pool for PF */
3430 		ice_alloc_fd_guar_item(&pf->hw, &unused,
3431 				       func_caps->fd_fltr_guar);
3432 		/* force shared filter pool for PF */
3433 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3434 				       func_caps->fd_fltr_best_effort);
3435 	}
3436 
3437 	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3438 	if (func_caps->common_cap.ieee_1588)
3439 		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3440 
3441 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3442 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3443 }
3444 
3445 /**
3446  * ice_init_pf - Initialize general software structures (struct ice_pf)
3447  * @pf: board private structure to initialize
3448  */
3449 static int ice_init_pf(struct ice_pf *pf)
3450 {
3451 	ice_set_pf_caps(pf);
3452 
3453 	mutex_init(&pf->sw_mutex);
3454 	mutex_init(&pf->tc_mutex);
3455 
3456 	INIT_HLIST_HEAD(&pf->aq_wait_list);
3457 	spin_lock_init(&pf->aq_wait_lock);
3458 	init_waitqueue_head(&pf->aq_wait_queue);
3459 
3460 	init_waitqueue_head(&pf->reset_wait_queue);
3461 
3462 	/* setup service timer and periodic service task */
3463 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3464 	pf->serv_tmr_period = HZ;
3465 	INIT_WORK(&pf->serv_task, ice_service_task);
3466 	clear_bit(ICE_SERVICE_SCHED, pf->state);
3467 
3468 	mutex_init(&pf->avail_q_mutex);
3469 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3470 	if (!pf->avail_txqs)
3471 		return -ENOMEM;
3472 
3473 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3474 	if (!pf->avail_rxqs) {
		bitmap_free(pf->avail_txqs);
3476 		pf->avail_txqs = NULL;
3477 		return -ENOMEM;
3478 	}
3479 
3480 	return 0;
3481 }
3482 
3483 /**
3484  * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3485  * @pf: board private structure
3486  *
 * Compute the number of MSI-X vectors required (v_budget) and request them
 * from the OS. Returns the number of vectors reserved, or negative on failure.
3489  */
3490 static int ice_ena_msix_range(struct ice_pf *pf)
3491 {
3492 	int num_cpus, v_left, v_actual, v_other, v_budget = 0;
3493 	struct device *dev = ice_pf_to_dev(pf);
3494 	int needed, err, i;
3495 
3496 	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3497 	num_cpus = num_online_cpus();
3498 
3499 	/* reserve for LAN miscellaneous handler */
3500 	needed = ICE_MIN_LAN_OICR_MSIX;
3501 	if (v_left < needed)
3502 		goto no_hw_vecs_left_err;
3503 	v_budget += needed;
3504 	v_left -= needed;
3505 
3506 	/* reserve for flow director */
3507 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3508 		needed = ICE_FDIR_MSIX;
3509 		if (v_left < needed)
3510 			goto no_hw_vecs_left_err;
3511 		v_budget += needed;
3512 		v_left -= needed;
3513 	}
3514 
3515 	/* total used for non-traffic vectors */
3516 	v_other = v_budget;
3517 
3518 	/* reserve vectors for LAN traffic */
3519 	needed = num_cpus;
3520 	if (v_left < needed)
3521 		goto no_hw_vecs_left_err;
3522 	pf->num_lan_msix = needed;
3523 	v_budget += needed;
3524 	v_left -= needed;
3525 
3526 	/* reserve vectors for RDMA auxiliary driver */
3527 	if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3528 		needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
3529 		if (v_left < needed)
3530 			goto no_hw_vecs_left_err;
3531 		pf->num_rdma_msix = needed;
3532 		v_budget += needed;
3533 		v_left -= needed;
3534 	}
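
	/* Worked example (a sketch; the exact constants live in ice.h and
	 * may differ): with 8 online CPUs and both flow director and RDMA
	 * enabled, the budget works out to
	 *   v_budget = ICE_MIN_LAN_OICR_MSIX + ICE_FDIR_MSIX
	 *	      + 8 (LAN) + (8 + ICE_RDMA_NUM_AEQ_MSIX) (RDMA)
	 * with v_other covering just the first two (non-traffic) terms.
	 */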
3535 
3536 	pf->msix_entries = devm_kcalloc(dev, v_budget,
3537 					sizeof(*pf->msix_entries), GFP_KERNEL);
3538 	if (!pf->msix_entries) {
3539 		err = -ENOMEM;
3540 		goto exit_err;
3541 	}
3542 
3543 	for (i = 0; i < v_budget; i++)
3544 		pf->msix_entries[i].entry = i;
3545 
3546 	/* actually reserve the vectors */
3547 	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3548 					 ICE_MIN_MSIX, v_budget);
3549 	if (v_actual < 0) {
3550 		dev_err(dev, "unable to reserve MSI-X vectors\n");
3551 		err = v_actual;
3552 		goto msix_err;
3553 	}
3554 
3555 	if (v_actual < v_budget) {
3556 		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3557 			 v_budget, v_actual);
3558 
3559 		if (v_actual < ICE_MIN_MSIX) {
3560 			/* error if we can't get minimum vectors */
3561 			pci_disable_msix(pf->pdev);
3562 			err = -ERANGE;
3563 			goto msix_err;
3564 		} else {
3565 			int v_remain = v_actual - v_other;
3566 			int v_rdma = 0, v_min_rdma = 0;
3567 
3568 			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3569 				/* Need at least 1 interrupt in addition to
3570 				 * AEQ MSIX
3571 				 */
3572 				v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
3573 				v_min_rdma = ICE_MIN_RDMA_MSIX;
3574 			}
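
			/* Numeric sketch of the three fallback branches
			 * below, assuming ICE_MIN_LAN_TXRX_MSIX == 1,
			 * ICE_MIN_RDMA_MSIX == 2 and ICE_RDMA_NUM_AEQ_MSIX
			 * == 4 (hedged; verify against ice.h): with
			 * v_remain == 12 the final branch runs, giving RDMA
			 * (12 - 4) / 2 + 4 == 8 vectors and LAN 12 - 8 == 4.
			 */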
3575 
3576 			if (v_actual == ICE_MIN_MSIX ||
3577 			    v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
3578 				dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
3579 				clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3580 
3581 				pf->num_rdma_msix = 0;
3582 				pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3583 			} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
3584 				   (v_remain - v_rdma < v_rdma)) {
3585 				/* Support minimum RDMA and give remaining
3586 				 * vectors to LAN MSIX
3587 				 */
3588 				pf->num_rdma_msix = v_min_rdma;
3589 				pf->num_lan_msix = v_remain - v_min_rdma;
3590 			} else {
3591 				/* Split remaining MSIX with RDMA after
3592 				 * accounting for AEQ MSIX
3593 				 */
3594 				pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
3595 						    ICE_RDMA_NUM_AEQ_MSIX;
3596 				pf->num_lan_msix = v_remain - pf->num_rdma_msix;
3597 			}
3598 
3599 			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
3600 				   pf->num_lan_msix);
3601 
3602 			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
3603 				dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
3604 					   pf->num_rdma_msix);
3605 		}
3606 	}
3607 
3608 	return v_actual;
3609 
3610 msix_err:
3611 	devm_kfree(dev, pf->msix_entries);
3612 	goto exit_err;
3613 
3614 no_hw_vecs_left_err:
3615 	dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3616 		needed, v_left);
3617 	err = -ERANGE;
3618 exit_err:
3619 	pf->num_rdma_msix = 0;
3620 	pf->num_lan_msix = 0;
3621 	return err;
3622 }
3623 
3624 /**
3625  * ice_dis_msix - Disable MSI-X interrupt setup in OS
3626  * @pf: board private structure
3627  */
3628 static void ice_dis_msix(struct ice_pf *pf)
3629 {
3630 	pci_disable_msix(pf->pdev);
3631 	devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3632 	pf->msix_entries = NULL;
3633 }
3634 
3635 /**
3636  * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3637  * @pf: board private structure
3638  */
3639 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3640 {
3641 	ice_dis_msix(pf);
3642 
3643 	if (pf->irq_tracker) {
3644 		devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3645 		pf->irq_tracker = NULL;
3646 	}
3647 }
3648 
3649 /**
3650  * ice_init_interrupt_scheme - Determine proper interrupt scheme
3651  * @pf: board private structure to initialize
3652  */
3653 static int ice_init_interrupt_scheme(struct ice_pf *pf)
3654 {
3655 	int vectors;
3656 
3657 	vectors = ice_ena_msix_range(pf);
3658 
3659 	if (vectors < 0)
3660 		return vectors;
3661 
3662 	/* set up vector assignment tracking */
3663 	pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
3664 				       struct_size(pf->irq_tracker, list, vectors),
3665 				       GFP_KERNEL);
3666 	if (!pf->irq_tracker) {
3667 		ice_dis_msix(pf);
3668 		return -ENOMEM;
3669 	}
3670 
3671 	/* populate the SW interrupt pool with the number of OS-granted IRQs */
3672 	pf->num_avail_sw_msix = (u16)vectors;
3673 	pf->irq_tracker->num_entries = (u16)vectors;
3674 	pf->irq_tracker->end = pf->irq_tracker->num_entries;
3675 
3676 	return 0;
3677 }
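
/* A hedged note on how the pool above is consumed: callers carve contiguous
 * ranges out of irq_tracker->list via ice_get_res() (see ice_lib.c) and
 * adjust num_avail_sw_msix accordingly as vectors are handed out.
 */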
3678 
3679 /**
3680  * ice_is_wol_supported - check if WoL is supported
3681  * @hw: pointer to hardware info
3682  *
3683  * Check if WoL is supported based on the HW configuration.
3684  * Returns true if NVM supports and enables WoL for this port, false otherwise
3685  */
3686 bool ice_is_wol_supported(struct ice_hw *hw)
3687 {
3688 	u16 wol_ctrl;
3689 
3690 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3691 	 * word) indicates WoL is not supported on the corresponding PF ID.
3692 	 */
3693 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3694 		return false;
3695 
3696 	return !(BIT(hw->port_info->lport) & wol_ctrl);
3697 }
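
/* e.g. a wol_ctrl of 0x0003 has bits 0 and 1 set, so WoL is reported as
 * unsupported on lports 0 and 1 and supported on the remaining ports.
 */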
3698 
3699 /**
3700  * ice_vsi_recfg_qs - Change the number of queues on a VSI
3701  * @vsi: VSI being changed
3702  * @new_rx: new number of Rx queues
3703  * @new_tx: new number of Tx queues
3704  *
3705  * Only change the number of queues if new_tx or new_rx is non-zero.
3706  *
3707  * Returns 0 on success.
3708  */
3709 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
3710 {
3711 	struct ice_pf *pf = vsi->back;
3712 	int err = 0, timeout = 50;
3713 
3714 	if (!new_rx && !new_tx)
3715 		return -EINVAL;
3716 
3717 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3718 		timeout--;
3719 		if (!timeout)
3720 			return -EBUSY;
3721 		usleep_range(1000, 2000);
3722 	}
3723 
3724 	if (new_tx)
3725 		vsi->req_txq = (u16)new_tx;
3726 	if (new_rx)
3727 		vsi->req_rxq = (u16)new_rx;
3728 
3729 	/* set for the next time the netdev is started */
3730 	if (!netif_running(vsi->netdev)) {
3731 		ice_vsi_rebuild(vsi, false);
3732 		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3733 		goto done;
3734 	}
3735 
3736 	ice_vsi_close(vsi);
3737 	ice_vsi_rebuild(vsi, false);
3738 	ice_pf_dcb_recfg(pf);
3739 	ice_vsi_open(vsi);
3740 done:
3741 	clear_bit(ICE_CFG_BUSY, pf->state);
3742 	return err;
3743 }
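
/* This path is reached from ethtool channel reconfiguration (hedged: via
 * ice_set_channels() in ice_ethtool.c), e.g.
 *   ethtool -L eth0 combined 8
 * which lands here with new_rx == new_tx == 8 (eth0 is a placeholder name).
 */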
3744 
3745 /**
3746  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3747  * @pf: PF to configure
3748  *
3749  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3750  * VSI can still Tx/Rx VLAN tagged packets.
3751  */
3752 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
3753 {
3754 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
3755 	struct ice_vsi_ctx *ctxt;
3756 	enum ice_status status;
3757 	struct ice_hw *hw;
3758 
3759 	if (!vsi)
3760 		return;
3761 
3762 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
3763 	if (!ctxt)
3764 		return;
3765 
3766 	hw = &pf->hw;
3767 	ctxt->info = vsi->info;
3768 
3769 	ctxt->info.valid_sections =
3770 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
3771 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
3772 			    ICE_AQ_VSI_PROP_SW_VALID);
3773 
3774 	/* disable VLAN anti-spoof */
3775 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3776 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
3777 
3778 	/* disable VLAN pruning and keep all other settings */
3779 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3780 
3781 	/* allow all VLANs on Tx and don't strip on Rx */
3782 	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
3783 		ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3784 
3785 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
3786 	if (status) {
3787 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
3788 			ice_stat_str(status),
3789 			ice_aq_str(hw->adminq.sq_last_status));
3790 	} else {
3791 		vsi->info.sec_flags = ctxt->info.sec_flags;
3792 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
3793 		vsi->info.vlan_flags = ctxt->info.vlan_flags;
3794 	}
3795 
3796 	kfree(ctxt);
3797 }
3798 
3799 /**
3800  * ice_log_pkg_init - log result of DDP package load
3801  * @hw: pointer to hardware info
3802  * @status: status of package load
3803  */
3804 static void
3805 ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
3806 {
3807 	struct ice_pf *pf = (struct ice_pf *)hw->back;
3808 	struct device *dev = ice_pf_to_dev(pf);
3809 
3810 	switch (*status) {
3811 	case ICE_SUCCESS:
3812 		/* The package download AdminQ command returned success, either
3813 		 * because this download succeeded or because it returned
3814 		 * ICE_ERR_AQ_NO_WORK since a package is already loaded on the
3815 		 * device.
3816 		 */
3816 		if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
3817 		    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
3818 		    hw->pkg_ver.update == hw->active_pkg_ver.update &&
3819 		    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
3820 		    !memcmp(hw->pkg_name, hw->active_pkg_name,
3821 			    sizeof(hw->pkg_name))) {
3822 			if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
3823 				dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
3824 					 hw->active_pkg_name,
3825 					 hw->active_pkg_ver.major,
3826 					 hw->active_pkg_ver.minor,
3827 					 hw->active_pkg_ver.update,
3828 					 hw->active_pkg_ver.draft);
3829 			else
3830 				dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
3831 					 hw->active_pkg_name,
3832 					 hw->active_pkg_ver.major,
3833 					 hw->active_pkg_ver.minor,
3834 					 hw->active_pkg_ver.update,
3835 					 hw->active_pkg_ver.draft);
3836 		} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
3837 			   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
3838 			dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
3839 				hw->active_pkg_name,
3840 				hw->active_pkg_ver.major,
3841 				hw->active_pkg_ver.minor,
3842 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3843 			*status = ICE_ERR_NOT_SUPPORTED;
3844 		} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3845 			   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
3846 			dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
3847 				 hw->active_pkg_name,
3848 				 hw->active_pkg_ver.major,
3849 				 hw->active_pkg_ver.minor,
3850 				 hw->active_pkg_ver.update,
3851 				 hw->active_pkg_ver.draft,
3852 				 hw->pkg_name,
3853 				 hw->pkg_ver.major,
3854 				 hw->pkg_ver.minor,
3855 				 hw->pkg_ver.update,
3856 				 hw->pkg_ver.draft);
3857 		} else {
3858 			dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system.  If the problem persists, update the NVM.  Entering Safe Mode.\n");
3859 			*status = ICE_ERR_NOT_SUPPORTED;
3860 		}
3861 		break;
3862 	case ICE_ERR_FW_DDP_MISMATCH:
3863 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
3864 		break;
3865 	case ICE_ERR_BUF_TOO_SHORT:
3866 	case ICE_ERR_CFG:
3867 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
3868 		break;
3869 	case ICE_ERR_NOT_SUPPORTED:
3870 		/* Package File version not supported */
3871 		if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
3872 		    (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3873 		     hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
3874 			dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
3875 		else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
3876 			 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3877 			  hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
3878 			dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
3879 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3880 		break;
3881 	case ICE_ERR_AQ_ERROR:
3882 		switch (hw->pkg_dwnld_status) {
3883 		case ICE_AQ_RC_ENOSEC:
3884 		case ICE_AQ_RC_EBADSIG:
3885 			dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
3886 			return;
3887 		case ICE_AQ_RC_ESVN:
3888 			dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
3889 			return;
3890 		case ICE_AQ_RC_EBADMAN:
3891 		case ICE_AQ_RC_EBADBUF:
3892 			dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
3893 			/* poll for reset to complete */
3894 			if (ice_check_reset(hw))
3895 				dev_err(dev, "Error resetting device. Please reload the driver\n");
3896 			return;
3897 		default:
3898 			break;
3899 		}
3900 		fallthrough;
3901 	default:
3902 		dev_err(dev, "An unknown error (%d) occurred when loading the DDP package.  Entering Safe Mode.\n",
3903 			*status);
3904 		break;
3905 	}
3906 }
3907 
3908 /**
3909  * ice_load_pkg - load/reload the DDP Package file
3910  * @firmware: firmware structure when firmware requested or NULL for reload
3911  * @pf: pointer to the PF instance
3912  *
3913  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
3914  * initialize HW tables.
3915  */
3916 static void
3917 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
3918 {
3919 	enum ice_status status = ICE_ERR_PARAM;
3920 	struct device *dev = ice_pf_to_dev(pf);
3921 	struct ice_hw *hw = &pf->hw;
3922 
3923 	/* Load DDP Package */
3924 	if (firmware && !hw->pkg_copy) {
3925 		status = ice_copy_and_init_pkg(hw, firmware->data,
3926 					       firmware->size);
3927 		ice_log_pkg_init(hw, &status);
3928 	} else if (!firmware && hw->pkg_copy) {
3929 		/* Reload package during rebuild after CORER/GLOBR reset */
3930 		status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
3931 		ice_log_pkg_init(hw, &status);
3932 	} else {
3933 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
3934 	}
3935 
3936 	if (status) {
3937 		/* Safe Mode */
3938 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3939 		return;
3940 	}
3941 
3942 	/* A successful package download is the precondition for advanced
3943 	 * features, hence set the ICE_FLAG_ADV_FEATURES flag
3944 	 */
3945 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3946 }
3947 
3948 /**
3949  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
3950  * @pf: pointer to the PF structure
3951  *
3952  * There is no error returned here because the driver should be able to handle
3953  * 128 Byte cache lines, so we only print a warning in case issues are seen,
3954  * specifically with Tx.
3955  */
3956 static void ice_verify_cacheline_size(struct ice_pf *pf)
3957 {
3958 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
3959 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
3960 			 ICE_CACHE_LINE_BYTES);
3961 }
3962 
3963 /**
3964  * ice_send_version - update firmware with driver version
3965  * @pf: PF struct
3966  *
3967  * Returns ICE_SUCCESS on success, else error code
3968  */
3969 static enum ice_status ice_send_version(struct ice_pf *pf)
3970 {
3971 	struct ice_driver_ver dv;
3972 
3973 	dv.major_ver = 0xff;
3974 	dv.minor_ver = 0xff;
3975 	dv.build_ver = 0xff;
3976 	dv.subbuild_ver = 0;
3977 	strscpy((char *)dv.driver_string, UTS_RELEASE,
3978 		sizeof(dv.driver_string));
3979 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
3980 }
3981 
3982 /**
3983  * ice_init_fdir - Initialize flow director VSI and configuration
3984  * @pf: pointer to the PF instance
3985  *
3986  * returns 0 on success, negative on error
3987  */
3988 static int ice_init_fdir(struct ice_pf *pf)
3989 {
3990 	struct device *dev = ice_pf_to_dev(pf);
3991 	struct ice_vsi *ctrl_vsi;
3992 	int err;
3993 
3994 	/* Side Band Flow Director needs to have a control VSI.
3995 	 * Allocate it and store it in the PF.
3996 	 */
3997 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
3998 	if (!ctrl_vsi) {
3999 		dev_dbg(dev, "could not create control VSI\n");
4000 		return -ENOMEM;
4001 	}
4002 
4003 	err = ice_vsi_open_ctrl(ctrl_vsi);
4004 	if (err) {
4005 		dev_dbg(dev, "could not open control VSI\n");
4006 		goto err_vsi_open;
4007 	}
4008 
4009 	mutex_init(&pf->hw.fdir_fltr_lock);
4010 
4011 	err = ice_fdir_create_dflt_rules(pf);
4012 	if (err)
4013 		goto err_fdir_rule;
4014 
4015 	return 0;
4016 
4017 err_fdir_rule:
4018 	ice_fdir_release_flows(&pf->hw);
4019 	ice_vsi_close(ctrl_vsi);
4020 err_vsi_open:
4021 	ice_vsi_release(ctrl_vsi);
4022 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4023 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4024 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4025 	}
4026 	return err;
4027 }
4028 
4029 /**
4030  * ice_get_opt_fw_name - return optional firmware file name or NULL
4031  * @pf: pointer to the PF instance
4032  */
4033 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4034 {
4035 	/* Optional firmware name is the same as the default, with an additional
4036 	 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
4037 	 */
4038 	struct pci_dev *pdev = pf->pdev;
4039 	char *opt_fw_filename;
4040 	u64 dsn;
4041 
4042 	/* Determine the name of the optional file using the DSN (two
4043 	 * dwords following the start of the DSN Capability).
4044 	 */
4045 	dsn = pci_get_dsn(pdev);
4046 	if (!dsn)
4047 		return NULL;
4048 
4049 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4050 	if (!opt_fw_filename)
4051 		return NULL;
4052 
4053 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4054 		 ICE_DDP_PKG_PATH, dsn);
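	/* e.g. a DSN of 0x0123456789abcdef yields
	 * "intel/ice/ddp/ice-0123456789abcdef.pkg"
	 */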
4055 
4056 	return opt_fw_filename;
4057 }
4058 
4059 /**
4060  * ice_request_fw - request and load the DDP package file
4061  * @pf: pointer to the PF instance
4062  */
4063 static void ice_request_fw(struct ice_pf *pf)
4064 {
4065 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4066 	const struct firmware *firmware = NULL;
4067 	struct device *dev = ice_pf_to_dev(pf);
4068 	int err = 0;
4069 
4070 	/* An optional device-specific DDP package (if present) overrides the
4071 	 * default DDP package file. The kernel logs a debug message if the file
4072 	 * doesn't exist and warning messages for other errors.
4073 	 */
4074 	if (opt_fw_filename) {
4075 		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4076 		if (err) {
4077 			kfree(opt_fw_filename);
4078 			goto dflt_pkg_load;
4079 		}
4080 
4081 		/* request for firmware was successful. Download to device */
4082 		ice_load_pkg(firmware, pf);
4083 		kfree(opt_fw_filename);
4084 		release_firmware(firmware);
4085 		return;
4086 	}
4087 
4088 dflt_pkg_load:
4089 	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4090 	if (err) {
4091 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4092 		return;
4093 	}
4094 
4095 	/* request for firmware was successful. Download to device */
4096 	ice_load_pkg(firmware, pf);
4097 	release_firmware(firmware);
4098 }
4099 
4100 /**
4101  * ice_print_wake_reason - show the wake up cause in the log
4102  * @pf: pointer to the PF struct
4103  */
4104 static void ice_print_wake_reason(struct ice_pf *pf)
4105 {
4106 	u32 wus = pf->wakeup_reason;
4107 	const char *wake_str;
4108 
4109 	/* if no wake event, nothing to print */
4110 	if (!wus)
4111 		return;
4112 
4113 	if (wus & PFPM_WUS_LNKC_M)
4114 		wake_str = "Link\n";
4115 	else if (wus & PFPM_WUS_MAG_M)
4116 		wake_str = "Magic Packet\n";
4117 	else if (wus & PFPM_WUS_MNG_M)
4118 		wake_str = "Management\n";
4119 	else if (wus & PFPM_WUS_FW_RST_WK_M)
4120 		wake_str = "Firmware Reset\n";
4121 	else
4122 		wake_str = "Unknown\n";
4123 
4124 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4125 }
4126 
4127 /**
4128  * ice_register_netdev - register netdev and devlink port
4129  * @pf: pointer to the PF struct
4130  */
4131 static int ice_register_netdev(struct ice_pf *pf)
4132 {
4133 	struct ice_vsi *vsi;
4134 	int err = 0;
4135 
4136 	vsi = ice_get_main_vsi(pf);
4137 	if (!vsi || !vsi->netdev)
4138 		return -EIO;
4139 
4140 	err = register_netdev(vsi->netdev);
4141 	if (err)
4142 		goto err_register_netdev;
4143 
4144 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4145 	netif_carrier_off(vsi->netdev);
4146 	netif_tx_stop_all_queues(vsi->netdev);
4147 	err = ice_devlink_create_port(vsi);
4148 	if (err)
4149 		goto err_devlink_create;
4150 
4151 	devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
4152 
4153 	return 0;
4154 err_devlink_create:
4155 	unregister_netdev(vsi->netdev);
4156 	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4157 err_register_netdev:
4158 	free_netdev(vsi->netdev);
4159 	vsi->netdev = NULL;
4160 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4161 	return err;
4162 }
4163 
4164 /**
4165  * ice_probe - Device initialization routine
4166  * @pdev: PCI device information struct
4167  * @ent: entry in ice_pci_tbl
4168  *
4169  * Returns 0 on success, negative on failure
4170  */
4171 static int
4172 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4173 {
4174 	struct device *dev = &pdev->dev;
4175 	struct ice_pf *pf;
4176 	struct ice_hw *hw;
4177 	int i, err;
4178 
4179 	/* this driver uses devres, see
4180 	 * Documentation/driver-api/driver-model/devres.rst
4181 	 */
4182 	err = pcim_enable_device(pdev);
4183 	if (err)
4184 		return err;
4185 
4186 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
4187 	if (err) {
4188 		dev_err(dev, "BAR0 I/O map error %d\n", err);
4189 		return err;
4190 	}
4191 
4192 	pf = ice_allocate_pf(dev);
4193 	if (!pf)
4194 		return -ENOMEM;
4195 
4196 	/* set up for high or low DMA */
4197 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4198 	if (err)
4199 		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4200 	if (err) {
4201 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4202 		return err;
4203 	}
4204 
4205 	pci_enable_pcie_error_reporting(pdev);
4206 	pci_set_master(pdev);
4207 
4208 	pf->pdev = pdev;
4209 	pci_set_drvdata(pdev, pf);
4210 	set_bit(ICE_DOWN, pf->state);
4211 	/* Disable service task until DOWN bit is cleared */
4212 	set_bit(ICE_SERVICE_DIS, pf->state);
4213 
4214 	hw = &pf->hw;
4215 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4216 	pci_save_state(pdev);
4217 
4218 	hw->back = pf;
4219 	hw->vendor_id = pdev->vendor;
4220 	hw->device_id = pdev->device;
4221 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4222 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
4223 	hw->subsystem_device_id = pdev->subsystem_device;
4224 	hw->bus.device = PCI_SLOT(pdev->devfn);
4225 	hw->bus.func = PCI_FUNC(pdev->devfn);
4226 	ice_set_ctrlq_len(hw);
4227 
4228 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4229 
4230 	err = ice_devlink_register(pf);
4231 	if (err) {
4232 		dev_err(dev, "ice_devlink_register failed: %d\n", err);
4233 		goto err_exit_unroll;
4234 	}
4235 
4236 #ifndef CONFIG_DYNAMIC_DEBUG
4237 	if (debug < -1)
4238 		hw->debug_mask = debug;
4239 #endif
4240 
4241 	err = ice_init_hw(hw);
4242 	if (err) {
4243 		dev_err(dev, "ice_init_hw failed: %d\n", err);
4244 		err = -EIO;
4245 		goto err_exit_unroll;
4246 	}
4247 
4248 	ice_request_fw(pf);
4249 
4250 	/* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
4251 	 * set in pf->flags, which will cause ice_is_safe_mode to return
4252 	 * true
4253 	 */
4254 	if (ice_is_safe_mode(pf)) {
4255 		dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
4256 		/* we already got function/device capabilities but these don't
4257 		 * reflect what the driver needs to do in safe mode. Instead of
4258 		 * adding conditional logic everywhere to ignore these
4259 		 * device/function capabilities, override them.
4260 		 */
4261 		ice_set_safe_mode_caps(hw);
4262 	}
4263 
4264 	err = ice_init_pf(pf);
4265 	if (err) {
4266 		dev_err(dev, "ice_init_pf failed: %d\n", err);
4267 		goto err_init_pf_unroll;
4268 	}
4269 
4270 	ice_devlink_init_regions(pf);
4271 
4272 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4273 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4274 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4275 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4276 	i = 0;
4277 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4278 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4279 			pf->hw.tnl.valid_count[TNL_VXLAN];
4280 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4281 			UDP_TUNNEL_TYPE_VXLAN;
4282 		i++;
4283 	}
4284 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4285 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4286 			pf->hw.tnl.valid_count[TNL_GENEVE];
4287 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4288 			UDP_TUNNEL_TYPE_GENEVE;
4289 		i++;
4290 	}
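
	/* At most two tables are populated above; e.g. a DDP package exposing
	 * both tunnel types yields tables[0] for VXLAN and tables[1] for
	 * GENEVE, each sized by the package's valid_count.
	 */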
4291 
4292 	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4293 	if (!pf->num_alloc_vsi) {
4294 		err = -EIO;
4295 		goto err_init_pf_unroll;
4296 	}
4297 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4298 		dev_warn(&pf->pdev->dev,
4299 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4300 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4301 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4302 	}
4303 
4304 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4305 			       GFP_KERNEL);
4306 	if (!pf->vsi) {
4307 		err = -ENOMEM;
4308 		goto err_init_pf_unroll;
4309 	}
4310 
4311 	err = ice_init_interrupt_scheme(pf);
4312 	if (err) {
4313 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4314 		err = -EIO;
4315 		goto err_init_vsi_unroll;
4316 	}
4317 
4318 	/* In case of MSI-X we are going to set up the misc vector right here
4319 	 * to handle admin queue events etc. In case of legacy and MSI
4320 	 * the misc functionality and queue processing are combined in
4321 	 * the same vector, and that gets set up at open.
4322 	 */
4323 	err = ice_req_irq_msix_misc(pf);
4324 	if (err) {
4325 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4326 		goto err_init_interrupt_unroll;
4327 	}
4328 
4329 	/* create switch struct for the switch element created by FW on boot */
4330 	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4331 	if (!pf->first_sw) {
4332 		err = -ENOMEM;
4333 		goto err_msix_misc_unroll;
4334 	}
4335 
4336 	if (hw->evb_veb)
4337 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4338 	else
4339 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4340 
4341 	pf->first_sw->pf = pf;
4342 
4343 	/* record the sw_id available for later use */
4344 	pf->first_sw->sw_id = hw->port_info->sw_id;
4345 
4346 	err = ice_setup_pf_sw(pf);
4347 	if (err) {
4348 		dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4349 		goto err_alloc_sw_unroll;
4350 	}
4351 
4352 	clear_bit(ICE_SERVICE_DIS, pf->state);
4353 
4354 	/* tell the firmware we are up */
4355 	err = ice_send_version(pf);
4356 	if (err) {
4357 		dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4358 			UTS_RELEASE, err);
4359 		goto err_send_version_unroll;
4360 	}
4361 
4362 	/* since everything is good, start the service timer */
4363 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4364 
4365 	err = ice_init_link_events(pf->hw.port_info);
4366 	if (err) {
4367 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4368 		goto err_send_version_unroll;
4369 	}
4370 
4371 	/* not a fatal error if this fails */
4372 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4373 	if (err)
4374 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4375 
4376 	/* not a fatal error if this fails */
4377 	err = ice_update_link_info(pf->hw.port_info);
4378 	if (err)
4379 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4380 
4381 	ice_init_link_dflt_override(pf->hw.port_info);
4382 
4383 	ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err);
4384 
4385 	/* if media available, initialize PHY settings */
4386 	if (pf->hw.port_info->phy.link_info.link_info &
4387 	    ICE_AQ_MEDIA_AVAILABLE) {
4388 		/* not a fatal error if this fails */
4389 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4390 		if (err)
4391 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4392 
4393 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4394 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4395 
4396 			if (vsi)
4397 				ice_configure_phy(vsi);
4398 		}
4399 	} else {
4400 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4401 	}
4402 
4403 	ice_verify_cacheline_size(pf);
4404 
4405 	/* Save wakeup reason register for later use */
4406 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4407 
4408 	/* check for a power management event */
4409 	ice_print_wake_reason(pf);
4410 
4411 	/* clear wake status, all bits */
4412 	wr32(hw, PFPM_WUS, U32_MAX);
4413 
4414 	/* Disable WoL at init, wait for user to enable */
4415 	device_set_wakeup_enable(dev, false);
4416 
4417 	if (ice_is_safe_mode(pf)) {
4418 		ice_set_safe_mode_vlan_cfg(pf);
4419 		goto probe_done;
4420 	}
4421 
4422 	/* initialize DDP driven features */
4423 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4424 		ice_ptp_init(pf);
4425 
4426 	/* Note: Flow director init failure is non-fatal to load */
4427 	if (ice_init_fdir(pf))
4428 		dev_err(dev, "could not initialize flow director\n");
4429 
4430 	/* Note: DCB init failure is non-fatal to load */
4431 	if (ice_init_pf_dcb(pf, false)) {
4432 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4433 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4434 	} else {
4435 		ice_cfg_lldp_mib_change(&pf->hw, true);
4436 	}
4437 
4438 	if (ice_init_lag(pf))
4439 		dev_warn(dev, "Failed to init link aggregation support\n");
4440 
4441 	/* print PCI link speed and width */
4442 	pcie_print_link_status(pf->pdev);
4443 
4444 probe_done:
4445 	err = ice_register_netdev(pf);
4446 	if (err)
4447 		goto err_netdev_reg;
4448 
4449 	/* ready to go, so clear down state bit */
4450 	clear_bit(ICE_DOWN, pf->state);
4451 	if (ice_is_aux_ena(pf)) {
4452 		pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
4453 		if (pf->aux_idx < 0) {
4454 			dev_err(dev, "Failed to allocate device ID for AUX driver\n");
4455 			err = -ENOMEM;
4456 			goto err_netdev_reg;
4457 		}
4458 
4459 		err = ice_init_rdma(pf);
4460 		if (err) {
4461 			dev_err(dev, "Failed to initialize RDMA: %d\n", err);
4462 			err = -EIO;
4463 			goto err_init_aux_unroll;
4464 		}
4465 	} else {
4466 		dev_warn(dev, "RDMA is not supported on this device\n");
4467 	}
4468 
4469 	return 0;
4470 
4471 err_init_aux_unroll:
4472 	pf->adev = NULL;
4473 	ida_free(&ice_aux_ida, pf->aux_idx);
4474 err_netdev_reg:
4475 err_send_version_unroll:
4476 	ice_vsi_release_all(pf);
4477 err_alloc_sw_unroll:
4478 	set_bit(ICE_SERVICE_DIS, pf->state);
4479 	set_bit(ICE_DOWN, pf->state);
4480 	devm_kfree(dev, pf->first_sw);
4481 err_msix_misc_unroll:
4482 	ice_free_irq_msix_misc(pf);
4483 err_init_interrupt_unroll:
4484 	ice_clear_interrupt_scheme(pf);
4485 err_init_vsi_unroll:
4486 	devm_kfree(dev, pf->vsi);
4487 err_init_pf_unroll:
4488 	ice_deinit_pf(pf);
4489 	ice_devlink_destroy_regions(pf);
4490 	ice_deinit_hw(hw);
4491 err_exit_unroll:
4492 	ice_devlink_unregister(pf);
4493 	pci_disable_pcie_error_reporting(pdev);
4494 	pci_disable_device(pdev);
4495 	return err;
4496 }
4497 
4498 /**
4499  * ice_set_wake - enable or disable Wake on LAN
4500  * @pf: pointer to the PF struct
4501  *
4502  * Simple helper for WoL control
4503  */
4504 static void ice_set_wake(struct ice_pf *pf)
4505 {
4506 	struct ice_hw *hw = &pf->hw;
4507 	bool wol = pf->wol_ena;
4508 
4509 	/* clear wake state, otherwise new wake events won't fire */
4510 	wr32(hw, PFPM_WUS, U32_MAX);
4511 
4512 	/* enable / disable APM wake up, no RMW needed */
4513 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4514 
4515 	/* set magic packet filter enabled */
4516 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4517 }
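
/* e.g. with pf->wol_ena set, the writes above arm both APM wake (PFPM_APM)
 * and the magic packet filter (PFPM_WUFC); with it clear, both are zeroed.
 */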
4518 
4519 /**
4520  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
4521  * @pf: pointer to the PF struct
4522  *
4523  * Issue firmware command to enable multicast magic wake, making
4524  * sure that any locally administered address (LAA) is used for
4525  * wake, and that PF reset doesn't undo the LAA.
4526  */
4527 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4528 {
4529 	struct device *dev = ice_pf_to_dev(pf);
4530 	struct ice_hw *hw = &pf->hw;
4531 	enum ice_status status;
4532 	u8 mac_addr[ETH_ALEN];
4533 	struct ice_vsi *vsi;
4534 	u8 flags;
4535 
4536 	if (!pf->wol_ena)
4537 		return;
4538 
4539 	vsi = ice_get_main_vsi(pf);
4540 	if (!vsi)
4541 		return;
4542 
4543 	/* Get current MAC address in case it's an LAA */
4544 	if (vsi->netdev)
4545 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4546 	else
4547 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4548 
4549 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4550 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4551 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4552 
4553 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4554 	if (status)
4555 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
4556 			ice_stat_str(status),
4557 			ice_aq_str(hw->adminq.sq_last_status));
4558 }
4559 
4560 /**
4561  * ice_remove - Device removal routine
4562  * @pdev: PCI device information struct
4563  */
4564 static void ice_remove(struct pci_dev *pdev)
4565 {
4566 	struct ice_pf *pf = pci_get_drvdata(pdev);
4567 	int i;
4568 
4569 	if (!pf)
4570 		return;
4571 
4572 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4573 		if (!ice_is_reset_in_progress(pf->state))
4574 			break;
4575 		msleep(100);
4576 	}
4577 
4578 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4579 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
4580 		ice_free_vfs(pf);
4581 	}
4582 
4583 	ice_service_task_stop(pf);
4584 
4585 	ice_aq_cancel_waiting_tasks(pf);
4586 	ice_unplug_aux_dev(pf);
4587 	ida_free(&ice_aux_ida, pf->aux_idx);
4588 	set_bit(ICE_DOWN, pf->state);
4589 
4590 	mutex_destroy(&pf->hw.fdir_fltr_lock);
4591 	ice_deinit_lag(pf);
4592 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4593 		ice_ptp_release(pf);
4594 	if (!ice_is_safe_mode(pf))
4595 		ice_remove_arfs(pf);
4596 	ice_setup_mc_magic_wake(pf);
4597 	ice_vsi_release_all(pf);
4598 	ice_set_wake(pf);
4599 	ice_free_irq_msix_misc(pf);
4600 	ice_for_each_vsi(pf, i) {
4601 		if (!pf->vsi[i])
4602 			continue;
4603 		ice_vsi_free_q_vectors(pf->vsi[i]);
4604 	}
4605 	ice_deinit_pf(pf);
4606 	ice_devlink_destroy_regions(pf);
4607 	ice_deinit_hw(&pf->hw);
4608 	ice_devlink_unregister(pf);
4609 
4610 	/* Issue a PFR as part of the prescribed driver unload flow.  Do not
4611 	 * do it via ice_schedule_reset() since there is no need to rebuild
4612 	 * and the service task is already stopped.
4613 	 */
4614 	ice_reset(&pf->hw, ICE_RESET_PFR);
4615 	pci_wait_for_pending_transaction(pdev);
4616 	ice_clear_interrupt_scheme(pf);
4617 	pci_disable_pcie_error_reporting(pdev);
4618 	pci_disable_device(pdev);
4619 }
4620 
4621 /**
4622  * ice_shutdown - PCI callback for shutting down device
4623  * @pdev: PCI device information struct
4624  */
4625 static void ice_shutdown(struct pci_dev *pdev)
4626 {
4627 	struct ice_pf *pf = pci_get_drvdata(pdev);
4628 
4629 	ice_remove(pdev);
4630 
4631 	if (system_state == SYSTEM_POWER_OFF) {
4632 		pci_wake_from_d3(pdev, pf->wol_ena);
4633 		pci_set_power_state(pdev, PCI_D3hot);
4634 	}
4635 }
4636 
4637 #ifdef CONFIG_PM
4638 /**
4639  * ice_prepare_for_shutdown - prep for PCI shutdown
4640  * @pf: board private structure
4641  *
4642  * Inform or close all dependent features in prep for PCI device shutdown
4643  */
4644 static void ice_prepare_for_shutdown(struct ice_pf *pf)
4645 {
4646 	struct ice_hw *hw = &pf->hw;
4647 	u32 v;
4648 
4649 	/* Notify VFs of impending reset */
4650 	if (ice_check_sq_alive(hw, &hw->mailboxq))
4651 		ice_vc_notify_reset(pf);
4652 
4653 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4654 
4655 	/* disable the VSIs and their queues that are not already DOWN */
4656 	ice_pf_dis_all_vsi(pf, false);
4657 
4658 	ice_for_each_vsi(pf, v)
4659 		if (pf->vsi[v])
4660 			pf->vsi[v]->vsi_num = 0;
4661 
4662 	ice_shutdown_all_ctrlq(hw);
4663 }
4664 
4665 /**
4666  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4667  * @pf: board private structure to reinitialize
4668  *
4669  * This routine reinitializes the interrupt scheme that was cleared during
4670  * the power management suspend callback.
4671  *
4672  * This should be called during resume routine to re-allocate the q_vectors
4673  * and reacquire interrupts.
4674  */
4675 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4676 {
4677 	struct device *dev = ice_pf_to_dev(pf);
4678 	int ret, v;
4679 
4680 	/* Since we clear the MSI-X flag during suspend, we need to
4681 	 * set it back during resume...
4682 	 */
4683 
4684 	ret = ice_init_interrupt_scheme(pf);
4685 	if (ret) {
4686 		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4687 		return ret;
4688 	}
4689 
4690 	/* Remap vectors and rings, after successful re-init interrupts */
4691 	ice_for_each_vsi(pf, v) {
4692 		if (!pf->vsi[v])
4693 			continue;
4694 
4695 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4696 		if (ret)
4697 			goto err_reinit;
4698 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4699 	}
4700 
4701 	ret = ice_req_irq_msix_misc(pf);
4702 	if (ret) {
4703 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4704 			ret);
4705 		goto err_reinit;
4706 	}
4707 
4708 	return 0;
4709 
4710 err_reinit:
4711 	while (v--)
4712 		if (pf->vsi[v])
4713 			ice_vsi_free_q_vectors(pf->vsi[v]);
4714 
4715 	return ret;
4716 }
4717 
4718 /**
4719  * ice_suspend
4720  * @dev: generic device information structure
4721  *
4722  * Power Management callback to quiesce the device and prepare
4723  * for D3 transition.
4724  */
4725 static int __maybe_unused ice_suspend(struct device *dev)
4726 {
4727 	struct pci_dev *pdev = to_pci_dev(dev);
4728 	struct ice_pf *pf;
4729 	int disabled, v;
4730 
4731 	pf = pci_get_drvdata(pdev);
4732 
4733 	if (!ice_pf_state_is_nominal(pf)) {
4734 		dev_err(dev, "Device is not ready, no need to suspend it\n");
4735 		return -EBUSY;
4736 	}
4737 
4738 	/* Stop watchdog tasks until resume completion.
4739 	 * Even though it is most likely that the service task is
4740 	 * disabled if the device is suspended or down, the service task's
4741 	 * state is controlled by a different state bit, and we should
4742 	 * store and honor whatever state that bit is in at this point.
4743 	 */
4744 	disabled = ice_service_task_stop(pf);
4745 
4746 	ice_unplug_aux_dev(pf);
4747 
4748 	/* If already suspended, there is nothing to do */
4749 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
4750 		if (!disabled)
4751 			ice_service_task_restart(pf);
4752 		return 0;
4753 	}
4754 
4755 	if (test_bit(ICE_DOWN, pf->state) ||
4756 	    ice_is_reset_in_progress(pf->state)) {
4757 		dev_err(dev, "can't suspend device in reset or already down\n");
4758 		if (!disabled)
4759 			ice_service_task_restart(pf);
4760 		return 0;
4761 	}
4762 
4763 	ice_setup_mc_magic_wake(pf);
4764 
4765 	ice_prepare_for_shutdown(pf);
4766 
4767 	ice_set_wake(pf);
4768 
4769 	/* Free vectors, clear the interrupt scheme and release IRQs
4770 	 * for proper hibernation, especially with large number of CPUs.
4771 	 * Otherwise hibernation might fail when mapping all the vectors back
4772 	 * to CPU0.
4773 	 */
4774 	ice_free_irq_msix_misc(pf);
4775 	ice_for_each_vsi(pf, v) {
4776 		if (!pf->vsi[v])
4777 			continue;
4778 		ice_vsi_free_q_vectors(pf->vsi[v]);
4779 	}
4780 	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
4781 	ice_clear_interrupt_scheme(pf);
4782 
4783 	pci_save_state(pdev);
4784 	pci_wake_from_d3(pdev, pf->wol_ena);
4785 	pci_set_power_state(pdev, PCI_D3hot);
4786 	return 0;
4787 }
4788 
4789 /**
4790  * ice_resume - PM callback for waking up from D3
4791  * @dev: generic device information structure
4792  */
4793 static int __maybe_unused ice_resume(struct device *dev)
4794 {
4795 	struct pci_dev *pdev = to_pci_dev(dev);
4796 	enum ice_reset_req reset_type;
4797 	struct ice_pf *pf;
4798 	struct ice_hw *hw;
4799 	int ret;
4800 
4801 	pci_set_power_state(pdev, PCI_D0);
4802 	pci_restore_state(pdev);
4803 	pci_save_state(pdev);
4804 
4805 	if (!pci_device_is_present(pdev))
4806 		return -ENODEV;
4807 
4808 	ret = pci_enable_device_mem(pdev);
4809 	if (ret) {
4810 		dev_err(dev, "Cannot enable device after suspend\n");
4811 		return ret;
4812 	}
4813 
4814 	pf = pci_get_drvdata(pdev);
4815 	hw = &pf->hw;
4816 
4817 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4818 	ice_print_wake_reason(pf);
4819 
4820 	/* We cleared the interrupt scheme when we suspended, so we need to
4821 	 * restore it now to resume device functionality.
4822 	 */
4823 	ret = ice_reinit_interrupt_scheme(pf);
4824 	if (ret)
4825 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
4826 
4827 	clear_bit(ICE_DOWN, pf->state);
4828 	/* Now perform PF reset and rebuild */
4829 	reset_type = ICE_RESET_PFR;
4830 	/* re-enable service task for reset, but allow reset to schedule it */
4831 	clear_bit(ICE_SERVICE_DIS, pf->state);
4832 
4833 	if (ice_schedule_reset(pf, reset_type))
4834 		dev_err(dev, "Reset during resume failed.\n");
4835 
4836 	clear_bit(ICE_SUSPENDED, pf->state);
4837 	ice_service_task_restart(pf);
4838 
4839 	/* Restart the service task */
4840 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4841 
4842 	return 0;
4843 }
4844 #endif /* CONFIG_PM */
4845 
4846 /**
4847  * ice_pci_err_detected - warning that PCI error has been detected
4848  * @pdev: PCI device information struct
4849  * @err: the type of PCI error
4850  *
4851  * Called to warn that something happened on the PCI bus and the error handling
4852  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
4853  */
4854 static pci_ers_result_t
4855 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
4856 {
4857 	struct ice_pf *pf = pci_get_drvdata(pdev);
4858 
4859 	if (!pf) {
4860 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
4861 			__func__, err);
4862 		return PCI_ERS_RESULT_DISCONNECT;
4863 	}
4864 
4865 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
4866 		ice_service_task_stop(pf);
4867 
4868 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
4869 			set_bit(ICE_PFR_REQ, pf->state);
4870 			ice_prepare_for_reset(pf);
4871 		}
4872 	}
4873 
4874 	return PCI_ERS_RESULT_NEED_RESET;
4875 }
4876 
4877 /**
4878  * ice_pci_err_slot_reset - a PCI slot reset has just happened
4879  * @pdev: PCI device information struct
4880  *
4881  * Called to determine if the driver can recover from the PCI slot reset by
4882  * using a register read to determine if the device is recoverable.
4883  */
4884 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
4885 {
4886 	struct ice_pf *pf = pci_get_drvdata(pdev);
4887 	pci_ers_result_t result;
4888 	int err;
4889 	u32 reg;
4890 
4891 	err = pci_enable_device_mem(pdev);
4892 	if (err) {
4893 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
4894 			err);
4895 		result = PCI_ERS_RESULT_DISCONNECT;
4896 	} else {
4897 		pci_set_master(pdev);
4898 		pci_restore_state(pdev);
4899 		pci_save_state(pdev);
4900 		pci_wake_from_d3(pdev, false);
4901 
4902 		/* Check for life */
4903 		reg = rd32(&pf->hw, GLGEN_RTRIG);
4904 		if (!reg)
4905 			result = PCI_ERS_RESULT_RECOVERED;
4906 		else
4907 			result = PCI_ERS_RESULT_DISCONNECT;
4908 	}
4909 
4910 	/* clearing the non-fatal AER status may fail; non-fatal, continue */
4911 	err = pci_aer_clear_nonfatal_status(pdev);
4912 	if (err)
4913 		dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
4914 			err);
4915 
4916 	return result;
4917 }
4918 
4919 /**
4920  * ice_pci_err_resume - restart operations after PCI error recovery
4921  * @pdev: PCI device information struct
4922  *
4923  * Called to allow the driver to bring things back up after PCI error and/or
4924  * reset recovery have finished
4925  */
4926 static void ice_pci_err_resume(struct pci_dev *pdev)
4927 {
4928 	struct ice_pf *pf = pci_get_drvdata(pdev);
4929 
4930 	if (!pf) {
4931 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
4932 			__func__);
4933 		return;
4934 	}
4935 
4936 	if (test_bit(ICE_SUSPENDED, pf->state)) {
4937 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
4938 			__func__);
4939 		return;
4940 	}
4941 
4942 	ice_restore_all_vfs_msi_state(pdev);
4943 
4944 	ice_do_reset(pf, ICE_RESET_PFR);
4945 	ice_service_task_restart(pf);
4946 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4947 }
4948 
4949 /**
4950  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
4951  * @pdev: PCI device information struct
4952  */
4953 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
4954 {
4955 	struct ice_pf *pf = pci_get_drvdata(pdev);
4956 
4957 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
4958 		ice_service_task_stop(pf);
4959 
4960 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
4961 			set_bit(ICE_PFR_REQ, pf->state);
4962 			ice_prepare_for_reset(pf);
4963 		}
4964 	}
4965 }
4966 
4967 /**
4968  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
4969  * @pdev: PCI device information struct
4970  */
4971 static void ice_pci_err_reset_done(struct pci_dev *pdev)
4972 {
4973 	ice_pci_err_resume(pdev);
4974 }
4975 
4976 /* ice_pci_tbl - PCI Device ID Table
4977  *
4978  * Wildcard entries (PCI_ANY_ID) should come last
4979  * Last entry must be all 0s
4980  *
4981  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
4982  *   Class, Class Mask, private data (not used) }
4983  */
4984 static const struct pci_device_id ice_pci_tbl[] = {
4985 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
4986 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
4987 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
4988 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
4989 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
4990 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
4991 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
4992 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
4993 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
4994 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
4995 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
4996 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
4997 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
4998 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
4999 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5000 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5001 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5002 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5003 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5004 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5005 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5006 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5007 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5008 	/* required last entry */
5009 	{ 0, }
5010 };
5011 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5012 
5013 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5014 
5015 static const struct pci_error_handlers ice_pci_err_handler = {
5016 	.error_detected = ice_pci_err_detected,
5017 	.slot_reset = ice_pci_err_slot_reset,
5018 	.reset_prepare = ice_pci_err_reset_prepare,
5019 	.reset_done = ice_pci_err_reset_done,
5020 	.resume = ice_pci_err_resume
5021 };
5022 
5023 static struct pci_driver ice_driver = {
5024 	.name = KBUILD_MODNAME,
5025 	.id_table = ice_pci_tbl,
5026 	.probe = ice_probe,
5027 	.remove = ice_remove,
5028 #ifdef CONFIG_PM
5029 	.driver.pm = &ice_pm_ops,
5030 #endif /* CONFIG_PM */
5031 	.shutdown = ice_shutdown,
5032 	.sriov_configure = ice_sriov_configure,
5033 	.err_handler = &ice_pci_err_handler
5034 };
5035 
5036 /**
5037  * ice_module_init - Driver registration routine
5038  *
5039  * ice_module_init is the first routine called when the driver is
5040  * loaded. It creates the driver workqueue and registers with the PCI
5041  * subsystem.
5041  */
5042 static int __init ice_module_init(void)
5043 {
5044 	int status;
5045 
5046 	pr_info("%s\n", ice_driver_string);
5047 	pr_info("%s\n", ice_copyright);
5048 
5049 	ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
5050 	if (!ice_wq) {
5051 		pr_err("Failed to create workqueue\n");
5052 		return -ENOMEM;
5053 	}
5054 
5055 	status = pci_register_driver(&ice_driver);
5056 	if (status) {
5057 		pr_err("failed to register PCI driver, err %d\n", status);
5058 		destroy_workqueue(ice_wq);
5059 	}
5060 
5061 	return status;
5062 }
5063 module_init(ice_module_init);
5064 
5065 /**
5066  * ice_module_exit - Driver exit cleanup routine
5067  *
5068  * ice_module_exit is called just before the driver is removed
5069  * from memory.
5070  */
5071 static void __exit ice_module_exit(void)
5072 {
5073 	pci_unregister_driver(&ice_driver);
5074 	destroy_workqueue(ice_wq);
5075 	pr_info("module unloaded\n");
5076 }
5077 module_exit(ice_module_exit);
5078 
5079 /**
5080  * ice_set_mac_address - NDO callback to set MAC address
5081  * @netdev: network interface device structure
5082  * @pi: pointer to an address structure
5083  *
5084  * Returns 0 on success, negative on failure
5085  */
5086 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5087 {
5088 	struct ice_netdev_priv *np = netdev_priv(netdev);
5089 	struct ice_vsi *vsi = np->vsi;
5090 	struct ice_pf *pf = vsi->back;
5091 	struct ice_hw *hw = &pf->hw;
5092 	struct sockaddr *addr = pi;
5093 	enum ice_status status;
5094 	u8 flags = 0;
5095 	int err = 0;
5096 	u8 *mac;
5097 
5098 	mac = (u8 *)addr->sa_data;
5099 
5100 	if (!is_valid_ether_addr(mac))
5101 		return -EADDRNOTAVAIL;
5102 
5103 	if (ether_addr_equal(netdev->dev_addr, mac)) {
5104 		netdev_warn(netdev, "already using mac %pM\n", mac);
5105 		return 0;
5106 	}
5107 
5108 	if (test_bit(ICE_DOWN, pf->state) ||
5109 	    ice_is_reset_in_progress(pf->state)) {
5110 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
5111 			   mac);
5112 		return -EBUSY;
5113 	}
5114 
5115 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
5116 	status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
5117 	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
5118 		err = -EADDRNOTAVAIL;
5119 		goto err_update_filters;
5120 	}
5121 
5122 	/* Add filter for new MAC. If filter exists, return success */
5123 	status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5124 	if (status == ICE_ERR_ALREADY_EXISTS) {
5125 		/* Although this MAC filter is already present in hardware it's
5126 		 * possible in some cases (e.g. bonding) that dev_addr was
5127 		 * modified outside of the driver and needs to be restored back
5128 		 * to this value.
5129 		 */
5130 		memcpy(netdev->dev_addr, mac, netdev->addr_len);
5131 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5132 		return 0;
5133 	}
5134 
5135 	/* error if the new filter addition failed */
5136 	if (status)
5137 		err = -EADDRNOTAVAIL;
5138 
5139 err_update_filters:
5140 	if (err) {
5141 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5142 			   mac);
5143 		return err;
5144 	}
5145 
5146 	/* change the netdev's MAC address */
5147 	memcpy(netdev->dev_addr, mac, netdev->addr_len);
5148 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5149 		   netdev->dev_addr);
5150 
5151 	/* write new MAC address to the firmware */
5152 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5153 	status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5154 	if (status) {
5155 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
5156 			   mac, ice_stat_str(status));
5157 	}
5158 	return 0;
5159 }
5160 
5161 /**
5162  * ice_set_rx_mode - NDO callback to set the netdev filters
5163  * @netdev: network interface device structure
5164  */
5165 static void ice_set_rx_mode(struct net_device *netdev)
5166 {
5167 	struct ice_netdev_priv *np = netdev_priv(netdev);
5168 	struct ice_vsi *vsi = np->vsi;
5169 
5170 	if (!vsi)
5171 		return;
5172 
5173 	/* Set the flags to synchronize filters;
5174 	 * ndo_set_rx_mode may be triggered even without a change in netdev
5175 	 * flags
5176 	 */
5177 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5178 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5179 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5180 
5181 	/* schedule our worker thread which will take care of
5182 	 * applying the new filter changes
5183 	 */
5184 	ice_service_task_schedule(vsi->back);
5185 }
5186 
5187 /**
5188  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5189  * @netdev: network interface device structure
5190  * @queue_index: Queue ID
5191  * @maxrate: maximum bandwidth in Mbps
5192  */
5193 static int
5194 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5195 {
5196 	struct ice_netdev_priv *np = netdev_priv(netdev);
5197 	struct ice_vsi *vsi = np->vsi;
5198 	enum ice_status status;
5199 	u16 q_handle;
5200 	u8 tc;
5201 
5202 	/* Validate maxrate requested is within permitted range */
5203 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5204 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5205 			   maxrate, queue_index);
5206 		return -EINVAL;
5207 	}
5208 
5209 	q_handle = vsi->tx_rings[queue_index]->q_handle;
5210 	tc = ice_dcb_get_tc(vsi, queue_index);
5211 
5212 	/* Set BW back to default when the user sets maxrate to 0 */
5213 	if (!maxrate)
5214 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5215 					       q_handle, ICE_MAX_BW);
5216 	else
5217 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5218 					  q_handle, ICE_MAX_BW, maxrate * 1000);
5219 	if (status) {
5220 		netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
5221 			   ice_stat_str(status));
5222 		return -EIO;
5223 	}
5224 
5225 	return 0;
5226 }
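
/* ndo_set_tx_maxrate is driven from the per-queue sysfs attribute, e.g.
 *   echo 500 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 * requests a 500 Mbps cap on Tx queue 0 (eth0 is a placeholder name).
 */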
5227 
5228 /**
5229  * ice_fdb_add - add an entry to the hardware database
5230  * @ndm: the input from the stack
5231  * @tb: pointer to array of nladdr (unused)
5232  * @dev: the net device pointer
5233  * @addr: the MAC address entry being added
5234  * @vid: VLAN ID
5235  * @flags: instructions from stack about fdb operation
5236  * @extack: netlink extended ack
5237  */
5238 static int
5239 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5240 	    struct net_device *dev, const unsigned char *addr, u16 vid,
5241 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5242 {
5243 	int err;
5244 
5245 	if (vid) {
5246 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5247 		return -EINVAL;
5248 	}
5249 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5250 		netdev_err(dev, "FDB only supports static addresses\n");
5251 		return -EINVAL;
5252 	}
5253 
5254 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5255 		err = dev_uc_add_excl(dev, addr);
5256 	else if (is_multicast_ether_addr(addr))
5257 		err = dev_mc_add_excl(dev, addr);
5258 	else
5259 		err = -EINVAL;
5260 
5261 	/* Only return duplicate errors if NLM_F_EXCL is set */
5262 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5263 		err = 0;
5264 
5265 	return err;
5266 }
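
/* Exercised through the rtnetlink FDB interface, e.g. (hypothetical device
 * name):
 *   bridge fdb add 01:00:5e:00:00:42 dev eth0 self permanent
 * which for this multicast address ends up in dev_mc_add_excl() above.
 */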
5267 
5268 /**
5269  * ice_fdb_del - delete an entry from the hardware database
5270  * @ndm: the input from the stack
5271  * @tb: pointer to array of nladdr (unused)
5272  * @dev: the net device pointer
5273  * @addr: the MAC address entry being removed
5274  * @vid: VLAN ID
5275  */
5276 static int
5277 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5278 	    struct net_device *dev, const unsigned char *addr,
5279 	    __always_unused u16 vid)
5280 {
5281 	int err;
5282 
5283 	if (ndm->ndm_state & NUD_PERMANENT) {
5284 		netdev_err(dev, "FDB only supports static addresses\n");
5285 		return -EINVAL;
5286 	}
5287 
5288 	if (is_unicast_ether_addr(addr))
5289 		err = dev_uc_del(dev, addr);
5290 	else if (is_multicast_ether_addr(addr))
5291 		err = dev_mc_del(dev, addr);
5292 	else
5293 		err = -EINVAL;
5294 
5295 	return err;
5296 }
5297 
5298 /**
5299  * ice_set_features - set the netdev feature flags
5300  * @netdev: ptr to the netdev being adjusted
5301  * @features: the feature set that the stack is suggesting
5302  */
5303 static int
5304 ice_set_features(struct net_device *netdev, netdev_features_t features)
5305 {
5306 	struct ice_netdev_priv *np = netdev_priv(netdev);
5307 	struct ice_vsi *vsi = np->vsi;
5308 	struct ice_pf *pf = vsi->back;
5309 	int ret = 0;
5310 
5311 	/* Don't set any netdev advanced features with device in Safe Mode */
5312 	if (ice_is_safe_mode(vsi->back)) {
5313 		dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5314 		return ret;
5315 	}
5316 
5317 	/* Do not change setting during reset */
5318 	if (ice_is_reset_in_progress(pf->state)) {
5319 		dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features is temporarily unavailable.\n");
5320 		return -EBUSY;
5321 	}
5322 
5323 	/* Multiple features can be changed in one call so keep features in
5324 	 * separate if/else statements to guarantee each feature is checked
5325 	 */
5326 	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5327 		ice_vsi_manage_rss_lut(vsi, true);
5328 	else if (!(features & NETIF_F_RXHASH) &&
5329 		 netdev->features & NETIF_F_RXHASH)
5330 		ice_vsi_manage_rss_lut(vsi, false);
5331 
5332 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5333 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5334 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5335 	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5336 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5337 		ret = ice_vsi_manage_vlan_stripping(vsi, false);
5338 
5339 	if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5340 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5341 		ret = ice_vsi_manage_vlan_insertion(vsi);
5342 	else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5343 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5344 		ret = ice_vsi_manage_vlan_insertion(vsi);
5345 
5346 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5347 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5348 		ret = ice_cfg_vlan_pruning(vsi, true, false);
5349 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5350 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5351 		ret = ice_cfg_vlan_pruning(vsi, false, false);
5352 
5353 	if ((features & NETIF_F_NTUPLE) &&
5354 	    !(netdev->features & NETIF_F_NTUPLE)) {
5355 		ice_vsi_manage_fdir(vsi, true);
5356 		ice_init_arfs(vsi);
5357 	} else if (!(features & NETIF_F_NTUPLE) &&
5358 		 (netdev->features & NETIF_F_NTUPLE)) {
5359 		ice_vsi_manage_fdir(vsi, false);
5360 		ice_clear_arfs(vsi);
5361 	}
5362 
5363 	return ret;
5364 }
5365 
5366 /**
5367  * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
5368  * @vsi: VSI to setup VLAN properties for
5369  */
5370 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5371 {
5372 	int ret = 0;
5373 
5374 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
5375 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5376 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
5377 		ret = ice_vsi_manage_vlan_insertion(vsi);
5378 
5379 	return ret;
5380 }
5381 
5382 /**
5383  * ice_vsi_cfg - Setup the VSI
5384  * @vsi: the VSI being configured
5385  *
5386  * Return 0 on success and negative value on error
5387  */
5388 int ice_vsi_cfg(struct ice_vsi *vsi)
5389 {
5390 	int err;
5391 
5392 	if (vsi->netdev) {
5393 		ice_set_rx_mode(vsi->netdev);
5394 
5395 		err = ice_vsi_vlan_setup(vsi);
5396 
5397 		if (err)
5398 			return err;
5399 	}
5400 	ice_vsi_cfg_dcb_rings(vsi);
5401 
5402 	err = ice_vsi_cfg_lan_txqs(vsi);
5403 	if (!err && ice_is_xdp_ena_vsi(vsi))
5404 		err = ice_vsi_cfg_xdp_txqs(vsi);
5405 	if (!err)
5406 		err = ice_vsi_cfg_rxqs(vsi);
5407 
5408 	return err;
5409 }
5410 
5411 /* THEORY OF MODERATION:
5412  * The code below creates custom DIM profiles for use by this driver, because
5413  * the ice hardware works differently than the hardware that DIMLIB was
5414  * originally designed for. ice hardware doesn't have packet count limits
5415  * that can trigger an interrupt, but it *does* have interrupt rate limit
5416  * support, and this code adds that capability to be used by the driver when
5417  * it's using DIMLIB. DIMLIB was always designed to be a suggestion to the
5418  * driver for how to "respond" to traffic and interrupts, so this driver uses
5419  * a slightly different set of moderation parameters to get the best
5420  * performance.
5421  */
5421 struct ice_dim {
5422 	/* the throttle rate for interrupts, basically worst case delay before
5423 	 * an initial interrupt fires, value is stored in microseconds.
5424 	 */
5425 	u16 itr;
5426 	/* the rate limit for interrupts, which can cap a delay from a small
5427 	 * ITR at a certain number of interrupts per second, e.g. a 2us ITR
5428 	 * could yield as much as 500,000 interrupts per second, but with a
5429 	 * 10us rate limit, it limits to 100,000 interrupts per second. Value
5430 	 * is stored in microseconds.
5431 	 */
5432 	u16 intrl;
5433 };
5434 
5435 /* Make a different profile for Rx that doesn't allow quite so aggressive
5436  * moderation at the high end (it maxes out at 128us, or about 8k interrupts
5437  * a second). The INTRL/rate parameters here are only useful to cap small ITR
5438  * values, which is why for larger ITRs - like 128, which can only generate
5439  * 8k interrupts per second - there is no point in rate limiting and the
5440  * values are set to zero. The rate limit values do affect latency, and so
5441  * must be reasonably small so as not to impact latency-sensitive tests.
5442  */
5443 static const struct ice_dim rx_profile[] = {
5444 	{2, 10},
5445 	{8, 16},
5446 	{32, 0},
5447 	{96, 0},
5448 	{128, 0}
5449 };
5450 
5451 /* The transmit profile, which carries the same sorts of values as the
5452  * Rx profile above
5453  */
5454 static const struct ice_dim tx_profile[] = {
5455 	{2, 10},
5456 	{8, 16},
5457 	{64, 0},
5458 	{128, 0},
5459 	{256, 0}
5460 };
5461 
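
/* Illustrative sketch, not used by the driver: how an ITR/INTRL pair from
 * the tables above translates into a worst-case interrupt rate. For the
 * {2, 10} entry, the 2us ITR alone would allow up to 500,000 interrupts
 * per second, but the 10us rate limit caps that at 100,000 per second.
 */
static u32 __maybe_unused
ice_dim_example_max_int_rate(const struct ice_dim *dim)
{
	/* assumes a non-zero ITR, which holds for every table entry above */
	u32 rate = USEC_PER_SEC / dim->itr;

	/* an INTRL of zero means "no rate limit", so only the ITR governs */
	if (dim->intrl)
		rate = min_t(u32, rate, USEC_PER_SEC / dim->intrl);

	return rate;
}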
5462 static void ice_tx_dim_work(struct work_struct *work)
5463 {
5464 	struct ice_ring_container *rc;
5465 	struct ice_q_vector *q_vector;
5466 	struct dim *dim;
5467 	u16 itr, intrl;
5468 
5469 	dim = container_of(work, struct dim, work);
5470 	rc = container_of(dim, struct ice_ring_container, dim);
5471 	q_vector = container_of(rc, struct ice_q_vector, tx);
5472 
5473 	if (dim->profile_ix >= ARRAY_SIZE(tx_profile))
5474 		dim->profile_ix = ARRAY_SIZE(tx_profile) - 1;
5475 
5476 	/* look up the values in our local table */
5477 	itr = tx_profile[dim->profile_ix].itr;
5478 	intrl = tx_profile[dim->profile_ix].intrl;
5479 
5480 	ice_write_itr(rc, itr);
5481 	ice_write_intrl(q_vector, intrl);
5482 
5483 	dim->state = DIM_START_MEASURE;
5484 }
5485 
5486 static void ice_rx_dim_work(struct work_struct *work)
5487 {
5488 	struct ice_ring_container *rc;
5489 	struct ice_q_vector *q_vector;
5490 	struct dim *dim;
5491 	u16 itr, intrl;
5492 
5493 	dim = container_of(work, struct dim, work);
5494 	rc = container_of(dim, struct ice_ring_container, dim);
5495 	q_vector = container_of(rc, struct ice_q_vector, rx);
5496 
5497 	if (dim->profile_ix >= ARRAY_SIZE(rx_profile))
5498 		dim->profile_ix = ARRAY_SIZE(rx_profile) - 1;
5499 
5500 	/* look up the values in our local table */
5501 	itr = rx_profile[dim->profile_ix].itr;
5502 	intrl = rx_profile[dim->profile_ix].intrl;
5503 
5504 	ice_write_itr(rc, itr);
5505 	ice_write_intrl(q_vector, intrl);
5506 
5507 	dim->state = DIM_START_MEASURE;
5508 }
5509 
5510 /**
5511  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5512  * @vsi: the VSI being configured
5513  */
5514 static void ice_napi_enable_all(struct ice_vsi *vsi)
5515 {
5516 	int q_idx;
5517 
5518 	if (!vsi->netdev)
5519 		return;
5520 
5521 	ice_for_each_q_vector(vsi, q_idx) {
5522 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5523 
5524 		INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work);
5525 		q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5526 
5527 		INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work);
5528 		q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5529 
5530 		if (q_vector->rx.ring || q_vector->tx.ring)
5531 			napi_enable(&q_vector->napi);
5532 	}
5533 }
5534 
5535 /**
5536  * ice_up_complete - Finish the last steps of bringing up a connection
5537  * @vsi: The VSI being configured
5538  *
5539  * Return 0 on success and negative value on error
5540  */
5541 static int ice_up_complete(struct ice_vsi *vsi)
5542 {
5543 	struct ice_pf *pf = vsi->back;
5544 	int err;
5545 
5546 	ice_vsi_cfg_msix(vsi);
5547 
5548 	/* Enable only Rx rings; Tx rings were enabled by the FW when the
5549 	 * Tx queue group list was configured and the context bits were
5550 	 * programmed using ice_vsi_cfg_txqs
5551 	 */
5552 	err = ice_vsi_start_all_rx_rings(vsi);
5553 	if (err)
5554 		return err;
5555 
5556 	clear_bit(ICE_VSI_DOWN, vsi->state);
5557 	ice_napi_enable_all(vsi);
5558 	ice_vsi_ena_irq(vsi);
5559 
5560 	if (vsi->port_info &&
5561 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
5562 	    vsi->netdev) {
5563 		ice_print_link_msg(vsi, true);
5564 		netif_tx_start_all_queues(vsi->netdev);
5565 		netif_carrier_on(vsi->netdev);
5566 	}
5567 
5568 	ice_service_task_schedule(pf);
5569 
5570 	return 0;
5571 }
5572 
5573 /**
5574  * ice_up - Bring the connection back up after being down
5575  * @vsi: VSI being configured
5576  */
5577 int ice_up(struct ice_vsi *vsi)
5578 {
5579 	int err;
5580 
5581 	err = ice_vsi_cfg(vsi);
5582 	if (!err)
5583 		err = ice_up_complete(vsi);
5584 
5585 	return err;
5586 }
5587 
5588 /**
5589  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
5590  * @ring: Tx or Rx ring to read stats from
5591  * @pkts: packets stats counter
5592  * @bytes: bytes stats counter
5593  *
5594  * This function fetches stats from the ring considering the atomic operations
5595  * that need to be performed to read u64 values on a 32-bit machine.
5596  */
5597 static void
5598 ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
5599 {
5600 	unsigned int start;
5601 
5602 	*pkts = 0;
5603 	*bytes = 0;
5603 
5604 	if (!ring)
5605 		return;
5606 	do {
5607 		start = u64_stats_fetch_begin_irq(&ring->syncp);
5608 		*pkts = ring->stats.pkts;
5609 		*bytes = ring->stats.bytes;
5610 	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
5611 }
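
/* For context, a hedged sketch (names illustrative, not the driver's actual
 * hot path): the retry loop above is the reader half of the u64_stats
 * seqcount protocol; the writer half in the Tx/Rx clean routines pairs each
 * stats update with u64_stats_update_begin()/u64_stats_update_end().
 */
static void __maybe_unused
ice_example_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&ring->syncp);
	ring->stats.pkts += pkts;
	ring->stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);
}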
5612 
5613 /**
5614  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
5615  * @vsi: the VSI to be updated
5616  * @rings: rings to work on
5617  * @count: number of rings
5618  */
5619 static void
5620 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
5621 			     u16 count)
5622 {
5623 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5624 	u16 i;
5625 
5626 	for (i = 0; i < count; i++) {
5627 		struct ice_ring *ring;
5628 		u64 pkts, bytes;
5629 
5630 		ring = READ_ONCE(rings[i]);
5631 		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5632 		vsi_stats->tx_packets += pkts;
5633 		vsi_stats->tx_bytes += bytes;
5634 		vsi->tx_restart += ring->tx_stats.restart_q;
5635 		vsi->tx_busy += ring->tx_stats.tx_busy;
5636 		vsi->tx_linearize += ring->tx_stats.tx_linearize;
5637 	}
5638 }
5639 
5640 /**
5641  * ice_update_vsi_ring_stats - Update VSI stats counters
5642  * @vsi: the VSI to be updated
5643  */
5644 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
5645 {
5646 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5647 	u64 pkts, bytes;
5648 	int i;
5649 
5650 	/* reset netdev stats */
5651 	vsi_stats->tx_packets = 0;
5652 	vsi_stats->tx_bytes = 0;
5653 	vsi_stats->rx_packets = 0;
5654 	vsi_stats->rx_bytes = 0;
5655 
5656 	/* reset non-netdev (extended) stats */
5657 	vsi->tx_restart = 0;
5658 	vsi->tx_busy = 0;
5659 	vsi->tx_linearize = 0;
5660 	vsi->rx_buf_failed = 0;
5661 	vsi->rx_page_failed = 0;
5662 
5663 	rcu_read_lock();
5664 
5665 	/* update Tx rings counters */
5666 	ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
5667 
5668 	/* update Rx rings counters */
5669 	ice_for_each_rxq(vsi, i) {
5670 		struct ice_ring *ring = READ_ONCE(vsi->rx_rings[i]);
5671 
5672 		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5673 		vsi_stats->rx_packets += pkts;
5674 		vsi_stats->rx_bytes += bytes;
5675 		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
5676 		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
5677 	}
5678 
5679 	/* update XDP Tx rings counters */
5680 	if (ice_is_xdp_ena_vsi(vsi))
5681 		ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
5682 					     vsi->num_xdp_txq);
5683 
5684 	rcu_read_unlock();
5685 }
5686 
5687 /**
5688  * ice_update_vsi_stats - Update VSI stats counters
5689  * @vsi: the VSI to be updated
5690  */
5691 void ice_update_vsi_stats(struct ice_vsi *vsi)
5692 {
5693 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
5694 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
5695 	struct ice_pf *pf = vsi->back;
5696 
5697 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
5698 	    test_bit(ICE_CFG_BUSY, pf->state))
5699 		return;
5700 
5701 	/* get stats as recorded by Tx/Rx rings */
5702 	ice_update_vsi_ring_stats(vsi);
5703 
5704 	/* get VSI stats as recorded by the hardware */
5705 	ice_update_eth_stats(vsi);
5706 
5707 	cur_ns->tx_errors = cur_es->tx_errors;
5708 	cur_ns->rx_dropped = cur_es->rx_discards;
5709 	cur_ns->tx_dropped = cur_es->tx_discards;
5710 	cur_ns->multicast = cur_es->rx_multicast;
5711 
5712 	/* update some more netdev stats if this is main VSI */
5713 	if (vsi->type == ICE_VSI_PF) {
5714 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
5715 		cur_ns->rx_errors = pf->stats.crc_errors +
5716 				    pf->stats.illegal_bytes +
5717 				    pf->stats.rx_len_errors +
5718 				    pf->stats.rx_undersize +
5719 				    pf->hw_csum_rx_error +
5720 				    pf->stats.rx_jabber +
5721 				    pf->stats.rx_fragments +
5722 				    pf->stats.rx_oversize;
5723 		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
5724 		/* record drops from the port level */
5725 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
5726 	}
5727 }
5728 
5729 /**
5730  * ice_update_pf_stats - Update PF port stats counters
5731  * @pf: PF whose stats need to be updated
5732  */
5733 void ice_update_pf_stats(struct ice_pf *pf)
5734 {
5735 	struct ice_hw_port_stats *prev_ps, *cur_ps;
5736 	struct ice_hw *hw = &pf->hw;
5737 	u16 fd_ctr_base;
5738 	u8 port;
5739 
5740 	port = hw->port_info->lport;
5741 	prev_ps = &pf->stats_prev;
5742 	cur_ps = &pf->stats;
5743 
5744 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
5745 			  &prev_ps->eth.rx_bytes,
5746 			  &cur_ps->eth.rx_bytes);
5747 
5748 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
5749 			  &prev_ps->eth.rx_unicast,
5750 			  &cur_ps->eth.rx_unicast);
5751 
5752 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
5753 			  &prev_ps->eth.rx_multicast,
5754 			  &cur_ps->eth.rx_multicast);
5755 
5756 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
5757 			  &prev_ps->eth.rx_broadcast,
5758 			  &cur_ps->eth.rx_broadcast);
5759 
5760 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
5761 			  &prev_ps->eth.rx_discards,
5762 			  &cur_ps->eth.rx_discards);
5763 
5764 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
5765 			  &prev_ps->eth.tx_bytes,
5766 			  &cur_ps->eth.tx_bytes);
5767 
5768 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
5769 			  &prev_ps->eth.tx_unicast,
5770 			  &cur_ps->eth.tx_unicast);
5771 
5772 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
5773 			  &prev_ps->eth.tx_multicast,
5774 			  &cur_ps->eth.tx_multicast);
5775 
5776 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
5777 			  &prev_ps->eth.tx_broadcast,
5778 			  &cur_ps->eth.tx_broadcast);
5779 
5780 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
5781 			  &prev_ps->tx_dropped_link_down,
5782 			  &cur_ps->tx_dropped_link_down);
5783 
5784 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
5785 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
5786 
5787 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
5788 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
5789 
5790 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
5791 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
5792 
5793 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
5794 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
5795 
5796 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
5797 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
5798 
5799 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
5800 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
5801 
5802 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
5803 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
5804 
5805 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
5806 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
5807 
5808 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
5809 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
5810 
5811 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
5812 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
5813 
5814 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
5815 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
5816 
5817 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
5818 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
5819 
5820 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
5821 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
5822 
5823 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
5824 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
5825 
5826 	fd_ctr_base = hw->fd_ctr_base;
5827 
5828 	ice_stat_update40(hw,
5829 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
5830 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
5831 			  &cur_ps->fd_sb_match);
5832 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
5833 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
5834 
5835 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
5836 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
5837 
5838 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
5839 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
5840 
5841 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
5842 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
5843 
5844 	ice_update_dcb_stats(pf);
5845 
5846 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
5847 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
5848 
5849 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
5850 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
5851 
5852 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
5853 			  &prev_ps->mac_local_faults,
5854 			  &cur_ps->mac_local_faults);
5855 
5856 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
5857 			  &prev_ps->mac_remote_faults,
5858 			  &cur_ps->mac_remote_faults);
5859 
5860 	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
5861 			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
5862 
5863 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
5864 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
5865 
5866 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
5867 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
5868 
5869 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
5870 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
5871 
5872 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
5873 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
5874 
5875 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
5876 
5877 	pf->stat_prev_loaded = true;
5878 }
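
/* Illustrative sketch of what the ice_stat_update40() calls above do (the
 * real helper lives elsewhere in the driver; this is a simplified model):
 * hardware port counters are free-running, so each update accumulates the
 * wrap-safe delta since the previous snapshot, seeding the snapshot on the
 * first read after reset (stat_prev_loaded == false).
 */
static void __maybe_unused
ice_example_stat_update40(u64 reg_val, bool prev_loaded, u64 *prev, u64 *cur)
{
	if (!prev_loaded)
		*prev = reg_val;
	/* masking the delta to 40 bits keeps it correct across a counter wrap */
	*cur += (reg_val - *prev) & (BIT_ULL(40) - 1);
	*prev = reg_val;
}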
5879 
5880 /**
5881  * ice_get_stats64 - get statistics for network device structure
5882  * @netdev: network interface device structure
5883  * @stats: main device statistics structure
5884  */
5885 static
5886 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
5887 {
5888 	struct ice_netdev_priv *np = netdev_priv(netdev);
5889 	struct rtnl_link_stats64 *vsi_stats;
5890 	struct ice_vsi *vsi = np->vsi;
5891 
5892 	vsi_stats = &vsi->net_stats;
5893 
5894 	if (!vsi->num_txq || !vsi->num_rxq)
5895 		return;
5896 
5897 	/* netdev packet/byte stats come from the ring counters. They are
5898 	 * obtained by summing up the ring counters (done by
5899 	 * ice_update_vsi_ring_stats). Only call the update routine and read
5900 	 * the registers if the VSI is not down.
5901 	 */
5902 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
5903 		ice_update_vsi_ring_stats(vsi);
5904 	stats->tx_packets = vsi_stats->tx_packets;
5905 	stats->tx_bytes = vsi_stats->tx_bytes;
5906 	stats->rx_packets = vsi_stats->rx_packets;
5907 	stats->rx_bytes = vsi_stats->rx_bytes;
5908 
5909 	/* The rest of the stats can be read from the hardware but instead we
5910 	 * just return values that the watchdog task has already obtained from
5911 	 * the hardware.
5912 	 */
5913 	stats->multicast = vsi_stats->multicast;
5914 	stats->tx_errors = vsi_stats->tx_errors;
5915 	stats->tx_dropped = vsi_stats->tx_dropped;
5916 	stats->rx_errors = vsi_stats->rx_errors;
5917 	stats->rx_dropped = vsi_stats->rx_dropped;
5918 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
5919 	stats->rx_length_errors = vsi_stats->rx_length_errors;
5920 }
5921 
5922 /**
5923  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5924  * @vsi: VSI having NAPI disabled
5925  */
5926 static void ice_napi_disable_all(struct ice_vsi *vsi)
5927 {
5928 	int q_idx;
5929 
5930 	if (!vsi->netdev)
5931 		return;
5932 
5933 	ice_for_each_q_vector(vsi, q_idx) {
5934 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5935 
5936 		if (q_vector->rx.ring || q_vector->tx.ring)
5937 			napi_disable(&q_vector->napi);
5938 
5939 		cancel_work_sync(&q_vector->tx.dim.work);
5940 		cancel_work_sync(&q_vector->rx.dim.work);
5941 	}
5942 }
5943 
5944 /**
5945  * ice_down - Shutdown the connection
5946  * @vsi: The VSI being stopped
5947  */
5948 int ice_down(struct ice_vsi *vsi)
5949 {
5950 	int i, tx_err, rx_err, link_err = 0;
5951 
5952 	/* Caller of this function is expected to set the
5953 	 * vsi->state ICE_VSI_DOWN bit
5954 	 */
5955 	if (vsi->netdev) {
5956 		netif_carrier_off(vsi->netdev);
5957 		netif_tx_disable(vsi->netdev);
5958 	}
5959 
5960 	ice_vsi_dis_irq(vsi);
5961 
5962 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
5963 	if (tx_err)
5964 		netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
5965 			   vsi->vsi_num, tx_err);
5966 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
5967 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
5968 		if (tx_err)
5969 			netdev_err(vsi->netdev, "Failed to stop XDP rings, VSI %d error %d\n",
5970 				   vsi->vsi_num, tx_err);
5971 	}
5972 
5973 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
5974 	if (rx_err)
5975 		netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
5976 			   vsi->vsi_num, rx_err);
5977 
5978 	ice_napi_disable_all(vsi);
5979 
5980 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
5981 		link_err = ice_force_phys_link_state(vsi, false);
5982 		if (link_err)
5983 			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
5984 				   vsi->vsi_num, link_err);
5985 	}
5986 
5987 	ice_for_each_txq(vsi, i)
5988 		ice_clean_tx_ring(vsi->tx_rings[i]);
5989 
5990 	ice_for_each_rxq(vsi, i)
5991 		ice_clean_rx_ring(vsi->rx_rings[i]);
5992 
5993 	if (tx_err || rx_err || link_err) {
5994 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
5995 			   vsi->vsi_num, vsi->vsw->sw_id);
5996 		return -EIO;
5997 	}
5998 
5999 	return 0;
6000 }
6001 
6002 /**
6003  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6004  * @vsi: VSI having resources allocated
6005  *
6006  * Return 0 on success, negative on failure
6007  */
6008 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6009 {
6010 	int i, err = 0;
6011 
6012 	if (!vsi->num_txq) {
6013 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6014 			vsi->vsi_num);
6015 		return -EINVAL;
6016 	}
6017 
6018 	ice_for_each_txq(vsi, i) {
6019 		struct ice_ring *ring = vsi->tx_rings[i];
6020 
6021 		if (!ring)
6022 			return -EINVAL;
6023 
6024 		ring->netdev = vsi->netdev;
6025 		err = ice_setup_tx_ring(ring);
6026 		if (err)
6027 			break;
6028 	}
6029 
6030 	return err;
6031 }
6032 
6033 /**
6034  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6035  * @vsi: VSI having resources allocated
6036  *
6037  * Return 0 on success, negative on failure
6038  */
6039 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
6040 {
6041 	int i, err = 0;
6042 
6043 	if (!vsi->num_rxq) {
6044 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
6045 			vsi->vsi_num);
6046 		return -EINVAL;
6047 	}
6048 
6049 	ice_for_each_rxq(vsi, i) {
6050 		struct ice_ring *ring = vsi->rx_rings[i];
6051 
6052 		if (!ring)
6053 			return -EINVAL;
6054 
6055 		ring->netdev = vsi->netdev;
6056 		err = ice_setup_rx_ring(ring);
6057 		if (err)
6058 			break;
6059 	}
6060 
6061 	return err;
6062 }
6063 
6064 /**
6065  * ice_vsi_open_ctrl - open control VSI for use
6066  * @vsi: the VSI to open
6067  *
6068  * Initialization of the Control VSI
6069  *
6070  * Returns 0 on success, negative value on error
6071  */
6072 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
6073 {
6074 	char int_name[ICE_INT_NAME_STR_LEN];
6075 	struct ice_pf *pf = vsi->back;
6076 	struct device *dev;
6077 	int err;
6078 
6079 	dev = ice_pf_to_dev(pf);
6080 	/* allocate descriptors */
6081 	err = ice_vsi_setup_tx_rings(vsi);
6082 	if (err)
6083 		goto err_setup_tx;
6084 
6085 	err = ice_vsi_setup_rx_rings(vsi);
6086 	if (err)
6087 		goto err_setup_rx;
6088 
6089 	err = ice_vsi_cfg(vsi);
6090 	if (err)
6091 		goto err_setup_rx;
6092 
6093 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
6094 		 dev_driver_string(dev), dev_name(dev));
6095 	err = ice_vsi_req_irq_msix(vsi, int_name);
6096 	if (err)
6097 		goto err_setup_rx;
6098 
6099 	ice_vsi_cfg_msix(vsi);
6100 
6101 	err = ice_vsi_start_all_rx_rings(vsi);
6102 	if (err)
6103 		goto err_up_complete;
6104 
6105 	clear_bit(ICE_VSI_DOWN, vsi->state);
6106 	ice_vsi_ena_irq(vsi);
6107 
6108 	return 0;
6109 
6110 err_up_complete:
6111 	ice_down(vsi);
6112 err_setup_rx:
6113 	ice_vsi_free_rx_rings(vsi);
6114 err_setup_tx:
6115 	ice_vsi_free_tx_rings(vsi);
6116 
6117 	return err;
6118 }
6119 
6120 /**
6121  * ice_vsi_open - Called when a network interface is made active
6122  * @vsi: the VSI to open
6123  *
6124  * Initialization of the VSI
6125  *
6126  * Returns 0 on success, negative value on error
6127  */
6128 static int ice_vsi_open(struct ice_vsi *vsi)
6129 {
6130 	char int_name[ICE_INT_NAME_STR_LEN];
6131 	struct ice_pf *pf = vsi->back;
6132 	int err;
6133 
6134 	/* allocate descriptors */
6135 	err = ice_vsi_setup_tx_rings(vsi);
6136 	if (err)
6137 		goto err_setup_tx;
6138 
6139 	err = ice_vsi_setup_rx_rings(vsi);
6140 	if (err)
6141 		goto err_setup_rx;
6142 
6143 	err = ice_vsi_cfg(vsi);
6144 	if (err)
6145 		goto err_setup_rx;
6146 
6147 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
6148 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
6149 	err = ice_vsi_req_irq_msix(vsi, int_name);
6150 	if (err)
6151 		goto err_setup_rx;
6152 
6153 	/* Notify the stack of the actual queue counts. */
6154 	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
6155 	if (err)
6156 		goto err_set_qs;
6157 
6158 	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
6159 	if (err)
6160 		goto err_set_qs;
6161 
6162 	err = ice_up_complete(vsi);
6163 	if (err)
6164 		goto err_up_complete;
6165 
6166 	return 0;
6167 
6168 err_up_complete:
6169 	ice_down(vsi);
6170 err_set_qs:
6171 	ice_vsi_free_irq(vsi);
6172 err_setup_rx:
6173 	ice_vsi_free_rx_rings(vsi);
6174 err_setup_tx:
6175 	ice_vsi_free_tx_rings(vsi);
6176 
6177 	return err;
6178 }
6179 
6180 /**
6181  * ice_vsi_release_all - Delete all VSIs
6182  * @pf: PF from which all VSIs are being removed
6183  */
6184 static void ice_vsi_release_all(struct ice_pf *pf)
6185 {
6186 	int err, i;
6187 
6188 	if (!pf->vsi)
6189 		return;
6190 
6191 	ice_for_each_vsi(pf, i) {
6192 		if (!pf->vsi[i])
6193 			continue;
6194 
6195 		err = ice_vsi_release(pf->vsi[i]);
6196 		if (err)
6197 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
6198 				i, err, pf->vsi[i]->vsi_num);
6199 	}
6200 }
6201 
6202 /**
6203  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
6204  * @pf: pointer to the PF instance
6205  * @type: VSI type to rebuild
6206  *
6207  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
6208  */
6209 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
6210 {
6211 	struct device *dev = ice_pf_to_dev(pf);
6212 	enum ice_status status;
6213 	int i, err;
6214 
6215 	ice_for_each_vsi(pf, i) {
6216 		struct ice_vsi *vsi = pf->vsi[i];
6217 
6218 		if (!vsi || vsi->type != type)
6219 			continue;
6220 
6221 		/* rebuild the VSI */
6222 		err = ice_vsi_rebuild(vsi, true);
6223 		if (err) {
6224 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
6225 				err, vsi->idx, ice_vsi_type_str(type));
6226 			return err;
6227 		}
6228 
6229 		/* replay filters for the VSI */
6230 		status = ice_replay_vsi(&pf->hw, vsi->idx);
6231 		if (status) {
6232 			dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
6233 				ice_stat_str(status), vsi->idx,
6234 				ice_vsi_type_str(type));
6235 			return -EIO;
6236 		}
6237 
6238 		/* Re-map HW VSI number, using VSI handle that has been
6239 		 * previously validated in ice_replay_vsi() call above
6240 		 */
6241 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
6242 
6243 		/* enable the VSI */
6244 		err = ice_ena_vsi(vsi, false);
6245 		if (err) {
6246 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
6247 				err, vsi->idx, ice_vsi_type_str(type));
6248 			return err;
6249 		}
6250 
6251 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
6252 			 ice_vsi_type_str(type));
6253 	}
6254 
6255 	return 0;
6256 }
6257 
6258 /**
6259  * ice_update_pf_netdev_link - Update PF netdev link status
6260  * @pf: pointer to the PF instance
6261  */
6262 static void ice_update_pf_netdev_link(struct ice_pf *pf)
6263 {
6264 	bool link_up;
6265 	int i;
6266 
6267 	ice_for_each_vsi(pf, i) {
6268 		struct ice_vsi *vsi = pf->vsi[i];
6269 
6270 		if (!vsi || vsi->type != ICE_VSI_PF)
6271 			return;
6272 
6273 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
6274 		if (link_up) {
6275 			netif_carrier_on(pf->vsi[i]->netdev);
6276 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
6277 		} else {
6278 			netif_carrier_off(pf->vsi[i]->netdev);
6279 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
6280 		}
6281 	}
6282 }
6283 
6284 /**
6285  * ice_rebuild - rebuild after reset
6286  * @pf: PF to rebuild
6287  * @reset_type: type of reset
6288  *
6289  * Do not rebuild VF VSI in this flow because that is already handled via
6290  * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
6291  * PFR/CORER/GLOBR/etc. are different than the normal flow. Also, we don't want
6292  * to reset/rebuild all the VF VSIs twice.
6293  */
6294 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
6295 {
6296 	struct device *dev = ice_pf_to_dev(pf);
6297 	struct ice_hw *hw = &pf->hw;
6298 	enum ice_status ret;
6299 	int err;
6300 
6301 	if (test_bit(ICE_DOWN, pf->state))
6302 		goto clear_recovery;
6303 
6304 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
6305 
6306 	ret = ice_init_all_ctrlq(hw);
6307 	if (ret) {
6308 		dev_err(dev, "control queues init failed %s\n",
6309 			ice_stat_str(ret));
6310 		goto err_init_ctrlq;
6311 	}
6312 
6313 	/* if DDP was previously loaded successfully */
6314 	if (!ice_is_safe_mode(pf)) {
6315 		/* reload the SW DB of filter tables */
6316 		if (reset_type == ICE_RESET_PFR)
6317 			ice_fill_blk_tbls(hw);
6318 		else
6319 			/* Reload DDP Package after CORER/GLOBR reset */
6320 			ice_load_pkg(NULL, pf);
6321 	}
6322 
6323 	ret = ice_clear_pf_cfg(hw);
6324 	if (ret) {
6325 		dev_err(dev, "clear PF configuration failed %s\n",
6326 			ice_stat_str(ret));
6327 		goto err_init_ctrlq;
6328 	}
6329 
6330 	if (pf->first_sw->dflt_vsi_ena)
6331 		dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
6332 	/* clear the default VSI configuration if it exists */
6333 	pf->first_sw->dflt_vsi = NULL;
6334 	pf->first_sw->dflt_vsi_ena = false;
6335 
6336 	ice_clear_pxe_mode(hw);
6337 
6338 	ret = ice_init_nvm(hw);
6339 	if (ret) {
6340 		dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret));
6341 		goto err_init_ctrlq;
6342 	}
6343 
6344 	ret = ice_get_caps(hw);
6345 	if (ret) {
6346 		dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
6347 		goto err_init_ctrlq;
6348 	}
6349 
6350 	ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
6351 	if (ret) {
6352 		dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
6353 		goto err_init_ctrlq;
6354 	}
6355 
6356 	err = ice_sched_init_port(hw->port_info);
6357 	if (err)
6358 		goto err_sched_init_port;
6359 
6360 	/* start misc vector */
6361 	err = ice_req_irq_msix_misc(pf);
6362 	if (err) {
6363 		dev_err(dev, "misc vector setup failed: %d\n", err);
6364 		goto err_sched_init_port;
6365 	}
6366 
6367 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6368 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
6369 		if (!rd32(hw, PFQF_FD_SIZE)) {
6370 			u16 unused, guar, b_effort;
6371 
6372 			guar = hw->func_caps.fd_fltr_guar;
6373 			b_effort = hw->func_caps.fd_fltr_best_effort;
6374 
6375 			/* force guaranteed filter pool for PF */
6376 			ice_alloc_fd_guar_item(hw, &unused, guar);
6377 			/* force shared filter pool for PF */
6378 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
6379 		}
6380 	}
6381 
6382 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
6383 		ice_dcb_rebuild(pf);
6384 
6385 	/* If the PF previously had PTP enabled, PTP init needs to happen before
6386 	 * the VSI rebuild; otherwise PTP link status events will fail.
6387 	 */
6389 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
6390 		ice_ptp_init(pf);
6391 
6392 	/* rebuild PF VSI */
6393 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
6394 	if (err) {
6395 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
6396 		goto err_vsi_rebuild;
6397 	}
6398 
6399 	/* If Flow Director is active */
6400 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6401 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
6402 		if (err) {
6403 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
6404 			goto err_vsi_rebuild;
6405 		}
6406 
6407 		/* replay HW Flow Director recipes */
6408 		if (hw->fdir_prof)
6409 			ice_fdir_replay_flows(hw);
6410 
6411 		/* replay Flow Director filters */
6412 		ice_fdir_replay_fltrs(pf);
6413 
6414 		ice_rebuild_arfs(pf);
6415 	}
6416 
6417 	ice_update_pf_netdev_link(pf);
6418 
6419 	/* tell the firmware we are up */
6420 	ret = ice_send_version(pf);
6421 	if (ret) {
6422 		dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
6423 			ice_stat_str(ret));
6424 		goto err_vsi_rebuild;
6425 	}
6426 
6427 	ice_replay_post(hw);
6428 
6429 	/* if we get here, reset flow is successful */
6430 	clear_bit(ICE_RESET_FAILED, pf->state);
6431 
6432 	ice_plug_aux_dev(pf);
6433 	return;
6434 
6435 err_vsi_rebuild:
6436 err_sched_init_port:
6437 	ice_sched_cleanup_all(hw);
6438 err_init_ctrlq:
6439 	ice_shutdown_all_ctrlq(hw);
6440 	set_bit(ICE_RESET_FAILED, pf->state);
6441 clear_recovery:
6442 	/* set this bit in PF state to control service task scheduling */
6443 	set_bit(ICE_NEEDS_RESTART, pf->state);
6444 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
6445 }
6446 
6447 /**
6448  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
6449  * @vsi: Pointer to VSI structure
6450  */
6451 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
6452 {
6453 	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
6454 		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
6455 	else
6456 		return ICE_RXBUF_3072;
6457 }
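
/* Illustrative only: the largest MTU that ice_change_mtu() below accepts
 * while XDP is enabled is the XDP frame size minus the L2 overhead that
 * must also fit in the buffer (ICE_ETH_PKT_HDR_PAD).
 */
static int __maybe_unused ice_example_max_xdp_mtu(struct ice_vsi *vsi)
{
	return ice_max_xdp_frame_size(vsi) - ICE_ETH_PKT_HDR_PAD;
}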
6458 
6459 /**
6460  * ice_change_mtu - NDO callback to change the MTU
6461  * @netdev: network interface device structure
6462  * @new_mtu: new value for maximum frame size
6463  *
6464  * Returns 0 on success, negative on failure
6465  */
6466 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
6467 {
6468 	struct ice_netdev_priv *np = netdev_priv(netdev);
6469 	struct ice_vsi *vsi = np->vsi;
6470 	struct ice_pf *pf = vsi->back;
6471 	struct iidc_event *event;
6472 	u8 count = 0;
6473 	int err = 0;
6474 
6475 	if (new_mtu == (int)netdev->mtu) {
6476 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
6477 		return 0;
6478 	}
6479 
6480 	if (ice_is_xdp_ena_vsi(vsi)) {
6481 		int frame_size = ice_max_xdp_frame_size(vsi);
6482 
6483 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
6484 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
6485 				   frame_size - ICE_ETH_PKT_HDR_PAD);
6486 			return -EINVAL;
6487 		}
6488 	}
6489 
6490 	/* if a reset is in progress, wait for some time for it to complete */
6491 	do {
6492 		if (ice_is_reset_in_progress(pf->state)) {
6493 			count++;
6494 			usleep_range(1000, 2000);
6495 		} else {
6496 			break;
6497 		}
6498 
6499 	} while (count < 100);
6500 
6501 	if (count == 100) {
6502 		netdev_err(netdev, "can't change MTU. Device is busy\n");
6503 		return -EBUSY;
6504 	}
6505 
6506 	event = kzalloc(sizeof(*event), GFP_KERNEL);
6507 	if (!event)
6508 		return -ENOMEM;
6509 
6510 	set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6511 	ice_send_event_to_aux(pf, event);
6512 	clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6513 
6514 	netdev->mtu = (unsigned int)new_mtu;
6515 
6516 	/* if VSI is up, bring it down and then back up */
6517 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6518 		err = ice_down(vsi);
6519 		if (err) {
6520 			netdev_err(netdev, "change MTU if_down err %d\n", err);
6521 			goto event_after;
6522 		}
6523 
6524 		err = ice_up(vsi);
6525 		if (err) {
6526 			netdev_err(netdev, "change MTU if_up err %d\n", err);
6527 			goto event_after;
6528 		}
6529 	}
6530 
6531 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
6532 event_after:
6533 	set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
6534 	ice_send_event_to_aux(pf, event);
6535 	kfree(event);
6536 
6537 	return err;
6538 }
6539 
6540 /**
6541  * ice_do_ioctl - Access the hwtstamp interface
6542  * @netdev: network interface device structure
6543  * @ifr: interface request data
6544  * @cmd: ioctl command
6545  */
6546 static int ice_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6547 {
6548 	struct ice_netdev_priv *np = netdev_priv(netdev);
6549 	struct ice_pf *pf = np->vsi->back;
6550 
6551 	switch (cmd) {
6552 	case SIOCGHWTSTAMP:
6553 		return ice_ptp_get_ts_config(pf, ifr);
6554 	case SIOCSHWTSTAMP:
6555 		return ice_ptp_set_ts_config(pf, ifr);
6556 	default:
6557 		return -EOPNOTSUPP;
6558 	}
6559 }
6560 
6561 /**
6562  * ice_aq_str - convert AQ err code to a string
6563  * @aq_err: the AQ error code to convert
6564  */
6565 const char *ice_aq_str(enum ice_aq_err aq_err)
6566 {
6567 	switch (aq_err) {
6568 	case ICE_AQ_RC_OK:
6569 		return "OK";
6570 	case ICE_AQ_RC_EPERM:
6571 		return "ICE_AQ_RC_EPERM";
6572 	case ICE_AQ_RC_ENOENT:
6573 		return "ICE_AQ_RC_ENOENT";
6574 	case ICE_AQ_RC_ENOMEM:
6575 		return "ICE_AQ_RC_ENOMEM";
6576 	case ICE_AQ_RC_EBUSY:
6577 		return "ICE_AQ_RC_EBUSY";
6578 	case ICE_AQ_RC_EEXIST:
6579 		return "ICE_AQ_RC_EEXIST";
6580 	case ICE_AQ_RC_EINVAL:
6581 		return "ICE_AQ_RC_EINVAL";
6582 	case ICE_AQ_RC_ENOSPC:
6583 		return "ICE_AQ_RC_ENOSPC";
6584 	case ICE_AQ_RC_ENOSYS:
6585 		return "ICE_AQ_RC_ENOSYS";
6586 	case ICE_AQ_RC_EMODE:
6587 		return "ICE_AQ_RC_EMODE";
6588 	case ICE_AQ_RC_ENOSEC:
6589 		return "ICE_AQ_RC_ENOSEC";
6590 	case ICE_AQ_RC_EBADSIG:
6591 		return "ICE_AQ_RC_EBADSIG";
6592 	case ICE_AQ_RC_ESVN:
6593 		return "ICE_AQ_RC_ESVN";
6594 	case ICE_AQ_RC_EBADMAN:
6595 		return "ICE_AQ_RC_EBADMAN";
6596 	case ICE_AQ_RC_EBADBUF:
6597 		return "ICE_AQ_RC_EBADBUF";
6598 	}
6599 
6600 	return "ICE_AQ_RC_UNKNOWN";
6601 }
6602 
6603 /**
6604  * ice_stat_str - convert status err code to a string
6605  * @stat_err: the status error code to convert
6606  */
6607 const char *ice_stat_str(enum ice_status stat_err)
6608 {
6609 	switch (stat_err) {
6610 	case ICE_SUCCESS:
6611 		return "OK";
6612 	case ICE_ERR_PARAM:
6613 		return "ICE_ERR_PARAM";
6614 	case ICE_ERR_NOT_IMPL:
6615 		return "ICE_ERR_NOT_IMPL";
6616 	case ICE_ERR_NOT_READY:
6617 		return "ICE_ERR_NOT_READY";
6618 	case ICE_ERR_NOT_SUPPORTED:
6619 		return "ICE_ERR_NOT_SUPPORTED";
6620 	case ICE_ERR_BAD_PTR:
6621 		return "ICE_ERR_BAD_PTR";
6622 	case ICE_ERR_INVAL_SIZE:
6623 		return "ICE_ERR_INVAL_SIZE";
6624 	case ICE_ERR_DEVICE_NOT_SUPPORTED:
6625 		return "ICE_ERR_DEVICE_NOT_SUPPORTED";
6626 	case ICE_ERR_RESET_FAILED:
6627 		return "ICE_ERR_RESET_FAILED";
6628 	case ICE_ERR_FW_API_VER:
6629 		return "ICE_ERR_FW_API_VER";
6630 	case ICE_ERR_NO_MEMORY:
6631 		return "ICE_ERR_NO_MEMORY";
6632 	case ICE_ERR_CFG:
6633 		return "ICE_ERR_CFG";
6634 	case ICE_ERR_OUT_OF_RANGE:
6635 		return "ICE_ERR_OUT_OF_RANGE";
6636 	case ICE_ERR_ALREADY_EXISTS:
6637 		return "ICE_ERR_ALREADY_EXISTS";
6638 	case ICE_ERR_NVM:
6639 		return "ICE_ERR_NVM";
6640 	case ICE_ERR_NVM_CHECKSUM:
6641 		return "ICE_ERR_NVM_CHECKSUM";
6642 	case ICE_ERR_BUF_TOO_SHORT:
6643 		return "ICE_ERR_BUF_TOO_SHORT";
6644 	case ICE_ERR_NVM_BLANK_MODE:
6645 		return "ICE_ERR_NVM_BLANK_MODE";
6646 	case ICE_ERR_IN_USE:
6647 		return "ICE_ERR_IN_USE";
6648 	case ICE_ERR_MAX_LIMIT:
6649 		return "ICE_ERR_MAX_LIMIT";
6650 	case ICE_ERR_RESET_ONGOING:
6651 		return "ICE_ERR_RESET_ONGOING";
6652 	case ICE_ERR_HW_TABLE:
6653 		return "ICE_ERR_HW_TABLE";
6654 	case ICE_ERR_DOES_NOT_EXIST:
6655 		return "ICE_ERR_DOES_NOT_EXIST";
6656 	case ICE_ERR_FW_DDP_MISMATCH:
6657 		return "ICE_ERR_FW_DDP_MISMATCH";
6658 	case ICE_ERR_AQ_ERROR:
6659 		return "ICE_ERR_AQ_ERROR";
6660 	case ICE_ERR_AQ_TIMEOUT:
6661 		return "ICE_ERR_AQ_TIMEOUT";
6662 	case ICE_ERR_AQ_FULL:
6663 		return "ICE_ERR_AQ_FULL";
6664 	case ICE_ERR_AQ_NO_WORK:
6665 		return "ICE_ERR_AQ_NO_WORK";
6666 	case ICE_ERR_AQ_EMPTY:
6667 		return "ICE_ERR_AQ_EMPTY";
6668 	case ICE_ERR_AQ_FW_CRITICAL:
6669 		return "ICE_ERR_AQ_FW_CRITICAL";
6670 	}
6671 
6672 	return "ICE_ERR_UNKNOWN";
6673 }
6674 
6675 /**
6676  * ice_set_rss_lut - Set RSS LUT
6677  * @vsi: Pointer to VSI structure
6678  * @lut: Lookup table
6679  * @lut_size: Lookup table size
6680  *
6681  * Returns 0 on success, negative on failure
6682  */
6683 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6684 {
6685 	struct ice_aq_get_set_rss_lut_params params = {};
6686 	struct ice_hw *hw = &vsi->back->hw;
6687 	enum ice_status status;
6688 
6689 	if (!lut)
6690 		return -EINVAL;
6691 
6692 	params.vsi_handle = vsi->idx;
6693 	params.lut_size = lut_size;
6694 	params.lut_type = vsi->rss_lut_type;
6695 	params.lut = lut;
6696 
6697 	status = ice_aq_set_rss_lut(hw, &params);
6698 	if (status) {
6699 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n",
6700 			ice_stat_str(status),
6701 			ice_aq_str(hw->adminq.sq_last_status));
6702 		return -EIO;
6703 	}
6704 
6705 	return 0;
6706 }
6707 
6708 /**
6709  * ice_set_rss_key - Set RSS key
6710  * @vsi: Pointer to the VSI structure
6711  * @seed: RSS hash seed
6712  *
6713  * Returns 0 on success, negative on failure
6714  */
6715 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
6716 {
6717 	struct ice_hw *hw = &vsi->back->hw;
6718 	enum ice_status status;
6719 
6720 	if (!seed)
6721 		return -EINVAL;
6722 
6723 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6724 	if (status) {
6725 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n",
6726 			ice_stat_str(status),
6727 			ice_aq_str(hw->adminq.sq_last_status));
6728 		return -EIO;
6729 	}
6730 
6731 	return 0;
6732 }
6733 
6734 /**
6735  * ice_get_rss_lut - Get RSS LUT
6736  * @vsi: Pointer to VSI structure
6737  * @lut: Buffer to store the lookup table entries
6738  * @lut_size: Size of buffer to store the lookup table entries
6739  *
6740  * Returns 0 on success, negative on failure
6741  */
6742 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6743 {
6744 	struct ice_aq_get_set_rss_lut_params params = {};
6745 	struct ice_hw *hw = &vsi->back->hw;
6746 	enum ice_status status;
6747 
6748 	if (!lut)
6749 		return -EINVAL;
6750 
6751 	params.vsi_handle = vsi->idx;
6752 	params.lut_size = lut_size;
6753 	params.lut_type = vsi->rss_lut_type;
6754 	params.lut = lut;
6755 
6756 	status = ice_aq_get_rss_lut(hw, &params);
6757 	if (status) {
6758 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n",
6759 			ice_stat_str(status),
6760 			ice_aq_str(hw->adminq.sq_last_status));
6761 		return -EIO;
6762 	}
6763 
6764 	return 0;
6765 }
6766 
6767 /**
6768  * ice_get_rss_key - Get RSS key
6769  * @vsi: Pointer to VSI structure
6770  * @seed: Buffer to store the key in
6771  *
6772  * Returns 0 on success, negative on failure
6773  */
6774 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
6775 {
6776 	struct ice_hw *hw = &vsi->back->hw;
6777 	enum ice_status status;
6778 
6779 	if (!seed)
6780 		return -EINVAL;
6781 
6782 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6783 	if (status) {
6784 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n",
6785 			ice_stat_str(status),
6786 			ice_aq_str(hw->adminq.sq_last_status));
6787 		return -EIO;
6788 	}
6789 
6790 	return 0;
6791 }
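
/* Example (illustrative): the four RSS helpers above back the ethtool RXFH
 * interface, e.g.
 *
 *   ethtool -x <ifname>            # read the LUT and key
 *   ethtool -X <ifname> equal 8    # spread the LUT over the first 8 queues
 */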
6792 
6793 /**
6794  * ice_bridge_getlink - Get the hardware bridge mode
6795  * @skb: skb buff
6796  * @pid: process ID
6797  * @seq: RTNL message seq
6798  * @dev: the netdev being configured
6799  * @filter_mask: filter mask passed in
6800  * @nlflags: netlink flags passed in
6801  *
6802  * Return the bridge mode (VEB/VEPA)
6803  */
6804 static int
6805 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
6806 		   struct net_device *dev, u32 filter_mask, int nlflags)
6807 {
6808 	struct ice_netdev_priv *np = netdev_priv(dev);
6809 	struct ice_vsi *vsi = np->vsi;
6810 	struct ice_pf *pf = vsi->back;
6811 	u16 bmode;
6812 
6813 	bmode = pf->first_sw->bridge_mode;
6814 
6815 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
6816 				       filter_mask, NULL);
6817 }
6818 
6819 /**
6820  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
6821  * @vsi: Pointer to VSI structure
6822  * @bmode: Hardware bridge mode (VEB/VEPA)
6823  *
6824  * Returns 0 on success, negative on failure
6825  */
6826 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
6827 {
6828 	struct ice_aqc_vsi_props *vsi_props;
6829 	struct ice_hw *hw = &vsi->back->hw;
6830 	struct ice_vsi_ctx *ctxt;
6831 	enum ice_status status;
6832 	int ret = 0;
6833 
6834 	vsi_props = &vsi->info;
6835 
6836 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
6837 	if (!ctxt)
6838 		return -ENOMEM;
6839 
6840 	ctxt->info = vsi->info;
6841 
6842 	if (bmode == BRIDGE_MODE_VEB)
6843 		/* change from VEPA to VEB mode */
6844 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6845 	else
6846 		/* change from VEB to VEPA mode */
6847 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6848 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
6849 
6850 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
6851 	if (status) {
6852 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
6853 			bmode, ice_stat_str(status),
6854 			ice_aq_str(hw->adminq.sq_last_status));
6855 		ret = -EIO;
6856 		goto out;
6857 	}
6858 	/* Update sw flags for book keeping */
6859 	/* Update sw flags for bookkeeping */
6860 
6861 out:
6862 	kfree(ctxt);
6863 	return ret;
6864 }
6865 
6866 /**
6867  * ice_bridge_setlink - Set the hardware bridge mode
6868  * @dev: the netdev being configured
6869  * @nlh: RTNL message
6870  * @flags: bridge setlink flags
6871  * @extack: netlink extended ack
6872  *
6873  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI)
6874  * is hooked up. Iterates through the PF VSI list and sets the loopback mode
6875  * (if not already set) for all VSIs connected to this switch, and also
6876  * updates the unicast switch filter rules for the corresponding switch of
6877  * the netdev.
6877  */
6878 static int
6879 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
6880 		   u16 __always_unused flags,
6881 		   struct netlink_ext_ack __always_unused *extack)
6882 {
6883 	struct ice_netdev_priv *np = netdev_priv(dev);
6884 	struct ice_pf *pf = np->vsi->back;
6885 	struct nlattr *attr, *br_spec;
6886 	struct ice_hw *hw = &pf->hw;
6887 	enum ice_status status;
6888 	struct ice_sw *pf_sw;
6889 	int rem, v, err = 0;
6890 
6891 	pf_sw = pf->first_sw;
6892 	/* find the attribute in the netlink message */
6893 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
6894 
6895 	nla_for_each_nested(attr, br_spec, rem) {
6896 		__u16 mode;
6897 
6898 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
6899 			continue;
6900 		mode = nla_get_u16(attr);
6901 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
6902 			return -EINVAL;
6903 		/* Continue if bridge mode is not being flipped */
6904 		if (mode == pf_sw->bridge_mode)
6905 			continue;
6906 		/* Iterate through the PF VSI list and update the loopback
6907 		 * mode of the VSI
6908 		 */
6909 		ice_for_each_vsi(pf, v) {
6910 			if (!pf->vsi[v])
6911 				continue;
6912 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
6913 			if (err)
6914 				return err;
6915 		}
6916 
6917 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
6918 		/* Update the unicast switch filter rules for the corresponding
6919 		 * switch of the netdev
6920 		 */
6921 		status = ice_update_sw_rule_bridge_mode(hw);
6922 		if (status) {
6923 			netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
6924 				   mode, ice_stat_str(status),
6925 				   ice_aq_str(hw->adminq.sq_last_status));
6926 			/* revert hw->evb_veb */
6927 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
6928 			return -EIO;
6929 		}
6930 
6931 		pf_sw->bridge_mode = mode;
6932 	}
6933 
6934 	return 0;
6935 }
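
/* Example (illustrative): the VEB/VEPA flip handled above is driven from
 * user space with iproute2, e.g.
 *
 *   bridge link set dev <ifname> hwmode vepa
 *
 * which delivers an IFLA_BRIDGE_MODE attribute through this setlink path.
 */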
6936 
6937 /**
6938  * ice_tx_timeout - Respond to a Tx Hang
6939  * @netdev: network interface device structure
6940  * @txqueue: Tx queue
6941  */
6942 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
6943 {
6944 	struct ice_netdev_priv *np = netdev_priv(netdev);
6945 	struct ice_ring *tx_ring = NULL;
6946 	struct ice_vsi *vsi = np->vsi;
6947 	struct ice_pf *pf = vsi->back;
6948 	u32 i;
6949 
6950 	pf->tx_timeout_count++;
6951 
6952 	/* Check if PFC is enabled for the TC to which the queue belongs.
6953 	 * If so, the Tx timeout is not caused by a hung queue and there is
6954 	 * no need to reset and rebuild
6955 	 */
6956 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
6957 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
6958 			 txqueue);
6959 		return;
6960 	}
6961 
6962 	/* now that we have an index, find the tx_ring struct */
6963 	for (i = 0; i < vsi->num_txq; i++)
6964 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
6965 			if (txqueue == vsi->tx_rings[i]->q_index) {
6966 				tx_ring = vsi->tx_rings[i];
6967 				break;
6968 			}
6969 
6970 	/* Reset recovery level if enough time has elapsed after last timeout.
6971 	 * Also ensure no new reset action happens before next timeout period.
6972 	 */
6973 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
6974 		pf->tx_timeout_recovery_level = 1;
6975 	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
6976 				       netdev->watchdog_timeo)))
6977 		return;
6978 
6979 	if (tx_ring) {
6980 		struct ice_hw *hw = &pf->hw;
6981 		u32 head, val = 0;
6982 
6983 		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
6984 			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
6985 		/* Read interrupt register */
6986 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
6987 
6988 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
6989 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
6990 			    head, tx_ring->next_to_use, val);
6991 	}
6992 
6993 	pf->tx_timeout_last_recovery = jiffies;
6994 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
6995 		    pf->tx_timeout_recovery_level, txqueue);
6996 
6997 	switch (pf->tx_timeout_recovery_level) {
6998 	case 1:
6999 		set_bit(ICE_PFR_REQ, pf->state);
7000 		break;
7001 	case 2:
7002 		set_bit(ICE_CORER_REQ, pf->state);
7003 		break;
7004 	case 3:
7005 		set_bit(ICE_GLOBR_REQ, pf->state);
7006 		break;
7007 	default:
7008 		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7009 		set_bit(ICE_DOWN, pf->state);
7010 		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7011 		set_bit(ICE_SERVICE_DIS, pf->state);
7012 		break;
7013 	}
7014 
7015 	ice_service_task_schedule(pf);
7016 	pf->tx_timeout_recovery_level++;
7017 }
7018 
7019 /**
7020  * ice_open - Called when a network interface becomes active
7021  * @netdev: network interface device structure
7022  *
7023  * The open entry point is called when a network interface is made
7024  * active by the system (IFF_UP). At this point all resources needed
7025  * for transmit and receive operations are allocated, the interrupt
7026  * handler is registered with the OS, the netdev watchdog is enabled,
7027  * and the stack is notified that the interface is ready.
7028  *
7029  * Returns 0 on success, negative value on failure
7030  */
7031 int ice_open(struct net_device *netdev)
7032 {
7033 	struct ice_netdev_priv *np = netdev_priv(netdev);
7034 	struct ice_pf *pf = np->vsi->back;
7035 
7036 	if (ice_is_reset_in_progress(pf->state)) {
7037 		netdev_err(netdev, "can't open net device while reset is in progress\n");
7038 		return -EBUSY;
7039 	}
7040 
7041 	return ice_open_internal(netdev);
7042 }
7043 
7044 /**
7045  * ice_open_internal - Called when a network interface becomes active
7046  * @netdev: network interface device structure
7047  *
7048  * Internal ice_open implementation. Should not be used directly except by
7049  * ice_open and the reset handling routines
7050  *
7051  * Returns 0 on success, negative value on failure
7052  */
7053 int ice_open_internal(struct net_device *netdev)
7054 {
7055 	struct ice_netdev_priv *np = netdev_priv(netdev);
7056 	struct ice_vsi *vsi = np->vsi;
7057 	struct ice_pf *pf = vsi->back;
7058 	struct ice_port_info *pi;
7059 	enum ice_status status;
7060 	int err;
7061 
7062 	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
7063 		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
7064 		return -EIO;
7065 	}
7066 
7067 	netif_carrier_off(netdev);
7068 
7069 	pi = vsi->port_info;
7070 	status = ice_update_link_info(pi);
7071 	if (status) {
7072 		netdev_err(netdev, "Failed to get link info, error %s\n",
7073 			   ice_stat_str(status));
7074 		return -EIO;
7075 	}
7076 
7077 	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
7078 
7079 	/* Set PHY if there is media, otherwise, turn off PHY */
7080 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
7081 		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
7082 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
7083 			err = ice_init_phy_user_cfg(pi);
7084 			if (err) {
7085 				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
7086 					   err);
7087 				return err;
7088 			}
7089 		}
7090 
7091 		err = ice_configure_phy(vsi);
7092 		if (err) {
7093 			netdev_err(netdev, "Failed to set physical link up, error %d\n",
7094 				   err);
7095 			return err;
7096 		}
7097 	} else {
7098 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
7099 		ice_set_link(vsi, false);
7100 	}
7101 
7102 	err = ice_vsi_open(vsi);
7103 	if (err)
7104 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
7105 			   vsi->vsi_num, vsi->vsw->sw_id);
7106 
7107 	/* Update existing tunnel information */
7108 	udp_tunnel_get_rx_info(netdev);
7109 
7110 	return err;
7111 }
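
/* Sketch of the "reset handling" use mentioned in the kernel-doc above
 * (hypothetical helper, not the driver's actual rebuild code): while a
 * reset is in flight the reset state bits are set, so ice_open() would
 * bail out with -EBUSY; reset recovery therefore reopens a running
 * interface through ice_open_internal() directly.
 */
static int __maybe_unused ice_reopen_after_reset(struct net_device *netdev)
{
	if (!netif_running(netdev))
		return 0;

	/* bypass the reset-in-progress guard in ice_open() */
	return ice_open_internal(netdev);
}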
7112 
7113 /**
7114  * ice_stop - Disables a network interface
7115  * @netdev: network interface device structure
7116  *
7117  * The stop entry point is called when an interface is de-activated by the OS,
7118  * and the netdevice enters the DOWN state. The hardware is still under the
7119  * driver's control, but the netdev interface is disabled.
7120  *
7121  * Returns success only; this callback is not allowed to fail
7122  */
7123 int ice_stop(struct net_device *netdev)
7124 {
7125 	struct ice_netdev_priv *np = netdev_priv(netdev);
7126 	struct ice_vsi *vsi = np->vsi;
7127 	struct ice_pf *pf = vsi->back;
7128 
7129 	if (ice_is_reset_in_progress(pf->state)) {
7130 		netdev_err(netdev, "can't stop net device while reset is in progress");
7131 		return -EBUSY;
7132 	}
7133 
7134 	ice_vsi_close(vsi);
7135 
7136 	return 0;
7137 }
7138 
7139 /**
7140  * ice_features_check - Validate encapsulated packet conforms to limits
7141  * @skb: skb buffer
7142  * @netdev: This port's netdev
7143  * @features: Offload features that the stack believes apply
7144  */
7145 static netdev_features_t
7146 ice_features_check(struct sk_buff *skb,
7147 		   struct net_device __always_unused *netdev,
7148 		   netdev_features_t features)
7149 {
7150 	size_t len;
7151 
7152 	/* No point in doing any of this if neither checksum nor GSO is
7153 	 * being requested for this frame. We can rule out both by just
7154 	 * checking for CHECKSUM_PARTIAL.
7155 	 */
7156 	if (skb->ip_summed != CHECKSUM_PARTIAL)
7157 		return features;
7158 
7159 	/* We cannot support GSO if the MSS is less than 64 bytes. If it
7160 	 * is, drop the GSO feature for this skb.
7161 	 */
7162 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
7163 		features &= ~NETIF_F_GSO_MASK;
7164 
7165 	len = skb_network_header(skb) - skb->data;
7166 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
7167 		goto out_rm_features;
7168 
7169 	len = skb_transport_header(skb) - skb_network_header(skb);
7170 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
7171 		goto out_rm_features;
7172 
7173 	if (skb->encapsulation) {
7174 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
7175 		if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
7176 			goto out_rm_features;
7177 
7178 		len = skb_inner_transport_header(skb) -
7179 		      skb_inner_network_header(skb);
7180 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
7181 			goto out_rm_features;
7182 	}
7183 
7184 	return features;
7185 out_rm_features:
7186 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
7187 }
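
/* Minimal sketch of the rule enforced above, factored into a helper
 * (hypothetical, not in the driver): a header region is eligible for
 * offload only if it fits the per-region maximum and its length is even,
 * the latter presumably because the Tx descriptor encodes header lengths
 * in 2-byte units. Each check in ice_features_check() could then read,
 * e.g., if (!ice_hdr_len_ok(len, ICE_TXD_MACLEN_MAX)) goto out_rm_features;
 */
static bool __maybe_unused ice_hdr_len_ok(size_t len, size_t max)
{
	return len <= max && !(len & 0x1);
}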
7188 
7189 static const struct net_device_ops ice_netdev_safe_mode_ops = {
7190 	.ndo_open = ice_open,
7191 	.ndo_stop = ice_stop,
7192 	.ndo_start_xmit = ice_start_xmit,
7193 	.ndo_set_mac_address = ice_set_mac_address,
7194 	.ndo_validate_addr = eth_validate_addr,
7195 	.ndo_change_mtu = ice_change_mtu,
7196 	.ndo_get_stats64 = ice_get_stats64,
7197 	.ndo_tx_timeout = ice_tx_timeout,
7198 	.ndo_bpf = ice_xdp_safe_mode,
7199 };
7200 
7201 static const struct net_device_ops ice_netdev_ops = {
7202 	.ndo_open = ice_open,
7203 	.ndo_stop = ice_stop,
7204 	.ndo_start_xmit = ice_start_xmit,
7205 	.ndo_features_check = ice_features_check,
7206 	.ndo_set_rx_mode = ice_set_rx_mode,
7207 	.ndo_set_mac_address = ice_set_mac_address,
7208 	.ndo_validate_addr = eth_validate_addr,
7209 	.ndo_change_mtu = ice_change_mtu,
7210 	.ndo_get_stats64 = ice_get_stats64,
7211 	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
7212 	.ndo_do_ioctl = ice_do_ioctl,
7213 	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
7214 	.ndo_set_vf_mac = ice_set_vf_mac,
7215 	.ndo_get_vf_config = ice_get_vf_cfg,
7216 	.ndo_set_vf_trust = ice_set_vf_trust,
7217 	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
7218 	.ndo_set_vf_link_state = ice_set_vf_link_state,
7219 	.ndo_get_vf_stats = ice_get_vf_stats,
7220 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
7221 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
7222 	.ndo_set_features = ice_set_features,
7223 	.ndo_bridge_getlink = ice_bridge_getlink,
7224 	.ndo_bridge_setlink = ice_bridge_setlink,
7225 	.ndo_fdb_add = ice_fdb_add,
7226 	.ndo_fdb_del = ice_fdb_del,
7227 #ifdef CONFIG_RFS_ACCEL
7228 	.ndo_rx_flow_steer = ice_rx_flow_steer,
7229 #endif
7230 	.ndo_tx_timeout = ice_tx_timeout,
7231 	.ndo_bpf = ice_xdp,
7232 	.ndo_xdp_xmit = ice_xdp_xmit,
7233 	.ndo_xsk_wakeup = ice_xsk_wakeup,
7234 };
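
/* Sketch of how these two tables are consumed (a simplified, hypothetical
 * rendering; the driver's actual selection helper lives earlier in this
 * file and typically installs matching ethtool ops as well): at netdev
 * setup time the safe-mode table is chosen when the DDP package could not
 * be loaded, exposing only basic netdev operations.
 */
static void __maybe_unused
ice_pick_netdev_ops(struct net_device *netdev, bool safe_mode)
{
	netdev->netdev_ops = safe_mode ? &ice_netdev_safe_mode_ops
				       : &ice_netdev_ops;
}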
7235