// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

static DEFINE_IDA(ice_aux_ida);

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
static int ice_vsi_open(struct ice_vsi *vsi);

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

bool netif_is_ice(struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}
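
/* Illustrative usage sketch (not part of this file): callers elsewhere in
 * the kernel can use netif_is_ice() to check netdev ownership before
 * touching driver-private data; the np variable below is hypothetical.
 *
 *	if (netif_is_ice(netdev)) {
 *		struct ice_netdev_priv *np = netdev_priv(netdev);
 *
 *		// safe: netdev is ice-owned, so netdev_priv() is ice's
 *	}
 */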

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
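
/* Worked example: on a 256-descriptor ring with head (next_to_clean) = 250
 * and tail (next_to_use) = 10, the ring has wrapped, so the pending count is
 * tail + count - head = 10 + 256 - 250 = 16 descriptors.
 */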

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	for (i = 0; i < vsi->num_txq; i++) {
		struct ice_ring *tx_ring = vsi->tx_rings[i];

		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
			    ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}
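
/* Example of the detection logic above across two service-task passes:
 * pass 1 sees pkts = 100 with descriptors still pending, so prev_pkt is set
 * to 100; if pass 2 again reads pkts = 100, the queue made no progress while
 * work was outstanding and a software interrupt is fired. When no work is
 * pending, prev_pkt is parked at -1, which can never equal a (masked,
 * non-negative) packet count, so an idle queue is never treated as hung.
 */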

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	enum ice_status status;
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
	if (status)
		return -EIO;

	return 0;
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
157  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
158  * populates the tmp_sync_list, which is later used by ice_add_mac to add the
159  * MAC filters from the hardware.
160  */
161 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
162 {
163 	struct ice_netdev_priv *np = netdev_priv(netdev);
164 	struct ice_vsi *vsi = np->vsi;
165 
166 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
167 				     ICE_FWD_TO_VSI))
168 		return -EINVAL;
169 
170 	return 0;
171 }
172 
173 /**
174  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
175  * @netdev: the net device on which the unsync is happening
176  * @addr: MAC address to unsync
177  *
 * This is a callback function which is called by the in-kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
}

/**
 * ice_cfg_promisc - Enable or disable promiscuous mode for a given VSI
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @set_promisc: enable or disable promisc flag request
 */
static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
{
	struct ice_hw *hw = &vsi->back->hw;
	enum ice_status status = 0;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (vsi->num_vlan > 1) {
		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
						  set_promisc);
	} else {
		if (set_promisc)
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     0);
		else
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       0);
	}

	if (status)
		return -EIO;

	return 0;
}
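
/* Usage sketch (mirrors the real call sites in ice_vsi_sync_fltr() below):
 * the promisc_m argument selects which promiscuous filter bits to toggle,
 * e.g. multicast-only promiscuous mode for a VSI without extra VLANs:
 *
 *	err = ice_cfg_promisc(vsi, ICE_MCAST_PROMISC_BITS, true);
 *	if (err)
 *		// the AdminQ update failed; the caller reverts its flags
 */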

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status = 0;
	u32 changed_flags = 0;
	u8 promisc_m;
	int err = 0;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (status) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (status == ICE_ERR_NO_MEMORY) {
			err = -ENOMEM;
			goto out;
		}
	}

	/* Add MAC addresses in the sync list */
	status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If the filter was added successfully or already exists, don't treat
	 * it as an error; continue processing the rest of the function.
	 */
	if (status && status != ICE_ERR_ALREADY_EXISTS) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new unicast MAC filters, the
		 * VSI should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			err = -EIO;
			goto out;
		}
	}
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			if (vsi->num_vlan > 1)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, true);
			if (err) {
				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			if (vsi->num_vlan > 1)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, false);
			if (err) {
				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
				err = ice_set_dflt_vsi(pf->first_sw, vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				ice_cfg_vlan_pruning(vsi, false, false);
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
				err = ice_clear_dflt_vsi(pf->first_sw);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->num_vlan > 1)
					ice_cfg_vlan_pruning(vsi, true, false);
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_prepare_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int i;

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	ice_for_each_vf(pf, i)
		ice_set_vf_state_qs_dis(&pf->vf[i]);

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	ice_prepare_for_reset(pf);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf, true);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set, prepare
	 * for the pending reset if not already prepared (for PF
	 * software-initiated global resets the software should already be
	 * prepared for it, as indicated by ICE_PREPARED_FOR_RESET; for global
	 * resets initiated by firmware or software on other PFs, that bit is
	 * not set, so prepare for the reset now), poll for reset done, rebuild
	 * and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf, true);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	enum ice_status status;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function.  The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message and continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}
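
/* Worked example of the TLV packing above, assuming the conventional IEEE
 * 802.1AB field widths (7-bit type in the high bits of typelen, 9-bit length
 * in the low bits): for the ETS CFG TLV, typelen = (127 << 9) | 25, i.e. an
 * organizationally specific type (127) with a 25-byte payload, and each
 * "offset += len + 2" step accounts for that payload plus the 2-byte typelen
 * header when advancing through the LLDPDU buffer.
 */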

/**
 * ice_check_module_power - check module power level
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * Check the module power level returned by a previous call to aq_get_link_info
 * and print error messages if the module power level is not supported.
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	enum ice_status status;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;
	/* update the link info structures and re-enable link events;
	 * don't bail on failure, as other bookkeeping still needs to be done
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n",
			pi->lport, ice_stat_str(status),
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}
	/* if the link state and speed haven't changed, there is nothing to do */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}
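
/* Note on the mask arithmetic above: the Set Event Mask AQ command uses
 * negative logic, where a set bit suppresses the corresponding link event,
 * so the driver inverts the OR of the events it wants. With those three bits
 * cleared and everything else set, only link up/down, media-not-available,
 * and module qualification failure events are delivered for this port.
 */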

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

enum ice_aq_task_state {
	ICE_AQ_TASK_WAITING = 0,
	ICE_AQ_TASK_COMPLETE,
	ICE_AQ_TASK_CANCELED,
};

struct ice_aq_task {
	struct hlist_node entry;

	u16 opcode;
	struct ice_rq_event_info *event;
	enum ice_aq_task_state state;
};

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @opcode: the opcode to wait for
 * @timeout: how long to wait, in jiffies
 * @event: storage for the event info
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * To obtain only the descriptor contents, pass an event without an allocated
 * msg_buf. If the complete data buffer is desired, allocate the
 * event->msg_buf with enough space ahead of time.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
			  struct ice_rq_event_info *event)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_aq_task *task;
	unsigned long start;
	long ret;
	int err;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->event = event;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);

	start = jiffies;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
					       timeout);
	switch (task->state) {
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);
	kfree(task);

	return err;
}
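
/* Usage sketch (illustrative; the opcode is just an example): a caller that
 * has fired an AdminQ command and needs the matching ARQ completion can wait
 * on it with a bounded timeout. Leaving event.msg_buf NULL requests only the
 * descriptor contents.
 *
 *	struct ice_rq_event_info event = { };
 *	int err;
 *
 *	err = ice_aq_wait_for_event(pf, ice_aqc_opc_lldp_set_mib_change,
 *				    msecs_to_jiffies(500), &event);
 *	if (err == -ETIMEDOUT)
 *		// firmware never posted the event within 500 ms
 */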

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state || task->opcode != opcode)
			continue;

		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
		task->event->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task->event->msg_buf &&
		    task->event->buf_len > event->buf_len) {
			memcpy(task->event->msg_buf, event->msg_buf,
			       event->buf_len);
			task->event->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		enum ice_status ret;
		u16 opcode;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == ICE_ERR_AQ_NO_WORK)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %s\n", qtype,
				ice_stat_str(ret));
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			if (!ice_is_malicious_vf(pf, &event, i, pending))
				ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume flows (e.g. the WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	unsigned int i;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}

	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN);
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		reg = rd32(hw, VP_MDET_TX_PQM(i));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(i));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_RX(i));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(i), 0xFFFF);
			vf->mdd_rx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_rx_err(pf))
				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
					 i);

			/* Since the queue is disabled on VF Rx MDD events, the
			 * PF can be configured to reset the VF through ethtool
			 * private flag mdd-auto-reset-vf.
			 */
			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
				/* VF MDD event counters will be cleared by
				 * reset, so print the event prior to reset.
				 */
				ice_print_vf_rx_mdd_event(vf);
				ice_reset_vf(&pf->vf[i], false);
			}
		}
	}

	ice_print_vfs_mdd_events(pf);
}

/**
 * ice_force_phys_link_state - Force the physical link state
 * @vsi: VSI to force the physical link state to up/down
 * @link_up: true/false indicates to set the physical link to up/down
 *
 * Force the physical link state by getting the current PHY capabilities from
 * hardware and setting the PHY config based on the determined capabilities. If
 * the link changes, a link event will be triggered, because both the Enable
 * Automatic Link Update and LESM Enable bits are set when setting the PHY
 * capabilities.
 *
 * Returns 0 on success, negative on failure
 */
static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_port_info *pi;
	struct device *dev;
	int retcode;

	if (!vsi || !vsi->port_info || !vsi->back)
		return -EINVAL;
	if (vsi->type != ICE_VSI_PF)
		return 0;

	dev = ice_pf_to_dev(vsi->back);

	pi = vsi->port_info;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
				      NULL);
	if (retcode) {
		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
		goto out;
	}

	/* No change in link */
	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
		goto out;

	/* Use the current user PHY configuration. The current user PHY
	 * configuration is initialized during probe from PHY capabilities
	 * software mode, and updated on set PHY configuration.
	 */
	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
	if (!cfg) {
		retcode = -ENOMEM;
		goto out;
	}

	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
	if (link_up)
		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
	else
		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;

	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
	if (retcode) {
		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
	}

	kfree(cfg);
out:
	kfree(pcaps);
	return retcode;
}
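
/* Usage note (illustrative): this helper backs flows such as the
 * link-down-on-close ethtool private flag, where the netdev open/stop paths
 * want the physical link forced to match the administrative state, e.g.:
 *
 *	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
 *		ice_force_phys_link_state(vsi, false);
 */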

/**
 * ice_init_nvm_phy_type - Initialize the NVM PHY type
 * @pi: port info structure
 *
 * Initialize nvm_phy_type_[low|high] for link lenient mode support
 */
static int ice_init_nvm_phy_type(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_pf *pf = pi->hw->back;
	enum ice_status status;
	int err = 0;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
				     NULL);

	if (status) {
		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
		err = -EIO;
		goto out;
	}

	pf->nvm_phy_type_hi = pcaps->phy_type_high;
	pf->nvm_phy_type_lo = pcaps->phy_type_low;

out:
	kfree(pcaps);
	return err;
}

/**
 * ice_init_link_dflt_override - Initialize link default override
 * @pi: port info structure
 *
 * Initialize link default override and PHY total port shutdown during probe
 */
static void ice_init_link_dflt_override(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv *ldo;
	struct ice_pf *pf = pi->hw->back;

	ldo = &pf->link_dflt_override;
	if (ice_get_link_default_override(ldo, pi))
		return;

	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
		return;

	/* Enable Total Port Shutdown (override/replace link-down-on-close
	 * ethtool private flag) for ports with Port Disable bit set.
	 */
	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
}

/**
 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
 * @pi: port info structure
 *
 * If default override is enabled, initialize the user PHY cfg speed and FEC
 * settings using the default override mask from the NVM.
 *
 * The PHY should only be configured with the default override settings the
 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
 * is used to indicate that the user PHY cfg default override is initialized
 * and the PHY has not been configured with the default override settings. The
 * state is set here, and cleared in ice_configure_phy the first time the PHY is
 * configured.
 *
 * This function should be called only if the FW doesn't support default
 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
 */
static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv *ldo;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = pi->hw->back;

	ldo = &pf->link_dflt_override;

1802 	/* If link default override is enabled, use it to mask NVM PHY
1803 	 * capabilities for speed and FEC default configuration.
1804 	 */
1805 	cfg = &phy->curr_user_phy_cfg;
1806 
1807 	if (ldo->phy_type_low || ldo->phy_type_high) {
1808 		cfg->phy_type_low = pf->nvm_phy_type_lo &
1809 				    cpu_to_le64(ldo->phy_type_low);
1810 		cfg->phy_type_high = pf->nvm_phy_type_hi &
1811 				     cpu_to_le64(ldo->phy_type_high);
1812 	}
1813 	cfg->link_fec_opt = ldo->fec_options;
1814 	phy->curr_user_fec_req = ICE_FEC_AUTO;
1815 
1816 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
1817 }
1818 
1819 /**
1820  * ice_init_phy_user_cfg - Initialize the PHY user configuration
1821  * @pi: port info structure
1822  *
1823  * Initialize the current user PHY configuration, speed, FEC, and FC requested
1824  * mode to default. The PHY defaults are from get PHY capabilities topology
1825  * with media so call when media is first available. An error is returned if
1826  * called when media is not available. The PHY initialization completed state is
1827  * set here.
1828  *
1829  * These configurations are used when setting the PHY configuration. The
1830  * user PHY configuration is updated on each set PHY configuration request.
1831  * Returns 0 on success, negative on failure
1832  */
1833 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
1834 {
1835 	struct ice_aqc_get_phy_caps_data *pcaps;
1836 	struct ice_phy_info *phy = &pi->phy;
1837 	struct ice_pf *pf = pi->hw->back;
1838 	enum ice_status status;
1839 	int err = 0;
1840 
1841 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1842 		return -EIO;
1843 
1844 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1845 	if (!pcaps)
1846 		return -ENOMEM;
1847 
1848 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
1849 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
1850 					     pcaps, NULL);
1851 	else
1852 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
1853 					     pcaps, NULL);
1854 	if (status) {
1855 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1856 		err = -EIO;
1857 		goto err_out;
1858 	}
1859 
1860 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
1861 
1862 	/* check if lenient mode is supported and enabled */
1863 	if (ice_fw_supports_link_override(pi->hw) &&
1864 	    !(pcaps->module_compliance_enforcement &
1865 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
1866 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
1867 
1868 		/* if the FW supports default PHY configuration mode, then the driver
1869 		 * does not have to apply link override settings. If not,
1870 		 * initialize user PHY configuration with link override values
1871 		 */
1872 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
1873 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
1874 			ice_init_phy_cfg_dflt_override(pi);
1875 			goto out;
1876 		}
1877 	}
1878 
1879 	/* if link default override is not enabled, set user flow control and
1880 	 * FEC settings based on what get_phy_caps returned
1881 	 */
1882 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
1883 						      pcaps->link_fec_options);
1884 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
1885 
1886 out:
1887 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
1888 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
1889 err_out:
1890 	kfree(pcaps);
1891 	return err;
1892 }
1893 
1894 /**
1895  * ice_configure_phy - configure PHY
1896  * @vsi: VSI of PHY
1897  *
1898  * Set the PHY configuration. If the current PHY configuration is the same as
1899  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
1900  * configure the PHY based on the get PHY capabilities for topology with media.
1901  */
1902 static int ice_configure_phy(struct ice_vsi *vsi)
1903 {
1904 	struct device *dev = ice_pf_to_dev(vsi->back);
1905 	struct ice_port_info *pi = vsi->port_info;
1906 	struct ice_aqc_get_phy_caps_data *pcaps;
1907 	struct ice_aqc_set_phy_cfg_data *cfg;
1908 	struct ice_phy_info *phy = &pi->phy;
1909 	struct ice_pf *pf = vsi->back;
1910 	enum ice_status status;
1911 	int err = 0;
1912 
1913 	/* Ensure we have media as we cannot configure a medialess port */
1914 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1915 		return -EPERM;
1916 
1917 	ice_print_topo_conflict(vsi);
1918 
1919 	if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
1920 		return -EPERM;
1921 
1922 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
1923 		return ice_force_phys_link_state(vsi, true);
1924 
1925 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1926 	if (!pcaps)
1927 		return -ENOMEM;
1928 
1929 	/* Get current PHY config */
1930 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1931 				     NULL);
1932 	if (status) {
1933 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
1934 			vsi->vsi_num, ice_stat_str(status));
1935 		err = -EIO;
1936 		goto done;
1937 	}
1938 
1939 	/* If PHY enable link is configured and configuration has not changed,
1940 	 * there's nothing to do
1941 	 */
1942 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
1943 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
1944 		goto done;
1945 
1946 	/* Use PHY topology as baseline for configuration */
1947 	memset(pcaps, 0, sizeof(*pcaps));
1948 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
1949 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
1950 					     pcaps, NULL);
1951 	else
1952 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
1953 					     pcaps, NULL);
1954 	if (status) {
1955 		dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n",
1956 			vsi->vsi_num, ice_stat_str(status));
1957 		err = -EIO;
1958 		goto done;
1959 	}
1960 
1961 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1962 	if (!cfg) {
1963 		err = -ENOMEM;
1964 		goto done;
1965 	}
1966 
1967 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
1968 
1969 	/* Speed - If default override pending, use curr_user_phy_cfg set in
1970 	 * ice_init_phy_cfg_dflt_override.
1971 	 */
1972 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
1973 			       vsi->back->state)) {
1974 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
1975 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
1976 	} else {
1977 		u64 phy_low = 0, phy_high = 0;
1978 
1979 		ice_update_phy_type(&phy_low, &phy_high,
1980 				    pi->phy.curr_user_speed_req);
1981 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
1982 		cfg->phy_type_high = pcaps->phy_type_high &
1983 				     cpu_to_le64(phy_high);
1984 	}
1985 
1986 	/* Can't provide what was requested; use PHY capabilities */
1987 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
1988 		cfg->phy_type_low = pcaps->phy_type_low;
1989 		cfg->phy_type_high = pcaps->phy_type_high;
1990 	}
1991 
1992 	/* FEC */
1993 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
1994 
1995 	/* Can't provide what was requested; use PHY capabilities */
1996 	if (cfg->link_fec_opt !=
1997 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
1998 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
1999 		cfg->link_fec_opt = pcaps->link_fec_options;
2000 	}
2001 
2002 	/* Flow Control - always supported; no need to check against
2003 	 * capabilities
2004 	 */
2005 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2006 
2007 	/* Enable link and link update */
2008 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2009 
2010 	status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2011 	if (status) {
2012 		dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
2013 			vsi->vsi_num, ice_stat_str(status));
2014 		err = -EIO;
2015 	}
2016 
2017 	kfree(cfg);
2018 done:
2019 	kfree(pcaps);
2020 	return err;
2021 }
2022 
2023 /**
2024  * ice_check_media_subtask - Check for media
2025  * @pf: pointer to PF struct
2026  *
2027  * If media is available, then initialize the PHY user configuration if it
2028  * hasn't been done yet, and configure the PHY if the interface is up.
2029  */
2030 static void ice_check_media_subtask(struct ice_pf *pf)
2031 {
2032 	struct ice_port_info *pi;
2033 	struct ice_vsi *vsi;
2034 	int err;
2035 
2036 	/* No need to check for media if it's already present */
2037 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2038 		return;
2039 
2040 	vsi = ice_get_main_vsi(pf);
2041 	if (!vsi)
2042 		return;
2043 
2044 	/* Refresh link info and check if media is present */
2045 	pi = vsi->port_info;
2046 	err = ice_update_link_info(pi);
2047 	if (err)
2048 		return;
2049 
2050 	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
2051 
2052 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2053 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2054 			ice_init_phy_user_cfg(pi);
2055 
2056 		/* PHY settings are reset on media insertion, reconfigure
2057 		 * PHY to preserve settings.
2058 		 */
2059 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2060 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2061 			return;
2062 
2063 		err = ice_configure_phy(vsi);
2064 		if (!err)
2065 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2066 
2067 		/* A Link Status Event will be generated; the event handler
2068 		 * will complete bringing the interface up
2069 		 */
2070 	}
2071 }
2072 
2073 /**
2074  * ice_service_task - manage and run subtasks
2075  * @work: pointer to work_struct contained by the PF struct
2076  */
2077 static void ice_service_task(struct work_struct *work)
2078 {
2079 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2080 	unsigned long start_time = jiffies;
2081 
2082 	/* subtasks */
2083 
2084 	/* process reset requests first */
2085 	ice_reset_subtask(pf);
2086 
2087 	/* bail if a reset/recovery cycle is pending or rebuild failed */
2088 	if (ice_is_reset_in_progress(pf->state) ||
2089 	    test_bit(ICE_SUSPENDED, pf->state) ||
2090 	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2091 		ice_service_task_complete(pf);
2092 		return;
2093 	}
2094 
2095 	ice_clean_adminq_subtask(pf);
2096 	ice_check_media_subtask(pf);
2097 	ice_check_for_hang_subtask(pf);
2098 	ice_sync_fltr_subtask(pf);
2099 	ice_handle_mdd_event(pf);
2100 	ice_watchdog_subtask(pf);
2101 
2102 	if (ice_is_safe_mode(pf)) {
2103 		ice_service_task_complete(pf);
2104 		return;
2105 	}
2106 
2107 	ice_process_vflr_event(pf);
2108 	ice_clean_mailboxq_subtask(pf);
2109 	ice_sync_arfs_fltrs(pf);
2110 	ice_flush_fdir_ctx(pf);
2111 
2112 	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2113 	ice_service_task_complete(pf);
2114 
2115 	/* If the tasks have taken longer than one service timer period
2116 	 * or there is more work to be done, reset the service timer to
2117 	 * schedule the service task now.
2118 	 */
2119 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2120 	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2121 	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2122 	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2123 	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2124 	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2125 		mod_timer(&pf->serv_tmr, jiffies);
2126 }
2127 
2128 /**
2129  * ice_set_ctrlq_len - helper function to set controlq length
2130  * @hw: pointer to the HW instance
2131  */
2132 static void ice_set_ctrlq_len(struct ice_hw *hw)
2133 {
2134 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2135 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2136 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2137 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
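	/* mailbox RQ depth is set to the max the ARQLEN register field allows */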
2138 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2139 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2140 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2141 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2142 }
2143 
2144 /**
2145  * ice_schedule_reset - schedule a reset
2146  * @pf: board private structure
2147  * @reset: reset being requested
2148  */
2149 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2150 {
2151 	struct device *dev = ice_pf_to_dev(pf);
2152 
2153 	/* bail out if earlier reset has failed */
2154 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2155 		dev_dbg(dev, "earlier reset has failed\n");
2156 		return -EIO;
2157 	}
2158 	/* bail if reset/recovery already in progress */
2159 	if (ice_is_reset_in_progress(pf->state)) {
2160 		dev_dbg(dev, "Reset already in progress\n");
2161 		return -EBUSY;
2162 	}
2163 
2164 	ice_unplug_aux_dev(pf);
2165 
2166 	switch (reset) {
2167 	case ICE_RESET_PFR:
2168 		set_bit(ICE_PFR_REQ, pf->state);
2169 		break;
2170 	case ICE_RESET_CORER:
2171 		set_bit(ICE_CORER_REQ, pf->state);
2172 		break;
2173 	case ICE_RESET_GLOBR:
2174 		set_bit(ICE_GLOBR_REQ, pf->state);
2175 		break;
2176 	default:
2177 		return -EINVAL;
2178 	}
2179 
2180 	ice_service_task_schedule(pf);
2181 	return 0;
2182 }
2183 
2184 /**
2185  * ice_irq_affinity_notify - Callback for affinity changes
2186  * @notify: context as to what irq was changed
2187  * @mask: the new affinity mask
2188  *
2189  * This is a callback function used by the irq_set_affinity_notifier function
2190  * so that we may register to receive changes to the irq affinity masks.
2191  */
2192 static void
2193 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2194 			const cpumask_t *mask)
2195 {
2196 	struct ice_q_vector *q_vector =
2197 		container_of(notify, struct ice_q_vector, affinity_notify);
2198 
2199 	cpumask_copy(&q_vector->affinity_mask, mask);
2200 }
2201 
2202 /**
2203  * ice_irq_affinity_release - Callback for affinity notifier release
2204  * @ref: internal core kernel usage
2205  *
2206  * This is a callback function used by the irq_set_affinity_notifier function
2207  * to inform the current notification subscriber that they will no longer
2208  * receive notifications.
2209  */
2210 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2211 
2212 /**
2213  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2214  * @vsi: the VSI being configured
2215  */
2216 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2217 {
2218 	struct ice_hw *hw = &vsi->back->hw;
2219 	int i;
2220 
2221 	ice_for_each_q_vector(vsi, i)
2222 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2223 
2224 	ice_flush(hw);
2225 	return 0;
2226 }
2227 
2228 /**
2229  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2230  * @vsi: the VSI being configured
2231  * @basename: name for the vector
2232  */
2233 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2234 {
2235 	int q_vectors = vsi->num_q_vectors;
2236 	struct ice_pf *pf = vsi->back;
2237 	int base = vsi->base_vector;
2238 	struct device *dev;
2239 	int rx_int_idx = 0;
2240 	int tx_int_idx = 0;
2241 	int vector, err;
2242 	int irq_num;
2243 
2244 	dev = ice_pf_to_dev(pf);
2245 	for (vector = 0; vector < q_vectors; vector++) {
2246 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2247 
2248 		irq_num = pf->msix_entries[base + vector].vector;
2249 
2250 		if (q_vector->tx.ring && q_vector->rx.ring) {
2251 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2252 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
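			/* a combined vector consumes an Rx index; keep the Tx index in step */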
2253 			tx_int_idx++;
2254 		} else if (q_vector->rx.ring) {
2255 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2256 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2257 		} else if (q_vector->tx.ring) {
2258 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2259 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2260 		} else {
2261 			/* skip this unused q_vector */
2262 			continue;
2263 		}
2264 		if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
2265 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2266 					       IRQF_SHARED, q_vector->name,
2267 					       q_vector);
2268 		else
2269 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2270 					       0, q_vector->name, q_vector);
2271 		if (err) {
2272 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2273 				   err);
2274 			goto free_q_irqs;
2275 		}
2276 
2277 		/* register for affinity change notifications */
2278 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2279 			struct irq_affinity_notify *affinity_notify;
2280 
2281 			affinity_notify = &q_vector->affinity_notify;
2282 			affinity_notify->notify = ice_irq_affinity_notify;
2283 			affinity_notify->release = ice_irq_affinity_release;
2284 			irq_set_affinity_notifier(irq_num, affinity_notify);
2285 		}
2286 
2287 		/* assign the mask for this irq */
2288 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2289 	}
2290 
2291 	vsi->irqs_ready = true;
2292 	return 0;
2293 
2294 free_q_irqs:
2295 	while (vector) {
2296 		vector--;
2297 		irq_num = pf->msix_entries[base + vector].vector;
2298 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2299 			irq_set_affinity_notifier(irq_num, NULL);
2300 		irq_set_affinity_hint(irq_num, NULL);
		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2302 	}
2303 	return err;
2304 }
2305 
2306 /**
2307  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2308  * @vsi: VSI to setup Tx rings used by XDP
2309  *
2310  * Return 0 on success and negative value on error
2311  */
2312 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2313 {
2314 	struct device *dev = ice_pf_to_dev(vsi->back);
2315 	int i;
2316 
2317 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2318 		u16 xdp_q_idx = vsi->alloc_txq + i;
2319 		struct ice_ring *xdp_ring;
2320 
2321 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2322 
2323 		if (!xdp_ring)
2324 			goto free_xdp_rings;
2325 
2326 		xdp_ring->q_index = xdp_q_idx;
2327 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2328 		xdp_ring->ring_active = false;
2329 		xdp_ring->vsi = vsi;
2330 		xdp_ring->netdev = NULL;
2331 		xdp_ring->dev = dev;
2332 		xdp_ring->count = vsi->num_tx_desc;
2333 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2334 		if (ice_setup_tx_ring(xdp_ring))
2335 			goto free_xdp_rings;
2336 		ice_set_ring_xdp(xdp_ring);
2337 		xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
2338 	}
2339 
2340 	return 0;
2341 
2342 free_xdp_rings:
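	/* i is the failing index; walk back and free any rings already set up */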
2343 	for (; i >= 0; i--)
2344 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2345 			ice_free_tx_ring(vsi->xdp_rings[i]);
2346 	return -ENOMEM;
2347 }
2348 
2349 /**
2350  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2351  * @vsi: VSI to set the bpf prog on
2352  * @prog: the bpf prog pointer
2353  */
2354 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2355 {
2356 	struct bpf_prog *old_prog;
2357 	int i;
2358 
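	/* atomically swap in the new program and drop the reference on the old one */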
2359 	old_prog = xchg(&vsi->xdp_prog, prog);
2360 	if (old_prog)
2361 		bpf_prog_put(old_prog);
2362 
2363 	ice_for_each_rxq(vsi, i)
2364 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2365 }
2366 
2367 /**
2368  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2369  * @vsi: VSI to bring up Tx rings used by XDP
2370  * @prog: bpf program that will be assigned to VSI
2371  *
2372  * Return 0 on success and negative value on error
2373  */
2374 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2375 {
2376 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2377 	int xdp_rings_rem = vsi->num_xdp_txq;
2378 	struct ice_pf *pf = vsi->back;
2379 	struct ice_qs_cfg xdp_qs_cfg = {
2380 		.qs_mutex = &pf->avail_q_mutex,
2381 		.pf_map = pf->avail_txqs,
2382 		.pf_map_size = pf->max_pf_txqs,
2383 		.q_count = vsi->num_xdp_txq,
2384 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2385 		.vsi_map = vsi->txq_map,
2386 		.vsi_map_offset = vsi->alloc_txq,
2387 		.mapping_mode = ICE_VSI_MAP_CONTIG
2388 	};
2389 	enum ice_status status;
2390 	struct device *dev;
2391 	int i, v_idx;
2392 
2393 	dev = ice_pf_to_dev(pf);
2394 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2395 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2396 	if (!vsi->xdp_rings)
2397 		return -ENOMEM;
2398 
2399 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2400 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2401 		goto err_map_xdp;
2402 
2403 	if (ice_xdp_alloc_setup_rings(vsi))
2404 		goto clear_xdp_rings;
2405 
2406 	/* follow the logic from ice_vsi_map_rings_to_vectors */
2407 	ice_for_each_q_vector(vsi, v_idx) {
2408 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2409 		int xdp_rings_per_v, q_id, q_base;
2410 
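		/* spread the remaining XDP rings evenly across the remaining vectors */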
2411 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2412 					       vsi->num_q_vectors - v_idx);
2413 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2414 
2415 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2416 			struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
2417 
2418 			xdp_ring->q_vector = q_vector;
2419 			xdp_ring->next = q_vector->tx.ring;
2420 			q_vector->tx.ring = xdp_ring;
2421 		}
2422 		xdp_rings_rem -= xdp_rings_per_v;
2423 	}
2424 
2425 	/* omit the scheduler update if in reset path; XDP queues will be
2426 	 * taken into account at the end of ice_vsi_rebuild, where
2427 	 * ice_cfg_vsi_lan is called
2428 	 */
2429 	if (ice_is_reset_in_progress(pf->state))
2430 		return 0;
2431 
2432 	/* tell the Tx scheduler that right now we have
2433 	 * additional queues
2434 	 */
2435 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2436 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2437 
2438 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2439 				 max_txqs);
2440 	if (status) {
2441 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
2442 			ice_stat_str(status));
2443 		goto clear_xdp_rings;
2444 	}
2445 	ice_vsi_assign_bpf_prog(vsi, prog);
2446 
2447 	return 0;
2448 clear_xdp_rings:
2449 	for (i = 0; i < vsi->num_xdp_txq; i++)
2450 		if (vsi->xdp_rings[i]) {
2451 			kfree_rcu(vsi->xdp_rings[i], rcu);
2452 			vsi->xdp_rings[i] = NULL;
2453 		}
2454 
2455 err_map_xdp:
2456 	mutex_lock(&pf->avail_q_mutex);
2457 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2458 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2459 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2460 	}
2461 	mutex_unlock(&pf->avail_q_mutex);
2462 
2463 	devm_kfree(dev, vsi->xdp_rings);
2464 	return -ENOMEM;
2465 }
2466 
2467 /**
2468  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2469  * @vsi: VSI to remove XDP rings
2470  *
2471  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2472  * resources
2473  */
2474 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2475 {
2476 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2477 	struct ice_pf *pf = vsi->back;
2478 	int i, v_idx;
2479 
2480 	/* q_vectors are freed in the reset path so there's no point in
2481 	 * detaching rings; if the rebuild was triggered outside of a reset,
2482 	 * the bits in pf->state won't be set, so additionally check the
2483 	 * first q_vector against NULL
2484 	 */
2485 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2486 		goto free_qmap;
2487 
2488 	ice_for_each_q_vector(vsi, v_idx) {
2489 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2490 		struct ice_ring *ring;
2491 
2492 		ice_for_each_ring(ring, q_vector->tx)
2493 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2494 				break;
2495 
2496 		/* restore the value of last node prior to XDP setup */
2497 		q_vector->tx.ring = ring;
2498 	}
2499 
2500 free_qmap:
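	/* release the XDP queue IDs back to the PF-wide available-queue bitmap */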
2501 	mutex_lock(&pf->avail_q_mutex);
2502 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2503 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2504 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2505 	}
2506 	mutex_unlock(&pf->avail_q_mutex);
2507 
2508 	for (i = 0; i < vsi->num_xdp_txq; i++)
2509 		if (vsi->xdp_rings[i]) {
2510 			if (vsi->xdp_rings[i]->desc)
2511 				ice_free_tx_ring(vsi->xdp_rings[i]);
2512 			kfree_rcu(vsi->xdp_rings[i], rcu);
2513 			vsi->xdp_rings[i] = NULL;
2514 		}
2515 
2516 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2517 	vsi->xdp_rings = NULL;
2518 
2519 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2520 		return 0;
2521 
2522 	ice_vsi_assign_bpf_prog(vsi, NULL);
2523 
2524 	/* notify Tx scheduler that we destroyed XDP queues and bring
2525 	 * back the old number of child nodes
2526 	 */
2527 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2528 		max_txqs[i] = vsi->num_txq;
2529 
2530 	/* change number of XDP Tx queues to 0 */
2531 	vsi->num_xdp_txq = 0;
2532 
2533 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2534 			       max_txqs);
2535 }
2536 
2537 /**
2538  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2539  * @vsi: VSI to schedule napi on
2540  */
2541 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2542 {
2543 	int i;
2544 
2545 	ice_for_each_rxq(vsi, i) {
2546 		struct ice_ring *rx_ring = vsi->rx_rings[i];
2547 
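		/* only zero-copy (AF_XDP) rings need to be kicked here */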
2548 		if (rx_ring->xsk_pool)
2549 			napi_schedule(&rx_ring->q_vector->napi);
2550 	}
2551 }
2552 
2553 /**
2554  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2555  * @vsi: VSI to setup XDP for
2556  * @prog: XDP program
2557  * @extack: netlink extended ack
2558  */
2559 static int
2560 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2561 		   struct netlink_ext_ack *extack)
2562 {
2563 	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2564 	bool if_running = netif_running(vsi->netdev);
2565 	int ret = 0, xdp_ring_err = 0;
2566 
2567 	if (frame_size > vsi->rx_buf_len) {
2568 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2569 		return -EOPNOTSUPP;
2570 	}
2571 
2572 	/* need to stop netdev while setting up the program for Rx rings */
2573 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2574 		ret = ice_down(vsi);
2575 		if (ret) {
2576 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2577 			return ret;
2578 		}
2579 	}
2580 
2581 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
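		/* allocate one XDP Tx queue per Rx queue */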
2582 		vsi->num_xdp_txq = vsi->alloc_rxq;
2583 		xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2584 		if (xdp_ring_err)
2585 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2586 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2587 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2588 		if (xdp_ring_err)
2589 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2590 	} else {
2591 		ice_vsi_assign_bpf_prog(vsi, prog);
2592 	}
2593 
2594 	if (if_running)
2595 		ret = ice_up(vsi);
2596 
2597 	if (!ret && prog)
2598 		ice_vsi_rx_napi_schedule(vsi);
2599 
2600 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2601 }
2602 
2603 /**
2604  * ice_xdp - implements XDP handler
2605  * @dev: netdevice
2606  * @xdp: XDP command
2607  */
2608 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2609 {
2610 	struct ice_netdev_priv *np = netdev_priv(dev);
2611 	struct ice_vsi *vsi = np->vsi;
2612 
2613 	if (vsi->type != ICE_VSI_PF) {
2614 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2615 		return -EINVAL;
2616 	}
2617 
2618 	switch (xdp->command) {
2619 	case XDP_SETUP_PROG:
2620 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2621 	case XDP_SETUP_XSK_POOL:
2622 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2623 					  xdp->xsk.queue_id);
2624 	default:
2625 		return -EINVAL;
2626 	}
2627 }
2628 
2629 /**
2630  * ice_ena_misc_vector - enable the non-queue interrupts
2631  * @pf: board private structure
2632  */
2633 static void ice_ena_misc_vector(struct ice_pf *pf)
2634 {
2635 	struct ice_hw *hw = &pf->hw;
2636 	u32 val;
2637 
2638 	/* Disable anti-spoof detection interrupt to prevent spurious event
2639 	 * interrupts during a function reset. Anti-spoof functionality is
2640 	 * still supported.
2641 	 */
2642 	val = rd32(hw, GL_MDCK_TX_TDPU);
2643 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2644 	wr32(hw, GL_MDCK_TX_TDPU, val);
2645 
2646 	/* clear things first */
2647 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
2648 	rd32(hw, PFINT_OICR);		/* read to clear */
2649 
2650 	val = (PFINT_OICR_ECC_ERR_M |
2651 	       PFINT_OICR_MAL_DETECT_M |
2652 	       PFINT_OICR_GRST_M |
2653 	       PFINT_OICR_PCI_EXCEPTION_M |
2654 	       PFINT_OICR_VFLR_M |
2655 	       PFINT_OICR_HMC_ERR_M |
2656 	       PFINT_OICR_PE_PUSH_M |
2657 	       PFINT_OICR_PE_CRITERR_M);
2658 
2659 	wr32(hw, PFINT_OICR_ENA, val);
2660 
2661 	/* SW_ITR_IDX = 0, but don't change INTENA */
2662 	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
2663 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2664 }
2665 
2666 /**
2667  * ice_misc_intr - misc interrupt handler
2668  * @irq: interrupt number
2669  * @data: pointer to the PF structure
2670  */
2671 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
2672 {
2673 	struct ice_pf *pf = (struct ice_pf *)data;
2674 	struct ice_hw *hw = &pf->hw;
2675 	irqreturn_t ret = IRQ_NONE;
2676 	struct device *dev;
2677 	u32 oicr, ena_mask;
2678 
2679 	dev = ice_pf_to_dev(pf);
2680 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
2681 	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
2682 
2683 	oicr = rd32(hw, PFINT_OICR);
2684 	ena_mask = rd32(hw, PFINT_OICR_ENA);
2685 
2686 	if (oicr & PFINT_OICR_SWINT_M) {
2687 		ena_mask &= ~PFINT_OICR_SWINT_M;
2688 		pf->sw_int_count++;
2689 	}
2690 
2691 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
2692 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
2693 		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
2694 	}
2695 	if (oicr & PFINT_OICR_VFLR_M) {
2696 		/* disable any further VFLR event notifications */
2697 		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
2698 			u32 reg = rd32(hw, PFINT_OICR_ENA);
2699 
2700 			reg &= ~PFINT_OICR_VFLR_M;
2701 			wr32(hw, PFINT_OICR_ENA, reg);
2702 		} else {
2703 			ena_mask &= ~PFINT_OICR_VFLR_M;
2704 			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
2705 		}
2706 	}
2707 
2708 	if (oicr & PFINT_OICR_GRST_M) {
2709 		u32 reset;
2710 
2711 		/* we have a reset warning */
2712 		ena_mask &= ~PFINT_OICR_GRST_M;
2713 		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
2714 			GLGEN_RSTAT_RESET_TYPE_S;
2715 
2716 		if (reset == ICE_RESET_CORER)
2717 			pf->corer_count++;
2718 		else if (reset == ICE_RESET_GLOBR)
2719 			pf->globr_count++;
2720 		else if (reset == ICE_RESET_EMPR)
2721 			pf->empr_count++;
2722 		else
2723 			dev_dbg(dev, "Invalid reset type %d\n", reset);
2724 
2725 		/* If a reset cycle isn't already in progress, we set a bit in
2726 		 * pf->state so that the service task can start a reset/rebuild.
2727 		 */
2728 		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
2729 			if (reset == ICE_RESET_CORER)
2730 				set_bit(ICE_CORER_RECV, pf->state);
2731 			else if (reset == ICE_RESET_GLOBR)
2732 				set_bit(ICE_GLOBR_RECV, pf->state);
2733 			else
2734 				set_bit(ICE_EMPR_RECV, pf->state);
2735 
2736 			/* There are a couple of different bits at play here.
2737 			 * hw->reset_ongoing indicates whether the hardware is
2738 			 * in reset. This is set to true when a reset interrupt
2739 			 * is received and set back to false after the driver
2740 			 * has determined that the hardware is out of reset.
2741 			 *
2742 			 * ICE_RESET_OICR_RECV in pf->state indicates
2743 			 * that a post reset rebuild is required before the
2744 			 * driver is operational again. This is set above.
2745 			 *
2746 			 * As this is the start of the reset/rebuild cycle, set
2747 			 * both to indicate that.
2748 			 */
2749 			hw->reset_ongoing = true;
2750 		}
2751 	}
2752 
2753 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
2754 	if (oicr & ICE_AUX_CRIT_ERR) {
2755 		struct iidc_event *event;
2756 
2757 		ena_mask &= ~ICE_AUX_CRIT_ERR;
2758 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2759 		if (event) {
2760 			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2761 			/* report the entire OICR value to AUX driver */
2762 			event->reg = oicr;
2763 			ice_send_event_to_aux(pf, event);
2764 			kfree(event);
2765 		}
2766 	}
2767 
2768 	/* Report any remaining unexpected interrupts */
2769 	oicr &= ena_mask;
2770 	if (oicr) {
2771 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
2772 		/* If a critical error is pending there is no choice but to
2773 		 * reset the device.
2774 		 */
2775 		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
2776 			    PFINT_OICR_ECC_ERR_M)) {
2777 			set_bit(ICE_PFR_REQ, pf->state);
2778 			ice_service_task_schedule(pf);
2779 		}
2780 	}
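	/* causes were latched into pf->state above; the service task finishes the work */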
2781 	ret = IRQ_HANDLED;
2782 
2783 	ice_service_task_schedule(pf);
2784 	ice_irq_dynamic_ena(hw, NULL, NULL);
2785 
2786 	return ret;
2787 }
2788 
2789 /**
2790  * ice_dis_ctrlq_interrupts - disable control queue interrupts
2791  * @hw: pointer to HW structure
2792  */
2793 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
2794 {
2795 	/* disable Admin queue Interrupt causes */
2796 	wr32(hw, PFINT_FW_CTL,
2797 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
2798 
2799 	/* disable Mailbox queue Interrupt causes */
2800 	wr32(hw, PFINT_MBX_CTL,
2801 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
2802 
2803 	/* disable Control queue Interrupt causes */
2804 	wr32(hw, PFINT_OICR_CTL,
2805 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
2806 
2807 	ice_flush(hw);
2808 }
2809 
2810 /**
2811  * ice_free_irq_msix_misc - Unroll misc vector setup
2812  * @pf: board private structure
2813  */
2814 static void ice_free_irq_msix_misc(struct ice_pf *pf)
2815 {
2816 	struct ice_hw *hw = &pf->hw;
2817 
2818 	ice_dis_ctrlq_interrupts(hw);
2819 
2820 	/* disable OICR interrupt */
2821 	wr32(hw, PFINT_OICR_ENA, 0);
2822 	ice_flush(hw);
2823 
2824 	if (pf->msix_entries) {
2825 		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
2826 		devm_free_irq(ice_pf_to_dev(pf),
2827 			      pf->msix_entries[pf->oicr_idx].vector, pf);
2828 	}
2829 
2830 	pf->num_avail_sw_msix += 1;
2831 	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2832 }
2833 
2834 /**
2835  * ice_ena_ctrlq_interrupts - enable control queue interrupts
2836  * @hw: pointer to HW structure
2837  * @reg_idx: HW vector index to associate the control queue interrupts with
2838  */
2839 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
2840 {
2841 	u32 val;
2842 
2843 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2844 	       PFINT_OICR_CTL_CAUSE_ENA_M);
2845 	wr32(hw, PFINT_OICR_CTL, val);
2846 
2847 	/* enable Admin queue Interrupt causes */
2848 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2849 	       PFINT_FW_CTL_CAUSE_ENA_M);
2850 	wr32(hw, PFINT_FW_CTL, val);
2851 
2852 	/* enable Mailbox queue Interrupt causes */
2853 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
2854 	       PFINT_MBX_CTL_CAUSE_ENA_M);
2855 	wr32(hw, PFINT_MBX_CTL, val);
2856 
2857 	ice_flush(hw);
2858 }
2859 
2860 /**
2861  * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
2862  * @pf: board private structure
2863  *
2864  * This sets up the handler for MSIX 0, which is used to manage the
2865  * non-queue interrupts, e.g. AdminQ and errors. This is not used
2866  * when in MSI or Legacy interrupt mode.
2867  */
2868 static int ice_req_irq_msix_misc(struct ice_pf *pf)
2869 {
2870 	struct device *dev = ice_pf_to_dev(pf);
2871 	struct ice_hw *hw = &pf->hw;
2872 	int oicr_idx, err = 0;
2873 
2874 	if (!pf->int_name[0])
2875 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
2876 			 dev_driver_string(dev), dev_name(dev));
2877 
2878 	/* Do not request IRQ but do enable OICR interrupt since settings are
2879 	 * lost during reset. Note that this function is called only during
2880 	 * the rebuild path and not while reset is in progress.
2881 	 */
2882 	if (ice_is_reset_in_progress(pf->state))
2883 		goto skip_req_irq;
2884 
2885 	/* reserve one vector in irq_tracker for misc interrupts */
2886 	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2887 	if (oicr_idx < 0)
2888 		return oicr_idx;
2889 
2890 	pf->num_avail_sw_msix -= 1;
2891 	pf->oicr_idx = (u16)oicr_idx;
2892 
2893 	err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
2894 			       ice_misc_intr, 0, pf->int_name, pf);
2895 	if (err) {
2896 		dev_err(dev, "devm_request_irq for %s failed: %d\n",
2897 			pf->int_name, err);
2898 		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2899 		pf->num_avail_sw_msix += 1;
2900 		return err;
2901 	}
2902 
2903 skip_req_irq:
2904 	ice_ena_misc_vector(pf);
2905 
2906 	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
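	/* throttle the misc vector with an 8K ITR to bound its interrupt rate */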
2907 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
2908 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
2909 
2910 	ice_flush(hw);
2911 	ice_irq_dynamic_ena(hw, NULL, NULL);
2912 
2913 	return 0;
2914 }
2915 
2916 /**
2917  * ice_napi_add - register NAPI handler for the VSI
2918  * @vsi: VSI for which NAPI handler is to be registered
2919  *
2920  * This function is only called in the driver's load path. Registering the NAPI
2921  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
2922  * reset/rebuild, etc.)
2923  */
2924 static void ice_napi_add(struct ice_vsi *vsi)
2925 {
2926 	int v_idx;
2927 
2928 	if (!vsi->netdev)
2929 		return;
2930 
2931 	ice_for_each_q_vector(vsi, v_idx)
2932 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
2933 			       ice_napi_poll, NAPI_POLL_WEIGHT);
2934 }
2935 
2936 /**
2937  * ice_set_ops - set netdev and ethtools ops for the given netdev
2938  * @netdev: netdev instance
2939  */
2940 static void ice_set_ops(struct net_device *netdev)
2941 {
2942 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
2943 
2944 	if (ice_is_safe_mode(pf)) {
2945 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
2946 		ice_set_ethtool_safe_mode_ops(netdev);
2947 		return;
2948 	}
2949 
2950 	netdev->netdev_ops = &ice_netdev_ops;
2951 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
2952 	ice_set_ethtool_ops(netdev);
2953 }
2954 
2955 /**
2956  * ice_set_netdev_features - set features for the given netdev
2957  * @netdev: netdev instance
2958  */
2959 static void ice_set_netdev_features(struct net_device *netdev)
2960 {
2961 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
2962 	netdev_features_t csumo_features;
2963 	netdev_features_t vlano_features;
2964 	netdev_features_t dflt_features;
2965 	netdev_features_t tso_features;
2966 
2967 	if (ice_is_safe_mode(pf)) {
2968 		/* safe mode */
2969 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
2970 		netdev->hw_features = netdev->features;
2971 		return;
2972 	}
2973 
2974 	dflt_features = NETIF_F_SG	|
2975 			NETIF_F_HIGHDMA	|
2976 			NETIF_F_NTUPLE	|
2977 			NETIF_F_RXHASH;
2978 
2979 	csumo_features = NETIF_F_RXCSUM	  |
2980 			 NETIF_F_IP_CSUM  |
2981 			 NETIF_F_SCTP_CRC |
2982 			 NETIF_F_IPV6_CSUM;
2983 
2984 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
2985 			 NETIF_F_HW_VLAN_CTAG_TX     |
2986 			 NETIF_F_HW_VLAN_CTAG_RX;
2987 
2988 	tso_features = NETIF_F_TSO			|
2989 		       NETIF_F_TSO_ECN			|
2990 		       NETIF_F_TSO6			|
2991 		       NETIF_F_GSO_GRE			|
2992 		       NETIF_F_GSO_UDP_TUNNEL		|
2993 		       NETIF_F_GSO_GRE_CSUM		|
2994 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
2995 		       NETIF_F_GSO_PARTIAL		|
2996 		       NETIF_F_GSO_IPXIP4		|
2997 		       NETIF_F_GSO_IPXIP6		|
2998 		       NETIF_F_GSO_UDP_L4;
2999 
3000 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3001 					NETIF_F_GSO_GRE_CSUM;
3002 	/* set features that user can change */
3003 	netdev->hw_features = dflt_features | csumo_features |
3004 			      vlano_features | tso_features;
3005 
3006 	/* add support for HW_CSUM on packets with MPLS header */
3007 	netdev->mpls_features = NETIF_F_HW_CSUM;
3008 
3009 	/* enable features */
3010 	netdev->features |= netdev->hw_features;
3011 	/* encap and VLAN devices inherit default, csumo and tso features */
3012 	netdev->hw_enc_features |= dflt_features | csumo_features |
3013 				   tso_features;
3014 	netdev->vlan_features |= dflt_features | csumo_features |
3015 				 tso_features;
3016 }
3017 
3018 /**
3019  * ice_cfg_netdev - Allocate, configure and register a netdev
3020  * @vsi: the VSI associated with the new netdev
3021  *
3022  * Returns 0 on success, negative value on failure
3023  */
3024 static int ice_cfg_netdev(struct ice_vsi *vsi)
3025 {
3026 	struct ice_pf *pf = vsi->back;
3027 	struct ice_netdev_priv *np;
3028 	struct net_device *netdev;
3029 	u8 mac_addr[ETH_ALEN];
3030 
3031 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
3032 				    vsi->alloc_rxq);
3033 	if (!netdev)
3034 		return -ENOMEM;
3035 
3036 	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3037 	vsi->netdev = netdev;
3038 	np = netdev_priv(netdev);
3039 	np->vsi = vsi;
3040 
3041 	ice_set_netdev_features(netdev);
3042 
3043 	ice_set_ops(netdev);
3044 
3045 	if (vsi->type == ICE_VSI_PF) {
3046 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(pf));
3047 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3048 		ether_addr_copy(netdev->dev_addr, mac_addr);
3049 		ether_addr_copy(netdev->perm_addr, mac_addr);
3050 	}
3051 
3052 	netdev->priv_flags |= IFF_UNICAST_FLT;
3053 
3054 	/* Setup netdev TC information */
3055 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3056 
3057 	/* set up the watchdog timeout value to be 5 seconds */
3058 	netdev->watchdog_timeo = 5 * HZ;
3059 
3060 	netdev->min_mtu = ETH_MIN_MTU;
3061 	netdev->max_mtu = ICE_MAX_MTU;
3062 
3063 	return 0;
3064 }
3065 
3066 /**
3067  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3068  * @lut: Lookup table
3069  * @rss_table_size: Lookup table size
3070  * @rss_size: Range of queue number for hashing
3071  */
3072 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3073 {
3074 	u16 i;
3075 
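	/* assign queue indices to the table entries round-robin */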
3076 	for (i = 0; i < rss_table_size; i++)
3077 		lut[i] = i % rss_size;
3078 }
3079 
3080 /**
3081  * ice_pf_vsi_setup - Set up a PF VSI
3082  * @pf: board private structure
3083  * @pi: pointer to the port_info instance
3084  *
3085  * Returns pointer to the successfully allocated VSI software struct
3086  * on success, otherwise returns NULL on failure.
3087  */
3088 static struct ice_vsi *
3089 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3090 {
3091 	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
3092 }
3093 
3094 /**
3095  * ice_ctrl_vsi_setup - Set up a control VSI
3096  * @pf: board private structure
3097  * @pi: pointer to the port_info instance
3098  *
3099  * Returns pointer to the successfully allocated VSI software struct
3100  * on success, otherwise returns NULL on failure.
3101  */
3102 static struct ice_vsi *
3103 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3104 {
3105 	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
3106 }
3107 
3108 /**
3109  * ice_lb_vsi_setup - Set up a loopback VSI
3110  * @pf: board private structure
3111  * @pi: pointer to the port_info instance
3112  *
3113  * Returns pointer to the successfully allocated VSI software struct
3114  * on success, otherwise returns NULL on failure.
3115  */
3116 struct ice_vsi *
3117 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3118 {
3119 	return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
3120 }
3121 
3122 /**
3123  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3124  * @netdev: network interface to be adjusted
3125  * @proto: unused protocol
3126  * @vid: VLAN ID to be added
3127  *
3128  * net_device_ops implementation for adding VLAN IDs
3129  */
3130 static int
3131 ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
3132 		    u16 vid)
3133 {
3134 	struct ice_netdev_priv *np = netdev_priv(netdev);
3135 	struct ice_vsi *vsi = np->vsi;
3136 	int ret;
3137 
3138 	/* VLAN 0 is added by default during load/reset */
3139 	if (!vid)
3140 		return 0;
3141 
3142 	/* Enable VLAN pruning when a VLAN other than 0 is added */
3143 	if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3144 		ret = ice_cfg_vlan_pruning(vsi, true, false);
3145 		if (ret)
3146 			return ret;
3147 	}
3148 
3149 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3150 	 * packets aren't pruned by the device's internal switch on Rx
3151 	 */
3152 	ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3153 	if (!ret)
3154 		set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3155 
3156 	return ret;
3157 }
3158 
3159 /**
3160  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3161  * @netdev: network interface to be adjusted
3162  * @proto: unused protocol
3163  * @vid: VLAN ID to be removed
3164  *
3165  * net_device_ops implementation for removing VLAN IDs
3166  */
3167 static int
3168 ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
3169 		     u16 vid)
3170 {
3171 	struct ice_netdev_priv *np = netdev_priv(netdev);
3172 	struct ice_vsi *vsi = np->vsi;
3173 	int ret;
3174 
3175 	/* don't allow removal of VLAN 0 */
3176 	if (!vid)
3177 		return 0;
3178 
3179 	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
3180 	 * information
3181 	 */
3182 	ret = ice_vsi_kill_vlan(vsi, vid);
3183 	if (ret)
3184 		return ret;
3185 
3186 	/* Disable pruning when VLAN 0 is the only VLAN rule */
3187 	if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3188 		ret = ice_cfg_vlan_pruning(vsi, false, false);
3189 
3190 	set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3191 	return ret;
3192 }
3193 
3194 /**
3195  * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3196  * @pf: board private structure
3197  *
3198  * Returns 0 on success, negative value on failure
3199  */
3200 static int ice_setup_pf_sw(struct ice_pf *pf)
3201 {
3202 	struct ice_vsi *vsi;
3203 	int status = 0;
3204 
3205 	if (ice_is_reset_in_progress(pf->state))
3206 		return -EBUSY;
3207 
3208 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3209 	if (!vsi)
3210 		return -ENOMEM;
3211 
3212 	status = ice_cfg_netdev(vsi);
3213 	if (status) {
3214 		status = -ENODEV;
3215 		goto unroll_vsi_setup;
3216 	}
3217 	/* netdev has to be configured before setting frame size */
3218 	ice_vsi_cfg_frame_size(vsi);
3219 
3220 	/* Setup DCB netlink interface */
3221 	ice_dcbnl_setup(vsi);
3222 
3223 	/* registering the NAPI handler requires both the queues and
3224 	 * netdev to be created, which are done in ice_pf_vsi_setup()
3225 	 * and ice_cfg_netdev() respectively
3226 	 */
3227 	ice_napi_add(vsi);
3228 
3229 	status = ice_set_cpu_rx_rmap(vsi);
3230 	if (status) {
3231 		dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
3232 			vsi->vsi_num, status);
3233 		status = -EINVAL;
3234 		goto unroll_napi_add;
3235 	}
3236 	status = ice_init_mac_fltr(pf);
3237 	if (status)
3238 		goto free_cpu_rx_map;
3239 
3240 	return status;
3241 
3242 free_cpu_rx_map:
3243 	ice_free_cpu_rx_rmap(vsi);
3244 
3245 unroll_napi_add:
3246 	if (vsi) {
3247 		ice_napi_del(vsi);
3248 		if (vsi->netdev) {
3249 			clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3250 			free_netdev(vsi->netdev);
3251 			vsi->netdev = NULL;
3252 		}
3253 	}
3254 
3255 unroll_vsi_setup:
3256 	ice_vsi_release(vsi);
3257 	return status;
3258 }
3259 
3260 /**
3261  * ice_get_avail_q_count - Get count of available queues
3262  * @pf_qmap: bitmap to get queue use count from
3263  * @lock: pointer to a mutex that protects access to pf_qmap
3264  * @size: size of the bitmap
3265  */
3266 static u16
3267 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3268 {
3269 	unsigned long bit;
3270 	u16 count = 0;
3271 
3272 	mutex_lock(lock);
3273 	for_each_clear_bit(bit, pf_qmap, size)
3274 		count++;
3275 	mutex_unlock(lock);
3276 
3277 	return count;
3278 }
3279 
3280 /**
3281  * ice_get_avail_txq_count - Get count of available Tx queues
3282  * @pf: pointer to an ice_pf instance
3283  */
3284 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3285 {
3286 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3287 				     pf->max_pf_txqs);
3288 }
3289 
3290 /**
3291  * ice_get_avail_rxq_count - Get count of available Rx queues
3292  * @pf: pointer to an ice_pf instance
3293  */
3294 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3295 {
3296 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3297 				     pf->max_pf_rxqs);
3298 }
3299 
3300 /**
3301  * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3302  * @pf: board private structure to initialize
3303  */
3304 static void ice_deinit_pf(struct ice_pf *pf)
3305 {
3306 	ice_service_task_stop(pf);
3307 	mutex_destroy(&pf->sw_mutex);
3308 	mutex_destroy(&pf->tc_mutex);
3309 	mutex_destroy(&pf->avail_q_mutex);
3310 
3311 	if (pf->avail_txqs) {
3312 		bitmap_free(pf->avail_txqs);
3313 		pf->avail_txqs = NULL;
3314 	}
3315 
3316 	if (pf->avail_rxqs) {
3317 		bitmap_free(pf->avail_rxqs);
3318 		pf->avail_rxqs = NULL;
3319 	}
3320 }
3321 
3322 /**
3323  * ice_set_pf_caps - set PFs capability flags
3324  * @pf: pointer to the PF instance
3325  */
3326 static void ice_set_pf_caps(struct ice_pf *pf)
3327 {
3328 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3329 
3330 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3331 	clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
3332 	if (func_caps->common_cap.rdma) {
3333 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3334 		set_bit(ICE_FLAG_AUX_ENA, pf->flags);
3335 	}
3336 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3337 	if (func_caps->common_cap.dcb)
3338 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3339 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3340 	if (func_caps->common_cap.sr_iov_1_1) {
3341 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3342 		pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
3343 					      ICE_MAX_VF_COUNT);
3344 	}
3345 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3346 	if (func_caps->common_cap.rss_table_size)
3347 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3348 
3349 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3350 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3351 		u16 unused;
3352 
3353 		/* ctrl_vsi_idx will be set to a valid value when flow director
3354 		 * is set up by ice_init_fdir
3355 		 */
3356 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3357 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3358 		/* force guaranteed filter pool for PF */
3359 		ice_alloc_fd_guar_item(&pf->hw, &unused,
3360 				       func_caps->fd_fltr_guar);
3361 		/* force shared filter pool for PF */
3362 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3363 				       func_caps->fd_fltr_best_effort);
3364 	}
3365 
3366 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3367 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3368 }
3369 
3370 /**
3371  * ice_init_pf - Initialize general software structures (struct ice_pf)
3372  * @pf: board private structure to initialize
3373  */
3374 static int ice_init_pf(struct ice_pf *pf)
3375 {
3376 	ice_set_pf_caps(pf);
3377 
3378 	mutex_init(&pf->sw_mutex);
3379 	mutex_init(&pf->tc_mutex);
3380 
3381 	INIT_HLIST_HEAD(&pf->aq_wait_list);
3382 	spin_lock_init(&pf->aq_wait_lock);
3383 	init_waitqueue_head(&pf->aq_wait_queue);
3384 
3385 	init_waitqueue_head(&pf->reset_wait_queue);
3386 
3387 	/* setup service timer and periodic service task */
3388 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3389 	pf->serv_tmr_period = HZ;
3390 	INIT_WORK(&pf->serv_task, ice_service_task);
3391 	clear_bit(ICE_SERVICE_SCHED, pf->state);
3392 
3393 	mutex_init(&pf->avail_q_mutex);
3394 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3395 	if (!pf->avail_txqs)
3396 		return -ENOMEM;
3397 
3398 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3399 	if (!pf->avail_rxqs) {
		bitmap_free(pf->avail_txqs);
3401 		pf->avail_txqs = NULL;
3402 		return -ENOMEM;
3403 	}
3404 
3405 	return 0;
3406 }
3407 
3408 /**
3409  * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3410  * @pf: board private structure
3411  *
3412  * Compute the number of MSIX vectors required (v_budget) and request that
3413  * many from the OS. Return the number of vectors reserved, or negative on failure
3414  */
3415 static int ice_ena_msix_range(struct ice_pf *pf)
3416 {
3417 	int num_cpus, v_left, v_actual, v_other, v_budget = 0;
3418 	struct device *dev = ice_pf_to_dev(pf);
3419 	int needed, err, i;
3420 
3421 	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3422 	num_cpus = num_online_cpus();
3423 
3424 	/* reserve for LAN miscellaneous handler */
3425 	needed = ICE_MIN_LAN_OICR_MSIX;
3426 	if (v_left < needed)
3427 		goto no_hw_vecs_left_err;
3428 	v_budget += needed;
3429 	v_left -= needed;
3430 
3431 	/* reserve for flow director */
3432 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3433 		needed = ICE_FDIR_MSIX;
3434 		if (v_left < needed)
3435 			goto no_hw_vecs_left_err;
3436 		v_budget += needed;
3437 		v_left -= needed;
3438 	}
3439 
3440 	/* total used for non-traffic vectors */
3441 	v_other = v_budget;
3442 
3443 	/* reserve vectors for LAN traffic */
3444 	needed = num_cpus;
3445 	if (v_left < needed)
3446 		goto no_hw_vecs_left_err;
3447 	pf->num_lan_msix = needed;
3448 	v_budget += needed;
3449 	v_left -= needed;
3450 
3451 	/* reserve vectors for RDMA auxiliary driver */
3452 	if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3453 		needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
3454 		if (v_left < needed)
3455 			goto no_hw_vecs_left_err;
3456 		pf->num_rdma_msix = needed;
3457 		v_budget += needed;
3458 		v_left -= needed;
3459 	}
3460 
3461 	pf->msix_entries = devm_kcalloc(dev, v_budget,
3462 					sizeof(*pf->msix_entries), GFP_KERNEL);
3463 	if (!pf->msix_entries) {
3464 		err = -ENOMEM;
3465 		goto exit_err;
3466 	}
3467 
3468 	for (i = 0; i < v_budget; i++)
3469 		pf->msix_entries[i].entry = i;
3470 
3471 	/* actually reserve the vectors */
3472 	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3473 					 ICE_MIN_MSIX, v_budget);
3474 	if (v_actual < 0) {
3475 		dev_err(dev, "unable to reserve MSI-X vectors\n");
3476 		err = v_actual;
3477 		goto msix_err;
3478 	}
3479 
3480 	if (v_actual < v_budget) {
3481 		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3482 			 v_budget, v_actual);
3483 
3484 		if (v_actual < ICE_MIN_MSIX) {
3485 			/* error if we can't get minimum vectors */
3486 			pci_disable_msix(pf->pdev);
3487 			err = -ERANGE;
3488 			goto msix_err;
3489 		} else {
3490 			int v_remain = v_actual - v_other;
3491 			int v_rdma = 0, v_min_rdma = 0;
3492 
3493 			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3494 				/* Need at least 1 interrupt in addition to
3495 				 * AEQ MSIX
3496 				 */
3497 				v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
3498 				v_min_rdma = ICE_MIN_RDMA_MSIX;
3499 			}
3500 
3501 			if (v_actual == ICE_MIN_MSIX ||
3502 			    v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
3503 				dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
3504 				clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3505 
3506 				pf->num_rdma_msix = 0;
3507 				pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3508 			} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
3509 				   (v_remain - v_rdma < v_rdma)) {
3510 				/* Support minimum RDMA and give remaining
3511 				 * vectors to LAN MSIX
3512 				 */
3513 				pf->num_rdma_msix = v_min_rdma;
3514 				pf->num_lan_msix = v_remain - v_min_rdma;
3515 			} else {
3516 				/* Split remaining MSIX with RDMA after
3517 				 * accounting for AEQ MSIX
3518 				 */
3519 				pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
3520 						    ICE_RDMA_NUM_AEQ_MSIX;
3521 				pf->num_lan_msix = v_remain - pf->num_rdma_msix;
3522 			}
3523 
3524 			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
3525 				   pf->num_lan_msix);
3526 
3527 			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
3528 				dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
3529 					   pf->num_rdma_msix);
3530 		}
3531 	}
3532 
3533 	return v_actual;
3534 
3535 msix_err:
3536 	devm_kfree(dev, pf->msix_entries);
3537 	goto exit_err;
3538 
3539 no_hw_vecs_left_err:
3540 	dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3541 		needed, v_left);
3542 	err = -ERANGE;
3543 exit_err:
3544 	pf->num_rdma_msix = 0;
3545 	pf->num_lan_msix = 0;
3546 	return err;
3547 }
3548 
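/* Worked example of the degraded-allocation split above (illustrative only,
 * not part of the driver, assuming ICE_RDMA_NUM_AEQ_MSIX is 4): suppose the
 * OS grants v_actual = 20 vectors while v_other = 2, so v_remain = 18. The
 * final else branch then assigns num_rdma_msix = (18 - 4) / 2 + 4 = 11 and
 * num_lan_msix = 18 - 11 = 7. A minimal sketch of that split as a standalone
 * helper (the ice_example_* name is hypothetical):
 */
static void __maybe_unused
ice_example_split_msix(int v_remain, u16 *lan, u16 *rdma)
{
	/* keep the AEQ vectors whole, then split the remainder evenly */
	*rdma = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + ICE_RDMA_NUM_AEQ_MSIX;
	*lan = v_remain - *rdma;
}
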
3549 /**
3550  * ice_dis_msix - Disable MSI-X interrupt setup in OS
3551  * @pf: board private structure
3552  */
3553 static void ice_dis_msix(struct ice_pf *pf)
3554 {
3555 	pci_disable_msix(pf->pdev);
3556 	devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3557 	pf->msix_entries = NULL;
3558 }
3559 
3560 /**
3561  * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3562  * @pf: board private structure
3563  */
3564 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3565 {
3566 	ice_dis_msix(pf);
3567 
3568 	if (pf->irq_tracker) {
3569 		devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3570 		pf->irq_tracker = NULL;
3571 	}
3572 }
3573 
3574 /**
3575  * ice_init_interrupt_scheme - Determine proper interrupt scheme
3576  * @pf: board private structure to initialize
3577  */
3578 static int ice_init_interrupt_scheme(struct ice_pf *pf)
3579 {
3580 	int vectors;
3581 
3582 	vectors = ice_ena_msix_range(pf);
3583 
3584 	if (vectors < 0)
3585 		return vectors;
3586 
3587 	/* set up vector assignment tracking */
3588 	pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
3589 				       struct_size(pf->irq_tracker, list, vectors),
3590 				       GFP_KERNEL);
3591 	if (!pf->irq_tracker) {
3592 		ice_dis_msix(pf);
3593 		return -ENOMEM;
3594 	}
3595 
3596 	/* populate the SW interrupt pool with the number of OS-granted IRQs */
3597 	pf->num_avail_sw_msix = (u16)vectors;
3598 	pf->irq_tracker->num_entries = (u16)vectors;
3599 	pf->irq_tracker->end = pf->irq_tracker->num_entries;
3600 
3601 	return 0;
3602 }
3603 
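/* The irq_tracker allocation above sizes a structure that ends in a flexible
 * array member via struct_size(), which computes
 * sizeof(*pf->irq_tracker) + vectors * sizeof(pf->irq_tracker->list[0]) with
 * overflow checking (it saturates to SIZE_MAX, so a huge "vectors" makes the
 * allocation fail cleanly instead of coming back undersized). A minimal
 * sketch of the same pattern with a hypothetical type:
 *
 *	struct ice_example_tracker {
 *		u16 num_entries;
 *		u16 list[];
 *	};
 *
 *	t = devm_kzalloc(dev, struct_size(t, list, n), GFP_KERNEL);
 */
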
3604 /**
3605  * ice_is_wol_supported - check if WoL is supported
3606  * @hw: pointer to hardware info
3607  *
3608  * Check if WoL is supported based on the HW configuration.
3609  * Returns true if NVM supports and enables WoL for this port, false otherwise
3610  */
3611 bool ice_is_wol_supported(struct ice_hw *hw)
3612 {
3613 	u16 wol_ctrl;
3614 
3615 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3616 	 * word) indicates WoL is not supported on the corresponding PF ID.
3617 	 */
3618 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3619 		return false;
3620 
3621 	return !(BIT(hw->port_info->lport) & wol_ctrl);
3622 }
3623 
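/* Worked example for the check above (illustrative only): if the NVM WoL
 * control word reads wol_ctrl = 0x0005 (bits 0 and 2 set), WoL is disabled
 * for logical ports 0 and 2 and supported on the others, since a set bit
 * marks the corresponding port as not supporting WoL:
 *
 *	BIT(0) & 0x0005 != 0  ->  ice_is_wol_supported() returns false
 *	BIT(1) & 0x0005 == 0  ->  ice_is_wol_supported() returns true
 */
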
3624 /**
3625  * ice_vsi_recfg_qs - Change the number of queues on a VSI
3626  * @vsi: VSI being changed
3627  * @new_rx: new number of Rx queues
3628  * @new_tx: new number of Tx queues
3629  *
3630  * Only change the number of queues if new_tx or new_rx is non-zero.
3631  *
3632  * Returns 0 on success.
3633  */
3634 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
3635 {
3636 	struct ice_pf *pf = vsi->back;
3637 	int err = 0, timeout = 50;
3638 
3639 	if (!new_rx && !new_tx)
3640 		return -EINVAL;
3641 
3642 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3643 		timeout--;
3644 		if (!timeout)
3645 			return -EBUSY;
3646 		usleep_range(1000, 2000);
3647 	}
3648 
3649 	if (new_tx)
3650 		vsi->req_txq = (u16)new_tx;
3651 	if (new_rx)
3652 		vsi->req_rxq = (u16)new_rx;
3653 
3654 	/* set for the next time the netdev is started */
3655 	if (!netif_running(vsi->netdev)) {
3656 		ice_vsi_rebuild(vsi, false);
3657 		dev_dbg(ice_pf_to_dev(pf), "Interface is down, queue count change happens when the interface is brought up\n");
3658 		goto done;
3659 	}
3660 
3661 	ice_vsi_close(vsi);
3662 	ice_vsi_rebuild(vsi, false);
3663 	ice_pf_dcb_recfg(pf);
3664 	ice_vsi_open(vsi);
3665 done:
3666 	clear_bit(ICE_CFG_BUSY, pf->state);
3667 	return err;
3668 }
3669 
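/* The ICE_CFG_BUSY loop above is a bounded try-then-sleep lock on a state
 * bit, serializing this reconfiguration against other configuration paths.
 * A minimal sketch of the same guard as a standalone helper (the
 * ice_example_* name is hypothetical):
 */
static bool __maybe_unused ice_example_acquire_cfg_lock(struct ice_pf *pf)
{
	int timeout = 50;

	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
		if (!--timeout)
			return false;	/* caller should return -EBUSY */
		usleep_range(1000, 2000);
	}
	/* caller must clear_bit(ICE_CFG_BUSY, pf->state) when done */
	return true;
}
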
3670 /**
3671  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3672  * @pf: PF to configure
3673  *
3674  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3675  * VSI can still Tx/Rx VLAN tagged packets.
3676  */
3677 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
3678 {
3679 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
3680 	struct ice_vsi_ctx *ctxt;
3681 	enum ice_status status;
3682 	struct ice_hw *hw;
3683 
3684 	if (!vsi)
3685 		return;
3686 
3687 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
3688 	if (!ctxt)
3689 		return;
3690 
3691 	hw = &pf->hw;
3692 	ctxt->info = vsi->info;
3693 
3694 	ctxt->info.valid_sections =
3695 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
3696 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
3697 			    ICE_AQ_VSI_PROP_SW_VALID);
3698 
3699 	/* disable VLAN anti-spoof */
3700 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3701 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
3702 
3703 	/* disable VLAN pruning and keep all other settings */
3704 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3705 
3706 	/* allow all VLANs on Tx and don't strip on Rx */
3707 	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
3708 		ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3709 
3710 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
3711 	if (status) {
3712 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
3713 			ice_stat_str(status),
3714 			ice_aq_str(hw->adminq.sq_last_status));
3715 	} else {
3716 		vsi->info.sec_flags = ctxt->info.sec_flags;
3717 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
3718 		vsi->info.vlan_flags = ctxt->info.vlan_flags;
3719 	}
3720 
3721 	kfree(ctxt);
3722 }
3723 
3724 /**
3725  * ice_log_pkg_init - log result of DDP package load
3726  * @hw: pointer to hardware info
3727  * @status: status of package load
3728  */
3729 static void
3730 ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
3731 {
3732 	struct ice_pf *pf = (struct ice_pf *)hw->back;
3733 	struct device *dev = ice_pf_to_dev(pf);
3734 
3735 	switch (*status) {
3736 	case ICE_SUCCESS:
3737 		/* The package download AdminQ command returned success either
3738 		 * because this download succeeded, or with ICE_ERR_AQ_NO_WORK
3739 		 * because there is already a package loaded on the device.
3740 		 */
3741 		if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
3742 		    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
3743 		    hw->pkg_ver.update == hw->active_pkg_ver.update &&
3744 		    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
3745 		    !memcmp(hw->pkg_name, hw->active_pkg_name,
3746 			    sizeof(hw->pkg_name))) {
3747 			if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
3748 				dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
3749 					 hw->active_pkg_name,
3750 					 hw->active_pkg_ver.major,
3751 					 hw->active_pkg_ver.minor,
3752 					 hw->active_pkg_ver.update,
3753 					 hw->active_pkg_ver.draft);
3754 			else
3755 				dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
3756 					 hw->active_pkg_name,
3757 					 hw->active_pkg_ver.major,
3758 					 hw->active_pkg_ver.minor,
3759 					 hw->active_pkg_ver.update,
3760 					 hw->active_pkg_ver.draft);
3761 		} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
3762 			   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
3763 			dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
3764 				hw->active_pkg_name,
3765 				hw->active_pkg_ver.major,
3766 				hw->active_pkg_ver.minor,
3767 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3768 			*status = ICE_ERR_NOT_SUPPORTED;
3769 		} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3770 			   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
3771 			dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
3772 				 hw->active_pkg_name,
3773 				 hw->active_pkg_ver.major,
3774 				 hw->active_pkg_ver.minor,
3775 				 hw->active_pkg_ver.update,
3776 				 hw->active_pkg_ver.draft,
3777 				 hw->pkg_name,
3778 				 hw->pkg_ver.major,
3779 				 hw->pkg_ver.minor,
3780 				 hw->pkg_ver.update,
3781 				 hw->pkg_ver.draft);
3782 		} else {
3783 			dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system.  If the problem persists, update the NVM.  Entering Safe Mode.\n");
3784 			*status = ICE_ERR_NOT_SUPPORTED;
3785 		}
3786 		break;
3787 	case ICE_ERR_FW_DDP_MISMATCH:
3788 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
3789 		break;
3790 	case ICE_ERR_BUF_TOO_SHORT:
3791 	case ICE_ERR_CFG:
3792 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
3793 		break;
3794 	case ICE_ERR_NOT_SUPPORTED:
3795 		/* Package File version not supported */
3796 		if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
3797 		    (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3798 		     hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
3799 			dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
3800 		else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
3801 			 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3802 			  hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
3803 			dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
3804 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3805 		break;
3806 	case ICE_ERR_AQ_ERROR:
3807 		switch (hw->pkg_dwnld_status) {
3808 		case ICE_AQ_RC_ENOSEC:
3809 		case ICE_AQ_RC_EBADSIG:
3810 			dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
3811 			return;
3812 		case ICE_AQ_RC_ESVN:
3813 			dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
3814 			return;
3815 		case ICE_AQ_RC_EBADMAN:
3816 		case ICE_AQ_RC_EBADBUF:
3817 			dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
3818 			/* poll for reset to complete */
3819 			if (ice_check_reset(hw))
3820 				dev_err(dev, "Error resetting device. Please reload the driver\n");
3821 			return;
3822 		default:
3823 			break;
3824 		}
3825 		fallthrough;
3826 	default:
3827 		dev_err(dev, "An unknown error (%d) occurred when loading the DDP package.  Entering Safe Mode.\n",
3828 			*status);
3829 		break;
3830 	}
3831 }
3832 
3833 /**
3834  * ice_load_pkg - load/reload the DDP Package file
3835  * @firmware: firmware structure when firmware was requested, or NULL for reload
3836  * @pf: pointer to the PF instance
3837  *
3838  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
3839  * initialize HW tables.
3840  */
3841 static void
3842 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
3843 {
3844 	enum ice_status status = ICE_ERR_PARAM;
3845 	struct device *dev = ice_pf_to_dev(pf);
3846 	struct ice_hw *hw = &pf->hw;
3847 
3848 	/* Load DDP Package */
3849 	if (firmware && !hw->pkg_copy) {
3850 		status = ice_copy_and_init_pkg(hw, firmware->data,
3851 					       firmware->size);
3852 		ice_log_pkg_init(hw, &status);
3853 	} else if (!firmware && hw->pkg_copy) {
3854 		/* Reload package during rebuild after CORER/GLOBR reset */
3855 		status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
3856 		ice_log_pkg_init(hw, &status);
3857 	} else {
3858 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
3859 	}
3860 
3861 	if (status) {
3862 		/* Safe Mode */
3863 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3864 		return;
3865 	}
3866 
3867 	/* A successful package download is the precondition for advanced
3868 	 * features, hence set the ICE_FLAG_ADV_FEATURES flag
3869 	 */
3870 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3871 }
3872 
3873 /**
3874  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
3875  * @pf: pointer to the PF structure
3876  *
3877  * There is no error returned here because the driver should be able to handle
3878  * 128 Byte cache lines, so we only print a warning in case issues are seen,
3879  * specifically with Tx.
3880  */
3881 static void ice_verify_cacheline_size(struct ice_pf *pf)
3882 {
3883 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
3884 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
3885 			 ICE_CACHE_LINE_BYTES);
3886 }
3887 
3888 /**
3889  * ice_send_version - update firmware with driver version
3890  * @pf: PF struct
3891  *
3892  * Returns ICE_SUCCESS on success, else error code
3893  */
3894 static enum ice_status ice_send_version(struct ice_pf *pf)
3895 {
3896 	struct ice_driver_ver dv;
3897 
3898 	dv.major_ver = 0xff;
3899 	dv.minor_ver = 0xff;
3900 	dv.build_ver = 0xff;
3901 	dv.subbuild_ver = 0;
3902 	strscpy((char *)dv.driver_string, UTS_RELEASE,
3903 		sizeof(dv.driver_string));
3904 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
3905 }
3906 
3907 /**
3908  * ice_init_fdir - Initialize flow director VSI and configuration
3909  * @pf: pointer to the PF instance
3910  *
3911  * Returns 0 on success, negative on error
3912  */
3913 static int ice_init_fdir(struct ice_pf *pf)
3914 {
3915 	struct device *dev = ice_pf_to_dev(pf);
3916 	struct ice_vsi *ctrl_vsi;
3917 	int err;
3918 
3919 	/* Side Band Flow Director needs to have a control VSI.
3920 	 * Allocate it and store it in the PF.
3921 	 */
3922 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
3923 	if (!ctrl_vsi) {
3924 		dev_dbg(dev, "could not create control VSI\n");
3925 		return -ENOMEM;
3926 	}
3927 
3928 	err = ice_vsi_open_ctrl(ctrl_vsi);
3929 	if (err) {
3930 		dev_dbg(dev, "could not open control VSI\n");
3931 		goto err_vsi_open;
3932 	}
3933 
3934 	mutex_init(&pf->hw.fdir_fltr_lock);
3935 
3936 	err = ice_fdir_create_dflt_rules(pf);
3937 	if (err)
3938 		goto err_fdir_rule;
3939 
3940 	return 0;
3941 
3942 err_fdir_rule:
3943 	ice_fdir_release_flows(&pf->hw);
3944 	ice_vsi_close(ctrl_vsi);
3945 err_vsi_open:
3946 	ice_vsi_release(ctrl_vsi);
3947 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
3948 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
3949 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3950 	}
3951 	return err;
3952 }
3953 
3954 /**
3955  * ice_get_opt_fw_name - return optional firmware file name or NULL
3956  * @pf: pointer to the PF instance
3957  */
3958 static char *ice_get_opt_fw_name(struct ice_pf *pf)
3959 {
3960 	/* Optional firmware name is the same as the default, with an extra
3961 	 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
3962 	 */
3963 	struct pci_dev *pdev = pf->pdev;
3964 	char *opt_fw_filename;
3965 	u64 dsn;
3966 
3967 	/* Determine the name of the optional file using the DSN (two
3968 	 * dwords following the start of the DSN Capability).
3969 	 */
3970 	dsn = pci_get_dsn(pdev);
3971 	if (!dsn)
3972 		return NULL;
3973 
3974 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
3975 	if (!opt_fw_filename)
3976 		return NULL;
3977 
3978 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
3979 		 ICE_DDP_PKG_PATH, dsn);
3980 
3981 	return opt_fw_filename;
3982 }
3983 
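/* Example (illustrative): a device whose DSN reads 0x0011223344556677 makes
 * ice_get_opt_fw_name() return
 *
 *	"intel/ice/ddp/ice-0011223344556677.pkg"
 *
 * which ice_request_fw() below tries first, before falling back to the
 * generic ICE_DDP_PKG_FILE ("intel/ice/ddp/ice.pkg").
 */
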
3984 /**
3985  * ice_request_fw - Device initialization routine
3986  * @pf: pointer to the PF instance
3987  */
3988 static void ice_request_fw(struct ice_pf *pf)
3989 {
3990 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
3991 	const struct firmware *firmware = NULL;
3992 	struct device *dev = ice_pf_to_dev(pf);
3993 	int err = 0;
3994 
3995 	/* The optional device-specific DDP package (if present) overrides the
3996 	 * default DDP package file. The kernel logs a debug message if the file
3997 	 * doesn't exist, and warning messages for other errors.
3998 	 */
3999 	if (opt_fw_filename) {
4000 		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4001 		if (err) {
4002 			kfree(opt_fw_filename);
4003 			goto dflt_pkg_load;
4004 		}
4005 
4006 		/* request for firmware was successful. Download to device */
4007 		ice_load_pkg(firmware, pf);
4008 		kfree(opt_fw_filename);
4009 		release_firmware(firmware);
4010 		return;
4011 	}
4012 
4013 dflt_pkg_load:
4014 	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4015 	if (err) {
4016 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4017 		return;
4018 	}
4019 
4020 	/* request for firmware was successful. Download to device */
4021 	ice_load_pkg(firmware, pf);
4022 	release_firmware(firmware);
4023 }
4024 
4025 /**
4026  * ice_print_wake_reason - show the wake-up cause in the log
4027  * @pf: pointer to the PF struct
4028  */
4029 static void ice_print_wake_reason(struct ice_pf *pf)
4030 {
4031 	u32 wus = pf->wakeup_reason;
4032 	const char *wake_str;
4033 
4034 	/* if no wake event, nothing to print */
4035 	if (!wus)
4036 		return;
4037 
4038 	if (wus & PFPM_WUS_LNKC_M)
4039 		wake_str = "Link\n";
4040 	else if (wus & PFPM_WUS_MAG_M)
4041 		wake_str = "Magic Packet\n";
4042 	else if (wus & PFPM_WUS_MNG_M)
4043 		wake_str = "Management\n";
4044 	else if (wus & PFPM_WUS_FW_RST_WK_M)
4045 		wake_str = "Firmware Reset\n";
4046 	else
4047 		wake_str = "Unknown\n";
4048 
4049 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4050 }
4051 
4052 /**
4053  * ice_register_netdev - register netdev and devlink port
4054  * @pf: pointer to the PF struct
4055  */
4056 static int ice_register_netdev(struct ice_pf *pf)
4057 {
4058 	struct ice_vsi *vsi;
4059 	int err = 0;
4060 
4061 	vsi = ice_get_main_vsi(pf);
4062 	if (!vsi || !vsi->netdev)
4063 		return -EIO;
4064 
4065 	err = register_netdev(vsi->netdev);
4066 	if (err)
4067 		goto err_register_netdev;
4068 
4069 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4070 	netif_carrier_off(vsi->netdev);
4071 	netif_tx_stop_all_queues(vsi->netdev);
4072 	err = ice_devlink_create_port(vsi);
4073 	if (err)
4074 		goto err_devlink_create;
4075 
4076 	devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
4077 
4078 	return 0;
4079 err_devlink_create:
4080 	unregister_netdev(vsi->netdev);
4081 	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4082 err_register_netdev:
4083 	free_netdev(vsi->netdev);
4084 	vsi->netdev = NULL;
4085 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4086 	return err;
4087 }
4088 
4089 /**
4090  * ice_probe - Device initialization routine
4091  * @pdev: PCI device information struct
4092  * @ent: entry in ice_pci_tbl
4093  *
4094  * Returns 0 on success, negative on failure
4095  */
4096 static int
4097 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4098 {
4099 	struct device *dev = &pdev->dev;
4100 	struct ice_pf *pf;
4101 	struct ice_hw *hw;
4102 	int i, err;
4103 
4104 	/* this driver uses devres, see
4105 	 * Documentation/driver-api/driver-model/devres.rst
4106 	 */
4107 	err = pcim_enable_device(pdev);
4108 	if (err)
4109 		return err;
4110 
4111 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
4112 	if (err) {
4113 		dev_err(dev, "BAR0 I/O map error %d\n", err);
4114 		return err;
4115 	}
4116 
4117 	pf = ice_allocate_pf(dev);
4118 	if (!pf)
4119 		return -ENOMEM;
4120 
4121 	/* set up for high or low DMA */
4122 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4123 	if (err)
4124 		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4125 	if (err) {
4126 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4127 		return err;
4128 	}
4129 
4130 	pci_enable_pcie_error_reporting(pdev);
4131 	pci_set_master(pdev);
4132 
4133 	pf->pdev = pdev;
4134 	pci_set_drvdata(pdev, pf);
4135 	set_bit(ICE_DOWN, pf->state);
4136 	/* Disable service task until DOWN bit is cleared */
4137 	set_bit(ICE_SERVICE_DIS, pf->state);
4138 
4139 	hw = &pf->hw;
4140 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4141 	pci_save_state(pdev);
4142 
4143 	hw->back = pf;
4144 	hw->vendor_id = pdev->vendor;
4145 	hw->device_id = pdev->device;
4146 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4147 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
4148 	hw->subsystem_device_id = pdev->subsystem_device;
4149 	hw->bus.device = PCI_SLOT(pdev->devfn);
4150 	hw->bus.func = PCI_FUNC(pdev->devfn);
4151 	ice_set_ctrlq_len(hw);
4152 
4153 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4154 
4155 	err = ice_devlink_register(pf);
4156 	if (err) {
4157 		dev_err(dev, "ice_devlink_register failed: %d\n", err);
4158 		goto err_exit_unroll;
4159 	}
4160 
4161 #ifndef CONFIG_DYNAMIC_DEBUG
4162 	if (debug < -1)
4163 		hw->debug_mask = debug;
4164 #endif
4165 
4166 	err = ice_init_hw(hw);
4167 	if (err) {
4168 		dev_err(dev, "ice_init_hw failed: %d\n", err);
4169 		err = -EIO;
4170 		goto err_exit_unroll;
4171 	}
4172 
4173 	ice_request_fw(pf);
4174 
4175 	/* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
4176 	 * set in pf->flags, which will cause ice_is_safe_mode to return
4177 	 * true
4178 	 */
4179 	if (ice_is_safe_mode(pf)) {
4180 		dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
4181 		/* we already got function/device capabilities but these don't
4182 		 * reflect what the driver needs to do in safe mode. Instead of
4183 		 * adding conditional logic everywhere to ignore these
4184 		 * device/function capabilities, override them.
4185 		 */
4186 		ice_set_safe_mode_caps(hw);
4187 	}
4188 
4189 	err = ice_init_pf(pf);
4190 	if (err) {
4191 		dev_err(dev, "ice_init_pf failed: %d\n", err);
4192 		goto err_init_pf_unroll;
4193 	}
4194 
4195 	ice_devlink_init_regions(pf);
4196 
4197 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4198 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4199 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4200 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4201 	i = 0;
4202 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4203 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4204 			pf->hw.tnl.valid_count[TNL_VXLAN];
4205 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4206 			UDP_TUNNEL_TYPE_VXLAN;
4207 		i++;
4208 	}
4209 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4210 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4211 			pf->hw.tnl.valid_count[TNL_GENEVE];
4212 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4213 			UDP_TUNNEL_TYPE_GENEVE;
4214 		i++;
4215 	}
4216 
4217 	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4218 	if (!pf->num_alloc_vsi) {
4219 		err = -EIO;
4220 		goto err_init_pf_unroll;
4221 	}
4222 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4223 		dev_warn(&pf->pdev->dev,
4224 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4225 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4226 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4227 	}
4228 
4229 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4230 			       GFP_KERNEL);
4231 	if (!pf->vsi) {
4232 		err = -ENOMEM;
4233 		goto err_init_pf_unroll;
4234 	}
4235 
4236 	err = ice_init_interrupt_scheme(pf);
4237 	if (err) {
4238 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4239 		err = -EIO;
4240 		goto err_init_vsi_unroll;
4241 	}
4242 
4243 	/* In case of MSI-X we are going to set up the misc vector right here
4244 	 * to handle admin queue events etc. In case of legacy and MSI,
4245 	 * the misc functionality and queue processing are combined in
4246 	 * the same vector and that gets set up at open.
4247 	 */
4248 	err = ice_req_irq_msix_misc(pf);
4249 	if (err) {
4250 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4251 		goto err_init_interrupt_unroll;
4252 	}
4253 
4254 	/* create switch struct for the switch element created by FW on boot */
4255 	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4256 	if (!pf->first_sw) {
4257 		err = -ENOMEM;
4258 		goto err_msix_misc_unroll;
4259 	}
4260 
4261 	if (hw->evb_veb)
4262 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4263 	else
4264 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4265 
4266 	pf->first_sw->pf = pf;
4267 
4268 	/* record the sw_id available for later use */
4269 	pf->first_sw->sw_id = hw->port_info->sw_id;
4270 
4271 	err = ice_setup_pf_sw(pf);
4272 	if (err) {
4273 		dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4274 		goto err_alloc_sw_unroll;
4275 	}
4276 
4277 	clear_bit(ICE_SERVICE_DIS, pf->state);
4278 
4279 	/* tell the firmware we are up */
4280 	err = ice_send_version(pf);
4281 	if (err) {
4282 		dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4283 			UTS_RELEASE, err);
4284 		goto err_send_version_unroll;
4285 	}
4286 
4287 	/* since everything is good, start the service timer */
4288 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4289 
4290 	err = ice_init_link_events(pf->hw.port_info);
4291 	if (err) {
4292 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4293 		goto err_send_version_unroll;
4294 	}
4295 
4296 	/* not a fatal error if this fails */
4297 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4298 	if (err)
4299 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4300 
4301 	/* not a fatal error if this fails */
4302 	err = ice_update_link_info(pf->hw.port_info);
4303 	if (err)
4304 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4305 
4306 	ice_init_link_dflt_override(pf->hw.port_info);
4307 
4308 	ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err);
4309 
4310 	/* if media available, initialize PHY settings */
4311 	if (pf->hw.port_info->phy.link_info.link_info &
4312 	    ICE_AQ_MEDIA_AVAILABLE) {
4313 		/* not a fatal error if this fails */
4314 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4315 		if (err)
4316 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4317 
4318 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4319 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4320 
4321 			if (vsi)
4322 				ice_configure_phy(vsi);
4323 		}
4324 	} else {
4325 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4326 	}
4327 
4328 	ice_verify_cacheline_size(pf);
4329 
4330 	/* Save wakeup reason register for later use */
4331 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4332 
4333 	/* check for a power management event */
4334 	ice_print_wake_reason(pf);
4335 
4336 	/* clear wake status, all bits */
4337 	wr32(hw, PFPM_WUS, U32_MAX);
4338 
4339 	/* Disable WoL at init, wait for user to enable */
4340 	device_set_wakeup_enable(dev, false);
4341 
4342 	if (ice_is_safe_mode(pf)) {
4343 		ice_set_safe_mode_vlan_cfg(pf);
4344 		goto probe_done;
4345 	}
4346 
4347 	/* initialize DDP driven features */
4348 
4349 	/* Note: Flow director init failure is non-fatal to load */
4350 	if (ice_init_fdir(pf))
4351 		dev_err(dev, "could not initialize flow director\n");
4352 
4353 	/* Note: DCB init failure is non-fatal to load */
4354 	if (ice_init_pf_dcb(pf, false)) {
4355 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4356 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4357 	} else {
4358 		ice_cfg_lldp_mib_change(&pf->hw, true);
4359 	}
4360 
4361 	if (ice_init_lag(pf))
4362 		dev_warn(dev, "Failed to init link aggregation support\n");
4363 
4364 	/* print PCI link speed and width */
4365 	pcie_print_link_status(pf->pdev);
4366 
4367 probe_done:
4368 	err = ice_register_netdev(pf);
4369 	if (err)
4370 		goto err_netdev_reg;
4371 
4372 	/* ready to go, so clear down state bit */
4373 	clear_bit(ICE_DOWN, pf->state);
4374 	if (ice_is_aux_ena(pf)) {
4375 		pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
4376 		if (pf->aux_idx < 0) {
4377 			dev_err(dev, "Failed to allocate device ID for AUX driver\n");
4378 			err = -ENOMEM;
4379 			goto err_netdev_reg;
4380 		}
4381 
4382 		err = ice_init_rdma(pf);
4383 		if (err) {
4384 			dev_err(dev, "Failed to initialize RDMA: %d\n", err);
4385 			err = -EIO;
4386 			goto err_init_aux_unroll;
4387 		}
4388 	} else {
4389 		dev_warn(dev, "RDMA is not supported on this device\n");
4390 	}
4391 
4392 	return 0;
4393 
4394 err_init_aux_unroll:
4395 	pf->adev = NULL;
4396 	ida_free(&ice_aux_ida, pf->aux_idx);
4397 err_netdev_reg:
4398 err_send_version_unroll:
4399 	ice_vsi_release_all(pf);
4400 err_alloc_sw_unroll:
4401 	set_bit(ICE_SERVICE_DIS, pf->state);
4402 	set_bit(ICE_DOWN, pf->state);
4403 	devm_kfree(dev, pf->first_sw);
4404 err_msix_misc_unroll:
4405 	ice_free_irq_msix_misc(pf);
4406 err_init_interrupt_unroll:
4407 	ice_clear_interrupt_scheme(pf);
4408 err_init_vsi_unroll:
4409 	devm_kfree(dev, pf->vsi);
4410 err_init_pf_unroll:
4411 	ice_deinit_pf(pf);
4412 	ice_devlink_destroy_regions(pf);
4413 	ice_deinit_hw(hw);
4414 err_exit_unroll:
4415 	ice_devlink_unregister(pf);
4416 	pci_disable_pcie_error_reporting(pdev);
4417 	pci_disable_device(pdev);
4418 	return err;
4419 }
4420 
4421 /**
4422  * ice_set_wake - enable or disable Wake on LAN
4423  * @pf: pointer to the PF struct
4424  *
4425  * Simple helper for WoL control
4426  */
4427 static void ice_set_wake(struct ice_pf *pf)
4428 {
4429 	struct ice_hw *hw = &pf->hw;
4430 	bool wol = pf->wol_ena;
4431 
4432 	/* clear wake state, otherwise new wake events won't fire */
4433 	wr32(hw, PFPM_WUS, U32_MAX);
4434 
4435 	/* enable / disable APM wake up, no RMW needed */
4436 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4437 
4438 	/* set magic packet filter enabled */
4439 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4440 }
4441 
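/* Example (illustrative): pf->wol_ena is normally flipped through the
 * driver's ethtool set_wol handler, e.g.
 *
 *	ethtool -s <iface> wol g	# wake on magic packet
 *	ethtool -s <iface> wol d	# disable wake-up
 *
 * after which ice_set_wake() programs PFPM_APM/PFPM_WUFC accordingly on
 * suspend or remove.
 */
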
4442 /**
4443  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
4444  * @pf: pointer to the PF struct
4445  *
4446  * Issue firmware command to enable multicast magic wake, making
4447  * sure that any locally administered address (LAA) is used for
4448  * wake, and that PF reset doesn't undo the LAA.
4449  */
4450 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4451 {
4452 	struct device *dev = ice_pf_to_dev(pf);
4453 	struct ice_hw *hw = &pf->hw;
4454 	enum ice_status status;
4455 	u8 mac_addr[ETH_ALEN];
4456 	struct ice_vsi *vsi;
4457 	u8 flags;
4458 
4459 	if (!pf->wol_ena)
4460 		return;
4461 
4462 	vsi = ice_get_main_vsi(pf);
4463 	if (!vsi)
4464 		return;
4465 
4466 	/* Get current MAC address in case it's an LAA */
4467 	if (vsi->netdev)
4468 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4469 	else
4470 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4471 
4472 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4473 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4474 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4475 
4476 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4477 	if (status)
4478 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
4479 			ice_stat_str(status),
4480 			ice_aq_str(hw->adminq.sq_last_status));
4481 }
4482 
4483 /**
4484  * ice_remove - Device removal routine
4485  * @pdev: PCI device information struct
4486  */
4487 static void ice_remove(struct pci_dev *pdev)
4488 {
4489 	struct ice_pf *pf = pci_get_drvdata(pdev);
4490 	int i;
4491 
4492 	if (!pf)
4493 		return;
4494 
4495 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4496 		if (!ice_is_reset_in_progress(pf->state))
4497 			break;
4498 		msleep(100);
4499 	}
4500 
4501 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4502 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
4503 		ice_free_vfs(pf);
4504 	}
4505 
4506 	ice_service_task_stop(pf);
4507 
4508 	ice_aq_cancel_waiting_tasks(pf);
4509 	ice_unplug_aux_dev(pf);
4510 	ida_free(&ice_aux_ida, pf->aux_idx);
4511 	set_bit(ICE_DOWN, pf->state);
4512 
4513 	mutex_destroy(&pf->hw.fdir_fltr_lock);
4514 	ice_deinit_lag(pf);
4515 	if (!ice_is_safe_mode(pf))
4516 		ice_remove_arfs(pf);
4517 	ice_setup_mc_magic_wake(pf);
4518 	ice_vsi_release_all(pf);
4519 	ice_set_wake(pf);
4520 	ice_free_irq_msix_misc(pf);
4521 	ice_for_each_vsi(pf, i) {
4522 		if (!pf->vsi[i])
4523 			continue;
4524 		ice_vsi_free_q_vectors(pf->vsi[i]);
4525 	}
4526 	ice_deinit_pf(pf);
4527 	ice_devlink_destroy_regions(pf);
4528 	ice_deinit_hw(&pf->hw);
4529 	ice_devlink_unregister(pf);
4530 
4531 	/* Issue a PFR as part of the prescribed driver unload flow.  Do not
4532 	 * do it via ice_schedule_reset() since there is no need to rebuild
4533 	 * and the service task is already stopped.
4534 	 */
4535 	ice_reset(&pf->hw, ICE_RESET_PFR);
4536 	pci_wait_for_pending_transaction(pdev);
4537 	ice_clear_interrupt_scheme(pf);
4538 	pci_disable_pcie_error_reporting(pdev);
4539 	pci_disable_device(pdev);
4540 }
4541 
4542 /**
4543  * ice_shutdown - PCI callback for shutting down device
4544  * @pdev: PCI device information struct
4545  */
4546 static void ice_shutdown(struct pci_dev *pdev)
4547 {
4548 	struct ice_pf *pf = pci_get_drvdata(pdev);
4549 
4550 	ice_remove(pdev);
4551 
4552 	if (system_state == SYSTEM_POWER_OFF) {
4553 		pci_wake_from_d3(pdev, pf->wol_ena);
4554 		pci_set_power_state(pdev, PCI_D3hot);
4555 	}
4556 }
4557 
4558 #ifdef CONFIG_PM
4559 /**
4560  * ice_prepare_for_shutdown - prep for PCI shutdown
4561  * @pf: board private structure
4562  *
4563  * Inform or close all dependent features in prep for PCI device shutdown
4564  */
4565 static void ice_prepare_for_shutdown(struct ice_pf *pf)
4566 {
4567 	struct ice_hw *hw = &pf->hw;
4568 	u32 v;
4569 
4570 	/* Notify VFs of impending reset */
4571 	if (ice_check_sq_alive(hw, &hw->mailboxq))
4572 		ice_vc_notify_reset(pf);
4573 
4574 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4575 
4576 	/* disable the VSIs and their queues that are not already DOWN */
4577 	ice_pf_dis_all_vsi(pf, false);
4578 
4579 	ice_for_each_vsi(pf, v)
4580 		if (pf->vsi[v])
4581 			pf->vsi[v]->vsi_num = 0;
4582 
4583 	ice_shutdown_all_ctrlq(hw);
4584 }
4585 
4586 /**
4587  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4588  * @pf: board private structure to reinitialize
4589  *
4590  * This routine reinitializes the interrupt scheme that was cleared during
4591  * the power management suspend callback.
4592  *
4593  * This should be called during the resume routine to re-allocate the q_vectors
4594  * and reacquire interrupts.
4595  */
4596 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4597 {
4598 	struct device *dev = ice_pf_to_dev(pf);
4599 	int ret, v;
4600 
4601 	/* Since we clear the MSI-X flag during suspend, we need to
4602 	 * set it back during resume...
4603 	 */
4604 
4605 	ret = ice_init_interrupt_scheme(pf);
4606 	if (ret) {
4607 		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4608 		return ret;
4609 	}
4610 
4611 	/* Remap vectors and rings, after successful re-init interrupts */
4612 	ice_for_each_vsi(pf, v) {
4613 		if (!pf->vsi[v])
4614 			continue;
4615 
4616 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4617 		if (ret)
4618 			goto err_reinit;
4619 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4620 	}
4621 
4622 	ret = ice_req_irq_msix_misc(pf);
4623 	if (ret) {
4624 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4625 			ret);
4626 		goto err_reinit;
4627 	}
4628 
4629 	return 0;
4630 
4631 err_reinit:
4632 	while (v--)
4633 		if (pf->vsi[v])
4634 			ice_vsi_free_q_vectors(pf->vsi[v]);
4635 
4636 	return ret;
4637 }
4638 
4639 /**
4640  * ice_suspend
4641  * @dev: generic device information structure
4642  *
4643  * Power Management callback to quiesce the device and prepare
4644  * for D3 transition.
4645  */
4646 static int __maybe_unused ice_suspend(struct device *dev)
4647 {
4648 	struct pci_dev *pdev = to_pci_dev(dev);
4649 	struct ice_pf *pf;
4650 	int disabled, v;
4651 
4652 	pf = pci_get_drvdata(pdev);
4653 
4654 	if (!ice_pf_state_is_nominal(pf)) {
4655 		dev_err(dev, "Device is not ready, no need to suspend it\n");
4656 		return -EBUSY;
4657 	}
4658 
4659 	/* Stop watchdog tasks until resume completion.
4660 	 * Even though it is most likely that the service task is
4661 	 * disabled if the device is suspended or down, the service task's
4662 	 * state is controlled by a different state bit, and we should
4663 	 * store and honor whatever state that bit is in at this point.
4664 	 */
4665 	disabled = ice_service_task_stop(pf);
4666 
4667 	ice_unplug_aux_dev(pf);
4668 
4669 	/* Already suspended? Then there is nothing to do */
4670 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
4671 		if (!disabled)
4672 			ice_service_task_restart(pf);
4673 		return 0;
4674 	}
4675 
4676 	if (test_bit(ICE_DOWN, pf->state) ||
4677 	    ice_is_reset_in_progress(pf->state)) {
4678 		dev_err(dev, "can't suspend device in reset or already down\n");
4679 		if (!disabled)
4680 			ice_service_task_restart(pf);
4681 		return 0;
4682 	}
4683 
4684 	ice_setup_mc_magic_wake(pf);
4685 
4686 	ice_prepare_for_shutdown(pf);
4687 
4688 	ice_set_wake(pf);
4689 
4690 	/* Free vectors, clear the interrupt scheme and release IRQs
4691 	 * for proper hibernation, especially with a large number of CPUs.
4692 	 * Otherwise hibernation might fail when mapping all the vectors back
4693 	 * to CPU0.
4694 	 */
4695 	ice_free_irq_msix_misc(pf);
4696 	ice_for_each_vsi(pf, v) {
4697 		if (!pf->vsi[v])
4698 			continue;
4699 		ice_vsi_free_q_vectors(pf->vsi[v]);
4700 	}
4701 	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
4702 	ice_clear_interrupt_scheme(pf);
4703 
4704 	pci_save_state(pdev);
4705 	pci_wake_from_d3(pdev, pf->wol_ena);
4706 	pci_set_power_state(pdev, PCI_D3hot);
4707 	return 0;
4708 }
4709 
4710 /**
4711  * ice_resume - PM callback for waking up from D3
4712  * @dev: generic device information structure
4713  */
4714 static int __maybe_unused ice_resume(struct device *dev)
4715 {
4716 	struct pci_dev *pdev = to_pci_dev(dev);
4717 	enum ice_reset_req reset_type;
4718 	struct ice_pf *pf;
4719 	struct ice_hw *hw;
4720 	int ret;
4721 
4722 	pci_set_power_state(pdev, PCI_D0);
4723 	pci_restore_state(pdev);
4724 	pci_save_state(pdev);
4725 
4726 	if (!pci_device_is_present(pdev))
4727 		return -ENODEV;
4728 
4729 	ret = pci_enable_device_mem(pdev);
4730 	if (ret) {
4731 		dev_err(dev, "Cannot enable device after suspend\n");
4732 		return ret;
4733 	}
4734 
4735 	pf = pci_get_drvdata(pdev);
4736 	hw = &pf->hw;
4737 
4738 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4739 	ice_print_wake_reason(pf);
4740 
4741 	/* We cleared the interrupt scheme when we suspended, so we need to
4742 	 * restore it now to resume device functionality.
4743 	 */
4744 	ret = ice_reinit_interrupt_scheme(pf);
4745 	if (ret)
4746 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
4747 
4748 	clear_bit(ICE_DOWN, pf->state);
4749 	/* Now perform PF reset and rebuild */
4750 	reset_type = ICE_RESET_PFR;
4751 	/* re-enable service task for reset, but allow reset to schedule it */
4752 	clear_bit(ICE_SERVICE_DIS, pf->state);
4753 
4754 	if (ice_schedule_reset(pf, reset_type))
4755 		dev_err(dev, "Reset during resume failed.\n");
4756 
4757 	clear_bit(ICE_SUSPENDED, pf->state);
4758 	ice_service_task_restart(pf);
4759 
4760 	/* Restart the service task */
4761 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4762 
4763 	return 0;
4764 }
4765 #endif /* CONFIG_PM */
4766 
4767 /**
4768  * ice_pci_err_detected - warning that PCI error has been detected
4769  * @pdev: PCI device information struct
4770  * @err: the type of PCI error
4771  *
4772  * Called to warn that something happened on the PCI bus and the error handling
4773  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
4774  */
4775 static pci_ers_result_t
4776 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
4777 {
4778 	struct ice_pf *pf = pci_get_drvdata(pdev);
4779 
4780 	if (!pf) {
4781 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
4782 			__func__, err);
4783 		return PCI_ERS_RESULT_DISCONNECT;
4784 	}
4785 
4786 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
4787 		ice_service_task_stop(pf);
4788 
4789 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
4790 			set_bit(ICE_PFR_REQ, pf->state);
4791 			ice_prepare_for_reset(pf);
4792 		}
4793 	}
4794 
4795 	return PCI_ERS_RESULT_NEED_RESET;
4796 }
4797 
4798 /**
4799  * ice_pci_err_slot_reset - a PCI slot reset has just happened
4800  * @pdev: PCI device information struct
4801  *
4802  * Called to determine if the driver can recover from the PCI slot reset by
4803  * using a register read to determine if the device is recoverable.
4804  */
4805 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
4806 {
4807 	struct ice_pf *pf = pci_get_drvdata(pdev);
4808 	pci_ers_result_t result;
4809 	int err;
4810 	u32 reg;
4811 
4812 	err = pci_enable_device_mem(pdev);
4813 	if (err) {
4814 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
4815 			err);
4816 		result = PCI_ERS_RESULT_DISCONNECT;
4817 	} else {
4818 		pci_set_master(pdev);
4819 		pci_restore_state(pdev);
4820 		pci_save_state(pdev);
4821 		pci_wake_from_d3(pdev, false);
4822 
4823 		/* Check for life */
4824 		reg = rd32(&pf->hw, GLGEN_RTRIG);
4825 		if (!reg)
4826 			result = PCI_ERS_RESULT_RECOVERED;
4827 		else
4828 			result = PCI_ERS_RESULT_DISCONNECT;
4829 	}
4830 
4831 	err = pci_aer_clear_nonfatal_status(pdev);
4832 	if (err)
4833 		dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
4834 			err);
4835 	/* non-fatal, continue */
4836 
4837 	return result;
4838 }
4839 
4840 /**
4841  * ice_pci_err_resume - restart operations after PCI error recovery
4842  * @pdev: PCI device information struct
4843  *
4844  * Called to allow the driver to bring things back up after PCI error and/or
4845  * reset recovery have finished
4846  */
4847 static void ice_pci_err_resume(struct pci_dev *pdev)
4848 {
4849 	struct ice_pf *pf = pci_get_drvdata(pdev);
4850 
4851 	if (!pf) {
4852 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
4853 			__func__);
4854 		return;
4855 	}
4856 
4857 	if (test_bit(ICE_SUSPENDED, pf->state)) {
4858 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
4859 			__func__);
4860 		return;
4861 	}
4862 
4863 	ice_restore_all_vfs_msi_state(pdev);
4864 
4865 	ice_do_reset(pf, ICE_RESET_PFR);
4866 	ice_service_task_restart(pf);
4867 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4868 }
4869 
4870 /**
4871  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
4872  * @pdev: PCI device information struct
4873  */
4874 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
4875 {
4876 	struct ice_pf *pf = pci_get_drvdata(pdev);
4877 
4878 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
4879 		ice_service_task_stop(pf);
4880 
4881 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
4882 			set_bit(ICE_PFR_REQ, pf->state);
4883 			ice_prepare_for_reset(pf);
4884 		}
4885 	}
4886 }
4887 
4888 /**
4889  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
4890  * @pdev: PCI device information struct
4891  */
4892 static void ice_pci_err_reset_done(struct pci_dev *pdev)
4893 {
4894 	ice_pci_err_resume(pdev);
4895 }
4896 
4897 /* ice_pci_tbl - PCI Device ID Table
4898  *
4899  * Wildcard entries (PCI_ANY_ID) should come last
4900  * Last entry must be all 0s
4901  *
4902  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
4903  *   Class, Class Mask, private data (not used) }
4904  */
4905 static const struct pci_device_id ice_pci_tbl[] = {
4906 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
4907 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
4908 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
4909 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
4910 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
4911 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
4912 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
4913 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
4914 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
4915 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
4916 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
4917 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
4918 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
4919 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
4920 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
4921 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
4922 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
4923 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
4924 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
4925 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
4926 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
4927 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
4928 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
4929 	/* required last entry */
4930 	{ 0, }
4931 };
4932 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
4933 
4934 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
4935 
4936 static const struct pci_error_handlers ice_pci_err_handler = {
4937 	.error_detected = ice_pci_err_detected,
4938 	.slot_reset = ice_pci_err_slot_reset,
4939 	.reset_prepare = ice_pci_err_reset_prepare,
4940 	.reset_done = ice_pci_err_reset_done,
4941 	.resume = ice_pci_err_resume
4942 };
4943 
4944 static struct pci_driver ice_driver = {
4945 	.name = KBUILD_MODNAME,
4946 	.id_table = ice_pci_tbl,
4947 	.probe = ice_probe,
4948 	.remove = ice_remove,
4949 #ifdef CONFIG_PM
4950 	.driver.pm = &ice_pm_ops,
4951 #endif /* CONFIG_PM */
4952 	.shutdown = ice_shutdown,
4953 	.sriov_configure = ice_sriov_configure,
4954 	.err_handler = &ice_pci_err_handler
4955 };
4956 
4957 /**
4958  * ice_module_init - Driver registration routine
4959  *
4960  * ice_module_init is the first routine called when the driver is
4961  * loaded. All it does is register with the PCI subsystem.
4962  */
4963 static int __init ice_module_init(void)
4964 {
4965 	int status;
4966 
4967 	pr_info("%s\n", ice_driver_string);
4968 	pr_info("%s\n", ice_copyright);
4969 
4970 	ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
4971 	if (!ice_wq) {
4972 		pr_err("Failed to create workqueue\n");
4973 		return -ENOMEM;
4974 	}
4975 
4976 	status = pci_register_driver(&ice_driver);
4977 	if (status) {
4978 		pr_err("failed to register PCI driver, err %d\n", status);
4979 		destroy_workqueue(ice_wq);
4980 	}
4981 
4982 	return status;
4983 }
4984 module_init(ice_module_init);
4985 
4986 /**
4987  * ice_module_exit - Driver exit cleanup routine
4988  *
4989  * ice_module_exit is called just before the driver is removed
4990  * from memory.
4991  */
4992 static void __exit ice_module_exit(void)
4993 {
4994 	pci_unregister_driver(&ice_driver);
4995 	destroy_workqueue(ice_wq);
4996 	pr_info("module unloaded\n");
4997 }
4998 module_exit(ice_module_exit);
4999 
5000 /**
5001  * ice_set_mac_address - NDO callback to set MAC address
5002  * @netdev: network interface device structure
5003  * @pi: pointer to an address structure
5004  *
5005  * Returns 0 on success, negative on failure
5006  */
5007 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5008 {
5009 	struct ice_netdev_priv *np = netdev_priv(netdev);
5010 	struct ice_vsi *vsi = np->vsi;
5011 	struct ice_pf *pf = vsi->back;
5012 	struct ice_hw *hw = &pf->hw;
5013 	struct sockaddr *addr = pi;
5014 	enum ice_status status;
5015 	u8 flags = 0;
5016 	int err = 0;
5017 	u8 *mac;
5018 
5019 	mac = (u8 *)addr->sa_data;
5020 
5021 	if (!is_valid_ether_addr(mac))
5022 		return -EADDRNOTAVAIL;
5023 
5024 	if (ether_addr_equal(netdev->dev_addr, mac)) {
5025 		netdev_warn(netdev, "already using mac %pM\n", mac);
5026 		return 0;
5027 	}
5028 
5029 	if (test_bit(ICE_DOWN, pf->state) ||
5030 	    ice_is_reset_in_progress(pf->state)) {
5031 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
5032 			   mac);
5033 		return -EBUSY;
5034 	}
5035 
5036 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
5037 	status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
5038 	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
5039 		err = -EADDRNOTAVAIL;
5040 		goto err_update_filters;
5041 	}
5042 
5043 	/* Add filter for new MAC. If filter exists, return success */
5044 	status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5045 	if (status == ICE_ERR_ALREADY_EXISTS) {
5046 		/* Although this MAC filter is already present in hardware it's
5047 		 * possible in some cases (e.g. bonding) that dev_addr was
5048 		 * modified outside of the driver and needs to be restored back
5049 		 * to this value.
5050 		 */
5051 		memcpy(netdev->dev_addr, mac, netdev->addr_len);
5052 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5053 		return 0;
5054 	}
5055 
5056 	/* error if the new filter addition failed */
5057 	if (status)
5058 		err = -EADDRNOTAVAIL;
5059 
5060 err_update_filters:
5061 	if (err) {
5062 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5063 			   mac);
5064 		return err;
5065 	}
5066 
5067 	/* change the netdev's MAC address */
5068 	memcpy(netdev->dev_addr, mac, netdev->addr_len);
5069 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5070 		   netdev->dev_addr);
5071 
5072 	/* write new MAC address to the firmware */
5073 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5074 	status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5075 	if (status) {
5076 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
5077 			   mac, ice_stat_str(status));
5078 	}
5079 	return 0;
5080 }
5081 
5082 /**
5083  * ice_set_rx_mode - NDO callback to set the netdev filters
5084  * @netdev: network interface device structure
5085  */
5086 static void ice_set_rx_mode(struct net_device *netdev)
5087 {
5088 	struct ice_netdev_priv *np = netdev_priv(netdev);
5089 	struct ice_vsi *vsi = np->vsi;
5090 
5091 	if (!vsi)
5092 		return;
5093 
5094 	/* Set the flags to synchronize filters.
5095 	 * ndo_set_rx_mode may be triggered even without a change in netdev
5096 	 * flags.
5097 	 */
5098 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5099 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5100 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5101 
5102 	/* schedule our worker thread which will take care of
5103 	 * applying the new filter changes
5104 	 */
5105 	ice_service_task_schedule(vsi->back);
5106 }
5107 
5108 /**
5109  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5110  * @netdev: network interface device structure
5111  * @queue_index: Queue ID
5112  * @maxrate: maximum bandwidth in Mbps
5113  */
5114 static int
5115 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5116 {
5117 	struct ice_netdev_priv *np = netdev_priv(netdev);
5118 	struct ice_vsi *vsi = np->vsi;
5119 	enum ice_status status;
5120 	u16 q_handle;
5121 	u8 tc;
5122 
5123 	/* Validate maxrate requested is within permitted range */
5124 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5125 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5126 			   maxrate, queue_index);
5127 		return -EINVAL;
5128 	}
5129 
5130 	q_handle = vsi->tx_rings[queue_index]->q_handle;
5131 	tc = ice_dcb_get_tc(vsi, queue_index);
5132 
5133 	/* Set BW back to default, when user set maxrate to 0 */
5134 	if (!maxrate)
5135 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5136 					       q_handle, ICE_MAX_BW);
5137 	else
5138 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5139 					  q_handle, ICE_MAX_BW, maxrate * 1000);
5140 	if (status) {
5141 		netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
5142 			   ice_stat_str(status));
5143 		return -EIO;
5144 	}
5145 
5146 	return 0;
5147 }
5148 
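/* Example (illustrative): the stack invokes this NDO when userspace writes
 * the per-queue sysfs rate knob, e.g.
 *
 *	echo 500 > /sys/class/net/<iface>/queues/tx-0/tx_maxrate
 *
 * requests 500 Mbps on Tx queue 0, and the driver hands maxrate * 1000 =
 * 500000 (the rate in Kbps) to ice_cfg_q_bw_lmt(). Writing 0 restores the
 * default (unlimited) queue bandwidth via ice_cfg_q_bw_dflt_lmt().
 */
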
5149 /**
5150  * ice_fdb_add - add an entry to the hardware database
5151  * @ndm: the input from the stack
5152  * @tb: pointer to array of nladdr (unused)
5153  * @dev: the net device pointer
5154  * @addr: the MAC address entry being added
5155  * @vid: VLAN ID
5156  * @flags: instructions from stack about fdb operation
5157  * @extack: netlink extended ack
5158  */
5159 static int
5160 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5161 	    struct net_device *dev, const unsigned char *addr, u16 vid,
5162 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5163 {
5164 	int err;
5165 
5166 	if (vid) {
5167 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5168 		return -EINVAL;
5169 	}
5170 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5171 		netdev_err(dev, "FDB only supports static addresses\n");
5172 		return -EINVAL;
5173 	}
5174 
5175 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5176 		err = dev_uc_add_excl(dev, addr);
5177 	else if (is_multicast_ether_addr(addr))
5178 		err = dev_mc_add_excl(dev, addr);
5179 	else
5180 		err = -EINVAL;
5181 
5182 	/* Only return duplicate errors if NLM_F_EXCL is set */
5183 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5184 		err = 0;
5185 
5186 	return err;
5187 }
5188 
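/* Example (illustrative): these FDB ops back the iproute2 bridge command,
 * e.g.
 *
 *	bridge fdb add 01:00:5e:00:00:42 dev <iface>
 *
 * adds a static multicast entry via dev_mc_add_excl(), while a unicast
 * address takes the dev_uc_add_excl() path; requests carrying a VLAN ID are
 * rejected with -EINVAL.
 */
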
5189 /**
5190  * ice_fdb_del - delete an entry from the hardware database
5191  * @ndm: the input from the stack
5192  * @tb: pointer to array of nladdr (unused)
5193  * @dev: the net device pointer
5194  * @addr: the MAC address entry being removed
5195  * @vid: VLAN ID
5196  */
5197 static int
5198 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5199 	    struct net_device *dev, const unsigned char *addr,
5200 	    __always_unused u16 vid)
5201 {
5202 	int err;
5203 
5204 	if (ndm->ndm_state & NUD_PERMANENT) {
5205 		netdev_err(dev, "FDB only supports static addresses\n");
5206 		return -EINVAL;
5207 	}
5208 
5209 	if (is_unicast_ether_addr(addr))
5210 		err = dev_uc_del(dev, addr);
5211 	else if (is_multicast_ether_addr(addr))
5212 		err = dev_mc_del(dev, addr);
5213 	else
5214 		err = -EINVAL;
5215 
5216 	return err;
5217 }
5218 
5219 /**
5220  * ice_set_features - set the netdev feature flags
5221  * @netdev: ptr to the netdev being adjusted
5222  * @features: the feature set that the stack is suggesting
5223  */
5224 static int
5225 ice_set_features(struct net_device *netdev, netdev_features_t features)
5226 {
5227 	struct ice_netdev_priv *np = netdev_priv(netdev);
5228 	struct ice_vsi *vsi = np->vsi;
5229 	struct ice_pf *pf = vsi->back;
5230 	int ret = 0;
5231 
5232 	/* Don't set any netdev advanced features with device in Safe Mode */
5233 	if (ice_is_safe_mode(vsi->back)) {
5234 		dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5235 		return ret;
5236 	}
5237 
5238 	/* Do not change setting during reset */
5239 	if (ice_is_reset_in_progress(pf->state)) {
5240 		dev_err(ice_pf_to_dev(vsi->back), "Device is resetting; changing advanced netdev features is temporarily unavailable.\n");
5241 		return -EBUSY;
5242 	}
5243 
5244 	/* Multiple features can be changed in one call so keep features in
5245 	 * separate if/else statements to guarantee each feature is checked
5246 	 */
5247 	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5248 		ice_vsi_manage_rss_lut(vsi, true);
5249 	else if (!(features & NETIF_F_RXHASH) &&
5250 		 netdev->features & NETIF_F_RXHASH)
5251 		ice_vsi_manage_rss_lut(vsi, false);
5252 
5253 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5254 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5255 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5256 	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5257 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5258 		ret = ice_vsi_manage_vlan_stripping(vsi, false);
5259 
5260 	if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5261 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5262 		ret = ice_vsi_manage_vlan_insertion(vsi);
5263 	else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5264 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5265 		ret = ice_vsi_manage_vlan_insertion(vsi);
5266 
5267 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5268 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5269 		ret = ice_cfg_vlan_pruning(vsi, true, false);
5270 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5271 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5272 		ret = ice_cfg_vlan_pruning(vsi, false, false);
5273 
5274 	if ((features & NETIF_F_NTUPLE) &&
5275 	    !(netdev->features & NETIF_F_NTUPLE)) {
5276 		ice_vsi_manage_fdir(vsi, true);
5277 		ice_init_arfs(vsi);
5278 	} else if (!(features & NETIF_F_NTUPLE) &&
5279 		 (netdev->features & NETIF_F_NTUPLE)) {
5280 		ice_vsi_manage_fdir(vsi, false);
5281 		ice_clear_arfs(vsi);
5282 	}
5283 
5284 	return ret;
5285 }
5286 
5287 /**
5288  * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
5289  * @vsi: VSI to setup VLAN properties for
5290  */
5291 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5292 {
5293 	int ret = 0;
5294 
5295 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
5296 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5297 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
5298 		ret = ice_vsi_manage_vlan_insertion(vsi);
5299 
5300 	return ret;
5301 }
5302 
5303 /**
5304  * ice_vsi_cfg - Setup the VSI
5305  * @vsi: the VSI being configured
5306  *
5307  * Return 0 on success and negative value on error
5308  */
5309 int ice_vsi_cfg(struct ice_vsi *vsi)
5310 {
5311 	int err;
5312 
5313 	if (vsi->netdev) {
5314 		ice_set_rx_mode(vsi->netdev);
5315 
5316 		err = ice_vsi_vlan_setup(vsi);
5317 
5318 		if (err)
5319 			return err;
5320 	}
5321 	ice_vsi_cfg_dcb_rings(vsi);
5322 
5323 	err = ice_vsi_cfg_lan_txqs(vsi);
5324 	if (!err && ice_is_xdp_ena_vsi(vsi))
5325 		err = ice_vsi_cfg_xdp_txqs(vsi);
5326 	if (!err)
5327 		err = ice_vsi_cfg_rxqs(vsi);
5328 
5329 	return err;
5330 }
5331 
5332 /* THEORY OF MODERATION:
5333  * The code below creates custom DIM profiles for this driver, because the
5334  * ice hardware works differently than the hardware that DIMLIB was
5335  * originally made for. ice hardware doesn't have packet-count limits that
5336  * can trigger an interrupt, but it *does* support interrupt rate limiting,
5337  * and this code adds that capability for the driver to use with DIMLIB.
5338  * DIMLIB was always designed to be a suggestion to the driver for how to
5339  * "respond" to traffic and interrupts, so this driver uses a slightly
5340  * different set of moderation parameters to get the best performance.
5341  */
5342 struct ice_dim {
5343 	/* the throttle rate for interrupts, basically the worst-case delay
5344 	 * before an initial interrupt fires; value is stored in microseconds.
5345 	 */
5346 	u16 itr;
5347 	/* the rate limit for interrupts, which can cap the effective interrupt
5348 	 * rate when the ITR is small. E.g. a 2 us ITR could by itself yield as
5349 	 * many as 500,000 interrupts per second, but a 10 us rate limit caps
5350 	 * that at 100,000 interrupts per second. Value is stored in
5351 	 * microseconds.
5352 	 */
5353 	u16 intrl;
5354 };
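
/* A minimal sketch (illustrative, not driver code) of how the two fields
 * interact: the effective ceiling on interrupts per second comes from
 * whichever delay is larger, so itr = 2 and intrl = 10 allow at most
 * 1,000,000 / 10 = 100,000 interrupts per second, as in the example
 * above.
 *
 *	static u32 example_max_irq_rate(const struct ice_dim *d)
 *	{
 *		u32 delay_us = max_t(u32, d->itr, d->intrl);
 *
 *		return delay_us ? USEC_PER_SEC / delay_us : USEC_PER_SEC;
 *	}
 */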
5355 
5356 /* Make a different profile for Rx that doesn't allow quite so aggressive
5357  * moderation at the high end (it maxes out at 128 us, or about 8k
5358  * interrupts a second). The INTRL/rate parameters here are only useful
5359  * to cap small ITR values; a larger ITR - like 128, which by itself can
5360  * only generate 8k interrupts per second - gains nothing from a rate
5361  * limit, so those values are set to zero. The rate limit values do add
5362  * latency, so keep them small enough not to hurt latency-sensitive tests.
5363  */
5364 static const struct ice_dim rx_profile[] = {
5365 	{2, 10},
5366 	{8, 16},
5367 	{32, 0},
5368 	{96, 0},
5369 	{128, 0}
5370 };
5371 
5372 /* The transmit profile, which has the same sorts of values
5373  * as the previous struct
5374  */
5375 static const struct ice_dim tx_profile[] = {
5376 	{2, 10},
5377 	{8, 16},
5378 	{64, 0},
5379 	{128, 0},
5380 	{256, 0}
5381 };
5382 
5383 static void ice_tx_dim_work(struct work_struct *work)
5384 {
5385 	struct ice_ring_container *rc;
5386 	struct ice_q_vector *q_vector;
5387 	struct dim *dim;
5388 	u16 itr, intrl;
5389 
5390 	dim = container_of(work, struct dim, work);
5391 	rc = container_of(dim, struct ice_ring_container, dim);
5392 	q_vector = container_of(rc, struct ice_q_vector, tx);
5393 
5394 	if (dim->profile_ix >= ARRAY_SIZE(tx_profile))
5395 		dim->profile_ix = ARRAY_SIZE(tx_profile) - 1;
5396 
5397 	/* look up the values in our local table */
5398 	itr = tx_profile[dim->profile_ix].itr;
5399 	intrl = tx_profile[dim->profile_ix].intrl;
5400 
5401 	ice_write_itr(rc, itr);
5402 	ice_write_intrl(q_vector, intrl);
5403 
5404 	dim->state = DIM_START_MEASURE;
5405 }
5406 
5407 static void ice_rx_dim_work(struct work_struct *work)
5408 {
5409 	struct ice_ring_container *rc;
5410 	struct ice_q_vector *q_vector;
5411 	struct dim *dim;
5412 	u16 itr, intrl;
5413 
5414 	dim = container_of(work, struct dim, work);
5415 	rc = container_of(dim, struct ice_ring_container, dim);
5416 	q_vector = container_of(rc, struct ice_q_vector, rx);
5417 
5418 	if (dim->profile_ix >= ARRAY_SIZE(rx_profile))
5419 		dim->profile_ix = ARRAY_SIZE(rx_profile) - 1;
5420 
5421 	/* look up the values in our local table */
5422 	itr = rx_profile[dim->profile_ix].itr;
5423 	intrl = rx_profile[dim->profile_ix].intrl;
5424 
5425 	ice_write_itr(rc, itr);
5426 	ice_write_intrl(q_vector, intrl);
5427 
5428 	dim->state = DIM_START_MEASURE;
5429 }
5430 
5431 /**
5432  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5433  * @vsi: the VSI being configured
5434  */
5435 static void ice_napi_enable_all(struct ice_vsi *vsi)
5436 {
5437 	int q_idx;
5438 
5439 	if (!vsi->netdev)
5440 		return;
5441 
5442 	ice_for_each_q_vector(vsi, q_idx) {
5443 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5444 
5445 		INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work);
5446 		q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5447 
5448 		INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work);
5449 		q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5450 
5451 		if (q_vector->rx.ring || q_vector->tx.ring)
5452 			napi_enable(&q_vector->napi);
5453 	}
5454 }
5455 
5456 /**
5457  * ice_up_complete - Finish the last steps of bringing up a connection
5458  * @vsi: The VSI being configured
5459  *
5460  * Return 0 on success and negative value on error
5461  */
5462 static int ice_up_complete(struct ice_vsi *vsi)
5463 {
5464 	struct ice_pf *pf = vsi->back;
5465 	int err;
5466 
5467 	ice_vsi_cfg_msix(vsi);
5468 
5469 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
5470 	 * Tx queue group list was configured and the context bits were
5471 	 * programmed using ice_vsi_cfg_txqs
5472 	 */
5473 	err = ice_vsi_start_all_rx_rings(vsi);
5474 	if (err)
5475 		return err;
5476 
5477 	clear_bit(ICE_VSI_DOWN, vsi->state);
5478 	ice_napi_enable_all(vsi);
5479 	ice_vsi_ena_irq(vsi);
5480 
5481 	if (vsi->port_info &&
5482 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
5483 	    vsi->netdev) {
5484 		ice_print_link_msg(vsi, true);
5485 		netif_tx_start_all_queues(vsi->netdev);
5486 		netif_carrier_on(vsi->netdev);
5487 	}
5488 
5489 	ice_service_task_schedule(pf);
5490 
5491 	return 0;
5492 }
5493 
5494 /**
5495  * ice_up - Bring the connection back up after being down
5496  * @vsi: VSI being configured
5497  */
5498 int ice_up(struct ice_vsi *vsi)
5499 {
5500 	int err;
5501 
5502 	err = ice_vsi_cfg(vsi);
5503 	if (!err)
5504 		err = ice_up_complete(vsi);
5505 
5506 	return err;
5507 }
5508 
5509 /**
5510  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
5511  * @ring: Tx or Rx ring to read stats from
5512  * @pkts: packets stats counter
5513  * @bytes: bytes stats counter
5514  *
5515  * This function fetches stats from the ring, taking care of the
5516  * synchronization needed to read u64 values on a 32-bit machine.
5517  */
5518 static void
5519 ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
5520 {
5521 	unsigned int start;
5522 	*pkts = 0;
5523 	*bytes = 0;
5524 
5525 	if (!ring)
5526 		return;
5527 	do {
5528 		start = u64_stats_fetch_begin_irq(&ring->syncp);
5529 		*pkts = ring->stats.pkts;
5530 		*bytes = ring->stats.bytes;
5531 	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
5532 }
5533 
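/* For reference, a minimal sketch (assumed, not driver code) of the
 * matching writer side of the u64_stats pattern that
 * ice_fetch_u64_stats_per_ring() above reads; the real updates happen in
 * the ring hot paths:
 *
 *	u64_stats_update_begin(&ring->syncp);
 *	ring->stats.pkts += pkts;
 *	ring->stats.bytes += bytes;
 *	u64_stats_update_end(&ring->syncp);
 *
 * On 64-bit kernels the seqcount compiles away; on 32-bit kernels it
 * keeps the two 32-bit halves of each u64 consistent for readers.
 */
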
5534 /**
5535  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
5536  * @vsi: the VSI to be updated
5537  * @rings: rings to work on
5538  * @count: number of rings
5539  */
5540 static void
5541 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
5542 			     u16 count)
5543 {
5544 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5545 	u16 i;
5546 
5547 	for (i = 0; i < count; i++) {
5548 		struct ice_ring *ring;
5549 		u64 pkts, bytes;
5550 
5551 		ring = READ_ONCE(rings[i]);
5552 		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5553 		vsi_stats->tx_packets += pkts;
5554 		vsi_stats->tx_bytes += bytes;
5555 		vsi->tx_restart += ring->tx_stats.restart_q;
5556 		vsi->tx_busy += ring->tx_stats.tx_busy;
5557 		vsi->tx_linearize += ring->tx_stats.tx_linearize;
5558 	}
5559 }
5560 
5561 /**
5562  * ice_update_vsi_ring_stats - Update VSI stats counters
5563  * @vsi: the VSI to be updated
5564  */
5565 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
5566 {
5567 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5568 	struct ice_ring *ring;
5569 	u64 pkts, bytes;
5570 	int i;
5571 
5572 	/* reset netdev stats */
5573 	vsi_stats->tx_packets = 0;
5574 	vsi_stats->tx_bytes = 0;
5575 	vsi_stats->rx_packets = 0;
5576 	vsi_stats->rx_bytes = 0;
5577 
5578 	/* reset non-netdev (extended) stats */
5579 	vsi->tx_restart = 0;
5580 	vsi->tx_busy = 0;
5581 	vsi->tx_linearize = 0;
5582 	vsi->rx_buf_failed = 0;
5583 	vsi->rx_page_failed = 0;
5584 
5585 	rcu_read_lock();
5586 
5587 	/* update Tx rings counters */
5588 	ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
5589 
5590 	/* update Rx rings counters */
5591 	ice_for_each_rxq(vsi, i) {
5592 		ring = READ_ONCE(vsi->rx_rings[i]);
5593 		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5594 		vsi_stats->rx_packets += pkts;
5595 		vsi_stats->rx_bytes += bytes;
5596 		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
5597 		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
5598 	}
5599 
5600 	/* update XDP Tx rings counters */
5601 	if (ice_is_xdp_ena_vsi(vsi))
5602 		ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
5603 					     vsi->num_xdp_txq);
5604 
5605 	rcu_read_unlock();
5606 }
5607 
5608 /**
5609  * ice_update_vsi_stats - Update VSI stats counters
5610  * @vsi: the VSI to be updated
5611  */
5612 void ice_update_vsi_stats(struct ice_vsi *vsi)
5613 {
5614 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
5615 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
5616 	struct ice_pf *pf = vsi->back;
5617 
5618 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
5619 	    test_bit(ICE_CFG_BUSY, pf->state))
5620 		return;
5621 
5622 	/* get stats as recorded by Tx/Rx rings */
5623 	ice_update_vsi_ring_stats(vsi);
5624 
5625 	/* get VSI stats as recorded by the hardware */
5626 	ice_update_eth_stats(vsi);
5627 
5628 	cur_ns->tx_errors = cur_es->tx_errors;
5629 	cur_ns->rx_dropped = cur_es->rx_discards;
5630 	cur_ns->tx_dropped = cur_es->tx_discards;
5631 	cur_ns->multicast = cur_es->rx_multicast;
5632 
5633 	/* update some more netdev stats if this is main VSI */
5634 	if (vsi->type == ICE_VSI_PF) {
5635 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
5636 		cur_ns->rx_errors = pf->stats.crc_errors +
5637 				    pf->stats.illegal_bytes +
5638 				    pf->stats.rx_len_errors +
5639 				    pf->stats.rx_undersize +
5640 				    pf->hw_csum_rx_error +
5641 				    pf->stats.rx_jabber +
5642 				    pf->stats.rx_fragments +
5643 				    pf->stats.rx_oversize;
5644 		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
5645 		/* record drops from the port level */
5646 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
5647 	}
5648 }
5649 
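/* Note on the helpers used below (as this driver uses them elsewhere):
 * ice_stat_update40()/ice_stat_update32() read free-running 40-bit/32-bit
 * hardware counters that are not cleared on read, so each call
 * accumulates the delta since the last snapshot into the current stat,
 * seeding the previous value on the first pass (while
 * pf->stat_prev_loaded is false) and handling counter wrap in between.
 */
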
5650 /**
5651  * ice_update_pf_stats - Update PF port stats counters
5652  * @pf: PF whose stats need to be updated
5653  */
5654 void ice_update_pf_stats(struct ice_pf *pf)
5655 {
5656 	struct ice_hw_port_stats *prev_ps, *cur_ps;
5657 	struct ice_hw *hw = &pf->hw;
5658 	u16 fd_ctr_base;
5659 	u8 port;
5660 
5661 	port = hw->port_info->lport;
5662 	prev_ps = &pf->stats_prev;
5663 	cur_ps = &pf->stats;
5664 
5665 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
5666 			  &prev_ps->eth.rx_bytes,
5667 			  &cur_ps->eth.rx_bytes);
5668 
5669 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
5670 			  &prev_ps->eth.rx_unicast,
5671 			  &cur_ps->eth.rx_unicast);
5672 
5673 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
5674 			  &prev_ps->eth.rx_multicast,
5675 			  &cur_ps->eth.rx_multicast);
5676 
5677 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
5678 			  &prev_ps->eth.rx_broadcast,
5679 			  &cur_ps->eth.rx_broadcast);
5680 
5681 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
5682 			  &prev_ps->eth.rx_discards,
5683 			  &cur_ps->eth.rx_discards);
5684 
5685 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
5686 			  &prev_ps->eth.tx_bytes,
5687 			  &cur_ps->eth.tx_bytes);
5688 
5689 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
5690 			  &prev_ps->eth.tx_unicast,
5691 			  &cur_ps->eth.tx_unicast);
5692 
5693 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
5694 			  &prev_ps->eth.tx_multicast,
5695 			  &cur_ps->eth.tx_multicast);
5696 
5697 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
5698 			  &prev_ps->eth.tx_broadcast,
5699 			  &cur_ps->eth.tx_broadcast);
5700 
5701 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
5702 			  &prev_ps->tx_dropped_link_down,
5703 			  &cur_ps->tx_dropped_link_down);
5704 
5705 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
5706 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
5707 
5708 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
5709 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
5710 
5711 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
5712 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
5713 
5714 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
5715 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
5716 
5717 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
5718 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
5719 
5720 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
5721 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
5722 
5723 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
5724 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
5725 
5726 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
5727 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
5728 
5729 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
5730 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
5731 
5732 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
5733 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
5734 
5735 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
5736 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
5737 
5738 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
5739 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
5740 
5741 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
5742 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
5743 
5744 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
5745 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
5746 
5747 	fd_ctr_base = hw->fd_ctr_base;
5748 
5749 	ice_stat_update40(hw,
5750 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
5751 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
5752 			  &cur_ps->fd_sb_match);
5753 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
5754 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
5755 
5756 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
5757 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
5758 
5759 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
5760 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
5761 
5762 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
5763 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
5764 
5765 	ice_update_dcb_stats(pf);
5766 
5767 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
5768 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
5769 
5770 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
5771 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
5772 
5773 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
5774 			  &prev_ps->mac_local_faults,
5775 			  &cur_ps->mac_local_faults);
5776 
5777 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
5778 			  &prev_ps->mac_remote_faults,
5779 			  &cur_ps->mac_remote_faults);
5780 
5781 	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
5782 			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
5783 
5784 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
5785 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
5786 
5787 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
5788 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
5789 
5790 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
5791 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
5792 
5793 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
5794 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
5795 
5796 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
5797 
5798 	pf->stat_prev_loaded = true;
5799 }
5800 
5801 /**
5802  * ice_get_stats64 - get statistics for network device structure
5803  * @netdev: network interface device structure
5804  * @stats: main device statistics structure
5805  */
5806 static
5807 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
5808 {
5809 	struct ice_netdev_priv *np = netdev_priv(netdev);
5810 	struct rtnl_link_stats64 *vsi_stats;
5811 	struct ice_vsi *vsi = np->vsi;
5812 
5813 	vsi_stats = &vsi->net_stats;
5814 
5815 	if (!vsi->num_txq || !vsi->num_rxq)
5816 		return;
5817 
5818 	/* netdev packet/byte stats come from the ring counters. These are
5819 	 * obtained by summing them up (done by ice_update_vsi_ring_stats).
5820 	 * But only call the update routine and read the registers if the
5821 	 * VSI is not down.
5822 	 */
5823 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
5824 		ice_update_vsi_ring_stats(vsi);
5825 	stats->tx_packets = vsi_stats->tx_packets;
5826 	stats->tx_bytes = vsi_stats->tx_bytes;
5827 	stats->rx_packets = vsi_stats->rx_packets;
5828 	stats->rx_bytes = vsi_stats->rx_bytes;
5829 
5830 	/* The rest of the stats can be read from the hardware but instead we
5831 	 * just return values that the watchdog task has already obtained from
5832 	 * the hardware.
5833 	 */
5834 	stats->multicast = vsi_stats->multicast;
5835 	stats->tx_errors = vsi_stats->tx_errors;
5836 	stats->tx_dropped = vsi_stats->tx_dropped;
5837 	stats->rx_errors = vsi_stats->rx_errors;
5838 	stats->rx_dropped = vsi_stats->rx_dropped;
5839 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
5840 	stats->rx_length_errors = vsi_stats->rx_length_errors;
5841 }
5842 
5843 /**
5844  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5845  * @vsi: VSI having NAPI disabled
5846  */
5847 static void ice_napi_disable_all(struct ice_vsi *vsi)
5848 {
5849 	int q_idx;
5850 
5851 	if (!vsi->netdev)
5852 		return;
5853 
5854 	ice_for_each_q_vector(vsi, q_idx) {
5855 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5856 
5857 		if (q_vector->rx.ring || q_vector->tx.ring)
5858 			napi_disable(&q_vector->napi);
5859 
5860 		cancel_work_sync(&q_vector->tx.dim.work);
5861 		cancel_work_sync(&q_vector->rx.dim.work);
5862 	}
5863 }
5864 
5865 /**
5866  * ice_down - Shutdown the connection
5867  * @vsi: The VSI being stopped
5868  */
5869 int ice_down(struct ice_vsi *vsi)
5870 {
5871 	int i, tx_err, rx_err, link_err = 0;
5872 
5873 	/* Caller of this function is expected to set the
5874 	 * vsi->state ICE_VSI_DOWN bit
5875 	 */
5876 	if (vsi->netdev) {
5877 		netif_carrier_off(vsi->netdev);
5878 		netif_tx_disable(vsi->netdev);
5879 	}
5880 
5881 	ice_vsi_dis_irq(vsi);
5882 
5883 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
5884 	if (tx_err)
5885 		netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
5886 			   vsi->vsi_num, tx_err);
5887 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
5888 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
5889 		if (tx_err)
5890 			netdev_err(vsi->netdev, "Failed to stop XDP rings, VSI %d error %d\n",
5891 				   vsi->vsi_num, tx_err);
5892 	}
5893 
5894 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
5895 	if (rx_err)
5896 		netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
5897 			   vsi->vsi_num, rx_err);
5898 
5899 	ice_napi_disable_all(vsi);
5900 
5901 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
5902 		link_err = ice_force_phys_link_state(vsi, false);
5903 		if (link_err)
5904 			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
5905 				   vsi->vsi_num, link_err);
5906 	}
5907 
5908 	ice_for_each_txq(vsi, i)
5909 		ice_clean_tx_ring(vsi->tx_rings[i]);
5910 
5911 	ice_for_each_rxq(vsi, i)
5912 		ice_clean_rx_ring(vsi->rx_rings[i]);
5913 
5914 	if (tx_err || rx_err || link_err) {
5915 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
5916 			   vsi->vsi_num, vsi->vsw->sw_id);
5917 		return -EIO;
5918 	}
5919 
5920 	return 0;
5921 }
5922 
5923 /**
5924  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
5925  * @vsi: VSI having resources allocated
5926  *
5927  * Return 0 on success, negative on failure
5928  */
5929 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
5930 {
5931 	int i, err = 0;
5932 
5933 	if (!vsi->num_txq) {
5934 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
5935 			vsi->vsi_num);
5936 		return -EINVAL;
5937 	}
5938 
5939 	ice_for_each_txq(vsi, i) {
5940 		struct ice_ring *ring = vsi->tx_rings[i];
5941 
5942 		if (!ring)
5943 			return -EINVAL;
5944 
5945 		ring->netdev = vsi->netdev;
5946 		err = ice_setup_tx_ring(ring);
5947 		if (err)
5948 			break;
5949 	}
5950 
5951 	return err;
5952 }
5953 
5954 /**
5955  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
5956  * @vsi: VSI having resources allocated
5957  *
5958  * Return 0 on success, negative on failure
5959  */
5960 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
5961 {
5962 	int i, err = 0;
5963 
5964 	if (!vsi->num_rxq) {
5965 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
5966 			vsi->vsi_num);
5967 		return -EINVAL;
5968 	}
5969 
5970 	ice_for_each_rxq(vsi, i) {
5971 		struct ice_ring *ring = vsi->rx_rings[i];
5972 
5973 		if (!ring)
5974 			return -EINVAL;
5975 
5976 		ring->netdev = vsi->netdev;
5977 		err = ice_setup_rx_ring(ring);
5978 		if (err)
5979 			break;
5980 	}
5981 
5982 	return err;
5983 }
5984 
5985 /**
5986  * ice_vsi_open_ctrl - open control VSI for use
5987  * @vsi: the VSI to open
5988  *
5989  * Initialization of the Control VSI
5990  *
5991  * Returns 0 on success, negative value on error
5992  */
5993 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
5994 {
5995 	char int_name[ICE_INT_NAME_STR_LEN];
5996 	struct ice_pf *pf = vsi->back;
5997 	struct device *dev;
5998 	int err;
5999 
6000 	dev = ice_pf_to_dev(pf);
6001 	/* allocate descriptors */
6002 	err = ice_vsi_setup_tx_rings(vsi);
6003 	if (err)
6004 		goto err_setup_tx;
6005 
6006 	err = ice_vsi_setup_rx_rings(vsi);
6007 	if (err)
6008 		goto err_setup_rx;
6009 
6010 	err = ice_vsi_cfg(vsi);
6011 	if (err)
6012 		goto err_setup_rx;
6013 
6014 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
6015 		 dev_driver_string(dev), dev_name(dev));
6016 	err = ice_vsi_req_irq_msix(vsi, int_name);
6017 	if (err)
6018 		goto err_setup_rx;
6019 
6020 	ice_vsi_cfg_msix(vsi);
6021 
6022 	err = ice_vsi_start_all_rx_rings(vsi);
6023 	if (err)
6024 		goto err_up_complete;
6025 
6026 	clear_bit(ICE_VSI_DOWN, vsi->state);
6027 	ice_vsi_ena_irq(vsi);
6028 
6029 	return 0;
6030 
6031 err_up_complete:
6032 	ice_down(vsi);
6033 err_setup_rx:
6034 	ice_vsi_free_rx_rings(vsi);
6035 err_setup_tx:
6036 	ice_vsi_free_tx_rings(vsi);
6037 
6038 	return err;
6039 }
6040 
6041 /**
6042  * ice_vsi_open - Called when a network interface is made active
6043  * @vsi: the VSI to open
6044  *
6045  * Initialization of the VSI
6046  *
6047  * Returns 0 on success, negative value on error
6048  */
6049 static int ice_vsi_open(struct ice_vsi *vsi)
6050 {
6051 	char int_name[ICE_INT_NAME_STR_LEN];
6052 	struct ice_pf *pf = vsi->back;
6053 	int err;
6054 
6055 	/* allocate descriptors */
6056 	err = ice_vsi_setup_tx_rings(vsi);
6057 	if (err)
6058 		goto err_setup_tx;
6059 
6060 	err = ice_vsi_setup_rx_rings(vsi);
6061 	if (err)
6062 		goto err_setup_rx;
6063 
6064 	err = ice_vsi_cfg(vsi);
6065 	if (err)
6066 		goto err_setup_rx;
6067 
6068 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
6069 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
6070 	err = ice_vsi_req_irq_msix(vsi, int_name);
6071 	if (err)
6072 		goto err_setup_rx;
6073 
6074 	/* Notify the stack of the actual queue counts. */
6075 	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
6076 	if (err)
6077 		goto err_set_qs;
6078 
6079 	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
6080 	if (err)
6081 		goto err_set_qs;
6082 
6083 	err = ice_up_complete(vsi);
6084 	if (err)
6085 		goto err_up_complete;
6086 
6087 	return 0;
6088 
6089 err_up_complete:
6090 	ice_down(vsi);
6091 err_set_qs:
6092 	ice_vsi_free_irq(vsi);
6093 err_setup_rx:
6094 	ice_vsi_free_rx_rings(vsi);
6095 err_setup_tx:
6096 	ice_vsi_free_tx_rings(vsi);
6097 
6098 	return err;
6099 }
6100 
6101 /**
6102  * ice_vsi_release_all - Delete all VSIs
6103  * @pf: PF from which all VSIs are being removed
6104  */
6105 static void ice_vsi_release_all(struct ice_pf *pf)
6106 {
6107 	int err, i;
6108 
6109 	if (!pf->vsi)
6110 		return;
6111 
6112 	ice_for_each_vsi(pf, i) {
6113 		if (!pf->vsi[i])
6114 			continue;
6115 
6116 		err = ice_vsi_release(pf->vsi[i]);
6117 		if (err)
6118 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
6119 				i, err, pf->vsi[i]->vsi_num);
6120 	}
6121 }
6122 
6123 /**
6124  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
6125  * @pf: pointer to the PF instance
6126  * @type: VSI type to rebuild
6127  *
6128  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
6129  */
6130 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
6131 {
6132 	struct device *dev = ice_pf_to_dev(pf);
6133 	enum ice_status status;
6134 	int i, err;
6135 
6136 	ice_for_each_vsi(pf, i) {
6137 		struct ice_vsi *vsi = pf->vsi[i];
6138 
6139 		if (!vsi || vsi->type != type)
6140 			continue;
6141 
6142 		/* rebuild the VSI */
6143 		err = ice_vsi_rebuild(vsi, true);
6144 		if (err) {
6145 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
6146 				err, vsi->idx, ice_vsi_type_str(type));
6147 			return err;
6148 		}
6149 
6150 		/* replay filters for the VSI */
6151 		status = ice_replay_vsi(&pf->hw, vsi->idx);
6152 		if (status) {
6153 			dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
6154 				ice_stat_str(status), vsi->idx,
6155 				ice_vsi_type_str(type));
6156 			return -EIO;
6157 		}
6158 
6159 		/* Re-map HW VSI number, using VSI handle that has been
6160 		 * previously validated in the ice_replay_vsi() call above
6161 		 */
6162 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
6163 
6164 		/* enable the VSI */
6165 		err = ice_ena_vsi(vsi, false);
6166 		if (err) {
6167 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
6168 				err, vsi->idx, ice_vsi_type_str(type));
6169 			return err;
6170 		}
6171 
6172 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
6173 			 ice_vsi_type_str(type));
6174 	}
6175 
6176 	return 0;
6177 }
6178 
6179 /**
6180  * ice_update_pf_netdev_link - Update PF netdev link status
6181  * @pf: pointer to the PF instance
6182  */
6183 static void ice_update_pf_netdev_link(struct ice_pf *pf)
6184 {
6185 	bool link_up;
6186 	int i;
6187 
6188 	ice_for_each_vsi(pf, i) {
6189 		struct ice_vsi *vsi = pf->vsi[i];
6190 
6191 		if (!vsi || vsi->type != ICE_VSI_PF)
6192 			return;
6193 
6194 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
6195 		if (link_up) {
6196 			netif_carrier_on(pf->vsi[i]->netdev);
6197 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
6198 		} else {
6199 			netif_carrier_off(pf->vsi[i]->netdev);
6200 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
6201 		}
6202 	}
6203 }
6204 
6205 /**
6206  * ice_rebuild - rebuild after reset
6207  * @pf: PF to rebuild
6208  * @reset_type: type of reset
6209  *
6210  * Do not rebuild VF VSIs in this flow because that is already handled via
6211  * ice_reset_all_vfs(). This is because the requirements for resetting a VF
6212  * after a PFR/CORER/GLOBR/etc. are different than the normal flow. Also, we
6213  * don't want to reset/rebuild all the VF VSIs twice.
6214  */
6215 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
6216 {
6217 	struct device *dev = ice_pf_to_dev(pf);
6218 	struct ice_hw *hw = &pf->hw;
6219 	enum ice_status ret;
6220 	int err;
6221 
6222 	if (test_bit(ICE_DOWN, pf->state))
6223 		goto clear_recovery;
6224 
6225 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
6226 
6227 	ret = ice_init_all_ctrlq(hw);
6228 	if (ret) {
6229 		dev_err(dev, "control queues init failed %s\n",
6230 			ice_stat_str(ret));
6231 		goto err_init_ctrlq;
6232 	}
6233 
6234 	/* if DDP was previously loaded successfully */
6235 	if (!ice_is_safe_mode(pf)) {
6236 		/* reload the SW DB of filter tables */
6237 		if (reset_type == ICE_RESET_PFR)
6238 			ice_fill_blk_tbls(hw);
6239 		else
6240 			/* Reload DDP Package after CORER/GLOBR reset */
6241 			ice_load_pkg(NULL, pf);
6242 	}
6243 
6244 	ret = ice_clear_pf_cfg(hw);
6245 	if (ret) {
6246 		dev_err(dev, "clear PF configuration failed %s\n",
6247 			ice_stat_str(ret));
6248 		goto err_init_ctrlq;
6249 	}
6250 
6251 	if (pf->first_sw->dflt_vsi_ena)
6252 		dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
6253 	/* clear the default VSI configuration if it exists */
6254 	pf->first_sw->dflt_vsi = NULL;
6255 	pf->first_sw->dflt_vsi_ena = false;
6256 
6257 	ice_clear_pxe_mode(hw);
6258 
6259 	ret = ice_init_nvm(hw);
6260 	if (ret) {
6261 		dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret));
6262 		goto err_init_ctrlq;
6263 	}
6264 
6265 	ret = ice_get_caps(hw);
6266 	if (ret) {
6267 		dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
6268 		goto err_init_ctrlq;
6269 	}
6270 
6271 	ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
6272 	if (ret) {
6273 		dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
6274 		goto err_init_ctrlq;
6275 	}
6276 
6277 	err = ice_sched_init_port(hw->port_info);
6278 	if (err)
6279 		goto err_sched_init_port;
6280 
6281 	/* start misc vector */
6282 	err = ice_req_irq_msix_misc(pf);
6283 	if (err) {
6284 		dev_err(dev, "misc vector setup failed: %d\n", err);
6285 		goto err_sched_init_port;
6286 	}
6287 
6288 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6289 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
6290 		if (!rd32(hw, PFQF_FD_SIZE)) {
6291 			u16 unused, guar, b_effort;
6292 
6293 			guar = hw->func_caps.fd_fltr_guar;
6294 			b_effort = hw->func_caps.fd_fltr_best_effort;
6295 
6296 			/* force guaranteed filter pool for PF */
6297 			ice_alloc_fd_guar_item(hw, &unused, guar);
6298 			/* force shared filter pool for PF */
6299 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
6300 		}
6301 	}
6302 
6303 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
6304 		ice_dcb_rebuild(pf);
6305 
6306 	/* rebuild PF VSI */
6307 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
6308 	if (err) {
6309 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
6310 		goto err_vsi_rebuild;
6311 	}
6312 
6313 	/* If Flow Director is active */
6314 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6315 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
6316 		if (err) {
6317 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
6318 			goto err_vsi_rebuild;
6319 		}
6320 
6321 		/* replay HW Flow Director recipes */
6322 		if (hw->fdir_prof)
6323 			ice_fdir_replay_flows(hw);
6324 
6325 		/* replay Flow Director filters */
6326 		ice_fdir_replay_fltrs(pf);
6327 
6328 		ice_rebuild_arfs(pf);
6329 	}
6330 
6331 	ice_update_pf_netdev_link(pf);
6332 
6333 	/* tell the firmware we are up */
6334 	ret = ice_send_version(pf);
6335 	if (ret) {
6336 		dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
6337 			ice_stat_str(ret));
6338 		goto err_vsi_rebuild;
6339 	}
6340 
6341 	ice_replay_post(hw);
6342 
6343 	/* if we get here, reset flow is successful */
6344 	clear_bit(ICE_RESET_FAILED, pf->state);
6345 
6346 	ice_plug_aux_dev(pf);
6347 	return;
6348 
6349 err_vsi_rebuild:
6350 err_sched_init_port:
6351 	ice_sched_cleanup_all(hw);
6352 err_init_ctrlq:
6353 	ice_shutdown_all_ctrlq(hw);
6354 	set_bit(ICE_RESET_FAILED, pf->state);
6355 clear_recovery:
6356 	/* set this bit in PF state to control service task scheduling */
6357 	set_bit(ICE_NEEDS_RESTART, pf->state);
6358 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
6359 }
6360 
6361 /**
6362  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
6363  * @vsi: Pointer to VSI structure
6364  */
6365 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
6366 {
6367 	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
6368 		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
6369 	else
6370 		return ICE_RXBUF_3072;
6371 }
6372 
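/* Worked example (illustrative; assumes 4K pages, legacy-rx disabled, and
 * the usual XDP_PACKET_HEADROOM of 256): ice_max_xdp_frame_size() above
 * returns ICE_RXBUF_3072 == 3072, so ice_change_mtu() below rejects any
 * MTU larger than 3072 - ICE_ETH_PKT_HDR_PAD while XDP is enabled.
 */
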
6373 /**
6374  * ice_change_mtu - NDO callback to change the MTU
6375  * @netdev: network interface device structure
6376  * @new_mtu: new value for maximum frame size
6377  *
6378  * Returns 0 on success, negative on failure
6379  */
6380 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
6381 {
6382 	struct ice_netdev_priv *np = netdev_priv(netdev);
6383 	struct ice_vsi *vsi = np->vsi;
6384 	struct ice_pf *pf = vsi->back;
6385 	struct iidc_event *event;
6386 	u8 count = 0;
6387 	int err = 0;
6388 
6389 	if (new_mtu == (int)netdev->mtu) {
6390 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
6391 		return 0;
6392 	}
6393 
6394 	if (ice_is_xdp_ena_vsi(vsi)) {
6395 		int frame_size = ice_max_xdp_frame_size(vsi);
6396 
6397 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
6398 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
6399 				   frame_size - ICE_ETH_PKT_HDR_PAD);
6400 			return -EINVAL;
6401 		}
6402 	}
6403 
6404 	/* if a reset is in progress, wait up to ~200 ms for it to complete */
6405 	do {
6406 		if (ice_is_reset_in_progress(pf->state)) {
6407 			count++;
6408 			usleep_range(1000, 2000);
6409 		} else {
6410 			break;
6411 		}
6412 
6413 	} while (count < 100);
6414 
6415 	if (count == 100) {
6416 		netdev_err(netdev, "can't change MTU. Device is busy\n");
6417 		return -EBUSY;
6418 	}
6419 
6420 	event = kzalloc(sizeof(*event), GFP_KERNEL);
6421 	if (!event)
6422 		return -ENOMEM;
6423 
6424 	set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6425 	ice_send_event_to_aux(pf, event);
6426 	clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6427 
6428 	netdev->mtu = (unsigned int)new_mtu;
6429 
6430 	/* if VSI is up, bring it down and then back up */
6431 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6432 		err = ice_down(vsi);
6433 		if (err) {
6434 			netdev_err(netdev, "change MTU if_down err %d\n", err);
6435 			goto event_after;
6436 		}
6437 
6438 		err = ice_up(vsi);
6439 		if (err) {
6440 			netdev_err(netdev, "change MTU if_up err %d\n", err);
6441 			goto event_after;
6442 		}
6443 	}
6444 
6445 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
6446 event_after:
6447 	set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
6448 	ice_send_event_to_aux(pf, event);
6449 	kfree(event);
6450 
6451 	return err;
6452 }
6453 
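/* Usage sketch (device name is illustrative): the MTU path above runs for
 *
 *	ip link set dev eth0 mtu 3000
 *
 * and, if the VSI was up, bounces it with ice_down()/ice_up(), with the
 * IIDC_EVENT_BEFORE/AFTER_MTU_CHANGE notifications bracketing the change
 * for any auxiliary (e.g. RDMA) driver.
 */
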
6454 /**
6455  * ice_aq_str - convert AQ err code to a string
6456  * @aq_err: the AQ error code to convert
6457  */
6458 const char *ice_aq_str(enum ice_aq_err aq_err)
6459 {
6460 	switch (aq_err) {
6461 	case ICE_AQ_RC_OK:
6462 		return "OK";
6463 	case ICE_AQ_RC_EPERM:
6464 		return "ICE_AQ_RC_EPERM";
6465 	case ICE_AQ_RC_ENOENT:
6466 		return "ICE_AQ_RC_ENOENT";
6467 	case ICE_AQ_RC_ENOMEM:
6468 		return "ICE_AQ_RC_ENOMEM";
6469 	case ICE_AQ_RC_EBUSY:
6470 		return "ICE_AQ_RC_EBUSY";
6471 	case ICE_AQ_RC_EEXIST:
6472 		return "ICE_AQ_RC_EEXIST";
6473 	case ICE_AQ_RC_EINVAL:
6474 		return "ICE_AQ_RC_EINVAL";
6475 	case ICE_AQ_RC_ENOSPC:
6476 		return "ICE_AQ_RC_ENOSPC";
6477 	case ICE_AQ_RC_ENOSYS:
6478 		return "ICE_AQ_RC_ENOSYS";
6479 	case ICE_AQ_RC_EMODE:
6480 		return "ICE_AQ_RC_EMODE";
6481 	case ICE_AQ_RC_ENOSEC:
6482 		return "ICE_AQ_RC_ENOSEC";
6483 	case ICE_AQ_RC_EBADSIG:
6484 		return "ICE_AQ_RC_EBADSIG";
6485 	case ICE_AQ_RC_ESVN:
6486 		return "ICE_AQ_RC_ESVN";
6487 	case ICE_AQ_RC_EBADMAN:
6488 		return "ICE_AQ_RC_EBADMAN";
6489 	case ICE_AQ_RC_EBADBUF:
6490 		return "ICE_AQ_RC_EBADBUF";
6491 	}
6492 
6493 	return "ICE_AQ_RC_UNKNOWN";
6494 }
6495 
6496 /**
6497  * ice_stat_str - convert status err code to a string
6498  * @stat_err: the status error code to convert
6499  */
6500 const char *ice_stat_str(enum ice_status stat_err)
6501 {
6502 	switch (stat_err) {
6503 	case ICE_SUCCESS:
6504 		return "OK";
6505 	case ICE_ERR_PARAM:
6506 		return "ICE_ERR_PARAM";
6507 	case ICE_ERR_NOT_IMPL:
6508 		return "ICE_ERR_NOT_IMPL";
6509 	case ICE_ERR_NOT_READY:
6510 		return "ICE_ERR_NOT_READY";
6511 	case ICE_ERR_NOT_SUPPORTED:
6512 		return "ICE_ERR_NOT_SUPPORTED";
6513 	case ICE_ERR_BAD_PTR:
6514 		return "ICE_ERR_BAD_PTR";
6515 	case ICE_ERR_INVAL_SIZE:
6516 		return "ICE_ERR_INVAL_SIZE";
6517 	case ICE_ERR_DEVICE_NOT_SUPPORTED:
6518 		return "ICE_ERR_DEVICE_NOT_SUPPORTED";
6519 	case ICE_ERR_RESET_FAILED:
6520 		return "ICE_ERR_RESET_FAILED";
6521 	case ICE_ERR_FW_API_VER:
6522 		return "ICE_ERR_FW_API_VER";
6523 	case ICE_ERR_NO_MEMORY:
6524 		return "ICE_ERR_NO_MEMORY";
6525 	case ICE_ERR_CFG:
6526 		return "ICE_ERR_CFG";
6527 	case ICE_ERR_OUT_OF_RANGE:
6528 		return "ICE_ERR_OUT_OF_RANGE";
6529 	case ICE_ERR_ALREADY_EXISTS:
6530 		return "ICE_ERR_ALREADY_EXISTS";
6531 	case ICE_ERR_NVM:
6532 		return "ICE_ERR_NVM";
6533 	case ICE_ERR_NVM_CHECKSUM:
6534 		return "ICE_ERR_NVM_CHECKSUM";
6535 	case ICE_ERR_BUF_TOO_SHORT:
6536 		return "ICE_ERR_BUF_TOO_SHORT";
6537 	case ICE_ERR_NVM_BLANK_MODE:
6538 		return "ICE_ERR_NVM_BLANK_MODE";
6539 	case ICE_ERR_IN_USE:
6540 		return "ICE_ERR_IN_USE";
6541 	case ICE_ERR_MAX_LIMIT:
6542 		return "ICE_ERR_MAX_LIMIT";
6543 	case ICE_ERR_RESET_ONGOING:
6544 		return "ICE_ERR_RESET_ONGOING";
6545 	case ICE_ERR_HW_TABLE:
6546 		return "ICE_ERR_HW_TABLE";
6547 	case ICE_ERR_DOES_NOT_EXIST:
6548 		return "ICE_ERR_DOES_NOT_EXIST";
6549 	case ICE_ERR_FW_DDP_MISMATCH:
6550 		return "ICE_ERR_FW_DDP_MISMATCH";
6551 	case ICE_ERR_AQ_ERROR:
6552 		return "ICE_ERR_AQ_ERROR";
6553 	case ICE_ERR_AQ_TIMEOUT:
6554 		return "ICE_ERR_AQ_TIMEOUT";
6555 	case ICE_ERR_AQ_FULL:
6556 		return "ICE_ERR_AQ_FULL";
6557 	case ICE_ERR_AQ_NO_WORK:
6558 		return "ICE_ERR_AQ_NO_WORK";
6559 	case ICE_ERR_AQ_EMPTY:
6560 		return "ICE_ERR_AQ_EMPTY";
6561 	case ICE_ERR_AQ_FW_CRITICAL:
6562 		return "ICE_ERR_AQ_FW_CRITICAL";
6563 	}
6564 
6565 	return "ICE_ERR_UNKNOWN";
6566 }
6567 
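/* Usage sketch (device name and key are illustrative): the RSS LUT and
 * key managed by the helpers below are typically driven via ethtool, e.g.
 *
 *	ethtool -X eth0 equal 8        # spread the LUT over 8 Rx queues
 *	ethtool -X eth0 hkey <colon-separated key bytes>
 */
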
6568 /**
6569  * ice_set_rss_lut - Set RSS LUT
6570  * @vsi: Pointer to VSI structure
6571  * @lut: Lookup table
6572  * @lut_size: Lookup table size
6573  *
6574  * Returns 0 on success, negative on failure
6575  */
6576 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6577 {
6578 	struct ice_aq_get_set_rss_lut_params params = {};
6579 	struct ice_hw *hw = &vsi->back->hw;
6580 	enum ice_status status;
6581 
6582 	if (!lut)
6583 		return -EINVAL;
6584 
6585 	params.vsi_handle = vsi->idx;
6586 	params.lut_size = lut_size;
6587 	params.lut_type = vsi->rss_lut_type;
6588 	params.lut = lut;
6589 
6590 	status = ice_aq_set_rss_lut(hw, &params);
6591 	if (status) {
6592 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n",
6593 			ice_stat_str(status),
6594 			ice_aq_str(hw->adminq.sq_last_status));
6595 		return -EIO;
6596 	}
6597 
6598 	return 0;
6599 }
6600 
6601 /**
6602  * ice_set_rss_key - Set RSS key
6603  * @vsi: Pointer to the VSI structure
6604  * @seed: RSS hash seed
6605  *
6606  * Returns 0 on success, negative on failure
6607  */
6608 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
6609 {
6610 	struct ice_hw *hw = &vsi->back->hw;
6611 	enum ice_status status;
6612 
6613 	if (!seed)
6614 		return -EINVAL;
6615 
6616 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6617 	if (status) {
6618 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n",
6619 			ice_stat_str(status),
6620 			ice_aq_str(hw->adminq.sq_last_status));
6621 		return -EIO;
6622 	}
6623 
6624 	return 0;
6625 }
6626 
6627 /**
6628  * ice_get_rss_lut - Get RSS LUT
6629  * @vsi: Pointer to VSI structure
6630  * @lut: Buffer to store the lookup table entries
6631  * @lut_size: Size of buffer to store the lookup table entries
6632  *
6633  * Returns 0 on success, negative on failure
6634  */
6635 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6636 {
6637 	struct ice_aq_get_set_rss_lut_params params = {};
6638 	struct ice_hw *hw = &vsi->back->hw;
6639 	enum ice_status status;
6640 
6641 	if (!lut)
6642 		return -EINVAL;
6643 
6644 	params.vsi_handle = vsi->idx;
6645 	params.lut_size = lut_size;
6646 	params.lut_type = vsi->rss_lut_type;
6647 	params.lut = lut;
6648 
6649 	status = ice_aq_get_rss_lut(hw, &params);
6650 	if (status) {
6651 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n",
6652 			ice_stat_str(status),
6653 			ice_aq_str(hw->adminq.sq_last_status));
6654 		return -EIO;
6655 	}
6656 
6657 	return 0;
6658 }
6659 
6660 /**
6661  * ice_get_rss_key - Get RSS key
6662  * @vsi: Pointer to VSI structure
6663  * @seed: Buffer to store the key in
6664  *
6665  * Returns 0 on success, negative on failure
6666  */
6667 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
6668 {
6669 	struct ice_hw *hw = &vsi->back->hw;
6670 	enum ice_status status;
6671 
6672 	if (!seed)
6673 		return -EINVAL;
6674 
6675 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6676 	if (status) {
6677 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n",
6678 			ice_stat_str(status),
6679 			ice_aq_str(hw->adminq.sq_last_status));
6680 		return -EIO;
6681 	}
6682 
6683 	return 0;
6684 }
6685 
6686 /**
6687  * ice_bridge_getlink - Get the hardware bridge mode
6688  * @skb: skb buff
6689  * @pid: process ID
6690  * @seq: RTNL message seq
6691  * @dev: the netdev being configured
6692  * @filter_mask: filter mask passed in
6693  * @nlflags: netlink flags passed in
6694  *
6695  * Return the bridge mode (VEB/VEPA)
6696  */
6697 static int
6698 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
6699 		   struct net_device *dev, u32 filter_mask, int nlflags)
6700 {
6701 	struct ice_netdev_priv *np = netdev_priv(dev);
6702 	struct ice_vsi *vsi = np->vsi;
6703 	struct ice_pf *pf = vsi->back;
6704 	u16 bmode;
6705 
6706 	bmode = pf->first_sw->bridge_mode;
6707 
6708 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
6709 				       filter_mask, NULL);
6710 }
6711 
6712 /**
6713  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
6714  * @vsi: Pointer to VSI structure
6715  * @bmode: Hardware bridge mode (VEB/VEPA)
6716  *
6717  * Returns 0 on success, negative on failure
6718  */
6719 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
6720 {
6721 	struct ice_aqc_vsi_props *vsi_props;
6722 	struct ice_hw *hw = &vsi->back->hw;
6723 	struct ice_vsi_ctx *ctxt;
6724 	enum ice_status status;
6725 	int ret = 0;
6726 
6727 	vsi_props = &vsi->info;
6728 
6729 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
6730 	if (!ctxt)
6731 		return -ENOMEM;
6732 
6733 	ctxt->info = vsi->info;
6734 
6735 	if (bmode == BRIDGE_MODE_VEB)
6736 		/* change from VEPA to VEB mode */
6737 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6738 	else
6739 		/* change from VEB to VEPA mode */
6740 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6741 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
6742 
6743 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
6744 	if (status) {
6745 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
6746 			bmode, ice_stat_str(status),
6747 			ice_aq_str(hw->adminq.sq_last_status));
6748 		ret = -EIO;
6749 		goto out;
6750 	}
6751 	/* Update sw flags for bookkeeping */
6752 	vsi_props->sw_flags = ctxt->info.sw_flags;
6753 
6754 out:
6755 	kfree(ctxt);
6756 	return ret;
6757 }
6758 
6759 /**
6760  * ice_bridge_setlink - Set the hardware bridge mode
6761  * @dev: the netdev being configured
6762  * @nlh: RTNL message
6763  * @flags: bridge setlink flags
6764  * @extack: netlink extended ack
6765  *
6766  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI)
6767  * is hooked up. Iterates through the PF VSI list and sets the loopback
6768  * mode (if not already set) for all VSIs connected to this switch, and
6769  * also updates the unicast switch filter rules for that switch.
6770  */
6771 static int
6772 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
6773 		   u16 __always_unused flags,
6774 		   struct netlink_ext_ack __always_unused *extack)
6775 {
6776 	struct ice_netdev_priv *np = netdev_priv(dev);
6777 	struct ice_pf *pf = np->vsi->back;
6778 	struct nlattr *attr, *br_spec;
6779 	struct ice_hw *hw = &pf->hw;
6780 	enum ice_status status;
6781 	struct ice_sw *pf_sw;
6782 	int rem, v, err = 0;
6783 
6784 	pf_sw = pf->first_sw;
6785 	/* find the attribute in the netlink message */
6786 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
6787 
6788 	nla_for_each_nested(attr, br_spec, rem) {
6789 		__u16 mode;
6790 
6791 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
6792 			continue;
6793 		mode = nla_get_u16(attr);
6794 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
6795 			return -EINVAL;
6796 		/* Continue if bridge mode is not being flipped */
6797 		if (mode == pf_sw->bridge_mode)
6798 			continue;
6799 		/* Iterate through the PF VSI list and update the loopback
6800 		 * mode of each VSI
6801 		 */
6802 		ice_for_each_vsi(pf, v) {
6803 			if (!pf->vsi[v])
6804 				continue;
6805 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
6806 			if (err)
6807 				return err;
6808 		}
6809 
6810 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
6811 		/* Update the unicast switch filter rules for the corresponding
6812 		 * switch of the netdev
6813 		 */
6814 		status = ice_update_sw_rule_bridge_mode(hw);
6815 		if (status) {
6816 			netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
6817 				   mode, ice_stat_str(status),
6818 				   ice_aq_str(hw->adminq.sq_last_status));
6819 			/* revert hw->evb_veb */
6820 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
6821 			return -EIO;
6822 		}
6823 
6824 		pf_sw->bridge_mode = mode;
6825 	}
6826 
6827 	return 0;
6828 }
6829 
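/* Usage sketch (device name is illustrative): the VEB/VEPA flip handled
 * by ice_bridge_setlink() above is normally requested from userspace
 * with iproute2, e.g.
 *
 *	bridge link set dev eth0 hwmode vepa
 *	bridge link set dev eth0 hwmode veb
 */
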
6830 /**
6831  * ice_tx_timeout - Respond to a Tx Hang
6832  * @netdev: network interface device structure
6833  * @txqueue: Tx queue
6834  */
6835 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
6836 {
6837 	struct ice_netdev_priv *np = netdev_priv(netdev);
6838 	struct ice_ring *tx_ring = NULL;
6839 	struct ice_vsi *vsi = np->vsi;
6840 	struct ice_pf *pf = vsi->back;
6841 	u32 i;
6842 
6843 	pf->tx_timeout_count++;
6844 
6845 	/* Check if PFC is enabled for the TC to which the queue belongs.
6846 	 * If yes, then the Tx timeout is not caused by a hung queue and there
6847 	 * is no need to reset and rebuild
6848 	 */
6849 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
6850 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
6851 			 txqueue);
6852 		return;
6853 	}
6854 
6855 	/* now that we have an index, find the tx_ring struct */
6856 	for (i = 0; i < vsi->num_txq; i++)
6857 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
6858 			if (txqueue == vsi->tx_rings[i]->q_index) {
6859 				tx_ring = vsi->tx_rings[i];
6860 				break;
6861 			}
6862 
6863 	/* Reset recovery level if enough time has elapsed after last timeout.
6864 	 * Also ensure no new reset action happens before next timeout period.
6865 	 */
6866 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
6867 		pf->tx_timeout_recovery_level = 1;
6868 	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
6869 				       netdev->watchdog_timeo)))
6870 		return;
6871 
6872 	if (tx_ring) {
6873 		struct ice_hw *hw = &pf->hw;
6874 		u32 head, val = 0;
6875 
6876 		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
6877 			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
6878 		/* Read interrupt register */
6879 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
6880 
6881 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
6882 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
6883 			    head, tx_ring->next_to_use, val);
6884 	}
6885 
6886 	pf->tx_timeout_last_recovery = jiffies;
6887 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
6888 		    pf->tx_timeout_recovery_level, txqueue);
6889 
6890 	switch (pf->tx_timeout_recovery_level) {
6891 	case 1:
6892 		set_bit(ICE_PFR_REQ, pf->state);
6893 		break;
6894 	case 2:
6895 		set_bit(ICE_CORER_REQ, pf->state);
6896 		break;
6897 	case 3:
6898 		set_bit(ICE_GLOBR_REQ, pf->state);
6899 		break;
6900 	default:
6901 		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
6902 		set_bit(ICE_DOWN, pf->state);
6903 		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
6904 		set_bit(ICE_SERVICE_DIS, pf->state);
6905 		break;
6906 	}
6907 
6908 	ice_service_task_schedule(pf);
6909 	pf->tx_timeout_recovery_level++;
6910 }
6911 
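/* For reference, the escalation used by ice_tx_timeout() above: the
 * recovery level increments with each timeout and resets to 1 once 20
 * seconds pass without another one. Level 1 requests a PF reset (PFR),
 * level 2 a core reset (CORER), level 3 a global reset (GLOBR); beyond
 * that the device is declared unrecoverable and is taken down.
 */
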
6912 /**
6913  * ice_open - Called when a network interface becomes active
6914  * @netdev: network interface device structure
6915  *
6916  * The open entry point is called when a network interface is made
6917  * active by the system (IFF_UP). At this point all resources needed
6918  * for transmit and receive operations are allocated, the interrupt
6919  * handler is registered with the OS, the netdev watchdog is enabled,
6920  * and the stack is notified that the interface is ready.
6921  *
6922  * Returns 0 on success, negative value on failure
6923  */
6924 int ice_open(struct net_device *netdev)
6925 {
6926 	struct ice_netdev_priv *np = netdev_priv(netdev);
6927 	struct ice_pf *pf = np->vsi->back;
6928 
6929 	if (ice_is_reset_in_progress(pf->state)) {
6930 		netdev_err(netdev, "can't open net device while reset is in progress\n");
6931 		return -EBUSY;
6932 	}
6933 
6934 	return ice_open_internal(netdev);
6935 }
6936 
6937 /**
6938  * ice_open_internal - Called when a network interface becomes active
6939  * @netdev: network interface device structure
6940  *
6941  * Internal ice_open implementation. Should not be used directly except by
6942  * ice_open and the reset handling routine
6943  *
6944  * Returns 0 on success, negative value on failure
6945  */
6946 int ice_open_internal(struct net_device *netdev)
6947 {
6948 	struct ice_netdev_priv *np = netdev_priv(netdev);
6949 	struct ice_vsi *vsi = np->vsi;
6950 	struct ice_pf *pf = vsi->back;
6951 	struct ice_port_info *pi;
6952 	enum ice_status status;
6953 	int err;
6954 
6955 	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
6956 		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
6957 		return -EIO;
6958 	}
6959 
6960 	netif_carrier_off(netdev);
6961 
6962 	pi = vsi->port_info;
6963 	status = ice_update_link_info(pi);
6964 	if (status) {
6965 		netdev_err(netdev, "Failed to get link info, error %s\n",
6966 			   ice_stat_str(status));
6967 		return -EIO;
6968 	}
6969 
6970 	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
6971 
6972 	/* Set the PHY if there is media; otherwise, turn off the PHY */
6973 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
6974 		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
6975 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
6976 			err = ice_init_phy_user_cfg(pi);
6977 			if (err) {
6978 				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
6979 					   err);
6980 				return err;
6981 			}
6982 		}
6983 
6984 		err = ice_configure_phy(vsi);
6985 		if (err) {
6986 			netdev_err(netdev, "Failed to set physical link up, error %d\n",
6987 				   err);
6988 			return err;
6989 		}
6990 	} else {
6991 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
6992 		ice_set_link(vsi, false);
6993 	}
6994 
6995 	err = ice_vsi_open(vsi);
6996 	if (err)
6997 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
6998 			   vsi->vsi_num, vsi->vsw->sw_id);
6999 
7000 	/* Update existing tunnels information */
7001 	udp_tunnel_get_rx_info(netdev);
7002 
7003 	return err;
7004 }
7005 
7006 /**
7007  * ice_stop - Disables a network interface
7008  * @netdev: network interface device structure
7009  *
7010  * The stop entry point is called when an interface is de-activated by the OS,
7011  * and the netdevice enters the DOWN state. The hardware is still under the
7012  * driver's control, but the netdev interface is disabled.
7013  *
7014  * Returns 0 on success; -EBUSY while a reset is in progress
7015  */
7016 int ice_stop(struct net_device *netdev)
7017 {
7018 	struct ice_netdev_priv *np = netdev_priv(netdev);
7019 	struct ice_vsi *vsi = np->vsi;
7020 	struct ice_pf *pf = vsi->back;
7021 
7022 	if (ice_is_reset_in_progress(pf->state)) {
7023 		netdev_err(netdev, "can't stop net device while reset is in progress\n");
7024 		return -EBUSY;
7025 	}
7026 
7027 	ice_vsi_close(vsi);
7028 
7029 	return 0;
7030 }
7031 
7032 /**
7033  * ice_features_check - Validate encapsulated packet conforms to limits
7034  * @skb: skb buffer
7035  * @netdev: This port's netdev
7036  * @features: Offload features that the stack believes apply
7037  */
7038 static netdev_features_t
7039 ice_features_check(struct sk_buff *skb,
7040 		   struct net_device __always_unused *netdev,
7041 		   netdev_features_t features)
7042 {
7043 	size_t len;
7044 
7045 	/* No point in doing any of this if neither checksum nor GSO is
7046 	 * being requested for this frame. We can rule out both by just
7047 	 * checking for CHECKSUM_PARTIAL
7048 	 */
7049 	if (skb->ip_summed != CHECKSUM_PARTIAL)
7050 		return features;
7051 
7052 	/* We cannot support GSO if the MSS is going to be less than
7053 	 * 64 bytes. If it is, then we need to drop support for GSO.
7054 	 */
7055 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
7056 		features &= ~NETIF_F_GSO_MASK;
7057 
7058 	len = skb_network_header(skb) - skb->data;
7059 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
7060 		goto out_rm_features;
7061 
7062 	len = skb_transport_header(skb) - skb_network_header(skb);
7063 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
7064 		goto out_rm_features;
7065 
7066 	if (skb->encapsulation) {
7067 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
7068 		if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
7069 			goto out_rm_features;
7070 
7071 		len = skb_inner_transport_header(skb) -
7072 		      skb_inner_network_header(skb);
7073 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
7074 			goto out_rm_features;
7075 	}
7076 
7077 	return features;
7078 out_rm_features:
7079 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
7080 }
7081 
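/* Worked example for ice_features_check() above: a CHECKSUM_PARTIAL GSO
 * skb with gso_size 48 (< 64) keeps its checksum offload but loses
 * NETIF_F_GSO_MASK, so the stack segments it in software before handing
 * the resulting frames to the driver.
 */
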
7082 static const struct net_device_ops ice_netdev_safe_mode_ops = {
7083 	.ndo_open = ice_open,
7084 	.ndo_stop = ice_stop,
7085 	.ndo_start_xmit = ice_start_xmit,
7086 	.ndo_set_mac_address = ice_set_mac_address,
7087 	.ndo_validate_addr = eth_validate_addr,
7088 	.ndo_change_mtu = ice_change_mtu,
7089 	.ndo_get_stats64 = ice_get_stats64,
7090 	.ndo_tx_timeout = ice_tx_timeout,
7091 };
7092 
7093 static const struct net_device_ops ice_netdev_ops = {
7094 	.ndo_open = ice_open,
7095 	.ndo_stop = ice_stop,
7096 	.ndo_start_xmit = ice_start_xmit,
7097 	.ndo_features_check = ice_features_check,
7098 	.ndo_set_rx_mode = ice_set_rx_mode,
7099 	.ndo_set_mac_address = ice_set_mac_address,
7100 	.ndo_validate_addr = eth_validate_addr,
7101 	.ndo_change_mtu = ice_change_mtu,
7102 	.ndo_get_stats64 = ice_get_stats64,
7103 	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
7104 	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
7105 	.ndo_set_vf_mac = ice_set_vf_mac,
7106 	.ndo_get_vf_config = ice_get_vf_cfg,
7107 	.ndo_set_vf_trust = ice_set_vf_trust,
7108 	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
7109 	.ndo_set_vf_link_state = ice_set_vf_link_state,
7110 	.ndo_get_vf_stats = ice_get_vf_stats,
7111 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
7112 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
7113 	.ndo_set_features = ice_set_features,
7114 	.ndo_bridge_getlink = ice_bridge_getlink,
7115 	.ndo_bridge_setlink = ice_bridge_setlink,
7116 	.ndo_fdb_add = ice_fdb_add,
7117 	.ndo_fdb_del = ice_fdb_del,
7118 #ifdef CONFIG_RFS_ACCEL
7119 	.ndo_rx_flow_steer = ice_rx_flow_steer,
7120 #endif
7121 	.ndo_tx_timeout = ice_tx_timeout,
7122 	.ndo_bpf = ice_xdp,
7123 	.ndo_xdp_xmit = ice_xdp_xmit,
7124 	.ndo_xsk_wakeup = ice_xsk_wakeup,
7125 };
7126