xref: /linux/drivers/net/ethernet/intel/ice/ice_main.c (revision efc7d01a9ecdc59946fad1743d96a0db9925064c)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <generated/utsrelease.h>
9 #include "ice.h"
10 #include "ice_base.h"
11 #include "ice_lib.h"
12 #include "ice_fltr.h"
13 #include "ice_dcb_lib.h"
14 #include "ice_dcb_nl.h"
15 #include "ice_devlink.h"
16 
17 #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
18 static const char ice_driver_string[] = DRV_SUMMARY;
19 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
20 
21 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
22 #define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
23 #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
24 
25 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
26 MODULE_DESCRIPTION(DRV_SUMMARY);
27 MODULE_LICENSE("GPL v2");
28 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
29 
30 static int debug = -1;
31 module_param(debug, int, 0644);
32 #ifndef CONFIG_DYNAMIC_DEBUG
33 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
34 #else
35 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
36 #endif /* !CONFIG_DYNAMIC_DEBUG */
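
/* Illustrative examples per the parameter descriptions above: "modprobe ice
 * debug=16" enables all netif message types, while a value of the form
 * 0x8XXXXXXX is taken as the hw debug_mask (honored only when
 * CONFIG_DYNAMIC_DEBUG is not enabled).
 */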
37 
38 static struct workqueue_struct *ice_wq;
39 static const struct net_device_ops ice_netdev_safe_mode_ops;
40 static const struct net_device_ops ice_netdev_ops;
41 static int ice_vsi_open(struct ice_vsi *vsi);
42 
43 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
44 
45 static void ice_vsi_release_all(struct ice_pf *pf);
46 
47 /**
48  * ice_get_tx_pending - returns number of Tx descriptors not processed
49  * @ring: the ring of descriptors
50  */
51 static u16 ice_get_tx_pending(struct ice_ring *ring)
52 {
53 	u16 head, tail;
54 
55 	head = ring->next_to_clean;
56 	tail = ring->next_to_use;
57 
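	/* The pending count must account for ring wrap-around, e.g. with
	 * count = 8, head = 6 and tail = 2 there are 2 + 8 - 6 = 4
	 * descriptors still awaiting cleanup.
	 */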
58 	if (head != tail)
59 		return (head < tail) ?
60 			tail - head : (tail + ring->count - head);
61 	return 0;
62 }
63 
64 /**
65  * ice_check_for_hang_subtask - check for and recover hung queues
66  * @pf: pointer to PF struct
67  */
68 static void ice_check_for_hang_subtask(struct ice_pf *pf)
69 {
70 	struct ice_vsi *vsi = NULL;
71 	struct ice_hw *hw;
72 	unsigned int i;
73 	int packets;
74 	u32 v;
75 
76 	ice_for_each_vsi(pf, v)
77 		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
78 			vsi = pf->vsi[v];
79 			break;
80 		}
81 
82 	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
83 		return;
84 
85 	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
86 		return;
87 
88 	hw = &vsi->back->hw;
89 
90 	for (i = 0; i < vsi->num_txq; i++) {
91 		struct ice_ring *tx_ring = vsi->tx_rings[i];
92 
93 		if (tx_ring && tx_ring->desc) {
94 			/* If packet counter has not changed the queue is
95 			 * likely stalled, so force an interrupt for this
96 			 * queue.
97 			 *
98 			 * prev_pkt would be negative if there was no
99 			 * pending work.
100 			 */
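			/* Masking with INT_MAX keeps the snapshot of the
			 * packet counter non-negative, so it can never
			 * collide with the -1 "no pending work" sentinel
			 * described above.
			 */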
101 			packets = tx_ring->stats.pkts & INT_MAX;
102 			if (tx_ring->tx_stats.prev_pkt == packets) {
103 				/* Trigger sw interrupt to revive the queue */
104 				ice_trigger_sw_intr(hw, tx_ring->q_vector);
105 				continue;
106 			}
107 
108 			/* Memory barrier between read of packet count and call
109 			 * to ice_get_tx_pending()
110 			 */
111 			smp_rmb();
112 			tx_ring->tx_stats.prev_pkt =
113 			    ice_get_tx_pending(tx_ring) ? packets : -1;
114 		}
115 	}
116 }
117 
118 /**
119  * ice_init_mac_fltr - Set initial MAC filters
120  * @pf: board private structure
121  *
122  * Set initial set of MAC filters for PF VSI; configure filters for permanent
123  * address and broadcast address. If an error is encountered, the netdevice
124  * will be unregistered.
125  */
126 static int ice_init_mac_fltr(struct ice_pf *pf)
127 {
128 	enum ice_status status;
129 	struct ice_vsi *vsi;
130 	u8 *perm_addr;
131 
132 	vsi = ice_get_main_vsi(pf);
133 	if (!vsi)
134 		return -EINVAL;
135 
136 	perm_addr = vsi->port_info->mac.perm_addr;
137 	status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
138 	if (!status)
139 		return 0;
140 
141 	/* We aren't useful with no MAC filters, so unregister if we
142 	 * had an error
143 	 */
144 	if (vsi->netdev->reg_state == NETREG_REGISTERED) {
145 		dev_err(ice_pf_to_dev(pf), "Could not add MAC filters, error %s. Unregistering device\n",
146 			ice_stat_str(status));
147 		unregister_netdev(vsi->netdev);
148 		free_netdev(vsi->netdev);
149 		vsi->netdev = NULL;
150 	}
151 
152 	return -EIO;
153 }
154 
155 /**
156  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
157  * @netdev: the net device on which the sync is happening
158  * @addr: MAC address to sync
159  *
160  * This is a callback function which is called by the in-kernel device sync
161  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
162  * populates the tmp_sync_list, which is later used by ice_fltr_add_mac_list
163  * to add the MAC filters to the hardware.
164  */
165 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
166 {
167 	struct ice_netdev_priv *np = netdev_priv(netdev);
168 	struct ice_vsi *vsi = np->vsi;
169 
170 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
171 				     ICE_FWD_TO_VSI))
172 		return -EINVAL;
173 
174 	return 0;
175 }
176 
177 /**
178  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
179  * @netdev: the net device on which the unsync is happening
180  * @addr: MAC address to unsync
181  *
182  * This is a callback function which is called by the in-kernel device unsync
183  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
184  * populates the tmp_unsync_list, which is later used by
185  * ice_fltr_remove_mac_list to delete the MAC filters from the hardware.
186  */
187 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
188 {
189 	struct ice_netdev_priv *np = netdev_priv(netdev);
190 	struct ice_vsi *vsi = np->vsi;
191 
192 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
193 				     ICE_FWD_TO_VSI))
194 		return -EINVAL;
195 
196 	return 0;
197 }
198 
199 /**
200  * ice_vsi_fltr_changed - check if filter state changed
201  * @vsi: VSI to be checked
202  *
203  * returns true if filter state has changed, false otherwise.
204  */
205 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
206 {
207 	return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
208 	       test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
209 	       test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
210 }
211 
212 /**
213  * ice_cfg_promisc - Enable or disable promiscuous mode for a given VSI
214  * @vsi: the VSI being configured
215  * @promisc_m: mask of promiscuous config bits
216  * @set_promisc: enable or disable promisc flag request
217  * Returns 0 on success, -EIO if updating the promiscuous state fails.
218  */
219 static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
220 {
221 	struct ice_hw *hw = &vsi->back->hw;
222 	enum ice_status status = 0;
223 
224 	if (vsi->type != ICE_VSI_PF)
225 		return 0;
226 
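	/* When VLANs are configured on the VSI, promiscuous mode is applied
	 * through the VLAN-aware ice_set_vlan_vsi_promisc() helper; otherwise
	 * a single VSI-wide promiscuous rule is sufficient.
	 */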
227 	if (vsi->vlan_ena) {
228 		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
229 						  set_promisc);
230 	} else {
231 		if (set_promisc)
232 			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
233 						     0);
234 		else
235 			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
236 						       0);
237 	}
238 
239 	if (status)
240 		return -EIO;
241 
242 	return 0;
243 }
244 
245 /**
246  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
247  * @vsi: ptr to the VSI
248  *
249  * Push any outstanding VSI filter changes through the AdminQ.
250  */
251 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
252 {
253 	struct device *dev = ice_pf_to_dev(vsi->back);
254 	struct net_device *netdev = vsi->netdev;
255 	bool promisc_forced_on = false;
256 	struct ice_pf *pf = vsi->back;
257 	struct ice_hw *hw = &pf->hw;
258 	enum ice_status status = 0;
259 	u32 changed_flags = 0;
260 	u8 promisc_m;
261 	int err = 0;
262 
263 	if (!vsi->netdev)
264 		return -EINVAL;
265 
266 	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
267 		usleep_range(1000, 2000);
268 
269 	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
270 	vsi->current_netdev_flags = vsi->netdev->flags;
271 
272 	INIT_LIST_HEAD(&vsi->tmp_sync_list);
273 	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
274 
275 	if (ice_vsi_fltr_changed(vsi)) {
276 		clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
277 		clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
278 		clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
279 
280 		/* grab the netdev's addr_list_lock */
281 		netif_addr_lock_bh(netdev);
282 		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
283 			      ice_add_mac_to_unsync_list);
284 		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
285 			      ice_add_mac_to_unsync_list);
286 		/* our temp lists are populated. release lock */
287 		netif_addr_unlock_bh(netdev);
288 	}
289 
290 	/* Remove MAC addresses in the unsync list */
291 	status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
292 	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
293 	if (status) {
294 		netdev_err(netdev, "Failed to delete MAC filters\n");
295 		/* if we failed because of alloc failures, just bail */
296 		if (status == ICE_ERR_NO_MEMORY) {
297 			err = -ENOMEM;
298 			goto out;
299 		}
300 	}
301 
302 	/* Add MAC addresses in the sync list */
303 	status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
304 	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
305 	/* If the filter was added successfully or already exists, don't
306 	 * report it as an error. Instead, continue processing the rest of
307 	 * the function.
308 	 */
309 	if (status && status != ICE_ERR_ALREADY_EXISTS) {
310 		netdev_err(netdev, "Failed to add MAC filters\n");
311 		/* If there is no more space for new umac filters, VSI
312 		 * should go into promiscuous mode. There should be some
313 		 * space reserved for promiscuous filters.
314 		 */
315 		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
316 		    !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
317 				      vsi->state)) {
318 			promisc_forced_on = true;
319 			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
320 				    vsi->vsi_num);
321 		} else {
322 			err = -EIO;
323 			goto out;
324 		}
325 	}
326 	/* check for changes in promiscuous modes */
327 	if (changed_flags & IFF_ALLMULTI) {
328 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
329 			if (vsi->vlan_ena)
330 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
331 			else
332 				promisc_m = ICE_MCAST_PROMISC_BITS;
333 
334 			err = ice_cfg_promisc(vsi, promisc_m, true);
335 			if (err) {
336 				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
337 					   vsi->vsi_num);
338 				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
339 				goto out_promisc;
340 			}
341 		} else {
342 			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
343 			if (vsi->vlan_ena)
344 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
345 			else
346 				promisc_m = ICE_MCAST_PROMISC_BITS;
347 
348 			err = ice_cfg_promisc(vsi, promisc_m, false);
349 			if (err) {
350 				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
351 					   vsi->vsi_num);
352 				vsi->current_netdev_flags |= IFF_ALLMULTI;
353 				goto out_promisc;
354 			}
355 		}
356 	}
357 
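	/* Sync the default-VSI Rx rule if IFF_PROMISC toggled, if promiscuous
	 * mode was forced on by the MAC filter overflow handling above, or if
	 * a previous attempt left ICE_VSI_FLAG_PROMISC_CHANGED set.
	 */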
358 	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
359 	    test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
360 		clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
361 		if (vsi->current_netdev_flags & IFF_PROMISC) {
362 			/* Apply Rx filter rule to get traffic from wire */
363 			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
364 				err = ice_set_dflt_vsi(pf->first_sw, vsi);
365 				if (err && err != -EEXIST) {
366 					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
367 						   err, vsi->vsi_num);
368 					vsi->current_netdev_flags &=
369 						~IFF_PROMISC;
370 					goto out_promisc;
371 				}
372 				ice_cfg_vlan_pruning(vsi, false, false);
373 			}
374 		} else {
375 			/* Clear Rx filter to remove traffic from wire */
376 			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
377 				err = ice_clear_dflt_vsi(pf->first_sw);
378 				if (err) {
379 					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
380 						   err, vsi->vsi_num);
381 					vsi->current_netdev_flags |=
382 						IFF_PROMISC;
383 					goto out_promisc;
384 				}
385 				if (vsi->num_vlan > 1)
386 					ice_cfg_vlan_pruning(vsi, true, false);
387 			}
388 		}
389 	}
390 	goto exit;
391 
392 out_promisc:
393 	set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
394 	goto exit;
395 out:
396 	/* if something went wrong then set the changed flag so we try again */
397 	set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
398 	set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
399 exit:
400 	clear_bit(__ICE_CFG_BUSY, vsi->state);
401 	return err;
402 }
403 
404 /**
405  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
406  * @pf: board private structure
407  */
408 static void ice_sync_fltr_subtask(struct ice_pf *pf)
409 {
410 	int v;
411 
412 	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
413 		return;
414 
415 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
416 
417 	ice_for_each_vsi(pf, v)
418 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
419 		    ice_vsi_sync_fltr(pf->vsi[v])) {
420 			/* come back and try again later */
421 			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
422 			break;
423 		}
424 }
425 
426 /**
427  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
428  * @pf: the PF
429  * @locked: is the rtnl_lock already held
430  */
431 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
432 {
433 	int v;
434 
435 	ice_for_each_vsi(pf, v)
436 		if (pf->vsi[v])
437 			ice_dis_vsi(pf->vsi[v], locked);
438 }
439 
440 /**
441  * ice_prepare_for_reset - prep for the core to reset
442  * @pf: board private structure
443  *
444  * Inform or close all dependent features in prep for reset.
445  */
446 static void
447 ice_prepare_for_reset(struct ice_pf *pf)
448 {
449 	struct ice_hw *hw = &pf->hw;
450 	unsigned int i;
451 
452 	/* already prepared for reset */
453 	if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
454 		return;
455 
456 	/* Notify VFs of impending reset */
457 	if (ice_check_sq_alive(hw, &hw->mailboxq))
458 		ice_vc_notify_reset(pf);
459 
460 	/* Disable VFs until reset is completed */
461 	ice_for_each_vf(pf, i)
462 		ice_set_vf_state_qs_dis(&pf->vf[i]);
463 
464 	/* clear SW filtering DB */
465 	ice_clear_hw_tbls(hw);
466 	/* disable the VSIs and their queues that are not already DOWN */
467 	ice_pf_dis_all_vsi(pf, false);
468 
469 	if (hw->port_info)
470 		ice_sched_clear_port(hw->port_info);
471 
472 	ice_shutdown_all_ctrlq(hw);
473 
474 	set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
475 }
476 
477 /**
478  * ice_do_reset - Initiate one of many types of resets
479  * @pf: board private structure
480  * @reset_type: reset type requested
481  * before this function was called
482  */
483 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
484 {
485 	struct device *dev = ice_pf_to_dev(pf);
486 	struct ice_hw *hw = &pf->hw;
487 
488 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
489 
490 	ice_prepare_for_reset(pf);
491 
492 	/* trigger the reset */
493 	if (ice_reset(hw, reset_type)) {
494 		dev_err(dev, "reset %d failed\n", reset_type);
495 		set_bit(__ICE_RESET_FAILED, pf->state);
496 		clear_bit(__ICE_RESET_OICR_RECV, pf->state);
497 		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
498 		clear_bit(__ICE_PFR_REQ, pf->state);
499 		clear_bit(__ICE_CORER_REQ, pf->state);
500 		clear_bit(__ICE_GLOBR_REQ, pf->state);
501 		return;
502 	}
503 
504 	/* PFR is a bit of a special case because it doesn't result in an OICR
505 	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
506 	 * associated state bits.
507 	 */
508 	if (reset_type == ICE_RESET_PFR) {
509 		pf->pfr_count++;
510 		ice_rebuild(pf, reset_type);
511 		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
512 		clear_bit(__ICE_PFR_REQ, pf->state);
513 		ice_reset_all_vfs(pf, true);
514 	}
515 }
516 
517 /**
518  * ice_reset_subtask - Set up for resetting the device and driver
519  * @pf: board private structure
520  */
521 static void ice_reset_subtask(struct ice_pf *pf)
522 {
523 	enum ice_reset_req reset_type = ICE_RESET_INVAL;
524 
525 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
526 	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
527 	 * of reset is pending and sets bits in pf->state indicating the reset
528  * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set,
529 	 * prepare for pending reset if not already (for PF software-initiated
530 	 * global resets the software should already be prepared for it as
531 	 * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
532 	 * by firmware or software on other PFs, that bit is not set so prepare
533 	 * for the reset now), poll for reset done, rebuild and return.
534 	 */
535 	if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
536 		/* Perform the largest reset requested */
537 		if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
538 			reset_type = ICE_RESET_CORER;
539 		if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
540 			reset_type = ICE_RESET_GLOBR;
541 		if (test_and_clear_bit(__ICE_EMPR_RECV, pf->state))
542 			reset_type = ICE_RESET_EMPR;
543 		/* return if no valid reset type requested */
544 		if (reset_type == ICE_RESET_INVAL)
545 			return;
546 		ice_prepare_for_reset(pf);
547 
548 		/* make sure we are ready to rebuild */
549 		if (ice_check_reset(&pf->hw)) {
550 			set_bit(__ICE_RESET_FAILED, pf->state);
551 		} else {
552 			/* done with reset. start rebuild */
553 			pf->hw.reset_ongoing = false;
554 			ice_rebuild(pf, reset_type);
555 			/* clear bit to resume normal operations, but
556 			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
557 			 */
558 			clear_bit(__ICE_RESET_OICR_RECV, pf->state);
559 			clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
560 			clear_bit(__ICE_PFR_REQ, pf->state);
561 			clear_bit(__ICE_CORER_REQ, pf->state);
562 			clear_bit(__ICE_GLOBR_REQ, pf->state);
563 			ice_reset_all_vfs(pf, true);
564 		}
565 
566 		return;
567 	}
568 
569 	/* No pending resets to finish processing. Check for new resets */
570 	if (test_bit(__ICE_PFR_REQ, pf->state))
571 		reset_type = ICE_RESET_PFR;
572 	if (test_bit(__ICE_CORER_REQ, pf->state))
573 		reset_type = ICE_RESET_CORER;
574 	if (test_bit(__ICE_GLOBR_REQ, pf->state))
575 		reset_type = ICE_RESET_GLOBR;
576 	/* If no valid reset type requested just return */
577 	if (reset_type == ICE_RESET_INVAL)
578 		return;
579 
580 	/* reset if not already down or busy */
581 	if (!test_bit(__ICE_DOWN, pf->state) &&
582 	    !test_bit(__ICE_CFG_BUSY, pf->state)) {
583 		ice_do_reset(pf, reset_type);
584 	}
585 }
586 
587 /**
588  * ice_print_topo_conflict - print topology conflict message
589  * @vsi: the VSI whose topology status is being checked
590  */
591 static void ice_print_topo_conflict(struct ice_vsi *vsi)
592 {
593 	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
594 	case ICE_AQ_LINK_TOPO_CONFLICT:
595 	case ICE_AQ_LINK_MEDIA_CONFLICT:
596 	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
597 	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
598 	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
599 		netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n");
600 		break;
601 	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
602 		netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
603 		break;
604 	default:
605 		break;
606 	}
607 }
608 
609 /**
610  * ice_print_link_msg - print link up or down message
611  * @vsi: the VSI whose link status is being queried
612  * @isup: true if the link is now up, false if it is down
613  */
614 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
615 {
616 	struct ice_aqc_get_phy_caps_data *caps;
617 	const char *an_advertised;
618 	enum ice_status status;
619 	const char *fec_req;
620 	const char *speed;
621 	const char *fec;
622 	const char *fc;
623 	const char *an;
624 
625 	if (!vsi)
626 		return;
627 
628 	if (vsi->current_isup == isup)
629 		return;
630 
631 	vsi->current_isup = isup;
632 
633 	if (!isup) {
634 		netdev_info(vsi->netdev, "NIC Link is Down\n");
635 		return;
636 	}
637 
638 	switch (vsi->port_info->phy.link_info.link_speed) {
639 	case ICE_AQ_LINK_SPEED_100GB:
640 		speed = "100 G";
641 		break;
642 	case ICE_AQ_LINK_SPEED_50GB:
643 		speed = "50 G";
644 		break;
645 	case ICE_AQ_LINK_SPEED_40GB:
646 		speed = "40 G";
647 		break;
648 	case ICE_AQ_LINK_SPEED_25GB:
649 		speed = "25 G";
650 		break;
651 	case ICE_AQ_LINK_SPEED_20GB:
652 		speed = "20 G";
653 		break;
654 	case ICE_AQ_LINK_SPEED_10GB:
655 		speed = "10 G";
656 		break;
657 	case ICE_AQ_LINK_SPEED_5GB:
658 		speed = "5 G";
659 		break;
660 	case ICE_AQ_LINK_SPEED_2500MB:
661 		speed = "2.5 G";
662 		break;
663 	case ICE_AQ_LINK_SPEED_1000MB:
664 		speed = "1 G";
665 		break;
666 	case ICE_AQ_LINK_SPEED_100MB:
667 		speed = "100 M";
668 		break;
669 	default:
670 		speed = "Unknown";
671 		break;
672 	}
673 
674 	switch (vsi->port_info->fc.current_mode) {
675 	case ICE_FC_FULL:
676 		fc = "Rx/Tx";
677 		break;
678 	case ICE_FC_TX_PAUSE:
679 		fc = "Tx";
680 		break;
681 	case ICE_FC_RX_PAUSE:
682 		fc = "Rx";
683 		break;
684 	case ICE_FC_NONE:
685 		fc = "None";
686 		break;
687 	default:
688 		fc = "Unknown";
689 		break;
690 	}
691 
692 	/* Get FEC mode based on negotiated link info */
693 	switch (vsi->port_info->phy.link_info.fec_info) {
694 	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
695 	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
696 		fec = "RS-FEC";
697 		break;
698 	case ICE_AQ_LINK_25G_KR_FEC_EN:
699 		fec = "FC-FEC/BASE-R";
700 		break;
701 	default:
702 		fec = "NONE";
703 		break;
704 	}
705 
706 	/* check if autoneg completed; may be false if autoneg is not supported */
707 	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
708 		an = "True";
709 	else
710 		an = "False";
711 
712 	/* Get FEC mode requested based on the PHY caps' last SW configuration */
713 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
714 	if (!caps) {
715 		fec_req = "Unknown";
716 		an_advertised = "Unknown";
717 		goto done;
718 	}
719 
720 	status = ice_aq_get_phy_caps(vsi->port_info, false,
721 				     ICE_AQC_REPORT_SW_CFG, caps, NULL);
722 	if (status)
723 		netdev_info(vsi->netdev, "Get phy capability failed.\n");
724 
725 	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
726 
727 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
728 	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
729 		fec_req = "RS-FEC";
730 	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
731 		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
732 		fec_req = "FC-FEC/BASE-R";
733 	else
734 		fec_req = "NONE";
735 
736 	kfree(caps);
737 
738 done:
739 	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
740 		    speed, fec_req, fec, an_advertised, an, fc);
741 	ice_print_topo_conflict(vsi);
742 }
743 
744 /**
745  * ice_vsi_link_event - update the VSI's netdev
746  * @vsi: the VSI on which the link event occurred
747  * @link_up: true to bring the VSI's netdev carrier up, false to take it down
748  */
749 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
750 {
751 	if (!vsi)
752 		return;
753 
754 	if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
755 		return;
756 
757 	if (vsi->type == ICE_VSI_PF) {
758 		if (link_up == netif_carrier_ok(vsi->netdev))
759 			return;
760 
761 		if (link_up) {
762 			netif_carrier_on(vsi->netdev);
763 			netif_tx_wake_all_queues(vsi->netdev);
764 		} else {
765 			netif_carrier_off(vsi->netdev);
766 			netif_tx_stop_all_queues(vsi->netdev);
767 		}
768 	}
769 }
770 
771 /**
772  * ice_set_dflt_mib - send a default config MIB to the FW
773  * @pf: private PF struct
774  *
775  * This function sends a default configuration MIB to the FW.
776  *
777  * If this function errors out at any point, the driver is still able to
778  * function.  The main impact is that LFC may not operate as expected.
779  * Therefore, an error in this function is only reported with a debug
780  * message, and the driver continues on with the rebuild/re-enable.
781  */
782 static void ice_set_dflt_mib(struct ice_pf *pf)
783 {
784 	struct device *dev;
785 	u8 mib_type, *buf, *lldpmib = NULL;
786 	u16 len, typelen, offset = 0;
787 	struct ice_lldp_org_tlv *tlv;
788 	struct ice_hw *hw;
789 	u32 ouisubtype;
790 
791 	if (!pf) {
792 		pr_debug("%s NULL pf pointer\n", __func__);
793 		return;
794 	}
795 	dev = ice_pf_to_dev(pf);
796 	hw = &pf->hw;
797 	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
798 	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
799 	if (!lldpmib) {
800 		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
801 			__func__);
802 		return;
803 	}
804 
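	/* The LLDPDU assembled below carries three organizationally specific
	 * TLVs: an ETS Configuration TLV, an ETS Recommendation TLV (which
	 * reuses the ETS CFG typelen since both TLVs have the same length),
	 * and a PFC Configuration TLV.
	 */
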
805 	/* Add ETS CFG TLV */
806 	tlv = (struct ice_lldp_org_tlv *)lldpmib;
807 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
808 		   ICE_IEEE_ETS_TLV_LEN);
809 	tlv->typelen = htons(typelen);
810 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
811 		      ICE_IEEE_SUBTYPE_ETS_CFG);
812 	tlv->ouisubtype = htonl(ouisubtype);
813 
814 	buf = tlv->tlvinfo;
815 	buf[0] = 0;
816 
817 	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
818 	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
819 	 * Octets 13 - 20 are TSA values - leave as zeros
820 	 */
821 	buf[5] = 0x64;
822 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
823 	offset += len + 2;
824 	tlv = (struct ice_lldp_org_tlv *)
825 		((char *)tlv + sizeof(tlv->typelen) + len);
826 
827 	/* Add ETS REC TLV */
828 	buf = tlv->tlvinfo;
829 	tlv->typelen = htons(typelen);
830 
831 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
832 		      ICE_IEEE_SUBTYPE_ETS_REC);
833 	tlv->ouisubtype = htonl(ouisubtype);
834 
835 	/* First octet of buf is reserved
836 	 * Octets 1 - 4 map UP to TC - all UPs map to zero
837 	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
838 	 * Octets 13 - 20 are TSA value - leave as zeros
839 	 */
840 	buf[5] = 0x64;
841 	offset += len + 2;
842 	tlv = (struct ice_lldp_org_tlv *)
843 		((char *)tlv + sizeof(tlv->typelen) + len);
844 
845 	/* Add PFC CFG TLV */
846 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
847 		   ICE_IEEE_PFC_TLV_LEN);
848 	tlv->typelen = htons(typelen);
849 
850 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
851 		      ICE_IEEE_SUBTYPE_PFC_CFG);
852 	tlv->ouisubtype = htonl(ouisubtype);
853 
854 	buf = tlv->tlvinfo;	/* octet 1 left as all zeros - PFC disabled */
855 	buf[0] = 0x08;		/* octet 0: PFC capability = 8 TCs */
856 	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
857 	offset += len + 2;
858 
859 	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
860 		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
861 
862 	kfree(lldpmib);
863 }
864 
865 /**
866  * ice_link_event - process the link event
867  * @pf: PF that the link event is associated with
868  * @pi: port_info for the port that the link event is associated with
869  * @link_up: true if the physical link is up and false if it is down
870  * @link_speed: current link speed received from the link event
871  *
872  * Returns 0 on success and negative on failure
873  */
874 static int
875 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
876 	       u16 link_speed)
877 {
878 	struct device *dev = ice_pf_to_dev(pf);
879 	struct ice_phy_info *phy_info;
880 	struct ice_vsi *vsi;
881 	u16 old_link_speed;
882 	bool old_link;
883 	int result;
884 
885 	phy_info = &pi->phy;
886 	phy_info->link_info_old = phy_info->link_info;
887 
888 	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
889 	old_link_speed = phy_info->link_info_old.link_speed;
890 
891 	/* update the link info structures and re-enable link events,
892 	 * don't bail on failure since other bookkeeping is still needed
893 	 */
894 	result = ice_update_link_info(pi);
895 	if (result)
896 		dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
897 			pi->lport);
898 
899 	/* Check if the link state is up after updating link info, and treat
900 	 * this event as an UP event since the link is actually UP now.
901 	 */
902 	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
903 		link_up = true;
904 
905 	vsi = ice_get_main_vsi(pf);
906 	if (!vsi || !vsi->port_info)
907 		return -EINVAL;
908 
909 	/* turn off PHY if media was removed */
910 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
911 	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
912 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
913 
914 		result = ice_aq_set_link_restart_an(pi, false, NULL);
915 		if (result) {
916 			dev_dbg(dev, "Failed to set link down, VSI %d error %d\n",
917 				vsi->vsi_num, result);
918 			return result;
919 		}
920 	}
921 
922 	/* if the old link up/down state and speed are the same as the new */
923 	if (link_up == old_link && link_speed == old_link_speed)
924 		return result;
925 
926 	if (ice_is_dcb_active(pf)) {
927 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
928 			ice_dcb_rebuild(pf);
929 	} else {
930 		if (link_up)
931 			ice_set_dflt_mib(pf);
932 	}
933 	ice_vsi_link_event(vsi, link_up);
934 	ice_print_link_msg(vsi, link_up);
935 
936 	ice_vc_notify_link_state(pf);
937 
938 	return result;
939 }
940 
941 /**
942  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
943  * @pf: board private structure
944  */
945 static void ice_watchdog_subtask(struct ice_pf *pf)
946 {
947 	int i;
948 
949 	/* if interface is down do nothing */
950 	if (test_bit(__ICE_DOWN, pf->state) ||
951 	    test_bit(__ICE_CFG_BUSY, pf->state))
952 		return;
953 
954 	/* make sure we don't do these things too often */
955 	if (time_before(jiffies,
956 			pf->serv_tmr_prev + pf->serv_tmr_period))
957 		return;
958 
959 	pf->serv_tmr_prev = jiffies;
960 
961 	/* Update the stats for active netdevs so the network stack
962 	 * can look at updated numbers whenever it cares to
963 	 */
964 	ice_update_pf_stats(pf);
965 	ice_for_each_vsi(pf, i)
966 		if (pf->vsi[i] && pf->vsi[i]->netdev)
967 			ice_update_vsi_stats(pf->vsi[i]);
968 }
969 
970 /**
971  * ice_init_link_events - enable/initialize link events
972  * @pi: pointer to the port_info instance
973  *
974  * Returns -EIO on failure, 0 on success
975  */
976 static int ice_init_link_events(struct ice_port_info *pi)
977 {
978 	u16 mask;
979 
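	/* the mask is inverted so that only the link up/down, media not
	 * available, and module qualification failure events remain
	 * unmasked (i.e. enabled)
	 */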
980 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
981 		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
982 
983 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
984 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
985 			pi->lport);
986 		return -EIO;
987 	}
988 
989 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
990 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
991 			pi->lport);
992 		return -EIO;
993 	}
994 
995 	return 0;
996 }
997 
998 /**
999  * ice_handle_link_event - handle link event via ARQ
1000  * @pf: PF that the link event is associated with
1001  * @event: event structure containing link status info
1002  */
1003 static int
1004 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1005 {
1006 	struct ice_aqc_get_link_status_data *link_data;
1007 	struct ice_port_info *port_info;
1008 	int status;
1009 
1010 	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1011 	port_info = pf->hw.port_info;
1012 	if (!port_info)
1013 		return -EINVAL;
1014 
1015 	status = ice_link_event(pf, port_info,
1016 				!!(link_data->link_info & ICE_AQ_LINK_UP),
1017 				le16_to_cpu(link_data->link_speed));
1018 	if (status)
1019 		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1020 			status);
1021 
1022 	return status;
1023 }
1024 
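/* An AdminQ wait task is linked into pf->aq_wait_list by
 * ice_aq_wait_for_event() and starts out as ICE_AQ_TASK_WAITING. It moves
 * to ICE_AQ_TASK_COMPLETE once ice_aq_check_events() sees a matching
 * opcode, or to ICE_AQ_TASK_CANCELED when ice_aq_cancel_waiting_tasks()
 * tears the list down.
 */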
1025 enum ice_aq_task_state {
1026 	ICE_AQ_TASK_WAITING = 0,
1027 	ICE_AQ_TASK_COMPLETE,
1028 	ICE_AQ_TASK_CANCELED,
1029 };
1030 
1031 struct ice_aq_task {
1032 	struct hlist_node entry;
1033 
1034 	u16 opcode;
1035 	struct ice_rq_event_info *event;
1036 	enum ice_aq_task_state state;
1037 };
1038 
1039 /**
1040  * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1041  * @pf: pointer to the PF private structure
1042  * @opcode: the opcode to wait for
1043  * @timeout: how long to wait, in jiffies
1044  * @event: storage for the event info
1045  *
1046  * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1047  * current thread will be put to sleep until the specified event occurs or
1048  * until the given timeout is reached.
1049  *
1050  * To obtain only the descriptor contents, pass an event without an allocated
1051  * msg_buf. If the complete data buffer is desired, allocate the
1052  * event->msg_buf with enough space ahead of time.
1053  *
1054  * Returns: zero on success, or a negative error code on failure.
1055  */
1056 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1057 			  struct ice_rq_event_info *event)
1058 {
1059 	struct ice_aq_task *task;
1060 	long ret;
1061 	int err;
1062 
1063 	task = kzalloc(sizeof(*task), GFP_KERNEL);
1064 	if (!task)
1065 		return -ENOMEM;
1066 
1067 	INIT_HLIST_NODE(&task->entry);
1068 	task->opcode = opcode;
1069 	task->event = event;
1070 	task->state = ICE_AQ_TASK_WAITING;
1071 
1072 	spin_lock_bh(&pf->aq_wait_lock);
1073 	hlist_add_head(&task->entry, &pf->aq_wait_list);
1074 	spin_unlock_bh(&pf->aq_wait_lock);
1075 
1076 	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
1077 					       timeout);
1078 	switch (task->state) {
1079 	case ICE_AQ_TASK_WAITING:
1080 		err = ret < 0 ? ret : -ETIMEDOUT;
1081 		break;
1082 	case ICE_AQ_TASK_CANCELED:
1083 		err = ret < 0 ? ret : -ECANCELED;
1084 		break;
1085 	case ICE_AQ_TASK_COMPLETE:
1086 		err = ret < 0 ? ret : 0;
1087 		break;
1088 	default:
1089 		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1090 		err = -EINVAL;
1091 		break;
1092 	}
1093 
1094 	spin_lock_bh(&pf->aq_wait_lock);
1095 	hlist_del(&task->entry);
1096 	spin_unlock_bh(&pf->aq_wait_lock);
1097 	kfree(task);
1098 
1099 	return err;
1100 }
1101 
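/* Illustrative usage sketch (ice_aqc_opc_nvm_write is just an example
 * opcode): a caller that has fired an AdminQ command can block until the
 * matching completion event arrives, e.g.
 *
 *	struct ice_rq_event_info event = { };
 *	int err;
 *
 *	err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, HZ, &event);
 */
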
1102 /**
1103  * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1104  * @pf: pointer to the PF private structure
1105  * @opcode: the opcode of the event
1106  * @event: the event to check
1107  *
1108  * Loops over the current list of pending threads waiting for an AdminQ event.
1109  * For each matching task, copy the contents of the event into the task
1110  * structure and wake up the thread.
1111  *
1112  * If multiple threads wait for the same opcode, they will all be woken up.
1113  *
1114  * Note that event->msg_buf will only be duplicated if the event has a buffer
1115  * with enough space already allocated. Otherwise, only the descriptor and
1116  * message length will be copied.
1119  */
1120 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1121 				struct ice_rq_event_info *event)
1122 {
1123 	struct ice_aq_task *task;
1124 	bool found = false;
1125 
1126 	spin_lock_bh(&pf->aq_wait_lock);
1127 	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
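		/* tasks still waiting have state ICE_AQ_TASK_WAITING (== 0) */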
1128 		if (task->state || task->opcode != opcode)
1129 			continue;
1130 
1131 		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
1132 		task->event->msg_len = event->msg_len;
1133 
1134 		/* Only copy the data buffer if a destination was set */
1135 		if (task->event->msg_buf &&
1136 		    task->event->buf_len > event->buf_len) {
1137 			memcpy(task->event->msg_buf, event->msg_buf,
1138 			       event->buf_len);
1139 			task->event->buf_len = event->buf_len;
1140 		}
1141 
1142 		task->state = ICE_AQ_TASK_COMPLETE;
1143 		found = true;
1144 	}
1145 	spin_unlock_bh(&pf->aq_wait_lock);
1146 
1147 	if (found)
1148 		wake_up(&pf->aq_wait_queue);
1149 }
1150 
1151 /**
1152  * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1153  * @pf: the PF private structure
1154  *
1155  * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1156  * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1157  */
1158 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1159 {
1160 	struct ice_aq_task *task;
1161 
1162 	spin_lock_bh(&pf->aq_wait_lock);
1163 	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1164 		task->state = ICE_AQ_TASK_CANCELED;
1165 	spin_unlock_bh(&pf->aq_wait_lock);
1166 
1167 	wake_up(&pf->aq_wait_queue);
1168 }
1169 
1170 /**
1171  * __ice_clean_ctrlq - helper function to clean controlq rings
1172  * @pf: ptr to struct ice_pf
1173  * @q_type: specific Control queue type
1174  */
1175 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1176 {
1177 	struct device *dev = ice_pf_to_dev(pf);
1178 	struct ice_rq_event_info event;
1179 	struct ice_hw *hw = &pf->hw;
1180 	struct ice_ctl_q_info *cq;
1181 	u16 pending, i = 0;
1182 	const char *qtype;
1183 	u32 oldval, val;
1184 
1185 	/* Do not clean control queue if/when PF reset fails */
1186 	if (test_bit(__ICE_RESET_FAILED, pf->state))
1187 		return 0;
1188 
1189 	switch (q_type) {
1190 	case ICE_CTL_Q_ADMIN:
1191 		cq = &hw->adminq;
1192 		qtype = "Admin";
1193 		break;
1194 	case ICE_CTL_Q_MAILBOX:
1195 		cq = &hw->mailboxq;
1196 		qtype = "Mailbox";
1197 		break;
1198 	default:
1199 		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1200 		return 0;
1201 	}
1202 
1203 	/* check for error indications - PF_xx_AxQLEN register layout for
1204 	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1205 	 */
1206 	val = rd32(hw, cq->rq.len);
1207 	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1208 		   PF_FW_ARQLEN_ARQCRIT_M)) {
1209 		oldval = val;
1210 		if (val & PF_FW_ARQLEN_ARQVFE_M)
1211 			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1212 				qtype);
1213 		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1214 			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1215 				qtype);
1216 		}
1217 		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1218 			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1219 				qtype);
1220 		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1221 			 PF_FW_ARQLEN_ARQCRIT_M);
1222 		if (oldval != val)
1223 			wr32(hw, cq->rq.len, val);
1224 	}
1225 
1226 	val = rd32(hw, cq->sq.len);
1227 	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1228 		   PF_FW_ATQLEN_ATQCRIT_M)) {
1229 		oldval = val;
1230 		if (val & PF_FW_ATQLEN_ATQVFE_M)
1231 			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1232 				qtype);
1233 		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1234 			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1235 				qtype);
1236 		}
1237 		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1238 			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1239 				qtype);
1240 		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1241 			 PF_FW_ATQLEN_ATQCRIT_M);
1242 		if (oldval != val)
1243 			wr32(hw, cq->sq.len, val);
1244 	}
1245 
1246 	event.buf_len = cq->rq_buf_size;
1247 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1248 	if (!event.msg_buf)
1249 		return 0;
1250 
1251 	do {
1252 		enum ice_status ret;
1253 		u16 opcode;
1254 
1255 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1256 		if (ret == ICE_ERR_AQ_NO_WORK)
1257 			break;
1258 		if (ret) {
1259 			dev_err(dev, "%s Receive Queue event error %s\n", qtype,
1260 				ice_stat_str(ret));
1261 			break;
1262 		}
1263 
1264 		opcode = le16_to_cpu(event.desc.opcode);
1265 
1266 		/* Notify any thread that might be waiting for this event */
1267 		ice_aq_check_events(pf, opcode, &event);
1268 
1269 		switch (opcode) {
1270 		case ice_aqc_opc_get_link_status:
1271 			if (ice_handle_link_event(pf, &event))
1272 				dev_err(dev, "Could not handle link event\n");
1273 			break;
1274 		case ice_aqc_opc_event_lan_overflow:
1275 			ice_vf_lan_overflow_event(pf, &event);
1276 			break;
1277 		case ice_mbx_opc_send_msg_to_pf:
1278 			ice_vc_process_vf_msg(pf, &event);
1279 			break;
1280 		case ice_aqc_opc_fw_logging:
1281 			ice_output_fw_log(hw, &event.desc, event.msg_buf);
1282 			break;
1283 		case ice_aqc_opc_lldp_set_mib_change:
1284 			ice_dcb_process_lldp_set_mib_change(pf, &event);
1285 			break;
1286 		default:
1287 			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1288 				qtype, opcode);
1289 			break;
1290 		}
1291 	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1292 
1293 	kfree(event.msg_buf);
1294 
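	/* A non-zero return means the work budget was exhausted while events
	 * were still pending; the caller then leaves its *_EVENT_PENDING bit
	 * set so the queue is cleaned again on the next service task pass.
	 */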
1295 	return pending && (i == ICE_DFLT_IRQ_WORK);
1296 }
1297 
1298 /**
1299  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1300  * @hw: pointer to hardware info
1301  * @cq: control queue information
1302  *
1303  * returns true if there are pending messages in a queue, false if there aren't
1304  */
1305 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1306 {
1307 	u16 ntu;
1308 
1309 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1310 	return cq->rq.next_to_clean != ntu;
1311 }
1312 
1313 /**
1314  * ice_clean_adminq_subtask - clean the AdminQ rings
1315  * @pf: board private structure
1316  */
1317 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1318 {
1319 	struct ice_hw *hw = &pf->hw;
1320 
1321 	if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
1322 		return;
1323 
1324 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1325 		return;
1326 
1327 	clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
1328 
1329 	/* There might be a situation where new messages arrive to a control
1330 	 * queue between processing the last message and clearing the
1331 	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1332 	 * ice_ctrlq_pending) and process new messages if any.
1333 	 */
1334 	if (ice_ctrlq_pending(hw, &hw->adminq))
1335 		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1336 
1337 	ice_flush(hw);
1338 }
1339 
1340 /**
1341  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1342  * @pf: board private structure
1343  */
1344 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1345 {
1346 	struct ice_hw *hw = &pf->hw;
1347 
1348 	if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1349 		return;
1350 
1351 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1352 		return;
1353 
1354 	clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1355 
1356 	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1357 		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1358 
1359 	ice_flush(hw);
1360 }
1361 
1362 /**
1363  * ice_service_task_schedule - schedule the service task to wake up
1364  * @pf: board private structure
1365  *
1366  * If not already scheduled, this puts the task into the work queue.
1367  */
1368 void ice_service_task_schedule(struct ice_pf *pf)
1369 {
1370 	if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
1371 	    !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
1372 	    !test_bit(__ICE_NEEDS_RESTART, pf->state))
1373 		queue_work(ice_wq, &pf->serv_task);
1374 }
1375 
1376 /**
1377  * ice_service_task_complete - finish up the service task
1378  * @pf: board private structure
1379  */
1380 static void ice_service_task_complete(struct ice_pf *pf)
1381 {
1382 	WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));
1383 
1384 	/* force memory (pf->state) to sync before next service task */
1385 	smp_mb__before_atomic();
1386 	clear_bit(__ICE_SERVICE_SCHED, pf->state);
1387 }
1388 
1389 /**
1390  * ice_service_task_stop - stop service task and cancel works
1391  * @pf: board private structure
1392  *
1393  * Return 0 if the __ICE_SERVICE_DIS bit was not already set,
1394  * 1 otherwise.
1395  */
1396 static int ice_service_task_stop(struct ice_pf *pf)
1397 {
1398 	int ret;
1399 
1400 	ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state);
1401 
1402 	if (pf->serv_tmr.function)
1403 		del_timer_sync(&pf->serv_tmr);
1404 	if (pf->serv_task.func)
1405 		cancel_work_sync(&pf->serv_task);
1406 
1407 	clear_bit(__ICE_SERVICE_SCHED, pf->state);
1408 	return ret;
1409 }
1410 
1411 /**
1412  * ice_service_task_restart - restart service task and schedule works
1413  * @pf: board private structure
1414  *
1415  * This function is needed for suspend and resume work (e.g. WoL scenario)
1416  */
1417 static void ice_service_task_restart(struct ice_pf *pf)
1418 {
1419 	clear_bit(__ICE_SERVICE_DIS, pf->state);
1420 	ice_service_task_schedule(pf);
1421 }
1422 
1423 /**
1424  * ice_service_timer - timer callback to schedule service task
1425  * @t: pointer to timer_list
1426  */
1427 static void ice_service_timer(struct timer_list *t)
1428 {
1429 	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1430 
1431 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1432 	ice_service_task_schedule(pf);
1433 }
1434 
1435 /**
1436  * ice_handle_mdd_event - handle malicious driver detect event
1437  * @pf: pointer to the PF structure
1438  *
1439  * Called from service task. OICR interrupt handler indicates MDD event.
1440  * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1441  * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1442  * disable the queue, the PF can be configured to reset the VF using ethtool
1443  * private flag mdd-auto-reset-vf.
1444  */
1445 static void ice_handle_mdd_event(struct ice_pf *pf)
1446 {
1447 	struct device *dev = ice_pf_to_dev(pf);
1448 	struct ice_hw *hw = &pf->hw;
1449 	unsigned int i;
1450 	u32 reg;
1451 
1452 	if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
1453 		/* Since the VF MDD event logging is rate limited, check if
1454 		 * there are pending MDD events.
1455 		 */
1456 		ice_print_vfs_mdd_events(pf);
1457 		return;
1458 	}
1459 
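	/* The global GL_MDET_* registers report the PF/VF number, queue and
	 * event type of a detected event; the per-function PF_MDET_* and
	 * VP_MDET_* valid bits are then checked below to attribute events to
	 * this PF or one of its VFs.
	 */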
1460 	/* find what triggered an MDD event */
1461 	reg = rd32(hw, GL_MDET_TX_PQM);
1462 	if (reg & GL_MDET_TX_PQM_VALID_M) {
1463 		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1464 				GL_MDET_TX_PQM_PF_NUM_S;
1465 		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1466 				GL_MDET_TX_PQM_VF_NUM_S;
1467 		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1468 				GL_MDET_TX_PQM_MAL_TYPE_S;
1469 		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1470 				GL_MDET_TX_PQM_QNUM_S);
1471 
1472 		if (netif_msg_tx_err(pf))
1473 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1474 				 event, queue, pf_num, vf_num);
1475 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1476 	}
1477 
1478 	reg = rd32(hw, GL_MDET_TX_TCLAN);
1479 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1480 		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1481 				GL_MDET_TX_TCLAN_PF_NUM_S;
1482 		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1483 				GL_MDET_TX_TCLAN_VF_NUM_S;
1484 		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1485 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1486 		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1487 				GL_MDET_TX_TCLAN_QNUM_S);
1488 
1489 		if (netif_msg_tx_err(pf))
1490 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1491 				 event, queue, pf_num, vf_num);
1492 		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1493 	}
1494 
1495 	reg = rd32(hw, GL_MDET_RX);
1496 	if (reg & GL_MDET_RX_VALID_M) {
1497 		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1498 				GL_MDET_RX_PF_NUM_S;
1499 		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1500 				GL_MDET_RX_VF_NUM_S;
1501 		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1502 				GL_MDET_RX_MAL_TYPE_S;
1503 		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1504 				GL_MDET_RX_QNUM_S);
1505 
1506 		if (netif_msg_rx_err(pf))
1507 			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1508 				 event, queue, pf_num, vf_num);
1509 		wr32(hw, GL_MDET_RX, 0xffffffff);
1510 	}
1511 
1512 	/* check to see if this PF caused an MDD event */
1513 	reg = rd32(hw, PF_MDET_TX_PQM);
1514 	if (reg & PF_MDET_TX_PQM_VALID_M) {
1515 		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1516 		if (netif_msg_tx_err(pf))
1517 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1518 	}
1519 
1520 	reg = rd32(hw, PF_MDET_TX_TCLAN);
1521 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1522 		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1523 		if (netif_msg_tx_err(pf))
1524 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1525 	}
1526 
1527 	reg = rd32(hw, PF_MDET_RX);
1528 	if (reg & PF_MDET_RX_VALID_M) {
1529 		wr32(hw, PF_MDET_RX, 0xFFFF);
1530 		if (netif_msg_rx_err(pf))
1531 			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1532 	}
1533 
1534 	/* Check to see if one of the VFs caused an MDD event, and then
1535 	 * increment counters and set print pending
1536 	 */
1537 	ice_for_each_vf(pf, i) {
1538 		struct ice_vf *vf = &pf->vf[i];
1539 
1540 		reg = rd32(hw, VP_MDET_TX_PQM(i));
1541 		if (reg & VP_MDET_TX_PQM_VALID_M) {
1542 			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
1543 			vf->mdd_tx_events.count++;
1544 			set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
1545 			if (netif_msg_tx_err(pf))
1546 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1547 					 i);
1548 		}
1549 
1550 		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
1551 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1552 			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
1553 			vf->mdd_tx_events.count++;
1554 			set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
1555 			if (netif_msg_tx_err(pf))
1556 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1557 					 i);
1558 		}
1559 
1560 		reg = rd32(hw, VP_MDET_TX_TDPU(i));
1561 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1562 			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
1563 			vf->mdd_tx_events.count++;
1564 			set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
1565 			if (netif_msg_tx_err(pf))
1566 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1567 					 i);
1568 		}
1569 
1570 		reg = rd32(hw, VP_MDET_RX(i));
1571 		if (reg & VP_MDET_RX_VALID_M) {
1572 			wr32(hw, VP_MDET_RX(i), 0xFFFF);
1573 			vf->mdd_rx_events.count++;
1574 			set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
1575 			if (netif_msg_rx_err(pf))
1576 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1577 					 i);
1578 
1579 			/* Since the queue is disabled on VF Rx MDD events, the
1580 			 * PF can be configured to reset the VF through ethtool
1581 			 * private flag mdd-auto-reset-vf.
1582 			 */
1583 			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1584 				/* VF MDD event counters will be cleared by
1585 				 * reset, so print the event prior to reset.
1586 				 */
1587 				ice_print_vf_rx_mdd_event(vf);
1588 				ice_reset_vf(&pf->vf[i], false);
1589 			}
1590 		}
1591 	}
1592 
1593 	ice_print_vfs_mdd_events(pf);
1594 }
1595 
1596 /**
1597  * ice_force_phys_link_state - Force the physical link state
1598  * @vsi: VSI to force the physical link state to up/down
1599  * @link_up: true/false indicates to set the physical link to up/down
1600  *
1601  * Force the physical link state by getting the current PHY capabilities from
1602  * hardware and setting the PHY config based on the determined capabilities. If
1603  * link changes, a link event will be triggered because both the Enable Automatic
1604  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1605  *
1606  * Returns 0 on success, negative on failure
1607  */
1608 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1609 {
1610 	struct ice_aqc_get_phy_caps_data *pcaps;
1611 	struct ice_aqc_set_phy_cfg_data *cfg;
1612 	struct ice_port_info *pi;
1613 	struct device *dev;
1614 	int retcode;
1615 
1616 	if (!vsi || !vsi->port_info || !vsi->back)
1617 		return -EINVAL;
1618 	if (vsi->type != ICE_VSI_PF)
1619 		return 0;
1620 
1621 	dev = ice_pf_to_dev(vsi->back);
1622 
1623 	pi = vsi->port_info;
1624 
1625 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1626 	if (!pcaps)
1627 		return -ENOMEM;
1628 
1629 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
1630 				      NULL);
1631 	if (retcode) {
1632 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1633 			vsi->vsi_num, retcode);
1634 		retcode = -EIO;
1635 		goto out;
1636 	}
1637 
1638 	/* No change in link */
1639 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1640 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1641 		goto out;
1642 
1643 	/* Use the current user PHY configuration. The current user PHY
1644 	 * configuration is initialized during probe from the PHY capabilities
1645 	 * software mode report, and updated on each set PHY configuration.
1646 	 */
1647 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1648 	if (!cfg) {
1649 		retcode = -ENOMEM;
1650 		goto out;
1651 	}
1652 
1653 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1654 	if (link_up)
1655 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1656 	else
1657 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1658 
1659 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1660 	if (retcode) {
1661 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1662 			vsi->vsi_num, retcode);
1663 		retcode = -EIO;
1664 	}
1665 
1666 	kfree(cfg);
1667 out:
1668 	kfree(pcaps);
1669 	return retcode;
1670 }
1671 
1672 /**
1673  * ice_init_nvm_phy_type - Initialize the NVM PHY type
1674  * @pi: port info structure
1675  *
1676  * Initialize nvm_phy_type_[low|high] for link lenient mode support
1677  */
1678 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1679 {
1680 	struct ice_aqc_get_phy_caps_data *pcaps;
1681 	struct ice_pf *pf = pi->hw->back;
1682 	enum ice_status status;
1683 	int err = 0;
1684 
1685 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1686 	if (!pcaps)
1687 		return -ENOMEM;
1688 
1689 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps,
1690 				     NULL);
1691 
1692 	if (status) {
1693 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1694 		err = -EIO;
1695 		goto out;
1696 	}
1697 
1698 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
1699 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
1700 
1701 out:
1702 	kfree(pcaps);
1703 	return err;
1704 }
1705 
1706 /**
1707  * ice_init_link_dflt_override - Initialize link default override
1708  * @pi: port info structure
1709  *
1710  * Initialize link default override and PHY total port shutdown during probe
1711  */
1712 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1713 {
1714 	struct ice_link_default_override_tlv *ldo;
1715 	struct ice_pf *pf = pi->hw->back;
1716 
1717 	ldo = &pf->link_dflt_override;
1718 	if (ice_get_link_default_override(ldo, pi))
1719 		return;
1720 
1721 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1722 		return;
1723 
1724 	/* Enable Total Port Shutdown (override/replace link-down-on-close
1725 	 * ethtool private flag) for ports with Port Disable bit set.
1726 	 */
1727 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1728 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1729 }
1730 
1731 /**
1732  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1733  * @pi: port info structure
1734  *
1735  * If default override is enabled, initialize the user PHY cfg speed and FEC
1736  * settings using the default override mask from the NVM.
1737  *
1738  * The PHY should only be configured with the default override settings the
1739  * first time media is available. The __ICE_LINK_DEFAULT_OVERRIDE_PENDING state
1740  * is used to indicate that the user PHY cfg default override is initialized
1741  * and the PHY has not been configured with the default override settings. The
1742  * state is set here, and cleared in ice_configure_phy the first time the PHY is
1743  * configured.
1744  */
1745 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1746 {
1747 	struct ice_link_default_override_tlv *ldo;
1748 	struct ice_aqc_set_phy_cfg_data *cfg;
1749 	struct ice_phy_info *phy = &pi->phy;
1750 	struct ice_pf *pf = pi->hw->back;
1751 
1752 	ldo = &pf->link_dflt_override;
1753 
1754 	/* If link default override is enabled, use to mask NVM PHY capabilities
1755 	 * for speed and FEC default configuration.
1756 	 */
1757 	cfg = &phy->curr_user_phy_cfg;
1758 
1759 	if (ldo->phy_type_low || ldo->phy_type_high) {
1760 		cfg->phy_type_low = pf->nvm_phy_type_lo &
1761 				    cpu_to_le64(ldo->phy_type_low);
1762 		cfg->phy_type_high = pf->nvm_phy_type_hi &
1763 				     cpu_to_le64(ldo->phy_type_high);
1764 	}
1765 	cfg->link_fec_opt = ldo->fec_options;
1766 	phy->curr_user_fec_req = ICE_FEC_AUTO;
1767 
1768 	set_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
1769 }
1770 
1771 /**
1772  * ice_init_phy_user_cfg - Initialize the PHY user configuration
1773  * @pi: port info structure
1774  *
1775  * Initialize the current user PHY configuration, speed, FEC, and FC requested
1776  * mode to default. The PHY defaults are from get PHY capabilities topology
1777  * with media so call when media is first available. An error is returned if
1778  * called when media is not available. The PHY initialization completed state is
1779  * set here.
1780  *
1781  * These configurations are used when setting the PHY configuration. The
1782  * user PHY configuration is updated on each set PHY configuration.
1783  * Returns 0 on success, negative on failure.
1784  */
1785 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
1786 {
1787 	struct ice_aqc_get_phy_caps_data *pcaps;
1788 	struct ice_phy_info *phy = &pi->phy;
1789 	struct ice_pf *pf = pi->hw->back;
1790 	enum ice_status status;
1791 	struct ice_vsi *vsi;
1792 	int err = 0;
1793 
1794 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1795 		return -EIO;
1796 
1797 	vsi = ice_get_main_vsi(pf);
1798 	if (!vsi)
1799 		return -EINVAL;
1800 
1801 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1802 	if (!pcaps)
1803 		return -ENOMEM;
1804 
1805 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
1806 				     NULL);
1807 	if (status) {
1808 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1809 		err = -EIO;
1810 		goto err_out;
1811 	}
1812 
1813 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
1814 
1815 	/* check if lenient mode is supported and enabled */
1816 	if (ice_fw_supports_link_override(&vsi->back->hw) &&
1817 	    !(pcaps->module_compliance_enforcement &
1818 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
1819 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
1820 
1821 		/* if link default override is enabled, initialize user PHY
1822 		 * configuration with link default override values
1823 		 */
1824 		if (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN) {
1825 			ice_init_phy_cfg_dflt_override(pi);
1826 			goto out;
1827 		}
1828 	}
1829 
1830 	/* if link default override is not enabled, initialize PHY using
1831 	 * topology with media
1832 	 */
1833 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
1834 						      pcaps->link_fec_options);
1835 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
1836 
1837 out:
1838 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
1839 	set_bit(__ICE_PHY_INIT_COMPLETE, pf->state);
1840 err_out:
1841 	kfree(pcaps);
1842 	return err;
1843 }
1844 
1845 /**
1846  * ice_configure_phy - configure PHY
1847  * @vsi: VSI of PHY
1848  *
1849  * Set the PHY configuration. If the current PHY configuration is the same as
1850  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise,
1851  * configure the PHY based on get PHY capabilities for topology with media.
1852  */
1853 static int ice_configure_phy(struct ice_vsi *vsi)
1854 {
1855 	struct device *dev = ice_pf_to_dev(vsi->back);
1856 	struct ice_aqc_get_phy_caps_data *pcaps;
1857 	struct ice_aqc_set_phy_cfg_data *cfg;
1858 	struct ice_port_info *pi;
1859 	enum ice_status status;
1860 	int err = 0;
1861 
1862 	pi = vsi->port_info;
1863 	if (!pi)
1864 		return -EINVAL;
1865 
1866 	/* Ensure we have media as we cannot configure a medialess port */
1867 	if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1868 		return -EPERM;
1869 
1870 	ice_print_topo_conflict(vsi);
1871 
1872 	if (vsi->port_info->phy.link_info.topo_media_conflict ==
1873 	    ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
1874 		return -EPERM;
1875 
1876 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
1877 		return ice_force_phys_link_state(vsi, true);
1878 
1879 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1880 	if (!pcaps)
1881 		return -ENOMEM;
1882 
1883 	/* Get current PHY config */
1884 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
1885 				     NULL);
1886 	if (status) {
1887 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
1888 			vsi->vsi_num, ice_stat_str(status));
1889 		err = -EIO;
1890 		goto done;
1891 	}
1892 
1893 	/* If PHY enable link is configured and configuration has not changed,
1894 	 * there's nothing to do
1895 	 */
1896 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
1897 	    ice_phy_caps_equals_cfg(pcaps, &pi->phy.curr_user_phy_cfg))
1898 		goto done;
1899 
1900 	/* Use PHY topology as baseline for configuration */
1901 	memset(pcaps, 0, sizeof(*pcaps));
1902 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
1903 				     NULL);
1904 	if (status) {
1905 		dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n",
1906 			vsi->vsi_num, ice_stat_str(status));
1907 		err = -EIO;
1908 		goto done;
1909 	}
1910 
1911 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1912 	if (!cfg) {
1913 		err = -ENOMEM;
1914 		goto done;
1915 	}
1916 
1917 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
1918 
1919 	/* Speed - If default override pending, use curr_user_phy_cfg set in
1920 	 * ice_init_phy_cfg_dflt_override.
1921 	 */
1922 	if (test_and_clear_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING,
1923 			       vsi->back->state)) {
1924 		cfg->phy_type_low = pi->phy.curr_user_phy_cfg.phy_type_low;
1925 		cfg->phy_type_high = pi->phy.curr_user_phy_cfg.phy_type_high;
1926 	} else {
1927 		u64 phy_low = 0, phy_high = 0;
1928 
1929 		ice_update_phy_type(&phy_low, &phy_high,
1930 				    pi->phy.curr_user_speed_req);
1931 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
1932 		cfg->phy_type_high = pcaps->phy_type_high &
1933 				     cpu_to_le64(phy_high);
1934 	}
1935 
1936 	/* Can't provide what was requested; use PHY capabilities */
1937 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
1938 		cfg->phy_type_low = pcaps->phy_type_low;
1939 		cfg->phy_type_high = pcaps->phy_type_high;
1940 	}
1941 
1942 	/* FEC */
1943 	ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req);
1944 
1945 	/* Can't provide what was requested; use PHY capabilities */
1946 	if (cfg->link_fec_opt !=
1947 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
1948 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
1949 		cfg->link_fec_opt = pcaps->link_fec_options;
1950 	}
1951 
1952 	/* Flow Control - always supported; no need to check against
1953 	 * capabilities
1954 	 */
1955 	ice_cfg_phy_fc(pi, cfg, pi->phy.curr_user_fc_req);
1956 
1957 	/* Enable link and link update */
1958 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
1959 
1960 	status = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1961 	if (status) {
1962 		dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
1963 			vsi->vsi_num, ice_stat_str(status));
1964 		err = -EIO;
1965 	}
1966 
1967 	kfree(cfg);
1968 done:
1969 	kfree(pcaps);
1970 	return err;
1971 }
1972 
1973 /**
1974  * ice_check_media_subtask - Check for media
1975  * @pf: pointer to PF struct
1976  *
1977  * If media is available, then initialize the PHY user configuration if it
1978  * has not been done already, and configure the PHY if the interface is up.
1979  */
1980 static void ice_check_media_subtask(struct ice_pf *pf)
1981 {
1982 	struct ice_port_info *pi;
1983 	struct ice_vsi *vsi;
1984 	int err;
1985 
1986 	/* No need to check for media if it's already present */
1987 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
1988 		return;
1989 
1990 	vsi = ice_get_main_vsi(pf);
1991 	if (!vsi)
1992 		return;
1993 
1994 	/* Refresh link info and check if media is present */
1995 	pi = vsi->port_info;
1996 	err = ice_update_link_info(pi);
1997 	if (err)
1998 		return;
1999 
2000 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2001 		if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state))
2002 			ice_init_phy_user_cfg(pi);
2003 
2004 		/* PHY settings are reset on media insertion, reconfigure
2005 		 * PHY to preserve settings.
2006 		 */
2007 		if (test_bit(__ICE_DOWN, vsi->state) &&
2008 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2009 			return;
2010 
2011 		err = ice_configure_phy(vsi);
2012 		if (!err)
2013 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2014 
2015 		/* A Link Status Event will be generated; the event handler
2016 		 * will complete bringing the interface up
2017 		 */
2018 	}
2019 }
2020 
2021 /**
2022  * ice_service_task - manage and run subtasks
2023  * @work: pointer to work_struct contained by the PF struct
2024  */
2025 static void ice_service_task(struct work_struct *work)
2026 {
2027 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2028 	unsigned long start_time = jiffies;
2029 
2030 	/* subtasks */
2031 
2032 	/* process reset requests first */
2033 	ice_reset_subtask(pf);
2034 
2035 	/* bail if a reset/recovery cycle is pending or rebuild failed */
2036 	if (ice_is_reset_in_progress(pf->state) ||
2037 	    test_bit(__ICE_SUSPENDED, pf->state) ||
2038 	    test_bit(__ICE_NEEDS_RESTART, pf->state)) {
2039 		ice_service_task_complete(pf);
2040 		return;
2041 	}
2042 
2043 	ice_clean_adminq_subtask(pf);
2044 	ice_check_media_subtask(pf);
2045 	ice_check_for_hang_subtask(pf);
2046 	ice_sync_fltr_subtask(pf);
2047 	ice_handle_mdd_event(pf);
2048 	ice_watchdog_subtask(pf);
2049 
2050 	if (ice_is_safe_mode(pf)) {
2051 		ice_service_task_complete(pf);
2052 		return;
2053 	}
2054 
2055 	ice_process_vflr_event(pf);
2056 	ice_clean_mailboxq_subtask(pf);
2057 	ice_sync_arfs_fltrs(pf);
2058 	/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
2059 	ice_service_task_complete(pf);
2060 
2061 	/* If the tasks have taken longer than one service timer period
2062 	 * or there is more work to be done, reset the service timer to
2063 	 * schedule the service task now.
2064 	 */
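	/* Illustrative note: mod_timer(&pf->serv_tmr, jiffies) below arms the
	 * timer to expire immediately; its callback (ice_service_timer) then
	 * re-queues this work item via ice_service_task_schedule().
	 */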
2065 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2066 	    test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
2067 	    test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
2068 	    test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2069 	    test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
2070 		mod_timer(&pf->serv_tmr, jiffies);
2071 }
2072 
2073 /**
2074  * ice_set_ctrlq_len - helper function to set controlq length
2075  * @hw: pointer to the HW instance
2076  */
2077 static void ice_set_ctrlq_len(struct ice_hw *hw)
2078 {
2079 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2080 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2081 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2082 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2083 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2084 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2085 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2086 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2087 }
2088 
2089 /**
2090  * ice_schedule_reset - schedule a reset
2091  * @pf: board private structure
2092  * @reset: reset being requested
2093  */
2094 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2095 {
2096 	struct device *dev = ice_pf_to_dev(pf);
2097 
2098 	/* bail out if earlier reset has failed */
2099 	if (test_bit(__ICE_RESET_FAILED, pf->state)) {
2100 		dev_dbg(dev, "earlier reset has failed\n");
2101 		return -EIO;
2102 	}
2103 	/* bail if reset/recovery already in progress */
2104 	if (ice_is_reset_in_progress(pf->state)) {
2105 		dev_dbg(dev, "Reset already in progress\n");
2106 		return -EBUSY;
2107 	}
2108 
2109 	switch (reset) {
2110 	case ICE_RESET_PFR:
2111 		set_bit(__ICE_PFR_REQ, pf->state);
2112 		break;
2113 	case ICE_RESET_CORER:
2114 		set_bit(__ICE_CORER_REQ, pf->state);
2115 		break;
2116 	case ICE_RESET_GLOBR:
2117 		set_bit(__ICE_GLOBR_REQ, pf->state);
2118 		break;
2119 	default:
2120 		return -EINVAL;
2121 	}
2122 
2123 	ice_service_task_schedule(pf);
2124 	return 0;
2125 }
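
/* Usage sketch (illustrative, not part of the driver): a caller wanting a
 * PF-level reset would do:
 *
 *	err = ice_schedule_reset(pf, ICE_RESET_PFR);
 *	if (err)
 *		dev_dbg(ice_pf_to_dev(pf), "PFR not scheduled: %d\n", err);
 *
 * The reset itself is carried out later by the service task.
 */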
2126 
2127 /**
2128  * ice_irq_affinity_notify - Callback for affinity changes
2129  * @notify: context as to what irq was changed
2130  * @mask: the new affinity mask
2131  *
2132  * This is a callback function used by the irq_set_affinity_notifier function
2133  * so that we may register to receive changes to the irq affinity masks.
2134  */
2135 static void
2136 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2137 			const cpumask_t *mask)
2138 {
2139 	struct ice_q_vector *q_vector =
2140 		container_of(notify, struct ice_q_vector, affinity_notify);
2141 
2142 	cpumask_copy(&q_vector->affinity_mask, mask);
2143 }
2144 
2145 /**
2146  * ice_irq_affinity_release - Callback for affinity notifier release
2147  * @ref: internal core kernel usage
2148  *
2149  * This is a callback function used by the irq_set_affinity_notifier function
2150  * to inform the current notification subscriber that they will no longer
2151  * receive notifications.
2152  */
2153 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2154 
2155 /**
2156  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2157  * @vsi: the VSI being configured
2158  */
2159 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2160 {
2161 	struct ice_hw *hw = &vsi->back->hw;
2162 	int i;
2163 
2164 	ice_for_each_q_vector(vsi, i)
2165 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2166 
2167 	ice_flush(hw);
2168 	return 0;
2169 }
2170 
2171 /**
2172  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2173  * @vsi: the VSI being configured
2174  * @basename: name for the vector
2175  */
2176 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2177 {
2178 	int q_vectors = vsi->num_q_vectors;
2179 	struct ice_pf *pf = vsi->back;
2180 	int base = vsi->base_vector;
2181 	struct device *dev;
2182 	int rx_int_idx = 0;
2183 	int tx_int_idx = 0;
2184 	int vector, err;
2185 	int irq_num;
2186 
2187 	dev = ice_pf_to_dev(pf);
2188 	for (vector = 0; vector < q_vectors; vector++) {
2189 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2190 
2191 		irq_num = pf->msix_entries[base + vector].vector;
2192 
2193 		if (q_vector->tx.ring && q_vector->rx.ring) {
2194 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2195 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2196 			tx_int_idx++;
2197 		} else if (q_vector->rx.ring) {
2198 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2199 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2200 		} else if (q_vector->tx.ring) {
2201 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2202 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2203 		} else {
2204 			/* skip this unused q_vector */
2205 			continue;
2206 		}
2207 		err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0,
2208 				       q_vector->name, q_vector);
2209 		if (err) {
2210 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2211 				   err);
2212 			goto free_q_irqs;
2213 		}
2214 
2215 		/* register for affinity change notifications */
2216 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2217 			struct irq_affinity_notify *affinity_notify;
2218 
2219 			affinity_notify = &q_vector->affinity_notify;
2220 			affinity_notify->notify = ice_irq_affinity_notify;
2221 			affinity_notify->release = ice_irq_affinity_release;
2222 			irq_set_affinity_notifier(irq_num, affinity_notify);
2223 		}
2224 
2225 		/* assign the mask for this irq */
2226 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2227 	}
2228 
2229 	vsi->irqs_ready = true;
2230 	return 0;
2231 
2232 free_q_irqs:
2233 	while (vector) {
2234 		vector--;
2235 		irq_num = pf->msix_entries[base + vector].vector;
2236 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2237 			irq_set_affinity_notifier(irq_num, NULL);
2238 		irq_set_affinity_hint(irq_num, NULL);
2239 		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2240 	}
2241 	return err;
2242 }
2243 
2244 /**
2245  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2246  * @vsi: VSI to setup Tx rings used by XDP
2247  *
2248  * Return 0 on success and negative value on error
2249  */
2250 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2251 {
2252 	struct device *dev = ice_pf_to_dev(vsi->back);
2253 	int i;
2254 
2255 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2256 		u16 xdp_q_idx = vsi->alloc_txq + i;
2257 		struct ice_ring *xdp_ring;
2258 
2259 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2260 
2261 		if (!xdp_ring)
2262 			goto free_xdp_rings;
2263 
2264 		xdp_ring->q_index = xdp_q_idx;
2265 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2266 		xdp_ring->ring_active = false;
2267 		xdp_ring->vsi = vsi;
2268 		xdp_ring->netdev = NULL;
2269 		xdp_ring->dev = dev;
2270 		xdp_ring->count = vsi->num_tx_desc;
2271 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2272 		if (ice_setup_tx_ring(xdp_ring))
2273 			goto free_xdp_rings;
2274 		ice_set_ring_xdp(xdp_ring);
2275 		xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
2276 	}
2277 
2278 	return 0;
2279 
2280 free_xdp_rings:
2281 	for (; i >= 0; i--)
2282 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2283 			ice_free_tx_ring(vsi->xdp_rings[i]);
2284 	return -ENOMEM;
2285 }
2286 
2287 /**
2288  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2289  * @vsi: VSI to set the bpf prog on
2290  * @prog: the bpf prog pointer
2291  */
2292 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2293 {
2294 	struct bpf_prog *old_prog;
2295 	int i;
2296 
2297 	old_prog = xchg(&vsi->xdp_prog, prog);
2298 	if (old_prog)
2299 		bpf_prog_put(old_prog);
2300 
2301 	ice_for_each_rxq(vsi, i)
2302 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2303 }
2304 
2305 /**
2306  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2307  * @vsi: VSI to bring up Tx rings used by XDP
2308  * @prog: bpf program that will be assigned to VSI
2309  *
2310  * Return 0 on success and negative value on error
2311  */
2312 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2313 {
2314 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2315 	int xdp_rings_rem = vsi->num_xdp_txq;
2316 	struct ice_pf *pf = vsi->back;
2317 	struct ice_qs_cfg xdp_qs_cfg = {
2318 		.qs_mutex = &pf->avail_q_mutex,
2319 		.pf_map = pf->avail_txqs,
2320 		.pf_map_size = pf->max_pf_txqs,
2321 		.q_count = vsi->num_xdp_txq,
2322 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2323 		.vsi_map = vsi->txq_map,
2324 		.vsi_map_offset = vsi->alloc_txq,
2325 		.mapping_mode = ICE_VSI_MAP_CONTIG
2326 	};
2327 	enum ice_status status;
2328 	struct device *dev;
2329 	int i, v_idx;
2330 
2331 	dev = ice_pf_to_dev(pf);
2332 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2333 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2334 	if (!vsi->xdp_rings)
2335 		return -ENOMEM;
2336 
2337 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2338 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2339 		goto err_map_xdp;
2340 
2341 	if (ice_xdp_alloc_setup_rings(vsi))
2342 		goto clear_xdp_rings;
2343 
2344 	/* follow the logic from ice_vsi_map_rings_to_vectors */
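	/* Illustrative example: with num_xdp_txq = 8 and num_q_vectors = 3,
	 * DIV_ROUND_UP spreads the rings 3/3/2 across the vectors:
	 * DIV_ROUND_UP(8, 3) = 3, DIV_ROUND_UP(5, 2) = 3, DIV_ROUND_UP(2, 1) = 2
	 */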
2345 	ice_for_each_q_vector(vsi, v_idx) {
2346 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2347 		int xdp_rings_per_v, q_id, q_base;
2348 
2349 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2350 					       vsi->num_q_vectors - v_idx);
2351 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2352 
2353 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2354 			struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
2355 
2356 			xdp_ring->q_vector = q_vector;
2357 			xdp_ring->next = q_vector->tx.ring;
2358 			q_vector->tx.ring = xdp_ring;
2359 		}
2360 		xdp_rings_rem -= xdp_rings_per_v;
2361 	}
2362 
2363 	/* omit the scheduler update if in reset path; XDP queues will be
2364 	 * taken into account at the end of ice_vsi_rebuild, where
2365 	 * ice_cfg_vsi_lan is being called
2366 	 */
2367 	if (ice_is_reset_in_progress(pf->state))
2368 		return 0;
2369 
2370 	/* tell the Tx scheduler that right now we have
2371 	 * additional queues
2372 	 */
2373 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2374 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2375 
2376 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2377 				 max_txqs);
2378 	if (status) {
2379 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
2380 			ice_stat_str(status));
2381 		goto clear_xdp_rings;
2382 	}
2383 	ice_vsi_assign_bpf_prog(vsi, prog);
2384 
2385 	return 0;
2386 clear_xdp_rings:
2387 	for (i = 0; i < vsi->num_xdp_txq; i++)
2388 		if (vsi->xdp_rings[i]) {
2389 			kfree_rcu(vsi->xdp_rings[i], rcu);
2390 			vsi->xdp_rings[i] = NULL;
2391 		}
2392 
2393 err_map_xdp:
2394 	mutex_lock(&pf->avail_q_mutex);
2395 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2396 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2397 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2398 	}
2399 	mutex_unlock(&pf->avail_q_mutex);
2400 
2401 	devm_kfree(dev, vsi->xdp_rings);
2402 	return -ENOMEM;
2403 }
2404 
2405 /**
2406  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2407  * @vsi: VSI to remove XDP rings
2408  *
2409  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2410  * resources
2411  */
2412 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2413 {
2414 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2415 	struct ice_pf *pf = vsi->back;
2416 	int i, v_idx;
2417 
2418 	/* q_vectors are freed in the reset path so there's no point in
2419 	 * detaching rings; if the rebuild was triggered by something other
2420 	 * than a reset, the reset bits in pf->state won't be set, so
2421 	 * additionally check the first q_vector against NULL
2422 	 */
2423 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2424 		goto free_qmap;
2425 
2426 	ice_for_each_q_vector(vsi, v_idx) {
2427 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2428 		struct ice_ring *ring;
2429 
2430 		ice_for_each_ring(ring, q_vector->tx)
2431 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2432 				break;
2433 
2434 		/* restore the value of last node prior to XDP setup */
2435 		q_vector->tx.ring = ring;
2436 	}
2437 
2438 free_qmap:
2439 	mutex_lock(&pf->avail_q_mutex);
2440 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2441 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2442 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2443 	}
2444 	mutex_unlock(&pf->avail_q_mutex);
2445 
2446 	for (i = 0; i < vsi->num_xdp_txq; i++)
2447 		if (vsi->xdp_rings[i]) {
2448 			if (vsi->xdp_rings[i]->desc)
2449 				ice_free_tx_ring(vsi->xdp_rings[i]);
2450 			kfree_rcu(vsi->xdp_rings[i], rcu);
2451 			vsi->xdp_rings[i] = NULL;
2452 		}
2453 
2454 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2455 	vsi->xdp_rings = NULL;
2456 
2457 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2458 		return 0;
2459 
2460 	ice_vsi_assign_bpf_prog(vsi, NULL);
2461 
2462 	/* notify Tx scheduler that we destroyed XDP queues and bring
2463 	 * back the old number of child nodes
2464 	 */
2465 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2466 		max_txqs[i] = vsi->num_txq;
2467 
2468 	/* change number of XDP Tx queues to 0 */
2469 	vsi->num_xdp_txq = 0;
2470 
2471 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2472 			       max_txqs);
2473 }
2474 
2475 /**
2476  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2477  * @vsi: VSI to setup XDP for
2478  * @prog: XDP program
2479  * @extack: netlink extended ack
2480  */
2481 static int
2482 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2483 		   struct netlink_ext_ack *extack)
2484 {
2485 	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2486 	bool if_running = netif_running(vsi->netdev);
2487 	int ret = 0, xdp_ring_err = 0;
2488 
2489 	if (frame_size > vsi->rx_buf_len) {
2490 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2491 		return -EOPNOTSUPP;
2492 	}
2493 
2494 	/* need to stop netdev while setting up the program for Rx rings */
2495 	if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
2496 		ret = ice_down(vsi);
2497 		if (ret) {
2498 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2499 			return ret;
2500 		}
2501 	}
2502 
2503 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2504 		vsi->num_xdp_txq = vsi->alloc_rxq;
2505 		xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2506 		if (xdp_ring_err)
2507 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2508 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2509 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2510 		if (xdp_ring_err)
2511 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2512 	} else {
2513 		ice_vsi_assign_bpf_prog(vsi, prog);
2514 	}
2515 
2516 	if (if_running)
2517 		ret = ice_up(vsi);
2518 
2519 	if (!ret && prog && vsi->xsk_pools) {
2520 		int i;
2521 
2522 		ice_for_each_rxq(vsi, i) {
2523 			struct ice_ring *rx_ring = vsi->rx_rings[i];
2524 
2525 			if (rx_ring->xsk_pool)
2526 				napi_schedule(&rx_ring->q_vector->napi);
2527 		}
2528 	}
2529 
2530 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2531 }
2532 
2533 /**
2534  * ice_xdp - implements XDP handler
2535  * @dev: netdevice
2536  * @xdp: XDP command
2537  */
2538 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2539 {
2540 	struct ice_netdev_priv *np = netdev_priv(dev);
2541 	struct ice_vsi *vsi = np->vsi;
2542 
2543 	if (vsi->type != ICE_VSI_PF) {
2544 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2545 		return -EINVAL;
2546 	}
2547 
2548 	switch (xdp->command) {
2549 	case XDP_SETUP_PROG:
2550 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2551 	case XDP_SETUP_XSK_POOL:
2552 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2553 					  xdp->xsk.queue_id);
2554 	default:
2555 		return -EINVAL;
2556 	}
2557 }
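
/* Usage sketch (illustrative): this handler is reached via the ndo_bpf
 * netdev op when an XDP program is attached from userspace, e.g. with
 * iproute2:
 *
 *	ip link set dev <ifname> xdp obj xdp_prog.o sec xdp
 */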
2558 
2559 /**
2560  * ice_ena_misc_vector - enable the non-queue interrupts
2561  * @pf: board private structure
2562  */
2563 static void ice_ena_misc_vector(struct ice_pf *pf)
2564 {
2565 	struct ice_hw *hw = &pf->hw;
2566 	u32 val;
2567 
2568 	/* Disable anti-spoof detection interrupt to prevent spurious event
2569 	 * interrupts during a function reset. Anti-spoof functionality is
2570 	 * still supported.
2571 	 */
2572 	val = rd32(hw, GL_MDCK_TX_TDPU);
2573 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2574 	wr32(hw, GL_MDCK_TX_TDPU, val);
2575 
2576 	/* clear things first */
2577 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
2578 	rd32(hw, PFINT_OICR);		/* read to clear */
2579 
2580 	val = (PFINT_OICR_ECC_ERR_M |
2581 	       PFINT_OICR_MAL_DETECT_M |
2582 	       PFINT_OICR_GRST_M |
2583 	       PFINT_OICR_PCI_EXCEPTION_M |
2584 	       PFINT_OICR_VFLR_M |
2585 	       PFINT_OICR_HMC_ERR_M |
2586 	       PFINT_OICR_PE_CRITERR_M);
2587 
2588 	wr32(hw, PFINT_OICR_ENA, val);
2589 
2590 	/* SW_ITR_IDX = 0, but don't change INTENA */
2591 	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
2592 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2593 }
2594 
2595 /**
2596  * ice_misc_intr - misc interrupt handler
2597  * @irq: interrupt number
2598  * @data: pointer to the PF structure
2599  */
2600 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
2601 {
2602 	struct ice_pf *pf = (struct ice_pf *)data;
2603 	struct ice_hw *hw = &pf->hw;
2604 	irqreturn_t ret = IRQ_NONE;
2605 	struct device *dev;
2606 	u32 oicr, ena_mask;
2607 
2608 	dev = ice_pf_to_dev(pf);
2609 	set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
2610 	set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
2611 
2612 	oicr = rd32(hw, PFINT_OICR);
2613 	ena_mask = rd32(hw, PFINT_OICR_ENA);
2614 
2615 	if (oicr & PFINT_OICR_SWINT_M) {
2616 		ena_mask &= ~PFINT_OICR_SWINT_M;
2617 		pf->sw_int_count++;
2618 	}
2619 
2620 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
2621 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
2622 		set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
2623 	}
2624 	if (oicr & PFINT_OICR_VFLR_M) {
2625 		/* disable any further VFLR event notifications */
2626 		if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
2627 			u32 reg = rd32(hw, PFINT_OICR_ENA);
2628 
2629 			reg &= ~PFINT_OICR_VFLR_M;
2630 			wr32(hw, PFINT_OICR_ENA, reg);
2631 		} else {
2632 			ena_mask &= ~PFINT_OICR_VFLR_M;
2633 			set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
2634 		}
2635 	}
2636 
2637 	if (oicr & PFINT_OICR_GRST_M) {
2638 		u32 reset;
2639 
2640 		/* we have a reset warning */
2641 		ena_mask &= ~PFINT_OICR_GRST_M;
2642 		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
2643 			GLGEN_RSTAT_RESET_TYPE_S;
2644 
2645 		if (reset == ICE_RESET_CORER)
2646 			pf->corer_count++;
2647 		else if (reset == ICE_RESET_GLOBR)
2648 			pf->globr_count++;
2649 		else if (reset == ICE_RESET_EMPR)
2650 			pf->empr_count++;
2651 		else
2652 			dev_dbg(dev, "Invalid reset type %d\n", reset);
2653 
2654 		/* If a reset cycle isn't already in progress, we set a bit in
2655 		 * pf->state so that the service task can start a reset/rebuild.
2656 		 * We also make note of which reset happened so that peer
2657 		 * devices/drivers can be informed.
2658 		 */
2659 		if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
2660 			if (reset == ICE_RESET_CORER)
2661 				set_bit(__ICE_CORER_RECV, pf->state);
2662 			else if (reset == ICE_RESET_GLOBR)
2663 				set_bit(__ICE_GLOBR_RECV, pf->state);
2664 			else
2665 				set_bit(__ICE_EMPR_RECV, pf->state);
2666 
2667 			/* There are a couple of different bits at play here.
2668 			 * hw->reset_ongoing indicates whether the hardware is
2669 			 * in reset. This is set to true when a reset interrupt
2670 			 * is received and set back to false after the driver
2671 			 * has determined that the hardware is out of reset.
2672 			 *
2673 			 * __ICE_RESET_OICR_RECV in pf->state indicates
2674 			 * that a post reset rebuild is required before the
2675 			 * driver is operational again. This is set above.
2676 			 *
2677 			 * As this is the start of the reset/rebuild cycle, set
2678 			 * both to indicate that.
2679 			 */
2680 			hw->reset_ongoing = true;
2681 		}
2682 	}
2683 
2684 	if (oicr & PFINT_OICR_HMC_ERR_M) {
2685 		ena_mask &= ~PFINT_OICR_HMC_ERR_M;
2686 		dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n",
2687 			rd32(hw, PFHMC_ERRORINFO),
2688 			rd32(hw, PFHMC_ERRORDATA));
2689 	}
2690 
2691 	/* Report any remaining unexpected interrupts */
2692 	oicr &= ena_mask;
2693 	if (oicr) {
2694 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
2695 		/* If a critical error is pending there is no choice but to
2696 		 * reset the device.
2697 		 */
2698 		if (oicr & (PFINT_OICR_PE_CRITERR_M |
2699 			    PFINT_OICR_PCI_EXCEPTION_M |
2700 			    PFINT_OICR_ECC_ERR_M)) {
2701 			set_bit(__ICE_PFR_REQ, pf->state);
2702 			ice_service_task_schedule(pf);
2703 		}
2704 	}
2705 	ret = IRQ_HANDLED;
2706 
2707 	ice_service_task_schedule(pf);
2708 	ice_irq_dynamic_ena(hw, NULL, NULL);
2709 
2710 	return ret;
2711 }
2712 
2713 /**
2714  * ice_dis_ctrlq_interrupts - disable control queue interrupts
2715  * @hw: pointer to HW structure
2716  */
2717 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
2718 {
2719 	/* disable Admin queue Interrupt causes */
2720 	wr32(hw, PFINT_FW_CTL,
2721 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
2722 
2723 	/* disable Mailbox queue Interrupt causes */
2724 	wr32(hw, PFINT_MBX_CTL,
2725 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
2726 
2727 	/* disable Control queue Interrupt causes */
2728 	wr32(hw, PFINT_OICR_CTL,
2729 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
2730 
2731 	ice_flush(hw);
2732 }
2733 
2734 /**
2735  * ice_free_irq_msix_misc - Unroll misc vector setup
2736  * @pf: board private structure
2737  */
2738 static void ice_free_irq_msix_misc(struct ice_pf *pf)
2739 {
2740 	struct ice_hw *hw = &pf->hw;
2741 
2742 	ice_dis_ctrlq_interrupts(hw);
2743 
2744 	/* disable OICR interrupt */
2745 	wr32(hw, PFINT_OICR_ENA, 0);
2746 	ice_flush(hw);
2747 
2748 	if (pf->msix_entries) {
2749 		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
2750 		devm_free_irq(ice_pf_to_dev(pf),
2751 			      pf->msix_entries[pf->oicr_idx].vector, pf);
2752 	}
2753 
2754 	pf->num_avail_sw_msix += 1;
2755 	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2756 }
2757 
2758 /**
2759  * ice_ena_ctrlq_interrupts - enable control queue interrupts
2760  * @hw: pointer to HW structure
2761  * @reg_idx: HW vector index to associate the control queue interrupts with
2762  */
2763 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
2764 {
2765 	u32 val;
2766 
2767 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2768 	       PFINT_OICR_CTL_CAUSE_ENA_M);
2769 	wr32(hw, PFINT_OICR_CTL, val);
2770 
2771 	/* enable Admin queue Interrupt causes */
2772 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2773 	       PFINT_FW_CTL_CAUSE_ENA_M);
2774 	wr32(hw, PFINT_FW_CTL, val);
2775 
2776 	/* enable Mailbox queue Interrupt causes */
2777 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
2778 	       PFINT_MBX_CTL_CAUSE_ENA_M);
2779 	wr32(hw, PFINT_MBX_CTL, val);
2780 
2781 	ice_flush(hw);
2782 }
2783 
2784 /**
2785  * ice_req_irq_msix_misc - Setup the misc vector to handle non-queue events
2786  * @pf: board private structure
2787  *
2788  * This sets up the handler for MSIX 0, which is used to manage the
2789  * non-queue interrupts, e.g. AdminQ and errors. This is not used
2790  * when in MSI or Legacy interrupt mode.
2791  */
2792 static int ice_req_irq_msix_misc(struct ice_pf *pf)
2793 {
2794 	struct device *dev = ice_pf_to_dev(pf);
2795 	struct ice_hw *hw = &pf->hw;
2796 	int oicr_idx, err = 0;
2797 
2798 	if (!pf->int_name[0])
2799 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
2800 			 dev_driver_string(dev), dev_name(dev));
2801 
2802 	/* Do not request IRQ but do enable OICR interrupt since settings are
2803 	 * lost during reset. Note that this function is called only during
2804 	 * rebuild path and not while reset is in progress.
2805 	 */
2806 	if (ice_is_reset_in_progress(pf->state))
2807 		goto skip_req_irq;
2808 
2809 	/* reserve one vector in irq_tracker for misc interrupts */
2810 	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2811 	if (oicr_idx < 0)
2812 		return oicr_idx;
2813 
2814 	pf->num_avail_sw_msix -= 1;
2815 	pf->oicr_idx = (u16)oicr_idx;
2816 
2817 	err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
2818 			       ice_misc_intr, 0, pf->int_name, pf);
2819 	if (err) {
2820 		dev_err(dev, "devm_request_irq for %s failed: %d\n",
2821 			pf->int_name, err);
2822 		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2823 		pf->num_avail_sw_msix += 1;
2824 		return err;
2825 	}
2826 
2827 skip_req_irq:
2828 	ice_ena_misc_vector(pf);
2829 
2830 	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
2831 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
2832 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
2833 
2834 	ice_flush(hw);
2835 	ice_irq_dynamic_ena(hw, NULL, NULL);
2836 
2837 	return 0;
2838 }
2839 
2840 /**
2841  * ice_napi_add - register NAPI handler for the VSI
2842  * @vsi: VSI for which NAPI handler is to be registered
2843  *
2844  * This function is only called in the driver's load path. Registering the NAPI
2845  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
2846  * reset/rebuild, etc.)
2847  */
2848 static void ice_napi_add(struct ice_vsi *vsi)
2849 {
2850 	int v_idx;
2851 
2852 	if (!vsi->netdev)
2853 		return;
2854 
2855 	ice_for_each_q_vector(vsi, v_idx)
2856 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
2857 			       ice_napi_poll, NAPI_POLL_WEIGHT);
2858 }
2859 
2860 /**
2861  * ice_set_ops - set netdev and ethtools ops for the given netdev
2862  * @netdev: netdev instance
2863  */
2864 static void ice_set_ops(struct net_device *netdev)
2865 {
2866 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
2867 
2868 	if (ice_is_safe_mode(pf)) {
2869 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
2870 		ice_set_ethtool_safe_mode_ops(netdev);
2871 		return;
2872 	}
2873 
2874 	netdev->netdev_ops = &ice_netdev_ops;
2875 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
2876 	ice_set_ethtool_ops(netdev);
2877 }
2878 
2879 /**
2880  * ice_set_netdev_features - set features for the given netdev
2881  * @netdev: netdev instance
2882  */
2883 static void ice_set_netdev_features(struct net_device *netdev)
2884 {
2885 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
2886 	netdev_features_t csumo_features;
2887 	netdev_features_t vlano_features;
2888 	netdev_features_t dflt_features;
2889 	netdev_features_t tso_features;
2890 
2891 	if (ice_is_safe_mode(pf)) {
2892 		/* safe mode */
2893 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
2894 		netdev->hw_features = netdev->features;
2895 		return;
2896 	}
2897 
2898 	dflt_features = NETIF_F_SG	|
2899 			NETIF_F_HIGHDMA	|
2900 			NETIF_F_NTUPLE	|
2901 			NETIF_F_RXHASH;
2902 
2903 	csumo_features = NETIF_F_RXCSUM	  |
2904 			 NETIF_F_IP_CSUM  |
2905 			 NETIF_F_SCTP_CRC |
2906 			 NETIF_F_IPV6_CSUM;
2907 
2908 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
2909 			 NETIF_F_HW_VLAN_CTAG_TX     |
2910 			 NETIF_F_HW_VLAN_CTAG_RX;
2911 
2912 	tso_features = NETIF_F_TSO			|
2913 		       NETIF_F_TSO_ECN			|
2914 		       NETIF_F_TSO6			|
2915 		       NETIF_F_GSO_GRE			|
2916 		       NETIF_F_GSO_UDP_TUNNEL		|
2917 		       NETIF_F_GSO_GRE_CSUM		|
2918 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
2919 		       NETIF_F_GSO_PARTIAL		|
2920 		       NETIF_F_GSO_IPXIP4		|
2921 		       NETIF_F_GSO_IPXIP6		|
2922 		       NETIF_F_GSO_UDP_L4;
2923 
2924 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
2925 					NETIF_F_GSO_GRE_CSUM;
2926 	/* set features that user can change */
2927 	netdev->hw_features = dflt_features | csumo_features |
2928 			      vlano_features | tso_features;
2929 
2930 	/* add support for HW_CSUM on packets with MPLS header */
2931 	netdev->mpls_features = NETIF_F_HW_CSUM;
2932 
2933 	/* enable features */
2934 	netdev->features |= netdev->hw_features;
2935 	/* encap and VLAN devices inherit default, csumo and tso features */
2936 	netdev->hw_enc_features |= dflt_features | csumo_features |
2937 				   tso_features;
2938 	netdev->vlan_features |= dflt_features | csumo_features |
2939 				 tso_features;
2940 }
2941 
2942 /**
2943  * ice_cfg_netdev - Allocate, configure and register a netdev
2944  * @vsi: the VSI associated with the new netdev
2945  *
2946  * Returns 0 on success, negative value on failure
2947  */
2948 static int ice_cfg_netdev(struct ice_vsi *vsi)
2949 {
2950 	struct ice_pf *pf = vsi->back;
2951 	struct ice_netdev_priv *np;
2952 	struct net_device *netdev;
2953 	u8 mac_addr[ETH_ALEN];
2954 	int err;
2955 
2956 	err = ice_devlink_create_port(pf);
2957 	if (err)
2958 		return err;
2959 
2960 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
2961 				    vsi->alloc_rxq);
2962 	if (!netdev) {
2963 		err = -ENOMEM;
2964 		goto err_destroy_devlink_port;
2965 	}
2966 
2967 	vsi->netdev = netdev;
2968 	np = netdev_priv(netdev);
2969 	np->vsi = vsi;
2970 
2971 	ice_set_netdev_features(netdev);
2972 
2973 	ice_set_ops(netdev);
2974 
2975 	if (vsi->type == ICE_VSI_PF) {
2976 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(pf));
2977 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
2978 		ether_addr_copy(netdev->dev_addr, mac_addr);
2979 		ether_addr_copy(netdev->perm_addr, mac_addr);
2980 	}
2981 
2982 	netdev->priv_flags |= IFF_UNICAST_FLT;
2983 
2984 	/* Setup netdev TC information */
2985 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
2986 
2987 	/* setup watchdog timeout value to be 5 second */
2988 	netdev->watchdog_timeo = 5 * HZ;
2989 
2990 	netdev->min_mtu = ETH_MIN_MTU;
2991 	netdev->max_mtu = ICE_MAX_MTU;
2992 
2993 	err = register_netdev(vsi->netdev);
2994 	if (err)
2995 		goto err_free_netdev;
2996 
2997 	devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
2998 
2999 	netif_carrier_off(vsi->netdev);
3000 
3001 	/* make sure transmit queues start off as stopped */
3002 	netif_tx_stop_all_queues(vsi->netdev);
3003 
3004 	return 0;
3005 
3006 err_free_netdev:
3007 	free_netdev(vsi->netdev);
3008 	vsi->netdev = NULL;
3009 err_destroy_devlink_port:
3010 	ice_devlink_destroy_port(pf);
3011 	return err;
3012 }
3013 
3014 /**
3015  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3016  * @lut: Lookup table
3017  * @rss_table_size: Lookup table size
3018  * @rss_size: Range of queue number for hashing
3019  */
3020 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3021 {
3022 	u16 i;
3023 
3024 	for (i = 0; i < rss_table_size; i++)
3025 		lut[i] = i % rss_size;
3026 }
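
/* Worked example (illustrative): ice_fill_rss_lut(lut, 8, 3) produces
 * lut = { 0, 1, 2, 0, 1, 2, 0, 1 }, i.e. queues 0..2 repeated round-robin
 * across the 8 LUT entries.
 */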
3027 
3028 /**
3029  * ice_pf_vsi_setup - Set up a PF VSI
3030  * @pf: board private structure
3031  * @pi: pointer to the port_info instance
3032  *
3033  * Returns pointer to the successfully allocated VSI software struct
3034  * on success, otherwise returns NULL on failure.
3035  */
3036 static struct ice_vsi *
3037 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3038 {
3039 	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
3040 }
3041 
3042 /**
3043  * ice_ctrl_vsi_setup - Set up a control VSI
3044  * @pf: board private structure
3045  * @pi: pointer to the port_info instance
3046  *
3047  * Returns pointer to the successfully allocated VSI software struct
3048  * on success, otherwise returns NULL on failure.
3049  */
3050 static struct ice_vsi *
3051 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3052 {
3053 	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
3054 }
3055 
3056 /**
3057  * ice_lb_vsi_setup - Set up a loopback VSI
3058  * @pf: board private structure
3059  * @pi: pointer to the port_info instance
3060  *
3061  * Returns pointer to the successfully allocated VSI software struct
3062  * on success, otherwise returns NULL on failure.
3063  */
3064 struct ice_vsi *
3065 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3066 {
3067 	return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
3068 }
3069 
3070 /**
3071  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3072  * @netdev: network interface to be adjusted
3073  * @proto: unused protocol
3074  * @vid: VLAN ID to be added
3075  *
3076  * net_device_ops implementation for adding VLAN IDs
3077  */
3078 static int
3079 ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
3080 		    u16 vid)
3081 {
3082 	struct ice_netdev_priv *np = netdev_priv(netdev);
3083 	struct ice_vsi *vsi = np->vsi;
3084 	int ret;
3085 
3086 	if (vid >= VLAN_N_VID) {
3087 		netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
3088 			   vid, VLAN_N_VID);
3089 		return -EINVAL;
3090 	}
3091 
3092 	if (vsi->info.pvid)
3093 		return -EINVAL;
3094 
3095 	/* VLAN 0 is added by default during load/reset */
3096 	if (!vid)
3097 		return 0;
3098 
3099 	/* Enable VLAN pruning when a VLAN other than 0 is added */
3100 	if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3101 		ret = ice_cfg_vlan_pruning(vsi, true, false);
3102 		if (ret)
3103 			return ret;
3104 	}
3105 
3106 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3107 	 * packets aren't pruned by the device's internal switch on Rx
3108 	 */
3109 	ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3110 	if (!ret) {
3111 		vsi->vlan_ena = true;
3112 		set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
3113 	}
3114 
3115 	return ret;
3116 }
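
/* Usage sketch (illustrative): this ndo_vlan_rx_add_vid implementation runs
 * when userspace creates a VLAN upper device, e.g. with iproute2:
 *
 *	ip link add link <ifname> name <ifname>.100 type vlan id 100
 */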
3117 
3118 /**
3119  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3120  * @netdev: network interface to be adjusted
3121  * @proto: unused protocol
3122  * @vid: VLAN ID to be removed
3123  *
3124  * net_device_ops implementation for removing VLAN IDs
3125  */
3126 static int
3127 ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
3128 		     u16 vid)
3129 {
3130 	struct ice_netdev_priv *np = netdev_priv(netdev);
3131 	struct ice_vsi *vsi = np->vsi;
3132 	int ret;
3133 
3134 	if (vsi->info.pvid)
3135 		return -EINVAL;
3136 
3137 	/* don't allow removal of VLAN 0 */
3138 	if (!vid)
3139 		return 0;
3140 
3141 	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
3142 	 * information
3143 	 */
3144 	ret = ice_vsi_kill_vlan(vsi, vid);
3145 	if (ret)
3146 		return ret;
3147 
3148 	/* Disable pruning when VLAN 0 is the only VLAN rule */
3149 	if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3150 		ret = ice_cfg_vlan_pruning(vsi, false, false);
3151 
3152 	vsi->vlan_ena = false;
3153 	set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
3154 	return ret;
3155 }
3156 
3157 /**
3158  * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3159  * @pf: board private structure
3160  *
3161  * Returns 0 on success, negative value on failure
3162  */
3163 static int ice_setup_pf_sw(struct ice_pf *pf)
3164 {
3165 	struct ice_vsi *vsi;
3166 	int status = 0;
3167 
3168 	if (ice_is_reset_in_progress(pf->state))
3169 		return -EBUSY;
3170 
3171 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3172 	if (!vsi)
3173 		return -ENOMEM;
3174 
3175 	status = ice_cfg_netdev(vsi);
3176 	if (status) {
3177 		status = -ENODEV;
3178 		goto unroll_vsi_setup;
3179 	}
3180 	/* netdev has to be configured before setting frame size */
3181 	ice_vsi_cfg_frame_size(vsi);
3182 
3183 	/* Setup DCB netlink interface */
3184 	ice_dcbnl_setup(vsi);
3185 
3186 	/* registering the NAPI handler requires both the queues and
3187 	 * netdev to be created, which are done in ice_pf_vsi_setup()
3188 	 * and ice_cfg_netdev() respectively
3189 	 */
3190 	ice_napi_add(vsi);
3191 
3192 	status = ice_set_cpu_rx_rmap(vsi);
3193 	if (status) {
3194 		dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
3195 			vsi->vsi_num, status);
3196 		status = -EINVAL;
3197 		goto unroll_napi_add;
3198 	}
3199 	status = ice_init_mac_fltr(pf);
3200 	if (status)
3201 		goto free_cpu_rx_map;
3202 
3203 	return status;
3204 
3205 free_cpu_rx_map:
3206 	ice_free_cpu_rx_rmap(vsi);
3207 
3208 unroll_napi_add:
3209 	if (vsi) {
3210 		ice_napi_del(vsi);
3211 		if (vsi->netdev) {
3212 			if (vsi->netdev->reg_state == NETREG_REGISTERED)
3213 				unregister_netdev(vsi->netdev);
3214 			free_netdev(vsi->netdev);
3215 			vsi->netdev = NULL;
3216 		}
3217 	}
3218 
3219 unroll_vsi_setup:
3220 	ice_vsi_release(vsi);
3221 	return status;
3222 }
3223 
3224 /**
3225  * ice_get_avail_q_count - Get count of available queues
3226  * @pf_qmap: bitmap to get the available queue count from
3227  * @lock: pointer to a mutex that protects access to pf_qmap
3228  * @size: size of the bitmap
3229  */
3230 static u16
3231 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3232 {
3233 	unsigned long bit;
3234 	u16 count = 0;
3235 
3236 	mutex_lock(lock);
3237 	for_each_clear_bit(bit, pf_qmap, size)
3238 		count++;
3239 	mutex_unlock(lock);
3240 
3241 	return count;
3242 }
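
/* Worked example (illustrative): with size = 8 and bits 0-3 set in pf_qmap
 * (queues 0-3 in use), ice_get_avail_q_count() returns 4, the number of
 * clear bits (queues still available).
 */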
3243 
3244 /**
3245  * ice_get_avail_txq_count - Get count of available Tx queues
3246  * @pf: pointer to an ice_pf instance
3247  */
3248 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3249 {
3250 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3251 				     pf->max_pf_txqs);
3252 }
3253 
3254 /**
3255  * ice_get_avail_rxq_count - Get count of available Rx queues
3256  * @pf: pointer to an ice_pf instance
3257  */
3258 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3259 {
3260 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3261 				     pf->max_pf_rxqs);
3262 }
3263 
3264 /**
3265  * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3266  * @pf: board private structure to initialize
3267  */
3268 static void ice_deinit_pf(struct ice_pf *pf)
3269 {
3270 	ice_service_task_stop(pf);
3271 	mutex_destroy(&pf->sw_mutex);
3272 	mutex_destroy(&pf->tc_mutex);
3273 	mutex_destroy(&pf->avail_q_mutex);
3274 
3275 	if (pf->avail_txqs) {
3276 		bitmap_free(pf->avail_txqs);
3277 		pf->avail_txqs = NULL;
3278 	}
3279 
3280 	if (pf->avail_rxqs) {
3281 		bitmap_free(pf->avail_rxqs);
3282 		pf->avail_rxqs = NULL;
3283 	}
3284 }
3285 
3286 /**
3287  * ice_set_pf_caps - set PFs capability flags
3288  * @pf: pointer to the PF instance
3289  */
3290 static void ice_set_pf_caps(struct ice_pf *pf)
3291 {
3292 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3293 
3294 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3295 	if (func_caps->common_cap.dcb)
3296 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3297 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3298 	if (func_caps->common_cap.sr_iov_1_1) {
3299 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3300 		pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
3301 					      ICE_MAX_VF_COUNT);
3302 	}
3303 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3304 	if (func_caps->common_cap.rss_table_size)
3305 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3306 
3307 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3308 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3309 		u16 unused;
3310 
3311 		/* ctrl_vsi_idx will be set to a valid value when flow director
3312 		 * is set up by ice_init_fdir
3313 		 */
3314 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3315 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3316 		/* force guaranteed filter pool for PF */
3317 		ice_alloc_fd_guar_item(&pf->hw, &unused,
3318 				       func_caps->fd_fltr_guar);
3319 		/* force shared filter pool for PF */
3320 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3321 				       func_caps->fd_fltr_best_effort);
3322 	}
3323 
3324 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3325 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3326 }
3327 
3328 /**
3329  * ice_init_pf - Initialize general software structures (struct ice_pf)
3330  * @pf: board private structure to initialize
3331  */
3332 static int ice_init_pf(struct ice_pf *pf)
3333 {
3334 	ice_set_pf_caps(pf);
3335 
3336 	mutex_init(&pf->sw_mutex);
3337 	mutex_init(&pf->tc_mutex);
3338 
3339 	INIT_HLIST_HEAD(&pf->aq_wait_list);
3340 	spin_lock_init(&pf->aq_wait_lock);
3341 	init_waitqueue_head(&pf->aq_wait_queue);
3342 
3343 	/* setup service timer and periodic service task */
3344 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3345 	pf->serv_tmr_period = HZ;
3346 	INIT_WORK(&pf->serv_task, ice_service_task);
3347 	clear_bit(__ICE_SERVICE_SCHED, pf->state);
3348 
3349 	mutex_init(&pf->avail_q_mutex);
3350 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3351 	if (!pf->avail_txqs)
3352 		return -ENOMEM;
3353 
3354 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3355 	if (!pf->avail_rxqs) {
3356 		devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
3357 		pf->avail_txqs = NULL;
3358 		return -ENOMEM;
3359 	}
3360 
3361 	return 0;
3362 }
3363 
3364 /**
3365  * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3366  * @pf: board private structure
3367  *
3368  * Compute the number of MSI-X vectors required (v_budget) and request them
3369  * from the OS. Returns the number of vectors reserved or negative on failure
3370  */
3371 static int ice_ena_msix_range(struct ice_pf *pf)
3372 {
3373 	struct device *dev = ice_pf_to_dev(pf);
3374 	int v_left, v_actual, v_budget = 0;
3375 	int needed, err, i;
3376 
3377 	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3378 
3379 	/* reserve one vector for miscellaneous handler */
3380 	needed = 1;
3381 	if (v_left < needed)
3382 		goto no_hw_vecs_left_err;
3383 	v_budget += needed;
3384 	v_left -= needed;
3385 
3386 	/* reserve vectors for LAN traffic */
3387 	needed = min_t(int, num_online_cpus(), v_left);
3388 	if (v_left < needed)
3389 		goto no_hw_vecs_left_err;
3390 	pf->num_lan_msix = needed;
3391 	v_budget += needed;
3392 	v_left -= needed;
3393 
3394 	/* reserve one vector for flow director */
3395 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3396 		needed = ICE_FDIR_MSIX;
3397 		if (v_left < needed)
3398 			goto no_hw_vecs_left_err;
3399 		v_budget += needed;
3400 		v_left -= needed;
3401 	}
3402 
3403 	pf->msix_entries = devm_kcalloc(dev, v_budget,
3404 					sizeof(*pf->msix_entries), GFP_KERNEL);
3405 
3406 	if (!pf->msix_entries) {
3407 		err = -ENOMEM;
3408 		goto exit_err;
3409 	}
3410 
3411 	for (i = 0; i < v_budget; i++)
3412 		pf->msix_entries[i].entry = i;
3413 
3414 	/* actually reserve the vectors */
3415 	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3416 					 ICE_MIN_MSIX, v_budget);
3417 
3418 	if (v_actual < 0) {
3419 		dev_err(dev, "unable to reserve MSI-X vectors\n");
3420 		err = v_actual;
3421 		goto msix_err;
3422 	}
3423 
3424 	if (v_actual < v_budget) {
3425 		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3426 			 v_budget, v_actual);
3427 /* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */
3428 #define ICE_MIN_LAN_VECS 2
3429 #define ICE_MIN_RDMA_VECS 2
3430 #define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1)
3431 
3432 		if (v_actual < ICE_MIN_LAN_VECS) {
3433 			/* error if we can't get minimum vectors */
3434 			pci_disable_msix(pf->pdev);
3435 			err = -ERANGE;
3436 			goto msix_err;
3437 		} else {
3438 			pf->num_lan_msix = ICE_MIN_LAN_VECS;
3439 		}
3440 	}
3441 
3442 	return v_actual;
3443 
3444 msix_err:
3445 	devm_kfree(dev, pf->msix_entries);
3446 	goto exit_err;
3447 
3448 no_hw_vecs_left_err:
3449 	dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3450 		needed, v_left);
3451 	err = -ERANGE;
3452 exit_err:
3453 	pf->num_lan_msix = 0;
3454 	return err;
3455 }
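
/* Worked example (illustrative): on a 16-CPU system with 32 device MSI-X
 * vectors and flow director enabled, the request is
 * v_budget = 1 (misc/OICR) + 16 (LAN, one per online CPU) + ICE_FDIR_MSIX.
 */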
3456 
3457 /**
3458  * ice_dis_msix - Disable MSI-X interrupt setup in OS
3459  * @pf: board private structure
3460  */
3461 static void ice_dis_msix(struct ice_pf *pf)
3462 {
3463 	pci_disable_msix(pf->pdev);
3464 	devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3465 	pf->msix_entries = NULL;
3466 }
3467 
3468 /**
3469  * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3470  * @pf: board private structure
3471  */
3472 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3473 {
3474 	ice_dis_msix(pf);
3475 
3476 	if (pf->irq_tracker) {
3477 		devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3478 		pf->irq_tracker = NULL;
3479 	}
3480 }
3481 
3482 /**
3483  * ice_init_interrupt_scheme - Determine proper interrupt scheme
3484  * @pf: board private structure to initialize
3485  */
3486 static int ice_init_interrupt_scheme(struct ice_pf *pf)
3487 {
3488 	int vectors;
3489 
3490 	vectors = ice_ena_msix_range(pf);
3491 
3492 	if (vectors < 0)
3493 		return vectors;
3494 
3495 	/* set up vector assignment tracking */
3496 	pf->irq_tracker =
3497 		devm_kzalloc(ice_pf_to_dev(pf), sizeof(*pf->irq_tracker) +
3498 			     (sizeof(u16) * vectors), GFP_KERNEL);
3499 	if (!pf->irq_tracker) {
3500 		ice_dis_msix(pf);
3501 		return -ENOMEM;
3502 	}
3503 
3504 	/* populate SW interrupts pool with number of OS granted IRQs. */
3505 	pf->num_avail_sw_msix = (u16)vectors;
3506 	pf->irq_tracker->num_entries = (u16)vectors;
3507 	pf->irq_tracker->end = pf->irq_tracker->num_entries;
3508 
3509 	return 0;
3510 }
3511 
3512 /**
3513  * ice_is_wol_supported - get NVM state of WoL
3514  * @pf: board private structure
3515  *
3516  * Check if WoL is supported based on the HW configuration.
3517  * Returns true if NVM supports and enables WoL for this port, false otherwise
3518  */
3519 bool ice_is_wol_supported(struct ice_pf *pf)
3520 {
3521 	struct ice_hw *hw = &pf->hw;
3522 	u16 wol_ctrl;
3523 
3524 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3525 	 * word) indicates WoL is not supported on the corresponding PF ID.
3526 	 */
3527 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3528 		return false;
3529 
3530 	return !(BIT(hw->pf_id) & wol_ctrl);
3531 }
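
/* Worked example (illustrative): if the NVM WoL control word reads 0x0005,
 * bits 0 and 2 are set, so WoL is reported as unsupported on PF IDs 0 and 2
 * and supported on every other PF ID.
 */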
3532 
3533 /**
3534  * ice_vsi_recfg_qs - Change the number of queues on a VSI
3535  * @vsi: VSI being changed
3536  * @new_rx: new number of Rx queues
3537  * @new_tx: new number of Tx queues
3538  *
3539  * Only change the number of queues if new_tx or new_rx is non-zero.
3540  *
3541  * Returns 0 on success.
3542  */
3543 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
3544 {
3545 	struct ice_pf *pf = vsi->back;
3546 	int err = 0, timeout = 50;
3547 
3548 	if (!new_rx && !new_tx)
3549 		return -EINVAL;
3550 
3551 	while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
3552 		timeout--;
3553 		if (!timeout)
3554 			return -EBUSY;
3555 		usleep_range(1000, 2000);
3556 	}
3557 
3558 	if (new_tx)
3559 		vsi->req_txq = (u16)new_tx;
3560 	if (new_rx)
3561 		vsi->req_rxq = (u16)new_rx;
3562 
3563 	/* set for the next time the netdev is started */
3564 	if (!netif_running(vsi->netdev)) {
3565 		ice_vsi_rebuild(vsi, false);
3566 		dev_dbg(ice_pf_to_dev(pf), "Interface is down, queue count change happens when the interface is brought up\n");
3567 		goto done;
3568 	}
3569 
3570 	ice_vsi_close(vsi);
3571 	ice_vsi_rebuild(vsi, false);
3572 	ice_pf_dcb_recfg(pf);
3573 	ice_vsi_open(vsi);
3574 done:
3575 	clear_bit(__ICE_CFG_BUSY, pf->state);
3576 	return err;
3577 }
3578 
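/* Minimal usage sketch (hypothetical caller, e.g. an ethtool
 * set_channels handler; "ch" is assumed to be a struct ethtool_channels);
 * passing 0 for new_rx or new_tx leaves that queue count unchanged:
 *
 *	err = ice_vsi_recfg_qs(vsi, ch->combined_count, ch->combined_count);
 *	if (err)
 *		netdev_err(vsi->netdev, "queue reconfig failed: %d\n", err);
 */
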
3579 /**
3580  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3581  * @pf: PF to configure
3582  *
3583  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3584  * VSI can still Tx/Rx VLAN tagged packets.
3585  */
3586 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
3587 {
3588 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
3589 	struct ice_vsi_ctx *ctxt;
3590 	enum ice_status status;
3591 	struct ice_hw *hw;
3592 
3593 	if (!vsi)
3594 		return;
3595 
3596 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
3597 	if (!ctxt)
3598 		return;
3599 
3600 	hw = &pf->hw;
3601 	ctxt->info = vsi->info;
3602 
3603 	ctxt->info.valid_sections =
3604 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
3605 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
3606 			    ICE_AQ_VSI_PROP_SW_VALID);
3607 
3608 	/* disable VLAN anti-spoof */
3609 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3610 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
3611 
3612 	/* disable VLAN pruning and keep all other settings */
3613 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3614 
3615 	/* allow all VLANs on Tx and don't strip on Rx */
3616 	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
3617 		ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3618 
3619 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
3620 	if (status) {
3621 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
3622 			ice_stat_str(status),
3623 			ice_aq_str(hw->adminq.sq_last_status));
3624 	} else {
3625 		vsi->info.sec_flags = ctxt->info.sec_flags;
3626 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
3627 		vsi->info.vlan_flags = ctxt->info.vlan_flags;
3628 	}
3629 
3630 	kfree(ctxt);
3631 }
3632 
3633 /**
3634  * ice_log_pkg_init - log result of DDP package load
3635  * @hw: pointer to hardware info
3636  * @status: status of package load
3637  */
3638 static void
3639 ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
3640 {
3641 	struct ice_pf *pf = (struct ice_pf *)hw->back;
3642 	struct device *dev = ice_pf_to_dev(pf);
3643 
3644 	switch (*status) {
3645 	case ICE_SUCCESS:
3646 		/* The package download succeeded, or the download AdminQ command
3647 		 * returned ICE_ERR_AQ_NO_WORK because there is already a package
3648 		 * loaded on the device.
3649 		 */
3650 		if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
3651 		    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
3652 		    hw->pkg_ver.update == hw->active_pkg_ver.update &&
3653 		    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
3654 		    !memcmp(hw->pkg_name, hw->active_pkg_name,
3655 			    sizeof(hw->pkg_name))) {
3656 			if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
3657 				dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
3658 					 hw->active_pkg_name,
3659 					 hw->active_pkg_ver.major,
3660 					 hw->active_pkg_ver.minor,
3661 					 hw->active_pkg_ver.update,
3662 					 hw->active_pkg_ver.draft);
3663 			else
3664 				dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
3665 					 hw->active_pkg_name,
3666 					 hw->active_pkg_ver.major,
3667 					 hw->active_pkg_ver.minor,
3668 					 hw->active_pkg_ver.update,
3669 					 hw->active_pkg_ver.draft);
3670 		} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
3671 			   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
3672 			dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
3673 				hw->active_pkg_name,
3674 				hw->active_pkg_ver.major,
3675 				hw->active_pkg_ver.minor,
3676 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3677 			*status = ICE_ERR_NOT_SUPPORTED;
3678 		} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3679 			   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
3680 			dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
3681 				 hw->active_pkg_name,
3682 				 hw->active_pkg_ver.major,
3683 				 hw->active_pkg_ver.minor,
3684 				 hw->active_pkg_ver.update,
3685 				 hw->active_pkg_ver.draft,
3686 				 hw->pkg_name,
3687 				 hw->pkg_ver.major,
3688 				 hw->pkg_ver.minor,
3689 				 hw->pkg_ver.update,
3690 				 hw->pkg_ver.draft);
3691 		} else {
3692 			dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system.  If the problem persists, update the NVM.  Entering Safe Mode.\n");
3693 			*status = ICE_ERR_NOT_SUPPORTED;
3694 		}
3695 		break;
3696 	case ICE_ERR_FW_DDP_MISMATCH:
3697 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
3698 		break;
3699 	case ICE_ERR_BUF_TOO_SHORT:
3700 	case ICE_ERR_CFG:
3701 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
3702 		break;
3703 	case ICE_ERR_NOT_SUPPORTED:
3704 		/* Package File version not supported */
3705 		if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
3706 		    (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3707 		     hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
3708 			dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
3709 		else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
3710 			 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3711 			  hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
3712 			dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
3713 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3714 		break;
3715 	case ICE_ERR_AQ_ERROR:
3716 		switch (hw->pkg_dwnld_status) {
3717 		case ICE_AQ_RC_ENOSEC:
3718 		case ICE_AQ_RC_EBADSIG:
3719 			dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
3720 			return;
3721 		case ICE_AQ_RC_ESVN:
3722 			dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
3723 			return;
3724 		case ICE_AQ_RC_EBADMAN:
3725 		case ICE_AQ_RC_EBADBUF:
3726 			dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
3727 			/* poll for reset to complete */
3728 			if (ice_check_reset(hw))
3729 				dev_err(dev, "Error resetting device. Please reload the driver\n");
3730 			return;
3731 		default:
3732 			break;
3733 		}
3734 		fallthrough;
3735 	default:
3736 		dev_err(dev, "An unknown error (%d) occurred when loading the DDP package.  Entering Safe Mode.\n",
3737 			*status);
3738 		break;
3739 	}
3740 }
3741 
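/* Version-check example (illustrative; assumes a supported version of
 * ICE_PKG_SUPP_VER_MAJ.ICE_PKG_SUPP_VER_MNR = 1.3): a package file
 * reporting 1.3.20.0 passes the check, 2.0.x.x hits the "higher than the
 * driver supports" branch and 1.2.x.x the "lower than" branch. Only
 * major.minor participate in the comparison; update and draft do not.
 */
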
3742 /**
3743  * ice_load_pkg - load/reload the DDP Package file
3744  * @firmware: firmware structure from a successful firmware request, or NULL for a reload
3745  * @pf: pointer to the PF instance
3746  *
3747  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
3748  * initialize HW tables.
3749  */
3750 static void
3751 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
3752 {
3753 	enum ice_status status = ICE_ERR_PARAM;
3754 	struct device *dev = ice_pf_to_dev(pf);
3755 	struct ice_hw *hw = &pf->hw;
3756 
3757 	/* Load DDP Package */
3758 	if (firmware && !hw->pkg_copy) {
3759 		status = ice_copy_and_init_pkg(hw, firmware->data,
3760 					       firmware->size);
3761 		ice_log_pkg_init(hw, &status);
3762 	} else if (!firmware && hw->pkg_copy) {
3763 		/* Reload package during rebuild after CORER/GLOBR reset */
3764 		status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
3765 		ice_log_pkg_init(hw, &status);
3766 	} else {
3767 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
3768 	}
3769 
3770 	if (status) {
3771 		/* Safe Mode */
3772 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3773 		return;
3774 	}
3775 
3776 	/* A successful package download is the precondition for advanced
3777 	 * features, hence set the ICE_FLAG_ADV_FEATURES flag
3778 	 */
3779 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3780 }
3781 
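/* Sketch: downstream code gates advanced features on the flag set above;
 * ice_is_safe_mode() is essentially the inverse of this test, e.g.
 *
 *	if (!test_bit(ICE_FLAG_ADV_FEATURES, pf->flags))
 *		return -EOPNOTSUPP;	(advanced feature unavailable)
 */
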
3782 /**
3783  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
3784  * @pf: pointer to the PF structure
3785  *
3786  * There is no error returned here because the driver should be able to handle
3787  * 128 Byte cache lines, so we only print a warning in case issues are seen,
3788  * specifically with Tx.
3789  */
3790 static void ice_verify_cacheline_size(struct ice_pf *pf)
3791 {
3792 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
3793 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
3794 			 ICE_CACHE_LINE_BYTES);
3795 }
3796 
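/* For reference (a hedged reading of the register, per the kernel-doc
 * above): the GLPCI_CNF2 cache line size field is zero when the device
 * uses the driver's assumed 64 Byte cache line (ICE_CACHE_LINE_BYTES)
 * and non-zero for 128 Byte lines, which is what the check above warns
 * about.
 */
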
3797 /**
3798  * ice_send_version - update firmware with driver version
3799  * @pf: PF struct
3800  *
3801  * Returns ICE_SUCCESS on success, else error code
3802  */
3803 static enum ice_status ice_send_version(struct ice_pf *pf)
3804 {
3805 	struct ice_driver_ver dv;
3806 
3807 	dv.major_ver = 0xff;
3808 	dv.minor_ver = 0xff;
3809 	dv.build_ver = 0xff;
3810 	dv.subbuild_ver = 0;
3811 	strscpy((char *)dv.driver_string, UTS_RELEASE,
3812 		sizeof(dv.driver_string));
3813 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
3814 }
3815 
3816 /**
3817  * ice_init_fdir - Initialize flow director VSI and configuration
3818  * @pf: pointer to the PF instance
3819  *
3820  * returns 0 on success, negative on error
3821  */
3822 static int ice_init_fdir(struct ice_pf *pf)
3823 {
3824 	struct device *dev = ice_pf_to_dev(pf);
3825 	struct ice_vsi *ctrl_vsi;
3826 	int err;
3827 
3828 	/* Side Band Flow Director needs to have a control VSI.
3829 	 * Allocate it and store it in the PF.
3830 	 */
3831 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
3832 	if (!ctrl_vsi) {
3833 		dev_dbg(dev, "could not create control VSI\n");
3834 		return -ENOMEM;
3835 	}
3836 
3837 	err = ice_vsi_open_ctrl(ctrl_vsi);
3838 	if (err) {
3839 		dev_dbg(dev, "could not open control VSI\n");
3840 		goto err_vsi_open;
3841 	}
3842 
3843 	mutex_init(&pf->hw.fdir_fltr_lock);
3844 
3845 	err = ice_fdir_create_dflt_rules(pf);
3846 	if (err)
3847 		goto err_fdir_rule;
3848 
3849 	return 0;
3850 
3851 err_fdir_rule:
3852 	ice_fdir_release_flows(&pf->hw);
3853 	ice_vsi_close(ctrl_vsi);
3854 err_vsi_open:
3855 	ice_vsi_release(ctrl_vsi);
3856 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
3857 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
3858 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3859 	}
3860 	return err;
3861 }
3862 
3863 /**
3864  * ice_get_opt_fw_name - return optional firmware file name or NULL
3865  * @pf: pointer to the PF instance
3866  */
3867 static char *ice_get_opt_fw_name(struct ice_pf *pf)
3868 {
3869 	/* The optional firmware name is the same as the default, plus a dash
3870 	 * followed by an EUI-64 identifier (PCIe Device Serial Number)
3871 	 */
3872 	struct pci_dev *pdev = pf->pdev;
3873 	char *opt_fw_filename;
3874 	u64 dsn;
3875 
3876 	/* Determine the name of the optional file using the DSN (two
3877 	 * dwords following the start of the DSN Capability).
3878 	 */
3879 	dsn = pci_get_dsn(pdev);
3880 	if (!dsn)
3881 		return NULL;
3882 
3883 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
3884 	if (!opt_fw_filename)
3885 		return NULL;
3886 
3887 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
3888 		 ICE_DDP_PKG_PATH, dsn);
3889 
3890 	return opt_fw_filename;
3891 }
3892 
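/* Example (illustrative DSN): a device whose DSN reads 0x0123456789abcdef
 * produces the name
 *
 *	intel/ice/ddp/ice-0123456789abcdef.pkg
 *
 * which firmware_request_nowarn() resolves against the firmware search
 * paths (e.g. /lib/firmware).
 */
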
3893 /**
3894  * ice_request_fw - request and load the DDP package file
3895  * @pf: pointer to the PF instance
3896  */
3897 static void ice_request_fw(struct ice_pf *pf)
3898 {
3899 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
3900 	const struct firmware *firmware = NULL;
3901 	struct device *dev = ice_pf_to_dev(pf);
3902 	int err = 0;
3903 
3904 	/* The optional device-specific DDP (if present) overrides the default
3905 	 * DDP package file. The kernel logs a debug message if the file doesn't
3906 	 * exist, and warning messages for other errors.
3907 	 */
3908 	if (opt_fw_filename) {
3909 		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
3910 		if (err) {
3911 			kfree(opt_fw_filename);
3912 			goto dflt_pkg_load;
3913 		}
3914 
3915 		/* request for firmware was successful. Download to device */
3916 		ice_load_pkg(firmware, pf);
3917 		kfree(opt_fw_filename);
3918 		release_firmware(firmware);
3919 		return;
3920 	}
3921 
3922 dflt_pkg_load:
3923 	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
3924 	if (err) {
3925 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
3926 		return;
3927 	}
3928 
3929 	/* request for firmware was successful. Download to device */
3930 	ice_load_pkg(firmware, pf);
3931 	release_firmware(firmware);
3932 }
3933 
3934 /**
3935  * ice_print_wake_reason - show the wake up cause in the log
3936  * @pf: pointer to the PF struct
3937  */
3938 static void ice_print_wake_reason(struct ice_pf *pf)
3939 {
3940 	u32 wus = pf->wakeup_reason;
3941 	const char *wake_str;
3942 
3943 	/* if no wake event, nothing to print */
3944 	if (!wus)
3945 		return;
3946 
3947 	if (wus & PFPM_WUS_LNKC_M)
3948 		wake_str = "Link\n";
3949 	else if (wus & PFPM_WUS_MAG_M)
3950 		wake_str = "Magic Packet\n";
3951 	else if (wus & PFPM_WUS_MNG_M)
3952 		wake_str = "Management\n";
3953 	else if (wus & PFPM_WUS_FW_RST_WK_M)
3954 		wake_str = "Firmware Reset\n";
3955 	else
3956 		wake_str = "Unknown\n";
3957 
3958 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
3959 }
3960 
3961 /**
3962  * ice_probe - Device initialization routine
3963  * @pdev: PCI device information struct
3964  * @ent: entry in ice_pci_tbl
3965  *
3966  * Returns 0 on success, negative on failure
3967  */
3968 static int
3969 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
3970 {
3971 	struct device *dev = &pdev->dev;
3972 	struct ice_pf *pf;
3973 	struct ice_hw *hw;
3974 	int i, err;
3975 
3976 	/* this driver uses devres, see
3977 	 * Documentation/driver-api/driver-model/devres.rst
3978 	 */
3979 	err = pcim_enable_device(pdev);
3980 	if (err)
3981 		return err;
3982 
3983 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
3984 	if (err) {
3985 		dev_err(dev, "BAR0 I/O map error %d\n", err);
3986 		return err;
3987 	}
3988 
3989 	pf = ice_allocate_pf(dev);
3990 	if (!pf)
3991 		return -ENOMEM;
3992 
3993 	/* set up for high or low DMA */
3994 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3995 	if (err)
3996 		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3997 	if (err) {
3998 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
3999 		return err;
4000 	}
4001 
4002 	pci_enable_pcie_error_reporting(pdev);
4003 	pci_set_master(pdev);
4004 
4005 	pf->pdev = pdev;
4006 	pci_set_drvdata(pdev, pf);
4007 	set_bit(__ICE_DOWN, pf->state);
4008 	/* Disable service task until DOWN bit is cleared */
4009 	set_bit(__ICE_SERVICE_DIS, pf->state);
4010 
4011 	hw = &pf->hw;
4012 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4013 	pci_save_state(pdev);
4014 
4015 	hw->back = pf;
4016 	hw->vendor_id = pdev->vendor;
4017 	hw->device_id = pdev->device;
4018 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4019 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
4020 	hw->subsystem_device_id = pdev->subsystem_device;
4021 	hw->bus.device = PCI_SLOT(pdev->devfn);
4022 	hw->bus.func = PCI_FUNC(pdev->devfn);
4023 	ice_set_ctrlq_len(hw);
4024 
4025 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4026 
4027 	err = ice_devlink_register(pf);
4028 	if (err) {
4029 		dev_err(dev, "ice_devlink_register failed: %d\n", err);
4030 		goto err_exit_unroll;
4031 	}
4032 
4033 #ifndef CONFIG_DYNAMIC_DEBUG
4034 	if (debug < -1)
4035 		hw->debug_mask = debug;
4036 #endif
4037 
4038 	err = ice_init_hw(hw);
4039 	if (err) {
4040 		dev_err(dev, "ice_init_hw failed: %d\n", err);
4041 		err = -EIO;
4042 		goto err_exit_unroll;
4043 	}
4044 
4045 	ice_request_fw(pf);
4046 
4047 	/* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
4048 	 * set in pf->flags, which will cause ice_is_safe_mode to return
4049 	 * true
4050 	 */
4051 	if (ice_is_safe_mode(pf)) {
4052 		dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
4053 		/* we already got function/device capabilities but these don't
4054 		 * reflect what the driver needs to do in safe mode. Instead of
4055 		 * adding conditional logic everywhere to ignore these
4056 		 * device/function capabilities, override them.
4057 		 */
4058 		ice_set_safe_mode_caps(hw);
4059 	}
4060 
4061 	err = ice_init_pf(pf);
4062 	if (err) {
4063 		dev_err(dev, "ice_init_pf failed: %d\n", err);
4064 		goto err_init_pf_unroll;
4065 	}
4066 
4067 	ice_devlink_init_regions(pf);
4068 
4069 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4070 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4071 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4072 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4073 	i = 0;
4074 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4075 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4076 			pf->hw.tnl.valid_count[TNL_VXLAN];
4077 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4078 			UDP_TUNNEL_TYPE_VXLAN;
4079 		i++;
4080 	}
4081 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4082 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4083 			pf->hw.tnl.valid_count[TNL_GENEVE];
4084 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4085 			UDP_TUNNEL_TYPE_GENEVE;
4086 		i++;
4087 	}
4088 
4089 	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4090 	if (!pf->num_alloc_vsi) {
4091 		err = -EIO;
4092 		goto err_init_pf_unroll;
4093 	}
4094 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4095 		dev_warn(&pf->pdev->dev,
4096 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4097 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4098 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4099 	}
4100 
4101 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4102 			       GFP_KERNEL);
4103 	if (!pf->vsi) {
4104 		err = -ENOMEM;
4105 		goto err_init_pf_unroll;
4106 	}
4107 
4108 	err = ice_init_interrupt_scheme(pf);
4109 	if (err) {
4110 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4111 		err = -EIO;
4112 		goto err_init_vsi_unroll;
4113 	}
4114 
4115 	/* In case of MSI-X we are going to set up the misc vector right here
4116 	 * to handle admin queue events etc. In case of legacy and MSI
4117 	 * the misc functionality and queue processing are combined in
4118 	 * the same vector and that gets set up at open.
4119 	 */
4120 	err = ice_req_irq_msix_misc(pf);
4121 	if (err) {
4122 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4123 		goto err_init_interrupt_unroll;
4124 	}
4125 
4126 	/* create switch struct for the switch element created by FW on boot */
4127 	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4128 	if (!pf->first_sw) {
4129 		err = -ENOMEM;
4130 		goto err_msix_misc_unroll;
4131 	}
4132 
4133 	if (hw->evb_veb)
4134 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4135 	else
4136 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4137 
4138 	pf->first_sw->pf = pf;
4139 
4140 	/* record the sw_id available for later use */
4141 	pf->first_sw->sw_id = hw->port_info->sw_id;
4142 
4143 	err = ice_setup_pf_sw(pf);
4144 	if (err) {
4145 		dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4146 		goto err_alloc_sw_unroll;
4147 	}
4148 
4149 	clear_bit(__ICE_SERVICE_DIS, pf->state);
4150 
4151 	/* tell the firmware we are up */
4152 	err = ice_send_version(pf);
4153 	if (err) {
4154 		dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4155 			UTS_RELEASE, err);
4156 		goto err_send_version_unroll;
4157 	}
4158 
4159 	/* since everything is good, start the service timer */
4160 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4161 
4162 	err = ice_init_link_events(pf->hw.port_info);
4163 	if (err) {
4164 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4165 		goto err_send_version_unroll;
4166 	}
4167 
4168 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4169 	if (err) {
4170 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4171 		goto err_send_version_unroll;
4172 	}
4173 
4174 	err = ice_update_link_info(pf->hw.port_info);
4175 	if (err) {
4176 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4177 		goto err_send_version_unroll;
4178 	}
4179 
4180 	ice_init_link_dflt_override(pf->hw.port_info);
4181 
4182 	/* if media available, initialize PHY settings */
4183 	if (pf->hw.port_info->phy.link_info.link_info &
4184 	    ICE_AQ_MEDIA_AVAILABLE) {
4185 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4186 		if (err) {
4187 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4188 			goto err_send_version_unroll;
4189 		}
4190 
4191 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4192 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4193 
4194 			if (vsi)
4195 				ice_configure_phy(vsi);
4196 		}
4197 	} else {
4198 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4199 	}
4200 
4201 	ice_verify_cacheline_size(pf);
4202 
4203 	/* Save wakeup reason register for later use */
4204 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4205 
4206 	/* check for a power management event */
4207 	ice_print_wake_reason(pf);
4208 
4209 	/* clear wake status, all bits */
4210 	wr32(hw, PFPM_WUS, U32_MAX);
4211 
4212 	/* Disable WoL at init, wait for user to enable */
4213 	device_set_wakeup_enable(dev, false);
4214 
4215 	if (ice_is_safe_mode(pf)) {
4216 		ice_set_safe_mode_vlan_cfg(pf);
4217 		goto probe_done;
4218 	}
4219 
4220 	/* initialize DDP driven features */
4221 
4222 	/* Note: Flow director init failure is non-fatal to load */
4223 	if (ice_init_fdir(pf))
4224 		dev_err(dev, "could not initialize flow director\n");
4225 
4226 	/* Note: DCB init failure is non-fatal to load */
4227 	if (ice_init_pf_dcb(pf, false)) {
4228 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4229 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4230 	} else {
4231 		ice_cfg_lldp_mib_change(&pf->hw, true);
4232 	}
4233 
4234 	/* print PCI link speed and width */
4235 	pcie_print_link_status(pf->pdev);
4236 
4237 probe_done:
4238 	/* ready to go, so clear down state bit */
4239 	clear_bit(__ICE_DOWN, pf->state);
4240 	return 0;
4241 
4242 err_send_version_unroll:
4243 	ice_vsi_release_all(pf);
4244 err_alloc_sw_unroll:
4245 	ice_devlink_destroy_port(pf);
4246 	set_bit(__ICE_SERVICE_DIS, pf->state);
4247 	set_bit(__ICE_DOWN, pf->state);
4248 	devm_kfree(dev, pf->first_sw);
4249 err_msix_misc_unroll:
4250 	ice_free_irq_msix_misc(pf);
4251 err_init_interrupt_unroll:
4252 	ice_clear_interrupt_scheme(pf);
4253 err_init_vsi_unroll:
4254 	devm_kfree(dev, pf->vsi);
4255 err_init_pf_unroll:
4256 	ice_deinit_pf(pf);
4257 	ice_devlink_destroy_regions(pf);
4258 	ice_deinit_hw(hw);
4259 err_exit_unroll:
4260 	ice_devlink_unregister(pf);
4261 	pci_disable_pcie_error_reporting(pdev);
4262 	pci_disable_device(pdev);
4263 	return err;
4264 }
4265 
4266 /**
4267  * ice_set_wake - enable or disable Wake on LAN
4268  * @pf: pointer to the PF struct
4269  *
4270  * Simple helper for WoL control
4271  */
4272 static void ice_set_wake(struct ice_pf *pf)
4273 {
4274 	struct ice_hw *hw = &pf->hw;
4275 	bool wol = pf->wol_ena;
4276 
4277 	/* clear wake state, otherwise new wake events won't fire */
4278 	wr32(hw, PFPM_WUS, U32_MAX);
4279 
4280 	/* enable / disable APM wake up, no RMW needed */
4281 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4282 
4283 	/* set magic packet filter enabled */
4284 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4285 }
4286 
4287 /**
4288  * ice_setup_mc_magic_wake - set up device to wake on multicast magic packet
4289  * @pf: pointer to the PF struct
4290  *
4291  * Issue firmware command to enable multicast magic wake, making
4292  * sure that any locally administered address (LAA) is used for
4293  * wake, and that PF reset doesn't undo the LAA.
4294  */
4295 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4296 {
4297 	struct device *dev = ice_pf_to_dev(pf);
4298 	struct ice_hw *hw = &pf->hw;
4299 	enum ice_status status;
4300 	u8 mac_addr[ETH_ALEN];
4301 	struct ice_vsi *vsi;
4302 	u8 flags;
4303 
4304 	if (!pf->wol_ena)
4305 		return;
4306 
4307 	vsi = ice_get_main_vsi(pf);
4308 	if (!vsi)
4309 		return;
4310 
4311 	/* Get current MAC address in case it's an LAA */
4312 	if (vsi->netdev)
4313 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4314 	else
4315 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4316 
4317 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4318 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4319 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4320 
4321 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4322 	if (status)
4323 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
4324 			ice_stat_str(status),
4325 			ice_aq_str(hw->adminq.sq_last_status));
4326 }
4327 
4328 /**
4329  * ice_remove - Device removal routine
4330  * @pdev: PCI device information struct
4331  */
4332 static void ice_remove(struct pci_dev *pdev)
4333 {
4334 	struct ice_pf *pf = pci_get_drvdata(pdev);
4335 	int i;
4336 
4337 	if (!pf)
4338 		return;
4339 
4340 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4341 		if (!ice_is_reset_in_progress(pf->state))
4342 			break;
4343 		msleep(100);
4344 	}
4345 
4346 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4347 		set_bit(__ICE_VF_RESETS_DISABLED, pf->state);
4348 		ice_free_vfs(pf);
4349 	}
4350 
4351 	set_bit(__ICE_DOWN, pf->state);
4352 	ice_service_task_stop(pf);
4353 
4354 	ice_aq_cancel_waiting_tasks(pf);
4355 
4356 	mutex_destroy(&pf->hw.fdir_fltr_lock);
4357 	if (!ice_is_safe_mode(pf))
4358 		ice_remove_arfs(pf);
4359 	ice_setup_mc_magic_wake(pf);
4360 	ice_devlink_destroy_port(pf);
4361 	ice_vsi_release_all(pf);
4362 	ice_set_wake(pf);
4363 	ice_free_irq_msix_misc(pf);
4364 	ice_for_each_vsi(pf, i) {
4365 		if (!pf->vsi[i])
4366 			continue;
4367 		ice_vsi_free_q_vectors(pf->vsi[i]);
4368 	}
4369 	ice_deinit_pf(pf);
4370 	ice_devlink_destroy_regions(pf);
4371 	ice_deinit_hw(&pf->hw);
4372 	ice_devlink_unregister(pf);
4373 
4374 	/* Issue a PFR as part of the prescribed driver unload flow.  Do not
4375 	 * do it via ice_schedule_reset() since there is no need to rebuild
4376 	 * and the service task is already stopped.
4377 	 */
4378 	ice_reset(&pf->hw, ICE_RESET_PFR);
4379 	pci_wait_for_pending_transaction(pdev);
4380 	ice_clear_interrupt_scheme(pf);
4381 	pci_disable_pcie_error_reporting(pdev);
4382 	pci_disable_device(pdev);
4383 }
4384 
4385 /**
4386  * ice_shutdown - PCI callback for shutting down device
4387  * @pdev: PCI device information struct
4388  */
4389 static void ice_shutdown(struct pci_dev *pdev)
4390 {
4391 	struct ice_pf *pf = pci_get_drvdata(pdev);
4392 
4393 	ice_remove(pdev);
4394 
4395 	if (system_state == SYSTEM_POWER_OFF) {
4396 		pci_wake_from_d3(pdev, pf->wol_ena);
4397 		pci_set_power_state(pdev, PCI_D3hot);
4398 	}
4399 }
4400 
4401 #ifdef CONFIG_PM
4402 /**
4403  * ice_prepare_for_shutdown - prep for PCI shutdown
4404  * @pf: board private structure
4405  *
4406  * Inform or close all dependent features in prep for PCI device shutdown
4407  */
4408 static void ice_prepare_for_shutdown(struct ice_pf *pf)
4409 {
4410 	struct ice_hw *hw = &pf->hw;
4411 	u32 v;
4412 
4413 	/* Notify VFs of impending reset */
4414 	if (ice_check_sq_alive(hw, &hw->mailboxq))
4415 		ice_vc_notify_reset(pf);
4416 
4417 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4418 
4419 	/* disable the VSIs and their queues that are not already DOWN */
4420 	ice_pf_dis_all_vsi(pf, false);
4421 
4422 	ice_for_each_vsi(pf, v)
4423 		if (pf->vsi[v])
4424 			pf->vsi[v]->vsi_num = 0;
4425 
4426 	ice_shutdown_all_ctrlq(hw);
4427 }
4428 
4429 /**
4430  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4431  * @pf: board private structure to reinitialize
4432  *
4433  * This routine reinitializes the interrupt scheme that was cleared during
4434  * the power management suspend callback.
4435  *
4436  * This should be called during the resume routine to re-allocate the q_vectors
4437  * and reacquire interrupts.
4438  */
4439 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4440 {
4441 	struct device *dev = ice_pf_to_dev(pf);
4442 	int ret, v;
4443 
4444 	/* Since we cleared the interrupt scheme during suspend, we need to
4445 	 * set it back up during resume
4446 	 */
4447 
4448 	ret = ice_init_interrupt_scheme(pf);
4449 	if (ret) {
4450 		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4451 		return ret;
4452 	}
4453 
4454 	/* Remap vectors and rings after interrupts are successfully re-initialized */
4455 	ice_for_each_vsi(pf, v) {
4456 		if (!pf->vsi[v])
4457 			continue;
4458 
4459 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4460 		if (ret)
4461 			goto err_reinit;
4462 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4463 	}
4464 
4465 	ret = ice_req_irq_msix_misc(pf);
4466 	if (ret) {
4467 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4468 			ret);
4469 		goto err_reinit;
4470 	}
4471 
4472 	return 0;
4473 
4474 err_reinit:
4475 	while (v--)
4476 		if (pf->vsi[v])
4477 			ice_vsi_free_q_vectors(pf->vsi[v]);
4478 
4479 	return ret;
4480 }
4481 
4482 /**
4483  * ice_suspend - PM suspend callback
4484  * @dev: generic device information structure
4485  *
4486  * Power Management callback to quiesce the device and prepare
4487  * for D3 transition.
4488  */
4489 static int __maybe_unused ice_suspend(struct device *dev)
4490 {
4491 	struct pci_dev *pdev = to_pci_dev(dev);
4492 	struct ice_pf *pf;
4493 	int disabled, v;
4494 
4495 	pf = pci_get_drvdata(pdev);
4496 
4497 	if (!ice_pf_state_is_nominal(pf)) {
4498 		dev_err(dev, "Device is not ready, no need to suspend it\n");
4499 		return -EBUSY;
4500 	}
4501 
4502 	/* Stop watchdog tasks until resume completion.
4503 	 * Even though it is most likely that the service task is
4504 	 * disabled if the device is suspended or down, the service task's
4505 	 * state is controlled by a different state bit, and we should
4506 	 * store and honor whatever state that bit is in at this point.
4507 	 */
4508 	disabled = ice_service_task_stop(pf);
4509 
4510 	/* Already suspended? Then there is nothing to do */
4511 	if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) {
4512 		if (!disabled)
4513 			ice_service_task_restart(pf);
4514 		return 0;
4515 	}
4516 
4517 	if (test_bit(__ICE_DOWN, pf->state) ||
4518 	    ice_is_reset_in_progress(pf->state)) {
4519 		dev_err(dev, "can't suspend device in reset or already down\n");
4520 		if (!disabled)
4521 			ice_service_task_restart(pf);
4522 		return 0;
4523 	}
4524 
4525 	ice_setup_mc_magic_wake(pf);
4526 
4527 	ice_prepare_for_shutdown(pf);
4528 
4529 	ice_set_wake(pf);
4530 
4531 	/* Free vectors, clear the interrupt scheme and release IRQs
4532 	 * for proper hibernation, especially with large number of CPUs.
4533 	 * Otherwise hibernation might fail when mapping all the vectors back
4534 	 * to CPU0.
4535 	 */
4536 	ice_free_irq_msix_misc(pf);
4537 	ice_for_each_vsi(pf, v) {
4538 		if (!pf->vsi[v])
4539 			continue;
4540 		ice_vsi_free_q_vectors(pf->vsi[v]);
4541 	}
4542 	ice_clear_interrupt_scheme(pf);
4543 
4544 	pci_save_state(pdev);
4545 	pci_wake_from_d3(pdev, pf->wol_ena);
4546 	pci_set_power_state(pdev, PCI_D3hot);
4547 	return 0;
4548 }
4549 
4550 /**
4551  * ice_resume - PM callback for waking up from D3
4552  * @dev: generic device information structure
4553  */
4554 static int __maybe_unused ice_resume(struct device *dev)
4555 {
4556 	struct pci_dev *pdev = to_pci_dev(dev);
4557 	enum ice_reset_req reset_type;
4558 	struct ice_pf *pf;
4559 	struct ice_hw *hw;
4560 	int ret;
4561 
4562 	pci_set_power_state(pdev, PCI_D0);
4563 	pci_restore_state(pdev);
4564 	pci_save_state(pdev);
4565 
4566 	if (!pci_device_is_present(pdev))
4567 		return -ENODEV;
4568 
4569 	ret = pci_enable_device_mem(pdev);
4570 	if (ret) {
4571 		dev_err(dev, "Cannot enable device after suspend\n");
4572 		return ret;
4573 	}
4574 
4575 	pf = pci_get_drvdata(pdev);
4576 	hw = &pf->hw;
4577 
4578 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4579 	ice_print_wake_reason(pf);
4580 
4581 	/* We cleared the interrupt scheme when we suspended, so we need to
4582 	 * restore it now to resume device functionality.
4583 	 */
4584 	ret = ice_reinit_interrupt_scheme(pf);
4585 	if (ret)
4586 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
4587 
4588 	clear_bit(__ICE_DOWN, pf->state);
4589 	/* Now perform PF reset and rebuild */
4590 	reset_type = ICE_RESET_PFR;
4591 	/* re-enable service task for reset, but allow reset to schedule it */
4592 	clear_bit(__ICE_SERVICE_DIS, pf->state);
4593 
4594 	if (ice_schedule_reset(pf, reset_type))
4595 		dev_err(dev, "Reset during resume failed.\n");
4596 
4597 	clear_bit(__ICE_SUSPENDED, pf->state);
4598 	ice_service_task_restart(pf);
4599 
4600 	/* Restart the service task */
4601 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4602 
4603 	return 0;
4604 }
4605 #endif /* CONFIG_PM */
4606 
4607 /**
4608  * ice_pci_err_detected - warning that PCI error has been detected
4609  * @pdev: PCI device information struct
4610  * @err: the type of PCI error
4611  *
4612  * Called to warn that something happened on the PCI bus and the error handling
4613  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
4614  */
4615 static pci_ers_result_t
4616 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
4617 {
4618 	struct ice_pf *pf = pci_get_drvdata(pdev);
4619 
4620 	if (!pf) {
4621 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
4622 			__func__, err);
4623 		return PCI_ERS_RESULT_DISCONNECT;
4624 	}
4625 
4626 	if (!test_bit(__ICE_SUSPENDED, pf->state)) {
4627 		ice_service_task_stop(pf);
4628 
4629 		if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
4630 			set_bit(__ICE_PFR_REQ, pf->state);
4631 			ice_prepare_for_reset(pf);
4632 		}
4633 	}
4634 
4635 	return PCI_ERS_RESULT_NEED_RESET;
4636 }
4637 
4638 /**
4639  * ice_pci_err_slot_reset - a PCI slot reset has just happened
4640  * @pdev: PCI device information struct
4641  *
4642  * Called to determine if the driver can recover from the PCI slot reset by
4643  * using a register read to determine if the device is recoverable.
4644  */
4645 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
4646 {
4647 	struct ice_pf *pf = pci_get_drvdata(pdev);
4648 	pci_ers_result_t result;
4649 	int err;
4650 	u32 reg;
4651 
4652 	err = pci_enable_device_mem(pdev);
4653 	if (err) {
4654 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
4655 			err);
4656 		result = PCI_ERS_RESULT_DISCONNECT;
4657 	} else {
4658 		pci_set_master(pdev);
4659 		pci_restore_state(pdev);
4660 		pci_save_state(pdev);
4661 		pci_wake_from_d3(pdev, false);
4662 
4663 		/* Check for life */
4664 		reg = rd32(&pf->hw, GLGEN_RTRIG);
4665 		if (!reg)
4666 			result = PCI_ERS_RESULT_RECOVERED;
4667 		else
4668 			result = PCI_ERS_RESULT_DISCONNECT;
4669 	}
4670 
4671 	err = pci_aer_clear_nonfatal_status(pdev);
4672 	if (err)
4673 		dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
4674 			err);
4675 	/* non-fatal, continue */
4676 
4677 	return result;
4678 }
4679 
4680 /**
4681  * ice_pci_err_resume - restart operations after PCI error recovery
4682  * @pdev: PCI device information struct
4683  *
4684  * Called to allow the driver to bring things back up after PCI error and/or
4685  * reset recovery have finished
4686  */
4687 static void ice_pci_err_resume(struct pci_dev *pdev)
4688 {
4689 	struct ice_pf *pf = pci_get_drvdata(pdev);
4690 
4691 	if (!pf) {
4692 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
4693 			__func__);
4694 		return;
4695 	}
4696 
4697 	if (test_bit(__ICE_SUSPENDED, pf->state)) {
4698 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
4699 			__func__);
4700 		return;
4701 	}
4702 
4703 	ice_restore_all_vfs_msi_state(pdev);
4704 
4705 	ice_do_reset(pf, ICE_RESET_PFR);
4706 	ice_service_task_restart(pf);
4707 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4708 }
4709 
4710 /**
4711  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
4712  * @pdev: PCI device information struct
4713  */
4714 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
4715 {
4716 	struct ice_pf *pf = pci_get_drvdata(pdev);
4717 
4718 	if (!test_bit(__ICE_SUSPENDED, pf->state)) {
4719 		ice_service_task_stop(pf);
4720 
4721 		if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
4722 			set_bit(__ICE_PFR_REQ, pf->state);
4723 			ice_prepare_for_reset(pf);
4724 		}
4725 	}
4726 }
4727 
4728 /**
4729  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
4730  * @pdev: PCI device information struct
4731  */
4732 static void ice_pci_err_reset_done(struct pci_dev *pdev)
4733 {
4734 	ice_pci_err_resume(pdev);
4735 }
4736 
4737 /* ice_pci_tbl - PCI Device ID Table
4738  *
4739  * Wildcard entries (PCI_ANY_ID) should come last
4740  * Last entry must be all 0s
4741  *
4742  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
4743  *   Class, Class Mask, private data (not used) }
4744  */
4745 static const struct pci_device_id ice_pci_tbl[] = {
4746 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
4747 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
4748 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
4749 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
4750 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
4751 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
4752 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
4753 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
4754 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
4755 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
4756 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
4757 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
4758 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
4759 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
4760 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
4761 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
4762 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
4763 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
4764 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
4765 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
4766 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
4767 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
4768 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
4769 	/* required last entry */
4770 	{ 0, }
4771 };
4772 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
4773 
4774 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
4775 
4776 static const struct pci_error_handlers ice_pci_err_handler = {
4777 	.error_detected = ice_pci_err_detected,
4778 	.slot_reset = ice_pci_err_slot_reset,
4779 	.reset_prepare = ice_pci_err_reset_prepare,
4780 	.reset_done = ice_pci_err_reset_done,
4781 	.resume = ice_pci_err_resume
4782 };
4783 
4784 static struct pci_driver ice_driver = {
4785 	.name = KBUILD_MODNAME,
4786 	.id_table = ice_pci_tbl,
4787 	.probe = ice_probe,
4788 	.remove = ice_remove,
4789 #ifdef CONFIG_PM
4790 	.driver.pm = &ice_pm_ops,
4791 #endif /* CONFIG_PM */
4792 	.shutdown = ice_shutdown,
4793 	.sriov_configure = ice_sriov_configure,
4794 	.err_handler = &ice_pci_err_handler
4795 };
4796 
4797 /**
4798  * ice_module_init - Driver registration routine
4799  *
4800  * ice_module_init is the first routine called when the driver is
4801  * loaded. All it does is register with the PCI subsystem.
4802  */
4803 static int __init ice_module_init(void)
4804 {
4805 	int status;
4806 
4807 	pr_info("%s\n", ice_driver_string);
4808 	pr_info("%s\n", ice_copyright);
4809 
4810 	ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
4811 	if (!ice_wq) {
4812 		pr_err("Failed to create workqueue\n");
4813 		return -ENOMEM;
4814 	}
4815 
4816 	status = pci_register_driver(&ice_driver);
4817 	if (status) {
4818 		pr_err("failed to register PCI driver, err %d\n", status);
4819 		destroy_workqueue(ice_wq);
4820 	}
4821 
4822 	return status;
4823 }
4824 module_init(ice_module_init);
4825 
4826 /**
4827  * ice_module_exit - Driver exit cleanup routine
4828  *
4829  * ice_module_exit is called just before the driver is removed
4830  * from memory.
4831  */
4832 static void __exit ice_module_exit(void)
4833 {
4834 	pci_unregister_driver(&ice_driver);
4835 	destroy_workqueue(ice_wq);
4836 	pr_info("module unloaded\n");
4837 }
4838 module_exit(ice_module_exit);
4839 
4840 /**
4841  * ice_set_mac_address - NDO callback to set MAC address
4842  * @netdev: network interface device structure
4843  * @pi: pointer to an address structure
4844  *
4845  * Returns 0 on success, negative on failure
4846  */
4847 static int ice_set_mac_address(struct net_device *netdev, void *pi)
4848 {
4849 	struct ice_netdev_priv *np = netdev_priv(netdev);
4850 	struct ice_vsi *vsi = np->vsi;
4851 	struct ice_pf *pf = vsi->back;
4852 	struct ice_hw *hw = &pf->hw;
4853 	struct sockaddr *addr = pi;
4854 	enum ice_status status;
4855 	u8 flags = 0;
4856 	int err = 0;
4857 	u8 *mac;
4858 
4859 	mac = (u8 *)addr->sa_data;
4860 
4861 	if (!is_valid_ether_addr(mac))
4862 		return -EADDRNOTAVAIL;
4863 
4864 	if (ether_addr_equal(netdev->dev_addr, mac)) {
4865 		netdev_warn(netdev, "already using mac %pM\n", mac);
4866 		return 0;
4867 	}
4868 
4869 	if (test_bit(__ICE_DOWN, pf->state) ||
4870 	    ice_is_reset_in_progress(pf->state)) {
4871 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
4872 			   mac);
4873 		return -EBUSY;
4874 	}
4875 
4876 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
4877 	status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
4878 	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
4879 		err = -EADDRNOTAVAIL;
4880 		goto err_update_filters;
4881 	}
4882 
4883 	/* Add filter for new MAC. If filter exists, just return success */
4884 	status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
4885 	if (status == ICE_ERR_ALREADY_EXISTS) {
4886 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
4887 		return 0;
4888 	}
4889 
4890 	/* error if the new filter addition failed */
4891 	if (status)
4892 		err = -EADDRNOTAVAIL;
4893 
4894 err_update_filters:
4895 	if (err) {
4896 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
4897 			   mac);
4898 		return err;
4899 	}
4900 
4901 	/* change the netdev's MAC address */
4902 	memcpy(netdev->dev_addr, mac, netdev->addr_len);
4903 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
4904 		   netdev->dev_addr);
4905 
4906 	/* write new MAC address to the firmware */
4907 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
4908 	status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
4909 	if (status) {
4910 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
4911 			   mac, ice_stat_str(status));
4912 	}
4913 	return 0;
4914 }
4915 
4916 /**
4917  * ice_set_rx_mode - NDO callback to set the netdev filters
4918  * @netdev: network interface device structure
4919  */
4920 static void ice_set_rx_mode(struct net_device *netdev)
4921 {
4922 	struct ice_netdev_priv *np = netdev_priv(netdev);
4923 	struct ice_vsi *vsi = np->vsi;
4924 
4925 	if (!vsi)
4926 		return;
4927 
4928 	/* Set the flags to synchronize filters
4929 	 * ndo_set_rx_mode may be triggered even without a change in netdev
4930 	 * flags
4931 	 */
4932 	set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
4933 	set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
4934 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
4935 
4936 	/* schedule our worker thread which will take care of
4937 	 * applying the new filter changes
4938 	 */
4939 	ice_service_task_schedule(vsi->back);
4940 }
4941 
4942 /**
4943  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
4944  * @netdev: network interface device structure
4945  * @queue_index: Queue ID
4946  * @maxrate: maximum bandwidth in Mbps
4947  */
4948 static int
4949 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
4950 {
4951 	struct ice_netdev_priv *np = netdev_priv(netdev);
4952 	struct ice_vsi *vsi = np->vsi;
4953 	enum ice_status status;
4954 	u16 q_handle;
4955 	u8 tc;
4956 
4957 	/* Validate maxrate requested is within permitted range */
4958 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
4959 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
4960 			   maxrate, queue_index);
4961 		return -EINVAL;
4962 	}
4963 
4964 	q_handle = vsi->tx_rings[queue_index]->q_handle;
4965 	tc = ice_dcb_get_tc(vsi, queue_index);
4966 
4967 	/* Set BW back to default when the user sets maxrate to 0 */
4968 	if (!maxrate)
4969 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
4970 					       q_handle, ICE_MAX_BW);
4971 	else
4972 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
4973 					  q_handle, ICE_MAX_BW, maxrate * 1000);
4974 	if (status) {
4975 		netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
4976 			   ice_stat_str(status));
4977 		return -EIO;
4978 	}
4979 
4980 	return 0;
4981 }
4982 
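/* Worked example (assuming ICE_SCHED_MAX_BW is expressed in Kbps): the
 * ndo receives maxrate in Mbps, so a request of maxrate = 5000 (5 Gbps)
 * is programmed as 5000 * 1000 = 5,000,000 Kbps, while anything above
 * ICE_SCHED_MAX_BW / 1000 Mbps fails the range check above.
 */
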
4983 /**
4984  * ice_fdb_add - add an entry to the hardware database
4985  * @ndm: the input from the stack
4986  * @tb: pointer to array of nladdr (unused)
4987  * @dev: the net device pointer
4988  * @addr: the MAC address entry being added
4989  * @vid: VLAN ID
4990  * @flags: instructions from stack about fdb operation
4991  * @extack: netlink extended ack
4992  */
4993 static int
4994 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
4995 	    struct net_device *dev, const unsigned char *addr, u16 vid,
4996 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
4997 {
4998 	int err;
4999 
5000 	if (vid) {
5001 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5002 		return -EINVAL;
5003 	}
5004 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5005 		netdev_err(dev, "FDB only supports static addresses\n");
5006 		return -EINVAL;
5007 	}
5008 
5009 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5010 		err = dev_uc_add_excl(dev, addr);
5011 	else if (is_multicast_ether_addr(addr))
5012 		err = dev_mc_add_excl(dev, addr);
5013 	else
5014 		err = -EINVAL;
5015 
5016 	/* Only return duplicate errors if NLM_F_EXCL is set */
5017 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5018 		err = 0;
5019 
5020 	return err;
5021 }
5022 
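/* Usage sketch (hypothetical user-space invocation): this handler is
 * reached through ndo_fdb_add, e.g. via
 *
 *	bridge fdb add 01:00:5e:00:00:42 dev <netdev> self
 *
 * where the kernel passes an ndm_state containing NUD_PERMANENT for
 * static entries, satisfying the check above.
 */
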
5023 /**
5024  * ice_fdb_del - delete an entry from the hardware database
5025  * @ndm: the input from the stack
5026  * @tb: pointer to array of nladdr (unused)
5027  * @dev: the net device pointer
5028  * @addr: the MAC address entry being removed
5029  * @vid: VLAN ID
5030  */
5031 static int
5032 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5033 	    struct net_device *dev, const unsigned char *addr,
5034 	    __always_unused u16 vid)
5035 {
5036 	int err;
5037 
5038 	if (ndm->ndm_state & NUD_PERMANENT) {
5039 		netdev_err(dev, "FDB only supports static addresses\n");
5040 		return -EINVAL;
5041 	}
5042 
5043 	if (is_unicast_ether_addr(addr))
5044 		err = dev_uc_del(dev, addr);
5045 	else if (is_multicast_ether_addr(addr))
5046 		err = dev_mc_del(dev, addr);
5047 	else
5048 		err = -EINVAL;
5049 
5050 	return err;
5051 }
5052 
5053 /**
5054  * ice_set_features - set the netdev feature flags
5055  * @netdev: ptr to the netdev being adjusted
5056  * @features: the feature set that the stack is suggesting
5057  */
5058 static int
5059 ice_set_features(struct net_device *netdev, netdev_features_t features)
5060 {
5061 	struct ice_netdev_priv *np = netdev_priv(netdev);
5062 	struct ice_vsi *vsi = np->vsi;
5063 	struct ice_pf *pf = vsi->back;
5064 	int ret = 0;
5065 
5066 	/* Don't set any netdev advanced features with device in Safe Mode */
5067 	if (ice_is_safe_mode(vsi->back)) {
5068 		dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5069 		return ret;
5070 	}
5071 
5072 	/* Do not change setting during reset */
5073 	if (ice_is_reset_in_progress(pf->state)) {
5074 		dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features is temporarily unavailable.\n");
5075 		return -EBUSY;
5076 	}
5077 
5078 	/* Multiple features can be changed in one call so keep features in
5079 	 * separate if/else statements to guarantee each feature is checked
5080 	 */
5081 	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5082 		ret = ice_vsi_manage_rss_lut(vsi, true);
5083 	else if (!(features & NETIF_F_RXHASH) &&
5084 		 netdev->features & NETIF_F_RXHASH)
5085 		ret = ice_vsi_manage_rss_lut(vsi, false);
5086 
5087 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5088 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5089 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5090 	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5091 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5092 		ret = ice_vsi_manage_vlan_stripping(vsi, false);
5093 
5094 	if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5095 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5096 		ret = ice_vsi_manage_vlan_insertion(vsi);
5097 	else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5098 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5099 		ret = ice_vsi_manage_vlan_insertion(vsi);
5100 
5101 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5102 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5103 		ret = ice_cfg_vlan_pruning(vsi, true, false);
5104 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5105 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5106 		ret = ice_cfg_vlan_pruning(vsi, false, false);
5107 
5108 	if ((features & NETIF_F_NTUPLE) &&
5109 	    !(netdev->features & NETIF_F_NTUPLE)) {
5110 		ice_vsi_manage_fdir(vsi, true);
5111 		ice_init_arfs(vsi);
5112 	} else if (!(features & NETIF_F_NTUPLE) &&
5113 		 (netdev->features & NETIF_F_NTUPLE)) {
5114 		ice_vsi_manage_fdir(vsi, false);
5115 		ice_clear_arfs(vsi);
5116 	}
5117 
5118 	return ret;
5119 }
5120 
5121 /**
5122  * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
5123  * @vsi: VSI to setup VLAN properties for
5124  */
5125 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5126 {
5127 	int ret = 0;
5128 
5129 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
5130 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5131 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
5132 		ret = ice_vsi_manage_vlan_insertion(vsi);
5133 
5134 	return ret;
5135 }
5136 
5137 /**
5138  * ice_vsi_cfg - Setup the VSI
5139  * @vsi: the VSI being configured
5140  *
5141  * Return 0 on success and negative value on error
5142  */
5143 int ice_vsi_cfg(struct ice_vsi *vsi)
5144 {
5145 	int err;
5146 
5147 	if (vsi->netdev) {
5148 		ice_set_rx_mode(vsi->netdev);
5149 
5150 		err = ice_vsi_vlan_setup(vsi);
5151 
5152 		if (err)
5153 			return err;
5154 	}
5155 	ice_vsi_cfg_dcb_rings(vsi);
5156 
5157 	err = ice_vsi_cfg_lan_txqs(vsi);
5158 	if (!err && ice_is_xdp_ena_vsi(vsi))
5159 		err = ice_vsi_cfg_xdp_txqs(vsi);
5160 	if (!err)
5161 		err = ice_vsi_cfg_rxqs(vsi);
5162 
5163 	return err;
5164 }
5165 
5166 /**
5167  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5168  * @vsi: the VSI being configured
5169  */
5170 static void ice_napi_enable_all(struct ice_vsi *vsi)
5171 {
5172 	int q_idx;
5173 
5174 	if (!vsi->netdev)
5175 		return;
5176 
5177 	ice_for_each_q_vector(vsi, q_idx) {
5178 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5179 
5180 		if (q_vector->rx.ring || q_vector->tx.ring)
5181 			napi_enable(&q_vector->napi);
5182 	}
5183 }
5184 
5185 /**
5186  * ice_up_complete - Finish the last steps of bringing up a connection
5187  * @vsi: The VSI being configured
5188  *
5189  * Return 0 on success and negative value on error
5190  */
5191 static int ice_up_complete(struct ice_vsi *vsi)
5192 {
5193 	struct ice_pf *pf = vsi->back;
5194 	int err;
5195 
5196 	ice_vsi_cfg_msix(vsi);
5197 
5198 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
5199 	 * Tx queue group list was configured and the context bits were
5200 	 * programmed using ice_vsi_cfg_txqs
5201 	 */
5202 	err = ice_vsi_start_all_rx_rings(vsi);
5203 	if (err)
5204 		return err;
5205 
5206 	clear_bit(__ICE_DOWN, vsi->state);
5207 	ice_napi_enable_all(vsi);
5208 	ice_vsi_ena_irq(vsi);
5209 
5210 	if (vsi->port_info &&
5211 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
5212 	    vsi->netdev) {
5213 		ice_print_link_msg(vsi, true);
5214 		netif_tx_start_all_queues(vsi->netdev);
5215 		netif_carrier_on(vsi->netdev);
5216 	}
5217 
5218 	ice_service_task_schedule(pf);
5219 
5220 	return 0;
5221 }
5222 
5223 /**
5224  * ice_up - Bring the connection back up after being down
5225  * @vsi: VSI being configured
5226  */
5227 int ice_up(struct ice_vsi *vsi)
5228 {
5229 	int err;
5230 
5231 	err = ice_vsi_cfg(vsi);
5232 	if (!err)
5233 		err = ice_up_complete(vsi);
5234 
5235 	return err;
5236 }
5237 
5238 /**
5239  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
5240  * @ring: Tx or Rx ring to read stats from
5241  * @pkts: packets stats counter
5242  * @bytes: bytes stats counter
5243  *
5244  * This function fetches stats from the ring considering the atomic operations
5245  * that need to be performed to read u64 values on 32-bit machines.
5246  */
5247 static void
5248 ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
5249 {
5250 	unsigned int start;

5251 	*pkts = 0;
5252 	*bytes = 0;
5253 
5254 	if (!ring)
5255 		return;
5256 	do {
5257 		start = u64_stats_fetch_begin_irq(&ring->syncp);
5258 		*pkts = ring->stats.pkts;
5259 		*bytes = ring->stats.bytes;
5260 	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
5261 }
5262 
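/* Sketch of the writer side this fetch pairs with (illustrative; the
 * real updates live in the Tx/Rx clean paths):
 *
 *	u64_stats_update_begin(&ring->syncp);
 *	ring->stats.pkts += pkts;
 *	ring->stats.bytes += bytes;
 *	u64_stats_update_end(&ring->syncp);
 *
 * On 64-bit machines the begin/retry pair compiles away; on 32-bit it is
 * a seqcount that makes the paired 64-bit reads appear atomic.
 */
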
5263 /**
5264  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
5265  * @vsi: the VSI to be updated
5266  * @rings: rings to work on
5267  * @count: number of rings
5268  */
5269 static void
5270 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
5271 			     u16 count)
5272 {
5273 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5274 	u16 i;
5275 
5276 	for (i = 0; i < count; i++) {
5277 		struct ice_ring *ring;
5278 		u64 pkts, bytes;
5279 
5280 		ring = READ_ONCE(rings[i]);
		if (!ring)
			continue;
5281 		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5282 		vsi_stats->tx_packets += pkts;
5283 		vsi_stats->tx_bytes += bytes;
5284 		vsi->tx_restart += ring->tx_stats.restart_q;
5285 		vsi->tx_busy += ring->tx_stats.tx_busy;
5286 		vsi->tx_linearize += ring->tx_stats.tx_linearize;
5287 	}
5288 }
5289 
5290 /**
5291  * ice_update_vsi_ring_stats - Update VSI stats counters
5292  * @vsi: the VSI to be updated
5293  */
5294 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
5295 {
5296 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5297 	struct ice_ring *ring;
5298 	u64 pkts, bytes;
5299 	int i;
5300 
5301 	/* reset netdev stats */
5302 	vsi_stats->tx_packets = 0;
5303 	vsi_stats->tx_bytes = 0;
5304 	vsi_stats->rx_packets = 0;
5305 	vsi_stats->rx_bytes = 0;
5306 
5307 	/* reset non-netdev (extended) stats */
5308 	vsi->tx_restart = 0;
5309 	vsi->tx_busy = 0;
5310 	vsi->tx_linearize = 0;
5311 	vsi->rx_buf_failed = 0;
5312 	vsi->rx_page_failed = 0;
5313 	vsi->rx_gro_dropped = 0;
5314 
5315 	rcu_read_lock();
5316 
5317 	/* update Tx rings counters */
5318 	ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
5319 
5320 	/* update Rx rings counters */
5321 	ice_for_each_rxq(vsi, i) {
5322 		ring = READ_ONCE(vsi->rx_rings[i]);
		if (!ring)
			continue;
5323 		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5324 		vsi_stats->rx_packets += pkts;
5325 		vsi_stats->rx_bytes += bytes;
5326 		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
5327 		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
5328 		vsi->rx_gro_dropped += ring->rx_stats.gro_dropped;
5329 	}
5330 
5331 	/* update XDP Tx rings counters */
5332 	if (ice_is_xdp_ena_vsi(vsi))
5333 		ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
5334 					     vsi->num_xdp_txq);
5335 
5336 	rcu_read_unlock();
5337 }
5338 
5339 /**
5340  * ice_update_vsi_stats - Update VSI stats counters
5341  * @vsi: the VSI to be updated
5342  */
5343 void ice_update_vsi_stats(struct ice_vsi *vsi)
5344 {
5345 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
5346 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
5347 	struct ice_pf *pf = vsi->back;
5348 
5349 	if (test_bit(__ICE_DOWN, vsi->state) ||
5350 	    test_bit(__ICE_CFG_BUSY, pf->state))
5351 		return;
5352 
5353 	/* get stats as recorded by Tx/Rx rings */
5354 	ice_update_vsi_ring_stats(vsi);
5355 
5356 	/* get VSI stats as recorded by the hardware */
5357 	ice_update_eth_stats(vsi);
5358 
5359 	cur_ns->tx_errors = cur_es->tx_errors;
5360 	cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped;
5361 	cur_ns->tx_dropped = cur_es->tx_discards;
5362 	cur_ns->multicast = cur_es->rx_multicast;
5363 
5364 	/* update some more netdev stats if this is main VSI */
5365 	if (vsi->type == ICE_VSI_PF) {
5366 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
5367 		cur_ns->rx_errors = pf->stats.crc_errors +
5368 				    pf->stats.illegal_bytes +
5369 				    pf->stats.rx_len_errors +
5370 				    pf->stats.rx_undersize +
5371 				    pf->hw_csum_rx_error +
5372 				    pf->stats.rx_jabber +
5373 				    pf->stats.rx_fragments +
5374 				    pf->stats.rx_oversize;
5375 		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
5376 		/* record drops from the port level */
5377 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
5378 	}
5379 }
5380 
5381 /**
5382  * ice_update_pf_stats - Update PF port stats counters
5383  * @pf: PF whose stats need to be updated
5384  */
5385 void ice_update_pf_stats(struct ice_pf *pf)
5386 {
5387 	struct ice_hw_port_stats *prev_ps, *cur_ps;
5388 	struct ice_hw *hw = &pf->hw;
5389 	u16 fd_ctr_base;
5390 	u8 port;
5391 
5392 	port = hw->port_info->lport;
5393 	prev_ps = &pf->stats_prev;
5394 	cur_ps = &pf->stats;
5395 
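	/* The port counters are free-running and are not cleared on read;
	 * ice_stat_update40()/ice_stat_update32() accumulate the delta since
	 * the previous snapshot, seeding the baseline on the first pass
	 * (while pf->stat_prev_loaded is still false).
	 */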
5396 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
5397 			  &prev_ps->eth.rx_bytes,
5398 			  &cur_ps->eth.rx_bytes);
5399 
5400 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
5401 			  &prev_ps->eth.rx_unicast,
5402 			  &cur_ps->eth.rx_unicast);
5403 
5404 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
5405 			  &prev_ps->eth.rx_multicast,
5406 			  &cur_ps->eth.rx_multicast);
5407 
5408 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
5409 			  &prev_ps->eth.rx_broadcast,
5410 			  &cur_ps->eth.rx_broadcast);
5411 
5412 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
5413 			  &prev_ps->eth.rx_discards,
5414 			  &cur_ps->eth.rx_discards);
5415 
5416 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
5417 			  &prev_ps->eth.tx_bytes,
5418 			  &cur_ps->eth.tx_bytes);
5419 
5420 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
5421 			  &prev_ps->eth.tx_unicast,
5422 			  &cur_ps->eth.tx_unicast);
5423 
5424 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
5425 			  &prev_ps->eth.tx_multicast,
5426 			  &cur_ps->eth.tx_multicast);
5427 
5428 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
5429 			  &prev_ps->eth.tx_broadcast,
5430 			  &cur_ps->eth.tx_broadcast);
5431 
5432 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
5433 			  &prev_ps->tx_dropped_link_down,
5434 			  &cur_ps->tx_dropped_link_down);
5435 
5436 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
5437 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
5438 
5439 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
5440 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
5441 
5442 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
5443 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
5444 
5445 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
5446 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
5447 
5448 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
5449 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
5450 
5451 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
5452 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
5453 
5454 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
5455 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
5456 
5457 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
5458 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
5459 
5460 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
5461 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
5462 
5463 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
5464 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
5465 
5466 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
5467 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
5468 
5469 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
5470 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
5471 
5472 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
5473 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
5474 
5475 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
5476 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
5477 
5478 	fd_ctr_base = hw->fd_ctr_base;
5479 
5480 	ice_stat_update40(hw,
5481 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
5482 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
5483 			  &cur_ps->fd_sb_match);
5484 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
5485 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
5486 
5487 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
5488 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
5489 
5490 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
5491 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
5492 
5493 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
5494 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
5495 
5496 	ice_update_dcb_stats(pf);
5497 
5498 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
5499 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
5500 
5501 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
5502 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
5503 
5504 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
5505 			  &prev_ps->mac_local_faults,
5506 			  &cur_ps->mac_local_faults);
5507 
5508 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
5509 			  &prev_ps->mac_remote_faults,
5510 			  &cur_ps->mac_remote_faults);
5511 
5512 	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
5513 			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
5514 
5515 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
5516 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
5517 
5518 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
5519 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
5520 
5521 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
5522 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
5523 
5524 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
5525 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
5526 
5527 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
5528 
5529 	pf->stat_prev_loaded = true;
5530 }
5531 
5532 /**
5533  * ice_get_stats64 - get statistics for network device structure
5534  * @netdev: network interface device structure
5535  * @stats: main device statistics structure
5536  */
5537 static
5538 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
5539 {
5540 	struct ice_netdev_priv *np = netdev_priv(netdev);
5541 	struct rtnl_link_stats64 *vsi_stats;
5542 	struct ice_vsi *vsi = np->vsi;
5543 
5544 	vsi_stats = &vsi->net_stats;
5545 
5546 	if (!vsi->num_txq || !vsi->num_rxq)
5547 		return;
5548 
5549 	/* netdev packet/byte stats come from the ring counters and are
5550 	 * obtained by summing them up (done by ice_update_vsi_ring_stats).
5551 	 * Only call the update routine, and thus read the registers, if the
5552 	 * VSI is not down.
5553 	 */
5554 	if (!test_bit(__ICE_DOWN, vsi->state))
5555 		ice_update_vsi_ring_stats(vsi);
5556 	stats->tx_packets = vsi_stats->tx_packets;
5557 	stats->tx_bytes = vsi_stats->tx_bytes;
5558 	stats->rx_packets = vsi_stats->rx_packets;
5559 	stats->rx_bytes = vsi_stats->rx_bytes;
5560 
5561 	/* The rest of the stats can be read from the hardware but instead we
5562 	 * just return values that the watchdog task has already obtained from
5563 	 * the hardware.
5564 	 */
5565 	stats->multicast = vsi_stats->multicast;
5566 	stats->tx_errors = vsi_stats->tx_errors;
5567 	stats->tx_dropped = vsi_stats->tx_dropped;
5568 	stats->rx_errors = vsi_stats->rx_errors;
5569 	stats->rx_dropped = vsi_stats->rx_dropped;
5570 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
5571 	stats->rx_length_errors = vsi_stats->rx_length_errors;
5572 }
5573 
5574 /**
5575  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5576  * @vsi: VSI having NAPI disabled
5577  */
5578 static void ice_napi_disable_all(struct ice_vsi *vsi)
5579 {
5580 	int q_idx;
5581 
5582 	if (!vsi->netdev)
5583 		return;
5584 
5585 	ice_for_each_q_vector(vsi, q_idx) {
5586 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5587 
5588 		if (q_vector->rx.ring || q_vector->tx.ring)
5589 			napi_disable(&q_vector->napi);
5590 	}
5591 }
5592 
5593 /**
5594  * ice_down - Shutdown the connection
5595  * @vsi: The VSI being stopped
5596  */
5597 int ice_down(struct ice_vsi *vsi)
5598 {
5599 	int i, tx_err, rx_err, link_err = 0;
5600 
5601 	/* The caller of this function is expected to have set the
5602 	 * __ICE_DOWN bit in vsi->state
5603 	 */
5604 	if (vsi->netdev) {
5605 		netif_carrier_off(vsi->netdev);
5606 		netif_tx_disable(vsi->netdev);
5607 	}
5608 
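	/* Quiesce in order: the stack is stopped above, then interrupts are
	 * disabled and the rings are stopped, and NAPI is disabled before the
	 * rings are cleaned below.
	 */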
5609 	ice_vsi_dis_irq(vsi);
5610 
5611 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
5612 	if (tx_err)
5613 		netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
5614 			   vsi->vsi_num, tx_err);
5615 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
5616 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
5617 		if (tx_err)
5618 			netdev_err(vsi->netdev, "Failed to stop XDP rings, VSI %d error %d\n",
5619 				   vsi->vsi_num, tx_err);
5620 	}
5621 
5622 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
5623 	if (rx_err)
5624 		netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
5625 			   vsi->vsi_num, rx_err);
5626 
5627 	ice_napi_disable_all(vsi);
5628 
5629 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
5630 		link_err = ice_force_phys_link_state(vsi, false);
5631 		if (link_err)
5632 			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
5633 				   vsi->vsi_num, link_err);
5634 	}
5635 
5636 	ice_for_each_txq(vsi, i)
5637 		ice_clean_tx_ring(vsi->tx_rings[i]);
5638 
5639 	ice_for_each_rxq(vsi, i)
5640 		ice_clean_rx_ring(vsi->rx_rings[i]);
5641 
5642 	if (tx_err || rx_err || link_err) {
5643 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
5644 			   vsi->vsi_num, vsi->vsw->sw_id);
5645 		return -EIO;
5646 	}
5647 
5648 	return 0;
5649 }
5650 
5651 /**
5652  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
5653  * @vsi: VSI having resources allocated
5654  *
5655  * Return 0 on success, negative on failure
5656  */
5657 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
5658 {
5659 	int i, err = 0;
5660 
5661 	if (!vsi->num_txq) {
5662 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
5663 			vsi->vsi_num);
5664 		return -EINVAL;
5665 	}
5666 
5667 	ice_for_each_txq(vsi, i) {
5668 		struct ice_ring *ring = vsi->tx_rings[i];
5669 
5670 		if (!ring)
5671 			return -EINVAL;
5672 
5673 		ring->netdev = vsi->netdev;
5674 		err = ice_setup_tx_ring(ring);
5675 		if (err)
5676 			break;
5677 	}
5678 
5679 	return err;
5680 }
5681 
5682 /**
5683  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
5684  * @vsi: VSI having resources allocated
5685  *
5686  * Return 0 on success, negative on failure
5687  */
5688 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
5689 {
5690 	int i, err = 0;
5691 
5692 	if (!vsi->num_rxq) {
5693 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
5694 			vsi->vsi_num);
5695 		return -EINVAL;
5696 	}
5697 
5698 	ice_for_each_rxq(vsi, i) {
5699 		struct ice_ring *ring = vsi->rx_rings[i];
5700 
5701 		if (!ring)
5702 			return -EINVAL;
5703 
5704 		ring->netdev = vsi->netdev;
5705 		err = ice_setup_rx_ring(ring);
5706 		if (err)
5707 			break;
5708 	}
5709 
5710 	return err;
5711 }
5712 
5713 /**
5714  * ice_vsi_open_ctrl - open control VSI for use
5715  * @vsi: the VSI to open
5716  *
5717  * Initialization of the Control VSI
5718  *
5719  * Returns 0 on success, negative value on error
5720  */
5721 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
5722 {
5723 	char int_name[ICE_INT_NAME_STR_LEN];
5724 	struct ice_pf *pf = vsi->back;
5725 	struct device *dev;
5726 	int err;
5727 
5728 	dev = ice_pf_to_dev(pf);
5729 	/* allocate descriptors */
5730 	err = ice_vsi_setup_tx_rings(vsi);
5731 	if (err)
5732 		goto err_setup_tx;
5733 
5734 	err = ice_vsi_setup_rx_rings(vsi);
5735 	if (err)
5736 		goto err_setup_rx;
5737 
5738 	err = ice_vsi_cfg(vsi);
5739 	if (err)
5740 		goto err_setup_rx;
5741 
5742 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
5743 		 dev_driver_string(dev), dev_name(dev));
5744 	err = ice_vsi_req_irq_msix(vsi, int_name);
5745 	if (err)
5746 		goto err_setup_rx;
5747 
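	/* The control VSI (used by Flow Director) has no netdev, so bring-up
	 * stops at enabling the Rx rings and IRQs; there are no stack queues
	 * to start.
	 */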
5748 	ice_vsi_cfg_msix(vsi);
5749 
5750 	err = ice_vsi_start_all_rx_rings(vsi);
5751 	if (err)
5752 		goto err_up_complete;
5753 
5754 	clear_bit(__ICE_DOWN, vsi->state);
5755 	ice_vsi_ena_irq(vsi);
5756 
5757 	return 0;
5758 
5759 err_up_complete:
5760 	ice_down(vsi);
5761 err_setup_rx:
5762 	ice_vsi_free_rx_rings(vsi);
5763 err_setup_tx:
5764 	ice_vsi_free_tx_rings(vsi);
5765 
5766 	return err;
5767 }
5768 
5769 /**
5770  * ice_vsi_open - Called when a network interface is made active
5771  * @vsi: the VSI to open
5772  *
5773  * Initialization of the VSI
5774  *
5775  * Returns 0 on success, negative value on error
5776  */
5777 static int ice_vsi_open(struct ice_vsi *vsi)
5778 {
5779 	char int_name[ICE_INT_NAME_STR_LEN];
5780 	struct ice_pf *pf = vsi->back;
5781 	int err;
5782 
5783 	/* allocate descriptors */
5784 	err = ice_vsi_setup_tx_rings(vsi);
5785 	if (err)
5786 		goto err_setup_tx;
5787 
5788 	err = ice_vsi_setup_rx_rings(vsi);
5789 	if (err)
5790 		goto err_setup_rx;
5791 
5792 	err = ice_vsi_cfg(vsi);
5793 	if (err)
5794 		goto err_setup_rx;
5795 
5796 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5797 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
5798 	err = ice_vsi_req_irq_msix(vsi, int_name);
5799 	if (err)
5800 		goto err_setup_rx;
5801 
5802 	/* Notify the stack of the actual queue counts. */
5803 	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
5804 	if (err)
5805 		goto err_set_qs;
5806 
5807 	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
5808 	if (err)
5809 		goto err_set_qs;
5810 
5811 	err = ice_up_complete(vsi);
5812 	if (err)
5813 		goto err_up_complete;
5814 
5815 	return 0;
5816 
5817 err_up_complete:
5818 	ice_down(vsi);
5819 err_set_qs:
5820 	ice_vsi_free_irq(vsi);
5821 err_setup_rx:
5822 	ice_vsi_free_rx_rings(vsi);
5823 err_setup_tx:
5824 	ice_vsi_free_tx_rings(vsi);
5825 
5826 	return err;
5827 }
5828 
5829 /**
5830  * ice_vsi_release_all - Delete all VSIs
5831  * @pf: PF from which all VSIs are being removed
5832  */
5833 static void ice_vsi_release_all(struct ice_pf *pf)
5834 {
5835 	int err, i;
5836 
5837 	if (!pf->vsi)
5838 		return;
5839 
5840 	ice_for_each_vsi(pf, i) {
5841 		if (!pf->vsi[i])
5842 			continue;
5843 
5844 		err = ice_vsi_release(pf->vsi[i]);
5845 		if (err)
5846 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
5847 				i, err, pf->vsi[i]->vsi_num);
5848 	}
5849 }
5850 
5851 /**
5852  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
5853  * @pf: pointer to the PF instance
5854  * @type: VSI type to rebuild
5855  *
5856  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
5857  */
5858 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
5859 {
5860 	struct device *dev = ice_pf_to_dev(pf);
5861 	enum ice_status status;
5862 	int i, err;
5863 
5864 	ice_for_each_vsi(pf, i) {
5865 		struct ice_vsi *vsi = pf->vsi[i];
5866 
5867 		if (!vsi || vsi->type != type)
5868 			continue;
5869 
5870 		/* rebuild the VSI */
5871 		err = ice_vsi_rebuild(vsi, true);
5872 		if (err) {
5873 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
5874 				err, vsi->idx, ice_vsi_type_str(type));
5875 			return err;
5876 		}
5877 
5878 		/* replay filters for the VSI */
5879 		status = ice_replay_vsi(&pf->hw, vsi->idx);
5880 		if (status) {
5881 			dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
5882 				ice_stat_str(status), vsi->idx,
5883 				ice_vsi_type_str(type));
5884 			return -EIO;
5885 		}
5886 
5887 		/* Re-map the HW VSI number, using the VSI handle that was
5888 		 * previously validated in the ice_replay_vsi() call above
5889 		 */
5890 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
5891 
5892 		/* enable the VSI */
5893 		err = ice_ena_vsi(vsi, false);
5894 		if (err) {
5895 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
5896 				err, vsi->idx, ice_vsi_type_str(type));
5897 			return err;
5898 		}
5899 
5900 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
5901 			 ice_vsi_type_str(type));
5902 	}
5903 
5904 	return 0;
5905 }
5906 
5907 /**
5908  * ice_update_pf_netdev_link - Update PF netdev link status
5909  * @pf: pointer to the PF instance
5910  */
5911 static void ice_update_pf_netdev_link(struct ice_pf *pf)
5912 {
5913 	bool link_up;
5914 	int i;
5915 
5916 	ice_for_each_vsi(pf, i) {
5917 		struct ice_vsi *vsi = pf->vsi[i];
5918 
5919 		if (!vsi || vsi->type != ICE_VSI_PF)
5920 			return;
5921 
5922 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
5923 		if (link_up) {
5924 			netif_carrier_on(pf->vsi[i]->netdev);
5925 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
5926 		} else {
5927 			netif_carrier_off(pf->vsi[i]->netdev);
5928 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
5929 		}
5930 	}
5931 }
5932 
5933 /**
5934  * ice_rebuild - rebuild after reset
5935  * @pf: PF to rebuild
5936  * @reset_type: type of reset
5937  *
5938  * Do not rebuild VF VSIs in this flow because that is already handled via
5939  * ice_reset_all_vfs(). The requirements for resetting a VF after a
5940  * PFR/CORER/GLOBR/etc. differ from the normal flow, and we don't want to
5941  * reset/rebuild all the VF VSIs twice.
5942  */
5943 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
5944 {
5945 	struct device *dev = ice_pf_to_dev(pf);
5946 	struct ice_hw *hw = &pf->hw;
5947 	enum ice_status ret;
5948 	int err;
5949 
5950 	if (test_bit(__ICE_DOWN, pf->state))
5951 		goto clear_recovery;
5952 
5953 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
5954 
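	/* Bring the control queues back up first; every subsequent rebuild
	 * step communicates with the firmware through them.
	 */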
5955 	ret = ice_init_all_ctrlq(hw);
5956 	if (ret) {
5957 		dev_err(dev, "control queues init failed %s\n",
5958 			ice_stat_str(ret));
5959 		goto err_init_ctrlq;
5960 	}
5961 
5962 	/* if DDP was previously loaded successfully */
5963 	if (!ice_is_safe_mode(pf)) {
5964 		/* reload the SW DB of filter tables */
5965 		if (reset_type == ICE_RESET_PFR)
5966 			ice_fill_blk_tbls(hw);
5967 		else
5968 			/* Reload DDP Package after CORER/GLOBR reset */
5969 			ice_load_pkg(NULL, pf);
5970 	}
5971 
5972 	ret = ice_clear_pf_cfg(hw);
5973 	if (ret) {
5974 		dev_err(dev, "clear PF configuration failed %s\n",
5975 			ice_stat_str(ret));
5976 		goto err_init_ctrlq;
5977 	}
5978 
5979 	if (pf->first_sw->dflt_vsi_ena)
5980 		dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
5981 	/* clear the default VSI configuration if it exists */
5982 	pf->first_sw->dflt_vsi = NULL;
5983 	pf->first_sw->dflt_vsi_ena = false;
5984 
5985 	ice_clear_pxe_mode(hw);
5986 
5987 	ret = ice_get_caps(hw);
5988 	if (ret) {
5989 		dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
5990 		goto err_init_ctrlq;
5991 	}
5992 
5993 	ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
5994 	if (ret) {
5995 		dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
5996 		goto err_init_ctrlq;
5997 	}
5998 
5999 	err = ice_sched_init_port(hw->port_info);
6000 	if (err)
6001 		goto err_sched_init_port;
6002 
6003 	/* start misc vector */
6004 	err = ice_req_irq_msix_misc(pf);
6005 	if (err) {
6006 		dev_err(dev, "misc vector setup failed: %d\n", err);
6007 		goto err_sched_init_port;
6008 	}
6009 
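	/* Re-enable Flow Director; if the reset cleared the filter space,
	 * re-allocate the guaranteed and best-effort filter entries from the
	 * function capabilities.
	 */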
6010 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6011 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
6012 		if (!rd32(hw, PFQF_FD_SIZE)) {
6013 			u16 unused, guar, b_effort;
6014 
6015 			guar = hw->func_caps.fd_fltr_guar;
6016 			b_effort = hw->func_caps.fd_fltr_best_effort;
6017 
6018 			/* force guaranteed filter pool for PF */
6019 			ice_alloc_fd_guar_item(hw, &unused, guar);
6020 			/* force shared filter pool for PF */
6021 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
6022 		}
6023 	}
6024 
6025 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
6026 		ice_dcb_rebuild(pf);
6027 
6028 	/* rebuild PF VSI */
6029 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
6030 	if (err) {
6031 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
6032 		goto err_vsi_rebuild;
6033 	}
6034 
6035 	/* If Flow Director is active */
6036 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6037 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
6038 		if (err) {
6039 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
6040 			goto err_vsi_rebuild;
6041 		}
6042 
6043 		/* replay HW Flow Director recipes */
6044 		if (hw->fdir_prof)
6045 			ice_fdir_replay_flows(hw);
6046 
6047 		/* replay Flow Director filters */
6048 		ice_fdir_replay_fltrs(pf);
6049 
6050 		ice_rebuild_arfs(pf);
6051 	}
6052 
6053 	ice_update_pf_netdev_link(pf);
6054 
6055 	/* tell the firmware we are up */
6056 	ret = ice_send_version(pf);
6057 	if (ret) {
6058 		dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
6059 			ice_stat_str(ret));
6060 		goto err_vsi_rebuild;
6061 	}
6062 
6063 	ice_replay_post(hw);
6064 
6065 	/* if we get here, reset flow is successful */
6066 	clear_bit(__ICE_RESET_FAILED, pf->state);
6067 	return;
6068 
6069 err_vsi_rebuild:
6070 err_sched_init_port:
6071 	ice_sched_cleanup_all(hw);
6072 err_init_ctrlq:
6073 	ice_shutdown_all_ctrlq(hw);
6074 	set_bit(__ICE_RESET_FAILED, pf->state);
6075 clear_recovery:
6076 	/* set this bit in PF state to control service task scheduling */
6077 	set_bit(__ICE_NEEDS_RESTART, pf->state);
6078 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
6079 }
6080 
6081 /**
6082  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
6083  * @vsi: Pointer to VSI structure
6084  */
6085 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
6086 {
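	/* A minimal rationale, assuming the driver's usual Rx buffer layout:
	 * with the default (non-legacy) Rx path on 4K pages the driver uses
	 * 3072-byte XDP buffers, so the full 3072 bytes are usable; otherwise
	 * the 2048-byte buffer must also accommodate XDP_PACKET_HEADROOM.
	 */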
6087 	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
6088 		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
6089 	else
6090 		return ICE_RXBUF_3072;
6091 }
6092 
6093 /**
6094  * ice_change_mtu - NDO callback to change the MTU
6095  * @netdev: network interface device structure
6096  * @new_mtu: new value for maximum frame size
6097  *
6098  * Returns 0 on success, negative on failure
6099  */
6100 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
6101 {
6102 	struct ice_netdev_priv *np = netdev_priv(netdev);
6103 	struct ice_vsi *vsi = np->vsi;
6104 	struct ice_pf *pf = vsi->back;
6105 	u8 count = 0;
6106 
6107 	if (new_mtu == (int)netdev->mtu) {
6108 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
6109 		return 0;
6110 	}
6111 
6112 	if (ice_is_xdp_ena_vsi(vsi)) {
6113 		int frame_size = ice_max_xdp_frame_size(vsi);
6114 
6115 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
6116 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
6117 				   frame_size - ICE_ETH_PKT_HDR_PAD);
6118 			return -EINVAL;
6119 		}
6120 	}
6121 
6122 	if (new_mtu < (int)netdev->min_mtu) {
6123 		netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
6124 			   netdev->min_mtu);
6125 		return -EINVAL;
6126 	} else if (new_mtu > (int)netdev->max_mtu) {
6127 		netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
6128 			   netdev->max_mtu);
6129 		return -EINVAL;
6130 	}
6131 	/* if a reset is in progress, wait for some time for it to complete */
6132 	do {
6133 		if (!ice_is_reset_in_progress(pf->state))
6134 			break;
6135 		count++;
6136 		usleep_range(1000, 2000);
6137 	} while (count < 100);
6141 
6142 	if (count == 100) {
6143 		netdev_err(netdev, "can't change MTU. Device is busy\n");
6144 		return -EBUSY;
6145 	}
6146 
6147 	netdev->mtu = (unsigned int)new_mtu;
6148 
6149 	/* if VSI is up, bring it down and then back up */
6150 	if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
6151 		int err;
6152 
6153 		err = ice_down(vsi);
6154 		if (err) {
6155 			netdev_err(netdev, "change MTU if_up err %d\n", err);
6156 			return err;
6157 		}
6158 
6159 		err = ice_up(vsi);
6160 		if (err) {
6161 			netdev_err(netdev, "change MTU if_up err %d\n", err);
6162 			return err;
6163 		}
6164 	}
6165 
6166 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
6167 	return 0;
6168 }
6169 
6170 /**
6171  * ice_aq_str - convert AQ err code to a string
6172  * @aq_err: the AQ error code to convert
6173  */
6174 const char *ice_aq_str(enum ice_aq_err aq_err)
6175 {
6176 	switch (aq_err) {
6177 	case ICE_AQ_RC_OK:
6178 		return "OK";
6179 	case ICE_AQ_RC_EPERM:
6180 		return "ICE_AQ_RC_EPERM";
6181 	case ICE_AQ_RC_ENOENT:
6182 		return "ICE_AQ_RC_ENOENT";
6183 	case ICE_AQ_RC_ENOMEM:
6184 		return "ICE_AQ_RC_ENOMEM";
6185 	case ICE_AQ_RC_EBUSY:
6186 		return "ICE_AQ_RC_EBUSY";
6187 	case ICE_AQ_RC_EEXIST:
6188 		return "ICE_AQ_RC_EEXIST";
6189 	case ICE_AQ_RC_EINVAL:
6190 		return "ICE_AQ_RC_EINVAL";
6191 	case ICE_AQ_RC_ENOSPC:
6192 		return "ICE_AQ_RC_ENOSPC";
6193 	case ICE_AQ_RC_ENOSYS:
6194 		return "ICE_AQ_RC_ENOSYS";
6195 	case ICE_AQ_RC_EMODE:
6196 		return "ICE_AQ_RC_EMODE";
6197 	case ICE_AQ_RC_ENOSEC:
6198 		return "ICE_AQ_RC_ENOSEC";
6199 	case ICE_AQ_RC_EBADSIG:
6200 		return "ICE_AQ_RC_EBADSIG";
6201 	case ICE_AQ_RC_ESVN:
6202 		return "ICE_AQ_RC_ESVN";
6203 	case ICE_AQ_RC_EBADMAN:
6204 		return "ICE_AQ_RC_EBADMAN";
6205 	case ICE_AQ_RC_EBADBUF:
6206 		return "ICE_AQ_RC_EBADBUF";
6207 	}
6208 
6209 	return "ICE_AQ_RC_UNKNOWN";
6210 }
6211 
6212 /**
6213  * ice_stat_str - convert status err code to a string
6214  * @stat_err: the status error code to convert
6215  */
6216 const char *ice_stat_str(enum ice_status stat_err)
6217 {
6218 	switch (stat_err) {
6219 	case ICE_SUCCESS:
6220 		return "OK";
6221 	case ICE_ERR_PARAM:
6222 		return "ICE_ERR_PARAM";
6223 	case ICE_ERR_NOT_IMPL:
6224 		return "ICE_ERR_NOT_IMPL";
6225 	case ICE_ERR_NOT_READY:
6226 		return "ICE_ERR_NOT_READY";
6227 	case ICE_ERR_NOT_SUPPORTED:
6228 		return "ICE_ERR_NOT_SUPPORTED";
6229 	case ICE_ERR_BAD_PTR:
6230 		return "ICE_ERR_BAD_PTR";
6231 	case ICE_ERR_INVAL_SIZE:
6232 		return "ICE_ERR_INVAL_SIZE";
6233 	case ICE_ERR_DEVICE_NOT_SUPPORTED:
6234 		return "ICE_ERR_DEVICE_NOT_SUPPORTED";
6235 	case ICE_ERR_RESET_FAILED:
6236 		return "ICE_ERR_RESET_FAILED";
6237 	case ICE_ERR_FW_API_VER:
6238 		return "ICE_ERR_FW_API_VER";
6239 	case ICE_ERR_NO_MEMORY:
6240 		return "ICE_ERR_NO_MEMORY";
6241 	case ICE_ERR_CFG:
6242 		return "ICE_ERR_CFG";
6243 	case ICE_ERR_OUT_OF_RANGE:
6244 		return "ICE_ERR_OUT_OF_RANGE";
6245 	case ICE_ERR_ALREADY_EXISTS:
6246 		return "ICE_ERR_ALREADY_EXISTS";
6247 	case ICE_ERR_NVM_CHECKSUM:
6248 		return "ICE_ERR_NVM_CHECKSUM";
6249 	case ICE_ERR_BUF_TOO_SHORT:
6250 		return "ICE_ERR_BUF_TOO_SHORT";
6251 	case ICE_ERR_NVM_BLANK_MODE:
6252 		return "ICE_ERR_NVM_BLANK_MODE";
6253 	case ICE_ERR_IN_USE:
6254 		return "ICE_ERR_IN_USE";
6255 	case ICE_ERR_MAX_LIMIT:
6256 		return "ICE_ERR_MAX_LIMIT";
6257 	case ICE_ERR_RESET_ONGOING:
6258 		return "ICE_ERR_RESET_ONGOING";
6259 	case ICE_ERR_HW_TABLE:
6260 		return "ICE_ERR_HW_TABLE";
6261 	case ICE_ERR_DOES_NOT_EXIST:
6262 		return "ICE_ERR_DOES_NOT_EXIST";
6263 	case ICE_ERR_FW_DDP_MISMATCH:
6264 		return "ICE_ERR_FW_DDP_MISMATCH";
6265 	case ICE_ERR_AQ_ERROR:
6266 		return "ICE_ERR_AQ_ERROR";
6267 	case ICE_ERR_AQ_TIMEOUT:
6268 		return "ICE_ERR_AQ_TIMEOUT";
6269 	case ICE_ERR_AQ_FULL:
6270 		return "ICE_ERR_AQ_FULL";
6271 	case ICE_ERR_AQ_NO_WORK:
6272 		return "ICE_ERR_AQ_NO_WORK";
6273 	case ICE_ERR_AQ_EMPTY:
6274 		return "ICE_ERR_AQ_EMPTY";
6275 	case ICE_ERR_AQ_FW_CRITICAL:
6276 		return "ICE_ERR_AQ_FW_CRITICAL";
6277 	}
6278 
6279 	return "ICE_ERR_UNKNOWN";
6280 }
6281 
6282 /**
6283  * ice_set_rss - Set RSS keys and lut
6284  * @vsi: Pointer to VSI structure
6285  * @seed: RSS hash seed
6286  * @lut: Lookup table
6287  * @lut_size: Lookup table size
6288  *
6289  * Returns 0 on success, negative on failure
6290  */
6291 int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
6292 {
6293 	struct ice_pf *pf = vsi->back;
6294 	struct ice_hw *hw = &pf->hw;
6295 	enum ice_status status;
6296 	struct device *dev;
6297 
6298 	dev = ice_pf_to_dev(pf);
6299 	if (seed) {
6300 		struct ice_aqc_get_set_rss_keys *buf =
6301 				  (struct ice_aqc_get_set_rss_keys *)seed;
6302 
6303 		status = ice_aq_set_rss_key(hw, vsi->idx, buf);
6305 		if (status) {
6306 			dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n",
6307 				ice_stat_str(status),
6308 				ice_aq_str(hw->adminq.sq_last_status));
6309 			return -EIO;
6310 		}
6311 	}
6312 
6313 	if (lut) {
6314 		status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
6315 					    lut, lut_size);
6316 		if (status) {
6317 			dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
6318 				ice_stat_str(status),
6319 				ice_aq_str(hw->adminq.sq_last_status));
6320 			return -EIO;
6321 		}
6322 	}
6323 
6324 	return 0;
6325 }
6326 
6327 /**
6328  * ice_get_rss - Get RSS keys and lut
6329  * @vsi: Pointer to VSI structure
6330  * @seed: Buffer to store the keys
6331  * @lut: Buffer to store the lookup table entries
6332  * @lut_size: Size of buffer to store the lookup table entries
6333  *
6334  * Returns 0 on success, negative on failure
6335  */
6336 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
6337 {
6338 	struct ice_pf *pf = vsi->back;
6339 	struct ice_hw *hw = &pf->hw;
6340 	enum ice_status status;
6341 	struct device *dev;
6342 
6343 	dev = ice_pf_to_dev(pf);
6344 	if (seed) {
6345 		struct ice_aqc_get_set_rss_keys *buf =
6346 				  (struct ice_aqc_get_set_rss_keys *)seed;
6347 
6348 		status = ice_aq_get_rss_key(hw, vsi->idx, buf);
6349 		if (status) {
6350 			dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n",
6351 				ice_stat_str(status),
6352 				ice_aq_str(hw->adminq.sq_last_status));
6353 			return -EIO;
6354 		}
6355 	}
6356 
6357 	if (lut) {
6358 		status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
6359 					    lut, lut_size);
6360 		if (status) {
6361 			dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n",
6362 				ice_stat_str(status),
6363 				ice_aq_str(hw->adminq.sq_last_status));
6364 			return -EIO;
6365 		}
6366 	}
6367 
6368 	return 0;
6369 }
6370 
6371 /**
6372  * ice_bridge_getlink - Get the hardware bridge mode
6373  * @skb: skb buff
6374  * @pid: process ID
6375  * @seq: RTNL message seq
6376  * @dev: the netdev being configured
6377  * @filter_mask: filter mask passed in
6378  * @nlflags: netlink flags passed in
6379  *
6380  * Return the bridge mode (VEB/VEPA)
6381  */
6382 static int
6383 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
6384 		   struct net_device *dev, u32 filter_mask, int nlflags)
6385 {
6386 	struct ice_netdev_priv *np = netdev_priv(dev);
6387 	struct ice_vsi *vsi = np->vsi;
6388 	struct ice_pf *pf = vsi->back;
6389 	u16 bmode;
6390 
6391 	bmode = pf->first_sw->bridge_mode;
6392 
6393 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
6394 				       filter_mask, NULL);
6395 }
6396 
6397 /**
6398  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
6399  * @vsi: Pointer to VSI structure
6400  * @bmode: Hardware bridge mode (VEB/VEPA)
6401  *
6402  * Returns 0 on success, negative on failure
6403  */
6404 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
6405 {
6406 	struct ice_aqc_vsi_props *vsi_props;
6407 	struct ice_hw *hw = &vsi->back->hw;
6408 	struct ice_vsi_ctx *ctxt;
6409 	enum ice_status status;
6410 	int ret = 0;
6411 
6412 	vsi_props = &vsi->info;
6413 
6414 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
6415 	if (!ctxt)
6416 		return -ENOMEM;
6417 
6418 	ctxt->info = vsi->info;
6419 
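	/* ICE_AQ_VSI_SW_FLAG_ALLOW_LB enables local (VEB) loopback between
	 * VSIs on the same switch; clearing it forces traffic out to the
	 * external switch (VEPA).
	 */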
6420 	if (bmode == BRIDGE_MODE_VEB)
6421 		/* change from VEPA to VEB mode */
6422 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6423 	else
6424 		/* change from VEB to VEPA mode */
6425 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6426 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
6427 
6428 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
6429 	if (status) {
6430 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
6431 			bmode, ice_stat_str(status),
6432 			ice_aq_str(hw->adminq.sq_last_status));
6433 		ret = -EIO;
6434 		goto out;
6435 	}
6436 	/* Update sw flags for bookkeeping */
6437 	vsi_props->sw_flags = ctxt->info.sw_flags;
6438 
6439 out:
6440 	kfree(ctxt);
6441 	return ret;
6442 }
6443 
6444 /**
6445  * ice_bridge_setlink - Set the hardware bridge mode
6446  * @dev: the netdev being configured
6447  * @nlh: RTNL message
6448  * @flags: bridge setlink flags
6449  * @extack: netlink extended ack
6450  *
6451  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
6452  * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
6453  * not already set) for all VSIs connected to this switch, and also updates the
6454  * unicast switch filter rules for the corresponding switch of the netdev.
6455  */
6456 static int
6457 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
6458 		   u16 __always_unused flags,
6459 		   struct netlink_ext_ack __always_unused *extack)
6460 {
6461 	struct ice_netdev_priv *np = netdev_priv(dev);
6462 	struct ice_pf *pf = np->vsi->back;
6463 	struct nlattr *attr, *br_spec;
6464 	struct ice_hw *hw = &pf->hw;
6465 	enum ice_status status;
6466 	struct ice_sw *pf_sw;
6467 	int rem, v, err = 0;
6468 
6469 	pf_sw = pf->first_sw;
6470 	/* find the attribute in the netlink message */
6471 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;
6472 
6473 	nla_for_each_nested(attr, br_spec, rem) {
6474 		__u16 mode;
6475 
6476 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
6477 			continue;
6478 		mode = nla_get_u16(attr);
6479 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
6480 			return -EINVAL;
6481 		/* Continue if bridge mode is not being flipped */
6482 		if (mode == pf_sw->bridge_mode)
6483 			continue;
6484 		/* Iterate through the PF VSI list and update the loopback
6485 		 * mode of each VSI
6486 		 */
6487 		ice_for_each_vsi(pf, v) {
6488 			if (!pf->vsi[v])
6489 				continue;
6490 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
6491 			if (err)
6492 				return err;
6493 		}
6494 
6495 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
6496 		/* Update the unicast switch filter rules for the corresponding
6497 		 * switch of the netdev
6498 		 */
6499 		status = ice_update_sw_rule_bridge_mode(hw);
6500 		if (status) {
6501 			netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
6502 				   mode, ice_stat_str(status),
6503 				   ice_aq_str(hw->adminq.sq_last_status));
6504 			/* revert hw->evb_veb */
6505 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
6506 			return -EIO;
6507 		}
6508 
6509 		pf_sw->bridge_mode = mode;
6510 	}
6511 
6512 	return 0;
6513 }
6514 
6515 /**
6516  * ice_tx_timeout - Respond to a Tx Hang
6517  * @netdev: network interface device structure
6518  * @txqueue: Tx queue
6519  */
6520 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
6521 {
6522 	struct ice_netdev_priv *np = netdev_priv(netdev);
6523 	struct ice_ring *tx_ring = NULL;
6524 	struct ice_vsi *vsi = np->vsi;
6525 	struct ice_pf *pf = vsi->back;
6526 	u32 i;
6527 
6528 	pf->tx_timeout_count++;
6529 
6530 	/* Check if PFC is enabled for the TC to which the queue belongs.
6531 	 * If so, the Tx timeout is not caused by a hung queue and there is
6532 	 * no need to reset and rebuild
6533 	 */
6534 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
6535 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
6536 			 txqueue);
6537 		return;
6538 	}
6539 
6540 	/* now that we have an index, find the tx_ring struct */
6541 	for (i = 0; i < vsi->num_txq; i++)
6542 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
6543 			if (txqueue == vsi->tx_rings[i]->q_index) {
6544 				tx_ring = vsi->tx_rings[i];
6545 				break;
6546 			}
6547 
6548 	/* Reset the recovery level if enough time has elapsed since the last
6549 	 * timeout. Also ensure no new reset action happens before the next
6550 	 * timeout period.
	 */
6551 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
6552 		pf->tx_timeout_recovery_level = 1;
6553 	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
6554 				       netdev->watchdog_timeo)))
6555 		return;
6556 
6557 	if (tx_ring) {
6558 		struct ice_hw *hw = &pf->hw;
6559 		u32 head, val = 0;
6560 
6561 		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
6562 			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
6563 		/* Read interrupt register */
6564 		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
6565 
6566 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
6567 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
6568 			    head, tx_ring->next_to_use, val);
6569 	}
6570 
6571 	pf->tx_timeout_last_recovery = jiffies;
6572 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
6573 		    pf->tx_timeout_recovery_level, txqueue);
6574 
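	/* Escalate the recovery action on successive timeouts: PF reset
	 * first, then core reset, then global reset; beyond that the device
	 * is treated as unrecoverable.
	 */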
6575 	switch (pf->tx_timeout_recovery_level) {
6576 	case 1:
6577 		set_bit(__ICE_PFR_REQ, pf->state);
6578 		break;
6579 	case 2:
6580 		set_bit(__ICE_CORER_REQ, pf->state);
6581 		break;
6582 	case 3:
6583 		set_bit(__ICE_GLOBR_REQ, pf->state);
6584 		break;
6585 	default:
6586 		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
6587 		set_bit(__ICE_DOWN, pf->state);
6588 		set_bit(__ICE_NEEDS_RESTART, vsi->state);
6589 		set_bit(__ICE_SERVICE_DIS, pf->state);
6590 		break;
6591 	}
6592 
6593 	ice_service_task_schedule(pf);
6594 	pf->tx_timeout_recovery_level++;
6595 }
6596 
6597 /**
6598  * ice_open - Called when a network interface becomes active
6599  * @netdev: network interface device structure
6600  *
6601  * The open entry point is called when a network interface is made
6602  * active by the system (IFF_UP). At this point all resources needed
6603  * for transmit and receive operations are allocated, the interrupt
6604  * handler is registered with the OS, the netdev watchdog is enabled,
6605  * and the stack is notified that the interface is ready.
6606  *
6607  * Returns 0 on success, negative value on failure
6608  */
6609 int ice_open(struct net_device *netdev)
6610 {
6611 	struct ice_netdev_priv *np = netdev_priv(netdev);
6612 	struct ice_vsi *vsi = np->vsi;
6613 	struct ice_pf *pf = vsi->back;
6614 	struct ice_port_info *pi;
6615 	int err;
6616 
6617 	if (test_bit(__ICE_NEEDS_RESTART, pf->state)) {
6618 		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
6619 		return -EIO;
6620 	}
6621 
6622 	if (test_bit(__ICE_DOWN, pf->state)) {
6623 		netdev_err(netdev, "device is not ready yet\n");
6624 		return -EBUSY;
6625 	}
6626 
6627 	netif_carrier_off(netdev);
6628 
6629 	pi = vsi->port_info;
6630 	err = ice_update_link_info(pi);
6631 	if (err) {
6632 		netdev_err(netdev, "Failed to get link info, error %d\n",
6633 			   err);
6634 		return err;
6635 	}
6636 
6637 	/* Set PHY if there is media, otherwise, turn off PHY */
6638 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
6639 		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
6640 		if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) {
6641 			err = ice_init_phy_user_cfg(pi);
6642 			if (err) {
6643 				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
6644 					   err);
6645 				return err;
6646 			}
6647 		}
6648 
6649 		err = ice_configure_phy(vsi);
6650 		if (err) {
6651 			netdev_err(netdev, "Failed to set physical link up, error %d\n",
6652 				   err);
6653 			return err;
6654 		}
6655 	} else {
6656 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
6657 		err = ice_aq_set_link_restart_an(pi, false, NULL);
6658 		if (err) {
6659 			netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n",
6660 				   vsi->vsi_num, err);
6661 			return err;
6662 		}
6663 	}
6664 
6665 	err = ice_vsi_open(vsi);
6666 	if (err)
6667 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
6668 			   vsi->vsi_num, vsi->vsw->sw_id);
6669 
6670 	/* Update existing tunnels information */
6671 	udp_tunnel_get_rx_info(netdev);
6672 
6673 	return err;
6674 }
6675 
6676 /**
6677  * ice_stop - Disables a network interface
6678  * @netdev: network interface device structure
6679  *
6680  * The stop entry point is called when an interface is de-activated by the OS,
6681  * and the netdevice enters the DOWN state. The hardware is still under the
6682  * driver's control, but the netdev interface is disabled.
6683  *
6684  * Returns success only - not allowed to fail
6685  */
6686 int ice_stop(struct net_device *netdev)
6687 {
6688 	struct ice_netdev_priv *np = netdev_priv(netdev);
6689 	struct ice_vsi *vsi = np->vsi;
6690 
6691 	ice_vsi_close(vsi);
6692 
6693 	return 0;
6694 }
6695 
6696 /**
6697  * ice_features_check - Validate encapsulated packet conforms to limits
6698  * @skb: skb buffer
6699  * @netdev: This port's netdev
6700  * @features: Offload features that the stack believes apply
6701  */
6702 static netdev_features_t
6703 ice_features_check(struct sk_buff *skb,
6704 		   struct net_device __always_unused *netdev,
6705 		   netdev_features_t features)
6706 {
6707 	size_t len;
6708 
6709 	/* No point in doing any of this if neither checksum nor GSO is
6710 	 * being requested for this frame. We can rule out both by just
6711 	 * checking for CHECKSUM_PARTIAL
6712 	 */
6713 	if (skb->ip_summed != CHECKSUM_PARTIAL)
6714 		return features;
6715 
6716 	/* We cannot support GSO if the MSS is going to be less than
6717 	 * 64 bytes. If it is then we need to drop support for GSO.
6718 	 */
6719 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
6720 		features &= ~NETIF_F_GSO_MASK;
6721 
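	/* The Tx descriptor encodes header lengths in 2-byte words within
	 * limited-width fields, so overly long or odd-length headers cannot
	 * be offloaded (hence the length and low-bit checks below).
	 */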
6722 	len = skb_network_header(skb) - skb->data;
6723 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
6724 		goto out_rm_features;
6725 
6726 	len = skb_transport_header(skb) - skb_network_header(skb);
6727 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
6728 		goto out_rm_features;
6729 
6730 	if (skb->encapsulation) {
6731 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
6732 		if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
6733 			goto out_rm_features;
6734 
6735 		len = skb_inner_transport_header(skb) -
6736 		      skb_inner_network_header(skb);
6737 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
6738 			goto out_rm_features;
6739 	}
6740 
6741 	return features;
6742 out_rm_features:
6743 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
6744 }
6745 
6746 static const struct net_device_ops ice_netdev_safe_mode_ops = {
6747 	.ndo_open = ice_open,
6748 	.ndo_stop = ice_stop,
6749 	.ndo_start_xmit = ice_start_xmit,
6750 	.ndo_set_mac_address = ice_set_mac_address,
6751 	.ndo_validate_addr = eth_validate_addr,
6752 	.ndo_change_mtu = ice_change_mtu,
6753 	.ndo_get_stats64 = ice_get_stats64,
6754 	.ndo_tx_timeout = ice_tx_timeout,
6755 };
6756 
6757 static const struct net_device_ops ice_netdev_ops = {
6758 	.ndo_open = ice_open,
6759 	.ndo_stop = ice_stop,
6760 	.ndo_start_xmit = ice_start_xmit,
6761 	.ndo_features_check = ice_features_check,
6762 	.ndo_set_rx_mode = ice_set_rx_mode,
6763 	.ndo_set_mac_address = ice_set_mac_address,
6764 	.ndo_validate_addr = eth_validate_addr,
6765 	.ndo_change_mtu = ice_change_mtu,
6766 	.ndo_get_stats64 = ice_get_stats64,
6767 	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
6768 	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
6769 	.ndo_set_vf_mac = ice_set_vf_mac,
6770 	.ndo_get_vf_config = ice_get_vf_cfg,
6771 	.ndo_set_vf_trust = ice_set_vf_trust,
6772 	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
6773 	.ndo_set_vf_link_state = ice_set_vf_link_state,
6774 	.ndo_get_vf_stats = ice_get_vf_stats,
6775 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
6776 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
6777 	.ndo_set_features = ice_set_features,
6778 	.ndo_bridge_getlink = ice_bridge_getlink,
6779 	.ndo_bridge_setlink = ice_bridge_setlink,
6780 	.ndo_fdb_add = ice_fdb_add,
6781 	.ndo_fdb_del = ice_fdb_del,
6782 #ifdef CONFIG_RFS_ACCEL
6783 	.ndo_rx_flow_steer = ice_rx_flow_steer,
6784 #endif
6785 	.ndo_tx_timeout = ice_tx_timeout,
6786 	.ndo_bpf = ice_xdp,
6787 	.ndo_xdp_xmit = ice_xdp_xmit,
6788 	.ndo_xsk_wakeup = ice_xsk_wakeup,
6789 	.ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
6790 	.ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
6791 };
6792