xref: /linux/drivers/net/ethernet/intel/ice/ice_main.c (revision 442bc81bd344dc52c37d8f80b854cc6da062b2d0)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018-2023, Intel Corporation. */
3 
4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <generated/utsrelease.h>
9 #include <linux/crash_dump.h>
10 #include "ice.h"
11 #include "ice_base.h"
12 #include "ice_lib.h"
13 #include "ice_fltr.h"
14 #include "ice_dcb_lib.h"
15 #include "ice_dcb_nl.h"
16 #include "devlink/devlink.h"
17 #include "devlink/port.h"
18 #include "ice_sf_eth.h"
19 #include "ice_hwmon.h"
20 /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
21  * ice tracepoint functions. This must be done exactly once across the
22  * ice driver.
23  */
24 #define CREATE_TRACE_POINTS
25 #include "ice_trace.h"
26 #include "ice_eswitch.h"
27 #include "ice_tc_lib.h"
28 #include "ice_vsi_vlan_ops.h"
29 #include <net/xdp_sock_drv.h>
30 
31 #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
32 static const char ice_driver_string[] = DRV_SUMMARY;
33 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
34 
35 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
36 #define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
37 #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
38 
39 MODULE_DESCRIPTION(DRV_SUMMARY);
40 MODULE_IMPORT_NS("LIBIE");
41 MODULE_LICENSE("GPL v2");
42 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
43 
44 static int debug = -1;
45 module_param(debug, int, 0644);
46 #ifndef CONFIG_DYNAMIC_DEBUG
47 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
48 #else
49 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
50 #endif /* !CONFIG_DYNAMIC_DEBUG */
51 
52 DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
53 EXPORT_SYMBOL(ice_xdp_locking_key);
54 
55 /**
56  * ice_hw_to_dev - Get device pointer from the hardware structure
57  * @hw: pointer to the device HW structure
58  *
59  * Used to access the device pointer from compilation units which can't easily
60  * include the definition of struct ice_pf without leading to circular header
61  * dependencies.
62  */
ice_hw_to_dev(struct ice_hw * hw)63 struct device *ice_hw_to_dev(struct ice_hw *hw)
64 {
65 	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
66 
67 	return &pf->pdev->dev;
68 }
69 
70 static struct workqueue_struct *ice_wq;
71 struct workqueue_struct *ice_lag_wq;
72 static const struct net_device_ops ice_netdev_safe_mode_ops;
73 static const struct net_device_ops ice_netdev_ops;
74 
75 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
76 
77 static void ice_vsi_release_all(struct ice_pf *pf);
78 
79 static int ice_rebuild_channels(struct ice_pf *pf);
80 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
81 
82 static int
83 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
84 		     void *cb_priv, enum tc_setup_type type, void *type_data,
85 		     void *data,
86 		     void (*cleanup)(struct flow_block_cb *block_cb));
87 
netif_is_ice(const struct net_device * dev)88 bool netif_is_ice(const struct net_device *dev)
89 {
90 	return dev && (dev->netdev_ops == &ice_netdev_ops ||
91 		       dev->netdev_ops == &ice_netdev_safe_mode_ops);
92 }
93 
94 /**
95  * ice_get_tx_pending - returns number of Tx descriptors not processed
96  * @ring: the ring of descriptors
97  */
ice_get_tx_pending(struct ice_tx_ring * ring)98 static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
99 {
100 	u16 head, tail;
101 
102 	head = ring->next_to_clean;
103 	tail = ring->next_to_use;
104 
105 	if (head != tail)
106 		return (head < tail) ?
107 			tail - head : (tail + ring->count - head);
108 	return 0;
109 }
110 
111 /**
112  * ice_check_for_hang_subtask - check for and recover hung queues
113  * @pf: pointer to PF struct
114  */
ice_check_for_hang_subtask(struct ice_pf * pf)115 static void ice_check_for_hang_subtask(struct ice_pf *pf)
116 {
117 	struct ice_vsi *vsi = NULL;
118 	struct ice_hw *hw;
119 	unsigned int i;
120 	int packets;
121 	u32 v;
122 
123 	ice_for_each_vsi(pf, v)
124 		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
125 			vsi = pf->vsi[v];
126 			break;
127 		}
128 
129 	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
130 		return;
131 
132 	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
133 		return;
134 
135 	hw = &vsi->back->hw;
136 
137 	ice_for_each_txq(vsi, i) {
138 		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
139 		struct ice_ring_stats *ring_stats;
140 
141 		if (!tx_ring)
142 			continue;
143 		if (ice_ring_ch_enabled(tx_ring))
144 			continue;
145 
146 		ring_stats = tx_ring->ring_stats;
147 		if (!ring_stats)
148 			continue;
149 
150 		if (tx_ring->desc) {
151 			/* If packet counter has not changed the queue is
152 			 * likely stalled, so force an interrupt for this
153 			 * queue.
154 			 *
155 			 * prev_pkt would be negative if there was no
156 			 * pending work.
157 			 */
158 			packets = ring_stats->stats.pkts & INT_MAX;
159 			if (ring_stats->tx_stats.prev_pkt == packets) {
160 				/* Trigger sw interrupt to revive the queue */
161 				ice_trigger_sw_intr(hw, tx_ring->q_vector);
162 				continue;
163 			}
164 
165 			/* Memory barrier between read of packet count and call
166 			 * to ice_get_tx_pending()
167 			 */
168 			smp_rmb();
169 			ring_stats->tx_stats.prev_pkt =
170 			    ice_get_tx_pending(tx_ring) ? packets : -1;
171 		}
172 	}
173 }
174 
175 /**
176  * ice_init_mac_fltr - Set initial MAC filters
177  * @pf: board private structure
178  *
179  * Set initial set of MAC filters for PF VSI; configure filters for permanent
180  * address and broadcast address. If an error is encountered, netdevice will be
181  * unregistered.
182  */
ice_init_mac_fltr(struct ice_pf * pf)183 static int ice_init_mac_fltr(struct ice_pf *pf)
184 {
185 	struct ice_vsi *vsi;
186 	u8 *perm_addr;
187 
188 	vsi = ice_get_main_vsi(pf);
189 	if (!vsi)
190 		return -EINVAL;
191 
192 	perm_addr = vsi->port_info->mac.perm_addr;
193 	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
194 }
195 
196 /**
197  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
198  * @netdev: the net device on which the sync is happening
199  * @addr: MAC address to sync
200  *
201  * This is a callback function which is called by the in kernel device sync
202  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
203  * populates the tmp_sync_list, which is later used by ice_add_mac to add the
204  * MAC filters from the hardware.
205  */
ice_add_mac_to_sync_list(struct net_device * netdev,const u8 * addr)206 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
207 {
208 	struct ice_netdev_priv *np = netdev_priv(netdev);
209 	struct ice_vsi *vsi = np->vsi;
210 
211 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
212 				     ICE_FWD_TO_VSI))
213 		return -EINVAL;
214 
215 	return 0;
216 }
217 
218 /**
219  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
220  * @netdev: the net device on which the unsync is happening
221  * @addr: MAC address to unsync
222  *
223  * This is a callback function which is called by the in kernel device unsync
224  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
225  * populates the tmp_unsync_list, which is later used by ice_remove_mac to
226  * delete the MAC filters from the hardware.
227  */
ice_add_mac_to_unsync_list(struct net_device * netdev,const u8 * addr)228 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
229 {
230 	struct ice_netdev_priv *np = netdev_priv(netdev);
231 	struct ice_vsi *vsi = np->vsi;
232 
233 	/* Under some circumstances, we might receive a request to delete our
234 	 * own device address from our uc list. Because we store the device
235 	 * address in the VSI's MAC filter list, we need to ignore such
236 	 * requests and not delete our device address from this list.
237 	 */
238 	if (ether_addr_equal(addr, netdev->dev_addr))
239 		return 0;
240 
241 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
242 				     ICE_FWD_TO_VSI))
243 		return -EINVAL;
244 
245 	return 0;
246 }
247 
248 /**
249  * ice_vsi_fltr_changed - check if filter state changed
250  * @vsi: VSI to be checked
251  *
252  * returns true if filter state has changed, false otherwise.
253  */
ice_vsi_fltr_changed(struct ice_vsi * vsi)254 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
255 {
256 	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
257 	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
258 }
259 
260 /**
261  * ice_set_promisc - Enable promiscuous mode for a given PF
262  * @vsi: the VSI being configured
263  * @promisc_m: mask of promiscuous config bits
264  *
265  */
ice_set_promisc(struct ice_vsi * vsi,u8 promisc_m)266 static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
267 {
268 	int status;
269 
270 	if (vsi->type != ICE_VSI_PF)
271 		return 0;
272 
273 	if (ice_vsi_has_non_zero_vlans(vsi)) {
274 		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
275 		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
276 						       promisc_m);
277 	} else {
278 		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
279 						  promisc_m, 0);
280 	}
281 	if (status && status != -EEXIST)
282 		return status;
283 
284 	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
285 		   vsi->vsi_num, promisc_m);
286 	return 0;
287 }
288 
289 /**
290  * ice_clear_promisc - Disable promiscuous mode for a given PF
291  * @vsi: the VSI being configured
292  * @promisc_m: mask of promiscuous config bits
293  *
294  */
ice_clear_promisc(struct ice_vsi * vsi,u8 promisc_m)295 static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
296 {
297 	int status;
298 
299 	if (vsi->type != ICE_VSI_PF)
300 		return 0;
301 
302 	if (ice_vsi_has_non_zero_vlans(vsi)) {
303 		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
304 		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
305 							 promisc_m);
306 	} else {
307 		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
308 						    promisc_m, 0);
309 	}
310 
311 	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
312 		   vsi->vsi_num, promisc_m);
313 	return status;
314 }
315 
316 /**
317  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
318  * @vsi: ptr to the VSI
319  *
320  * Push any outstanding VSI filter changes through the AdminQ.
321  */
ice_vsi_sync_fltr(struct ice_vsi * vsi)322 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
323 {
324 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
325 	struct device *dev = ice_pf_to_dev(vsi->back);
326 	struct net_device *netdev = vsi->netdev;
327 	bool promisc_forced_on = false;
328 	struct ice_pf *pf = vsi->back;
329 	struct ice_hw *hw = &pf->hw;
330 	u32 changed_flags = 0;
331 	int err;
332 
333 	if (!vsi->netdev)
334 		return -EINVAL;
335 
336 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
337 		usleep_range(1000, 2000);
338 
339 	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
340 	vsi->current_netdev_flags = vsi->netdev->flags;
341 
342 	INIT_LIST_HEAD(&vsi->tmp_sync_list);
343 	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
344 
345 	if (ice_vsi_fltr_changed(vsi)) {
346 		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
347 		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
348 
349 		/* grab the netdev's addr_list_lock */
350 		netif_addr_lock_bh(netdev);
351 		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
352 			      ice_add_mac_to_unsync_list);
353 		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
354 			      ice_add_mac_to_unsync_list);
355 		/* our temp lists are populated. release lock */
356 		netif_addr_unlock_bh(netdev);
357 	}
358 
359 	/* Remove MAC addresses in the unsync list */
360 	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
361 	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
362 	if (err) {
363 		netdev_err(netdev, "Failed to delete MAC filters\n");
364 		/* if we failed because of alloc failures, just bail */
365 		if (err == -ENOMEM)
366 			goto out;
367 	}
368 
369 	/* Add MAC addresses in the sync list */
370 	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
371 	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
372 	/* If filter is added successfully or already exists, do not go into
373 	 * 'if' condition and report it as error. Instead continue processing
374 	 * rest of the function.
375 	 */
376 	if (err && err != -EEXIST) {
377 		netdev_err(netdev, "Failed to add MAC filters\n");
378 		/* If there is no more space for new umac filters, VSI
379 		 * should go into promiscuous mode. There should be some
380 		 * space reserved for promiscuous filters.
381 		 */
382 		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
383 		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
384 				      vsi->state)) {
385 			promisc_forced_on = true;
386 			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
387 				    vsi->vsi_num);
388 		} else {
389 			goto out;
390 		}
391 	}
392 	err = 0;
393 	/* check for changes in promiscuous modes */
394 	if (changed_flags & IFF_ALLMULTI) {
395 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
396 			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
397 			if (err) {
398 				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
399 				goto out_promisc;
400 			}
401 		} else {
402 			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
403 			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
404 			if (err) {
405 				vsi->current_netdev_flags |= IFF_ALLMULTI;
406 				goto out_promisc;
407 			}
408 		}
409 	}
410 
411 	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
412 	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
413 		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
414 		if (vsi->current_netdev_flags & IFF_PROMISC) {
415 			/* Apply Rx filter rule to get traffic from wire */
416 			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
417 				err = ice_set_dflt_vsi(vsi);
418 				if (err && err != -EEXIST) {
419 					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
420 						   err, vsi->vsi_num);
421 					vsi->current_netdev_flags &=
422 						~IFF_PROMISC;
423 					goto out_promisc;
424 				}
425 				err = 0;
426 				vlan_ops->dis_rx_filtering(vsi);
427 
428 				/* promiscuous mode implies allmulticast so
429 				 * that VSIs that are in promiscuous mode are
430 				 * subscribed to multicast packets coming to
431 				 * the port
432 				 */
433 				err = ice_set_promisc(vsi,
434 						      ICE_MCAST_PROMISC_BITS);
435 				if (err)
436 					goto out_promisc;
437 			}
438 		} else {
439 			/* Clear Rx filter to remove traffic from wire */
440 			if (ice_is_vsi_dflt_vsi(vsi)) {
441 				err = ice_clear_dflt_vsi(vsi);
442 				if (err) {
443 					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
444 						   err, vsi->vsi_num);
445 					vsi->current_netdev_flags |=
446 						IFF_PROMISC;
447 					goto out_promisc;
448 				}
449 				if (vsi->netdev->features &
450 				    NETIF_F_HW_VLAN_CTAG_FILTER)
451 					vlan_ops->ena_rx_filtering(vsi);
452 			}
453 
454 			/* disable allmulti here, but only if allmulti is not
455 			 * still enabled for the netdev
456 			 */
457 			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
458 				err = ice_clear_promisc(vsi,
459 							ICE_MCAST_PROMISC_BITS);
460 				if (err) {
461 					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
462 						   err, vsi->vsi_num);
463 				}
464 			}
465 		}
466 	}
467 	goto exit;
468 
469 out_promisc:
470 	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
471 	goto exit;
472 out:
473 	/* if something went wrong then set the changed flag so we try again */
474 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
475 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
476 exit:
477 	clear_bit(ICE_CFG_BUSY, vsi->state);
478 	return err;
479 }
480 
481 /**
482  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
483  * @pf: board private structure
484  */
ice_sync_fltr_subtask(struct ice_pf * pf)485 static void ice_sync_fltr_subtask(struct ice_pf *pf)
486 {
487 	int v;
488 
489 	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
490 		return;
491 
492 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
493 
494 	ice_for_each_vsi(pf, v)
495 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
496 		    ice_vsi_sync_fltr(pf->vsi[v])) {
497 			/* come back and try again later */
498 			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
499 			break;
500 		}
501 }
502 
503 /**
504  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
505  * @pf: the PF
506  * @locked: is the rtnl_lock already held
507  */
ice_pf_dis_all_vsi(struct ice_pf * pf,bool locked)508 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
509 {
510 	int node;
511 	int v;
512 
513 	ice_for_each_vsi(pf, v)
514 		if (pf->vsi[v])
515 			ice_dis_vsi(pf->vsi[v], locked);
516 
517 	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
518 		pf->pf_agg_node[node].num_vsis = 0;
519 
520 	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
521 		pf->vf_agg_node[node].num_vsis = 0;
522 }
523 
524 /**
525  * ice_prepare_for_reset - prep for reset
526  * @pf: board private structure
527  * @reset_type: reset type requested
528  *
529  * Inform or close all dependent features in prep for reset.
530  */
531 static void
ice_prepare_for_reset(struct ice_pf * pf,enum ice_reset_req reset_type)532 ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
533 {
534 	struct ice_hw *hw = &pf->hw;
535 	struct ice_vsi *vsi;
536 	struct ice_vf *vf;
537 	unsigned int bkt;
538 
539 	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
540 
541 	/* already prepared for reset */
542 	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
543 		return;
544 
545 	synchronize_irq(pf->oicr_irq.virq);
546 
547 	ice_unplug_aux_dev(pf);
548 
549 	/* Notify VFs of impending reset */
550 	if (ice_check_sq_alive(hw, &hw->mailboxq))
551 		ice_vc_notify_reset(pf);
552 
553 	/* Disable VFs until reset is completed */
554 	mutex_lock(&pf->vfs.table_lock);
555 	ice_for_each_vf(pf, bkt, vf)
556 		ice_set_vf_state_dis(vf);
557 	mutex_unlock(&pf->vfs.table_lock);
558 
559 	if (ice_is_eswitch_mode_switchdev(pf)) {
560 		rtnl_lock();
561 		ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge);
562 		rtnl_unlock();
563 	}
564 
565 	/* release ADQ specific HW and SW resources */
566 	vsi = ice_get_main_vsi(pf);
567 	if (!vsi)
568 		goto skip;
569 
570 	/* to be on safe side, reset orig_rss_size so that normal flow
571 	 * of deciding rss_size can take precedence
572 	 */
573 	vsi->orig_rss_size = 0;
574 
575 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
576 		if (reset_type == ICE_RESET_PFR) {
577 			vsi->old_ena_tc = vsi->all_enatc;
578 			vsi->old_numtc = vsi->all_numtc;
579 		} else {
580 			ice_remove_q_channels(vsi, true);
581 
582 			/* for other reset type, do not support channel rebuild
583 			 * hence reset needed info
584 			 */
585 			vsi->old_ena_tc = 0;
586 			vsi->all_enatc = 0;
587 			vsi->old_numtc = 0;
588 			vsi->all_numtc = 0;
589 			vsi->req_txq = 0;
590 			vsi->req_rxq = 0;
591 			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
592 			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
593 		}
594 	}
595 
596 	if (vsi->netdev)
597 		netif_device_detach(vsi->netdev);
598 skip:
599 
600 	/* clear SW filtering DB */
601 	ice_clear_hw_tbls(hw);
602 	/* disable the VSIs and their queues that are not already DOWN */
603 	set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
604 	ice_pf_dis_all_vsi(pf, false);
605 
606 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
607 		ice_ptp_prepare_for_reset(pf, reset_type);
608 
609 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
610 		ice_gnss_exit(pf);
611 
612 	if (hw->port_info)
613 		ice_sched_clear_port(hw->port_info);
614 
615 	ice_shutdown_all_ctrlq(hw, false);
616 
617 	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
618 }
619 
620 /**
621  * ice_do_reset - Initiate one of many types of resets
622  * @pf: board private structure
623  * @reset_type: reset type requested before this function was called.
624  */
ice_do_reset(struct ice_pf * pf,enum ice_reset_req reset_type)625 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
626 {
627 	struct device *dev = ice_pf_to_dev(pf);
628 	struct ice_hw *hw = &pf->hw;
629 
630 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
631 
632 	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
633 		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
634 		reset_type = ICE_RESET_CORER;
635 	}
636 
637 	ice_prepare_for_reset(pf, reset_type);
638 
639 	/* trigger the reset */
640 	if (ice_reset(hw, reset_type)) {
641 		dev_err(dev, "reset %d failed\n", reset_type);
642 		set_bit(ICE_RESET_FAILED, pf->state);
643 		clear_bit(ICE_RESET_OICR_RECV, pf->state);
644 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
645 		clear_bit(ICE_PFR_REQ, pf->state);
646 		clear_bit(ICE_CORER_REQ, pf->state);
647 		clear_bit(ICE_GLOBR_REQ, pf->state);
648 		wake_up(&pf->reset_wait_queue);
649 		return;
650 	}
651 
652 	/* PFR is a bit of a special case because it doesn't result in an OICR
653 	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
654 	 * associated state bits.
655 	 */
656 	if (reset_type == ICE_RESET_PFR) {
657 		pf->pfr_count++;
658 		ice_rebuild(pf, reset_type);
659 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
660 		clear_bit(ICE_PFR_REQ, pf->state);
661 		wake_up(&pf->reset_wait_queue);
662 		ice_reset_all_vfs(pf);
663 	}
664 }
665 
666 /**
667  * ice_reset_subtask - Set up for resetting the device and driver
668  * @pf: board private structure
669  */
ice_reset_subtask(struct ice_pf * pf)670 static void ice_reset_subtask(struct ice_pf *pf)
671 {
672 	enum ice_reset_req reset_type = ICE_RESET_INVAL;
673 
674 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
675 	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
676 	 * of reset is pending and sets bits in pf->state indicating the reset
677 	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set
678 	 * prepare for pending reset if not already (for PF software-initiated
679 	 * global resets the software should already be prepared for it as
680 	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
681 	 * by firmware or software on other PFs, that bit is not set so prepare
682 	 * for the reset now), poll for reset done, rebuild and return.
683 	 */
684 	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
685 		/* Perform the largest reset requested */
686 		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
687 			reset_type = ICE_RESET_CORER;
688 		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
689 			reset_type = ICE_RESET_GLOBR;
690 		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
691 			reset_type = ICE_RESET_EMPR;
692 		/* return if no valid reset type requested */
693 		if (reset_type == ICE_RESET_INVAL)
694 			return;
695 		ice_prepare_for_reset(pf, reset_type);
696 
697 		/* make sure we are ready to rebuild */
698 		if (ice_check_reset(&pf->hw)) {
699 			set_bit(ICE_RESET_FAILED, pf->state);
700 		} else {
701 			/* done with reset. start rebuild */
702 			pf->hw.reset_ongoing = false;
703 			ice_rebuild(pf, reset_type);
704 			/* clear bit to resume normal operations, but
705 			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
706 			 */
707 			clear_bit(ICE_RESET_OICR_RECV, pf->state);
708 			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
709 			clear_bit(ICE_PFR_REQ, pf->state);
710 			clear_bit(ICE_CORER_REQ, pf->state);
711 			clear_bit(ICE_GLOBR_REQ, pf->state);
712 			wake_up(&pf->reset_wait_queue);
713 			ice_reset_all_vfs(pf);
714 		}
715 
716 		return;
717 	}
718 
719 	/* No pending resets to finish processing. Check for new resets */
720 	if (test_bit(ICE_PFR_REQ, pf->state)) {
721 		reset_type = ICE_RESET_PFR;
722 		if (pf->lag && pf->lag->bonded) {
723 			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
724 			reset_type = ICE_RESET_CORER;
725 		}
726 	}
727 	if (test_bit(ICE_CORER_REQ, pf->state))
728 		reset_type = ICE_RESET_CORER;
729 	if (test_bit(ICE_GLOBR_REQ, pf->state))
730 		reset_type = ICE_RESET_GLOBR;
731 	/* If no valid reset type requested just return */
732 	if (reset_type == ICE_RESET_INVAL)
733 		return;
734 
735 	/* reset if not already down or busy */
736 	if (!test_bit(ICE_DOWN, pf->state) &&
737 	    !test_bit(ICE_CFG_BUSY, pf->state)) {
738 		ice_do_reset(pf, reset_type);
739 	}
740 }
741 
742 /**
743  * ice_print_topo_conflict - print topology conflict message
744  * @vsi: the VSI whose topology status is being checked
745  */
ice_print_topo_conflict(struct ice_vsi * vsi)746 static void ice_print_topo_conflict(struct ice_vsi *vsi)
747 {
748 	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
749 	case ICE_AQ_LINK_TOPO_CONFLICT:
750 	case ICE_AQ_LINK_MEDIA_CONFLICT:
751 	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
752 	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
753 	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
754 		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
755 		break;
756 	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
757 		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
758 			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
759 		else
760 			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
761 		break;
762 	default:
763 		break;
764 	}
765 }
766 
767 /**
768  * ice_print_link_msg - print link up or down message
769  * @vsi: the VSI whose link status is being queried
770  * @isup: boolean for if the link is now up or down
771  */
ice_print_link_msg(struct ice_vsi * vsi,bool isup)772 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
773 {
774 	struct ice_aqc_get_phy_caps_data *caps;
775 	const char *an_advertised;
776 	const char *fec_req;
777 	const char *speed;
778 	const char *fec;
779 	const char *fc;
780 	const char *an;
781 	int status;
782 
783 	if (!vsi)
784 		return;
785 
786 	if (vsi->current_isup == isup)
787 		return;
788 
789 	vsi->current_isup = isup;
790 
791 	if (!isup) {
792 		netdev_info(vsi->netdev, "NIC Link is Down\n");
793 		return;
794 	}
795 
796 	switch (vsi->port_info->phy.link_info.link_speed) {
797 	case ICE_AQ_LINK_SPEED_200GB:
798 		speed = "200 G";
799 		break;
800 	case ICE_AQ_LINK_SPEED_100GB:
801 		speed = "100 G";
802 		break;
803 	case ICE_AQ_LINK_SPEED_50GB:
804 		speed = "50 G";
805 		break;
806 	case ICE_AQ_LINK_SPEED_40GB:
807 		speed = "40 G";
808 		break;
809 	case ICE_AQ_LINK_SPEED_25GB:
810 		speed = "25 G";
811 		break;
812 	case ICE_AQ_LINK_SPEED_20GB:
813 		speed = "20 G";
814 		break;
815 	case ICE_AQ_LINK_SPEED_10GB:
816 		speed = "10 G";
817 		break;
818 	case ICE_AQ_LINK_SPEED_5GB:
819 		speed = "5 G";
820 		break;
821 	case ICE_AQ_LINK_SPEED_2500MB:
822 		speed = "2.5 G";
823 		break;
824 	case ICE_AQ_LINK_SPEED_1000MB:
825 		speed = "1 G";
826 		break;
827 	case ICE_AQ_LINK_SPEED_100MB:
828 		speed = "100 M";
829 		break;
830 	default:
831 		speed = "Unknown ";
832 		break;
833 	}
834 
835 	switch (vsi->port_info->fc.current_mode) {
836 	case ICE_FC_FULL:
837 		fc = "Rx/Tx";
838 		break;
839 	case ICE_FC_TX_PAUSE:
840 		fc = "Tx";
841 		break;
842 	case ICE_FC_RX_PAUSE:
843 		fc = "Rx";
844 		break;
845 	case ICE_FC_NONE:
846 		fc = "None";
847 		break;
848 	default:
849 		fc = "Unknown";
850 		break;
851 	}
852 
853 	/* Get FEC mode based on negotiated link info */
854 	switch (vsi->port_info->phy.link_info.fec_info) {
855 	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
856 	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
857 		fec = "RS-FEC";
858 		break;
859 	case ICE_AQ_LINK_25G_KR_FEC_EN:
860 		fec = "FC-FEC/BASE-R";
861 		break;
862 	default:
863 		fec = "NONE";
864 		break;
865 	}
866 
867 	/* check if autoneg completed, might be false due to not supported */
868 	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
869 		an = "True";
870 	else
871 		an = "False";
872 
873 	/* Get FEC mode requested based on PHY caps last SW configuration */
874 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
875 	if (!caps) {
876 		fec_req = "Unknown";
877 		an_advertised = "Unknown";
878 		goto done;
879 	}
880 
881 	status = ice_aq_get_phy_caps(vsi->port_info, false,
882 				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
883 	if (status)
884 		netdev_info(vsi->netdev, "Get phy capability failed.\n");
885 
886 	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
887 
888 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
889 	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
890 		fec_req = "RS-FEC";
891 	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
892 		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
893 		fec_req = "FC-FEC/BASE-R";
894 	else
895 		fec_req = "NONE";
896 
897 	kfree(caps);
898 
899 done:
900 	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
901 		    speed, fec_req, fec, an_advertised, an, fc);
902 	ice_print_topo_conflict(vsi);
903 }
904 
905 /**
906  * ice_vsi_link_event - update the VSI's netdev
907  * @vsi: the VSI on which the link event occurred
908  * @link_up: whether or not the VSI needs to be set up or down
909  */
ice_vsi_link_event(struct ice_vsi * vsi,bool link_up)910 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
911 {
912 	if (!vsi)
913 		return;
914 
915 	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
916 		return;
917 
918 	if (vsi->type == ICE_VSI_PF) {
919 		if (link_up == netif_carrier_ok(vsi->netdev))
920 			return;
921 
922 		if (link_up) {
923 			netif_carrier_on(vsi->netdev);
924 			netif_tx_wake_all_queues(vsi->netdev);
925 		} else {
926 			netif_carrier_off(vsi->netdev);
927 			netif_tx_stop_all_queues(vsi->netdev);
928 		}
929 	}
930 }
931 
932 /**
933  * ice_set_dflt_mib - send a default config MIB to the FW
934  * @pf: private PF struct
935  *
936  * This function sends a default configuration MIB to the FW.
937  *
938  * If this function errors out at any point, the driver is still able to
939  * function.  The main impact is that LFC may not operate as expected.
940  * Therefore an error state in this function should be treated with a DBG
941  * message and continue on with driver rebuild/reenable.
942  */
ice_set_dflt_mib(struct ice_pf * pf)943 static void ice_set_dflt_mib(struct ice_pf *pf)
944 {
945 	struct device *dev = ice_pf_to_dev(pf);
946 	u8 mib_type, *buf, *lldpmib = NULL;
947 	u16 len, typelen, offset = 0;
948 	struct ice_lldp_org_tlv *tlv;
949 	struct ice_hw *hw = &pf->hw;
950 	u32 ouisubtype;
951 
952 	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
953 	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
954 	if (!lldpmib) {
955 		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
956 			__func__);
957 		return;
958 	}
959 
960 	/* Add ETS CFG TLV */
961 	tlv = (struct ice_lldp_org_tlv *)lldpmib;
962 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
963 		   ICE_IEEE_ETS_TLV_LEN);
964 	tlv->typelen = htons(typelen);
965 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
966 		      ICE_IEEE_SUBTYPE_ETS_CFG);
967 	tlv->ouisubtype = htonl(ouisubtype);
968 
969 	buf = tlv->tlvinfo;
970 	buf[0] = 0;
971 
972 	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
973 	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
974 	 * Octets 13 - 20 are TSA values - leave as zeros
975 	 */
976 	buf[5] = 0x64;
977 	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
978 	offset += len + 2;
979 	tlv = (struct ice_lldp_org_tlv *)
980 		((char *)tlv + sizeof(tlv->typelen) + len);
981 
982 	/* Add ETS REC TLV */
983 	buf = tlv->tlvinfo;
984 	tlv->typelen = htons(typelen);
985 
986 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
987 		      ICE_IEEE_SUBTYPE_ETS_REC);
988 	tlv->ouisubtype = htonl(ouisubtype);
989 
990 	/* First octet of buf is reserved
991 	 * Octets 1 - 4 map UP to TC - all UPs map to zero
992 	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
993 	 * Octets 13 - 20 are TSA value - leave as zeros
994 	 */
995 	buf[5] = 0x64;
996 	offset += len + 2;
997 	tlv = (struct ice_lldp_org_tlv *)
998 		((char *)tlv + sizeof(tlv->typelen) + len);
999 
1000 	/* Add PFC CFG TLV */
1001 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
1002 		   ICE_IEEE_PFC_TLV_LEN);
1003 	tlv->typelen = htons(typelen);
1004 
1005 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
1006 		      ICE_IEEE_SUBTYPE_PFC_CFG);
1007 	tlv->ouisubtype = htonl(ouisubtype);
1008 
1009 	/* Octet 1 left as all zeros - PFC disabled */
1010 	buf[0] = 0x08;
1011 	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
1012 	offset += len + 2;
1013 
1014 	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
1015 		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
1016 
1017 	kfree(lldpmib);
1018 }
1019 
1020 /**
1021  * ice_check_phy_fw_load - check if PHY FW load failed
1022  * @pf: pointer to PF struct
1023  * @link_cfg_err: bitmap from the link info structure
1024  *
1025  * check if external PHY FW load failed and print an error message if it did
1026  */
ice_check_phy_fw_load(struct ice_pf * pf,u8 link_cfg_err)1027 static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
1028 {
1029 	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
1030 		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1031 		return;
1032 	}
1033 
1034 	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
1035 		return;
1036 
1037 	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
1038 		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
1039 		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1040 	}
1041 }
1042 
1043 /**
1044  * ice_check_module_power
1045  * @pf: pointer to PF struct
1046  * @link_cfg_err: bitmap from the link info structure
1047  *
1048  * check module power level returned by a previous call to aq_get_link_info
1049  * and print error messages if module power level is not supported
1050  */
ice_check_module_power(struct ice_pf * pf,u8 link_cfg_err)1051 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
1052 {
1053 	/* if module power level is supported, clear the flag */
1054 	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
1055 			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
1056 		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1057 		return;
1058 	}
1059 
1060 	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
1061 	 * above block didn't clear this bit, there's nothing to do
1062 	 */
1063 	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
1064 		return;
1065 
1066 	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
1067 		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
1068 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1069 	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
1070 		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
1071 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1072 	}
1073 }
1074 
1075 /**
1076  * ice_check_link_cfg_err - check if link configuration failed
1077  * @pf: pointer to the PF struct
1078  * @link_cfg_err: bitmap from the link info structure
1079  *
1080  * print if any link configuration failure happens due to the value in the
1081  * link_cfg_err parameter in the link info structure
1082  */
ice_check_link_cfg_err(struct ice_pf * pf,u8 link_cfg_err)1083 static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
1084 {
1085 	ice_check_module_power(pf, link_cfg_err);
1086 	ice_check_phy_fw_load(pf, link_cfg_err);
1087 }
1088 
1089 /**
1090  * ice_link_event - process the link event
1091  * @pf: PF that the link event is associated with
1092  * @pi: port_info for the port that the link event is associated with
1093  * @link_up: true if the physical link is up and false if it is down
1094  * @link_speed: current link speed received from the link event
1095  *
1096  * Returns 0 on success and negative on failure
1097  */
1098 static int
ice_link_event(struct ice_pf * pf,struct ice_port_info * pi,bool link_up,u16 link_speed)1099 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
1100 	       u16 link_speed)
1101 {
1102 	struct device *dev = ice_pf_to_dev(pf);
1103 	struct ice_phy_info *phy_info;
1104 	struct ice_vsi *vsi;
1105 	u16 old_link_speed;
1106 	bool old_link;
1107 	int status;
1108 
1109 	phy_info = &pi->phy;
1110 	phy_info->link_info_old = phy_info->link_info;
1111 
1112 	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
1113 	old_link_speed = phy_info->link_info_old.link_speed;
1114 
1115 	/* update the link info structures and re-enable link events,
1116 	 * don't bail on failure due to other book keeping needed
1117 	 */
1118 	status = ice_update_link_info(pi);
1119 	if (status)
1120 		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
1121 			pi->lport, status,
1122 			ice_aq_str(pi->hw->adminq.sq_last_status));
1123 
1124 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
1125 
1126 	/* Check if the link state is up after updating link info, and treat
1127 	 * this event as an UP event since the link is actually UP now.
1128 	 */
1129 	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
1130 		link_up = true;
1131 
1132 	vsi = ice_get_main_vsi(pf);
1133 	if (!vsi || !vsi->port_info)
1134 		return -EINVAL;
1135 
1136 	/* turn off PHY if media was removed */
1137 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
1138 	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
1139 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
1140 		ice_set_link(vsi, false);
1141 	}
1142 
1143 	/* if the old link up/down and speed is the same as the new */
1144 	if (link_up == old_link && link_speed == old_link_speed)
1145 		return 0;
1146 
1147 	ice_ptp_link_change(pf, link_up);
1148 
1149 	if (ice_is_dcb_active(pf)) {
1150 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1151 			ice_dcb_rebuild(pf);
1152 	} else {
1153 		if (link_up)
1154 			ice_set_dflt_mib(pf);
1155 	}
1156 	ice_vsi_link_event(vsi, link_up);
1157 	ice_print_link_msg(vsi, link_up);
1158 
1159 	ice_vc_notify_link_state(pf);
1160 
1161 	return 0;
1162 }
1163 
1164 /**
1165  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
1166  * @pf: board private structure
1167  */
ice_watchdog_subtask(struct ice_pf * pf)1168 static void ice_watchdog_subtask(struct ice_pf *pf)
1169 {
1170 	int i;
1171 
1172 	/* if interface is down do nothing */
1173 	if (test_bit(ICE_DOWN, pf->state) ||
1174 	    test_bit(ICE_CFG_BUSY, pf->state))
1175 		return;
1176 
1177 	/* make sure we don't do these things too often */
1178 	if (time_before(jiffies,
1179 			pf->serv_tmr_prev + pf->serv_tmr_period))
1180 		return;
1181 
1182 	pf->serv_tmr_prev = jiffies;
1183 
1184 	/* Update the stats for active netdevs so the network stack
1185 	 * can look at updated numbers whenever it cares to
1186 	 */
1187 	ice_update_pf_stats(pf);
1188 	ice_for_each_vsi(pf, i)
1189 		if (pf->vsi[i] && pf->vsi[i]->netdev)
1190 			ice_update_vsi_stats(pf->vsi[i]);
1191 }
1192 
1193 /**
1194  * ice_init_link_events - enable/initialize link events
1195  * @pi: pointer to the port_info instance
1196  *
1197  * Returns -EIO on failure, 0 on success
1198  */
ice_init_link_events(struct ice_port_info * pi)1199 static int ice_init_link_events(struct ice_port_info *pi)
1200 {
1201 	u16 mask;
1202 
1203 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1204 		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
1205 		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
1206 
1207 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1208 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1209 			pi->lport);
1210 		return -EIO;
1211 	}
1212 
1213 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1214 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1215 			pi->lport);
1216 		return -EIO;
1217 	}
1218 
1219 	return 0;
1220 }
1221 
1222 /**
1223  * ice_handle_link_event - handle link event via ARQ
1224  * @pf: PF that the link event is associated with
1225  * @event: event structure containing link status info
1226  */
1227 static int
ice_handle_link_event(struct ice_pf * pf,struct ice_rq_event_info * event)1228 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1229 {
1230 	struct ice_aqc_get_link_status_data *link_data;
1231 	struct ice_port_info *port_info;
1232 	int status;
1233 
1234 	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1235 	port_info = pf->hw.port_info;
1236 	if (!port_info)
1237 		return -EINVAL;
1238 
1239 	status = ice_link_event(pf, port_info,
1240 				!!(link_data->link_info & ICE_AQ_LINK_UP),
1241 				le16_to_cpu(link_data->link_speed));
1242 	if (status)
1243 		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1244 			status);
1245 
1246 	return status;
1247 }
1248 
1249 /**
1250  * ice_get_fwlog_data - copy the FW log data from ARQ event
1251  * @pf: PF that the FW log event is associated with
1252  * @event: event structure containing FW log data
1253  */
1254 static void
ice_get_fwlog_data(struct ice_pf * pf,struct ice_rq_event_info * event)1255 ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
1256 {
1257 	struct ice_fwlog_data *fwlog;
1258 	struct ice_hw *hw = &pf->hw;
1259 
1260 	fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];
1261 
1262 	memset(fwlog->data, 0, PAGE_SIZE);
1263 	fwlog->data_size = le16_to_cpu(event->desc.datalen);
1264 
1265 	memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
1266 	ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);
1267 
1268 	if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
1269 		/* the rings are full so bump the head to create room */
1270 		ice_fwlog_ring_increment(&hw->fwlog_ring.head,
1271 					 hw->fwlog_ring.size);
1272 	}
1273 }
1274 
1275 /**
1276  * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
1277  * @pf: pointer to the PF private structure
1278  * @task: intermediate helper storage and identifier for waiting
1279  * @opcode: the opcode to wait for
1280  *
1281  * Prepares to wait for a specific AdminQ completion event on the ARQ for
1282  * a given PF. Actual wait would be done by a call to ice_aq_wait_for_event().
1283  *
1284  * Calls are separated to allow caller registering for event before sending
1285  * the command, which mitigates a race between registering and FW responding.
1286  *
1287  * To obtain only the descriptor contents, pass an task->event with null
1288  * msg_buf. If the complete data buffer is desired, allocate the
1289  * task->event.msg_buf with enough space ahead of time.
1290  */
ice_aq_prep_for_event(struct ice_pf * pf,struct ice_aq_task * task,u16 opcode)1291 void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
1292 			   u16 opcode)
1293 {
1294 	INIT_HLIST_NODE(&task->entry);
1295 	task->opcode = opcode;
1296 	task->state = ICE_AQ_TASK_WAITING;
1297 
1298 	spin_lock_bh(&pf->aq_wait_lock);
1299 	hlist_add_head(&task->entry, &pf->aq_wait_list);
1300 	spin_unlock_bh(&pf->aq_wait_lock);
1301 }
1302 
1303 /**
1304  * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1305  * @pf: pointer to the PF private structure
1306  * @task: ptr prepared by ice_aq_prep_for_event()
1307  * @timeout: how long to wait, in jiffies
1308  *
1309  * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1310  * current thread will be put to sleep until the specified event occurs or
1311  * until the given timeout is reached.
1312  *
1313  * Returns: zero on success, or a negative error code on failure.
1314  */
ice_aq_wait_for_event(struct ice_pf * pf,struct ice_aq_task * task,unsigned long timeout)1315 int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
1316 			  unsigned long timeout)
1317 {
1318 	enum ice_aq_task_state *state = &task->state;
1319 	struct device *dev = ice_pf_to_dev(pf);
1320 	unsigned long start = jiffies;
1321 	long ret;
1322 	int err;
1323 
1324 	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
1325 					       *state != ICE_AQ_TASK_WAITING,
1326 					       timeout);
1327 	switch (*state) {
1328 	case ICE_AQ_TASK_NOT_PREPARED:
1329 		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
1330 		err = -EINVAL;
1331 		break;
1332 	case ICE_AQ_TASK_WAITING:
1333 		err = ret < 0 ? ret : -ETIMEDOUT;
1334 		break;
1335 	case ICE_AQ_TASK_CANCELED:
1336 		err = ret < 0 ? ret : -ECANCELED;
1337 		break;
1338 	case ICE_AQ_TASK_COMPLETE:
1339 		err = ret < 0 ? ret : 0;
1340 		break;
1341 	default:
1342 		WARN(1, "Unexpected AdminQ wait task state %u", *state);
1343 		err = -EINVAL;
1344 		break;
1345 	}
1346 
1347 	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1348 		jiffies_to_msecs(jiffies - start),
1349 		jiffies_to_msecs(timeout),
1350 		task->opcode);
1351 
1352 	spin_lock_bh(&pf->aq_wait_lock);
1353 	hlist_del(&task->entry);
1354 	spin_unlock_bh(&pf->aq_wait_lock);
1355 
1356 	return err;
1357 }
1358 
1359 /**
1360  * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1361  * @pf: pointer to the PF private structure
1362  * @opcode: the opcode of the event
1363  * @event: the event to check
1364  *
1365  * Loops over the current list of pending threads waiting for an AdminQ event.
1366  * For each matching task, copy the contents of the event into the task
1367  * structure and wake up the thread.
1368  *
1369  * If multiple threads wait for the same opcode, they will all be woken up.
1370  *
1371  * Note that event->msg_buf will only be duplicated if the event has a buffer
1372  * with enough space already allocated. Otherwise, only the descriptor and
1373  * message length will be copied.
1374  *
1375  * Returns: true if an event was found, false otherwise
1376  */
ice_aq_check_events(struct ice_pf * pf,u16 opcode,struct ice_rq_event_info * event)1377 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1378 				struct ice_rq_event_info *event)
1379 {
1380 	struct ice_rq_event_info *task_ev;
1381 	struct ice_aq_task *task;
1382 	bool found = false;
1383 
1384 	spin_lock_bh(&pf->aq_wait_lock);
1385 	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1386 		if (task->state != ICE_AQ_TASK_WAITING)
1387 			continue;
1388 		if (task->opcode != opcode)
1389 			continue;
1390 
1391 		task_ev = &task->event;
1392 		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
1393 		task_ev->msg_len = event->msg_len;
1394 
1395 		/* Only copy the data buffer if a destination was set */
1396 		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
1397 			memcpy(task_ev->msg_buf, event->msg_buf,
1398 			       event->buf_len);
1399 			task_ev->buf_len = event->buf_len;
1400 		}
1401 
1402 		task->state = ICE_AQ_TASK_COMPLETE;
1403 		found = true;
1404 	}
1405 	spin_unlock_bh(&pf->aq_wait_lock);
1406 
1407 	if (found)
1408 		wake_up(&pf->aq_wait_queue);
1409 }
1410 
1411 /**
1412  * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1413  * @pf: the PF private structure
1414  *
1415  * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1416  * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1417  */
ice_aq_cancel_waiting_tasks(struct ice_pf * pf)1418 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1419 {
1420 	struct ice_aq_task *task;
1421 
1422 	spin_lock_bh(&pf->aq_wait_lock);
1423 	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1424 		task->state = ICE_AQ_TASK_CANCELED;
1425 	spin_unlock_bh(&pf->aq_wait_lock);
1426 
1427 	wake_up(&pf->aq_wait_queue);
1428 }
1429 
1430 #define ICE_MBX_OVERFLOW_WATERMARK 64
1431 
1432 /**
1433  * __ice_clean_ctrlq - helper function to clean controlq rings
1434  * @pf: ptr to struct ice_pf
1435  * @q_type: specific Control queue type
1436  */
__ice_clean_ctrlq(struct ice_pf * pf,enum ice_ctl_q q_type)1437 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1438 {
1439 	struct device *dev = ice_pf_to_dev(pf);
1440 	struct ice_rq_event_info event;
1441 	struct ice_hw *hw = &pf->hw;
1442 	struct ice_ctl_q_info *cq;
1443 	u16 pending, i = 0;
1444 	const char *qtype;
1445 	u32 oldval, val;
1446 
1447 	/* Do not clean control queue if/when PF reset fails */
1448 	if (test_bit(ICE_RESET_FAILED, pf->state))
1449 		return 0;
1450 
1451 	switch (q_type) {
1452 	case ICE_CTL_Q_ADMIN:
1453 		cq = &hw->adminq;
1454 		qtype = "Admin";
1455 		break;
1456 	case ICE_CTL_Q_SB:
1457 		cq = &hw->sbq;
1458 		qtype = "Sideband";
1459 		break;
1460 	case ICE_CTL_Q_MAILBOX:
1461 		cq = &hw->mailboxq;
1462 		qtype = "Mailbox";
1463 		/* we are going to try to detect a malicious VF, so set the
1464 		 * state to begin detection
1465 		 */
1466 		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1467 		break;
1468 	default:
1469 		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1470 		return 0;
1471 	}
1472 
1473 	/* check for error indications - PF_xx_AxQLEN register layout for
1474 	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1475 	 */
1476 	val = rd32(hw, cq->rq.len);
1477 	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1478 		   PF_FW_ARQLEN_ARQCRIT_M)) {
1479 		oldval = val;
1480 		if (val & PF_FW_ARQLEN_ARQVFE_M)
1481 			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1482 				qtype);
1483 		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1484 			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1485 				qtype);
1486 		}
1487 		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1488 			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1489 				qtype);
1490 		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1491 			 PF_FW_ARQLEN_ARQCRIT_M);
1492 		if (oldval != val)
1493 			wr32(hw, cq->rq.len, val);
1494 	}
1495 
1496 	val = rd32(hw, cq->sq.len);
1497 	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1498 		   PF_FW_ATQLEN_ATQCRIT_M)) {
1499 		oldval = val;
1500 		if (val & PF_FW_ATQLEN_ATQVFE_M)
1501 			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1502 				qtype);
1503 		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1504 			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1505 				qtype);
1506 		}
1507 		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1508 			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1509 				qtype);
1510 		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1511 			 PF_FW_ATQLEN_ATQCRIT_M);
1512 		if (oldval != val)
1513 			wr32(hw, cq->sq.len, val);
1514 	}
1515 
1516 	event.buf_len = cq->rq_buf_size;
1517 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1518 	if (!event.msg_buf)
1519 		return 0;
1520 
1521 	do {
1522 		struct ice_mbx_data data = {};
1523 		u16 opcode;
1524 		int ret;
1525 
1526 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1527 		if (ret == -EALREADY)
1528 			break;
1529 		if (ret) {
1530 			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
1531 				ret);
1532 			break;
1533 		}
1534 
1535 		opcode = le16_to_cpu(event.desc.opcode);
1536 
1537 		/* Notify any thread that might be waiting for this event */
1538 		ice_aq_check_events(pf, opcode, &event);
1539 
1540 		switch (opcode) {
1541 		case ice_aqc_opc_get_link_status:
1542 			if (ice_handle_link_event(pf, &event))
1543 				dev_err(dev, "Could not handle link event\n");
1544 			break;
1545 		case ice_aqc_opc_event_lan_overflow:
1546 			ice_vf_lan_overflow_event(pf, &event);
1547 			break;
1548 		case ice_mbx_opc_send_msg_to_pf:
1549 			if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) {
1550 				ice_vc_process_vf_msg(pf, &event, NULL);
1551 				ice_mbx_vf_dec_trig_e830(hw, &event);
1552 			} else {
1553 				u16 val = hw->mailboxq.num_rq_entries;
1554 
1555 				data.max_num_msgs_mbx = val;
1556 				val = ICE_MBX_OVERFLOW_WATERMARK;
1557 				data.async_watermark_val = val;
1558 				data.num_msg_proc = i;
1559 				data.num_pending_arq = pending;
1560 
1561 				ice_vc_process_vf_msg(pf, &event, &data);
1562 			}
1563 			break;
1564 		case ice_aqc_opc_fw_logs_event:
1565 			ice_get_fwlog_data(pf, &event);
1566 			break;
1567 		case ice_aqc_opc_lldp_set_mib_change:
1568 			ice_dcb_process_lldp_set_mib_change(pf, &event);
1569 			break;
1570 		case ice_aqc_opc_get_health_status:
1571 			ice_process_health_status_event(pf, &event);
1572 			break;
1573 		default:
1574 			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1575 				qtype, opcode);
1576 			break;
1577 		}
1578 	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1579 
1580 	kfree(event.msg_buf);
1581 
1582 	return pending && (i == ICE_DFLT_IRQ_WORK);
1583 }
1584 
1585 /**
1586  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1587  * @hw: pointer to hardware info
1588  * @cq: control queue information
1589  *
1590  * returns true if there are pending messages in a queue, false if there aren't
1591  */
ice_ctrlq_pending(struct ice_hw * hw,struct ice_ctl_q_info * cq)1592 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1593 {
1594 	u16 ntu;
1595 
1596 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1597 	return cq->rq.next_to_clean != ntu;
1598 }
1599 
1600 /**
1601  * ice_clean_adminq_subtask - clean the AdminQ rings
1602  * @pf: board private structure
1603  */
ice_clean_adminq_subtask(struct ice_pf * pf)1604 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1605 {
1606 	struct ice_hw *hw = &pf->hw;
1607 
1608 	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1609 		return;
1610 
1611 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1612 		return;
1613 
1614 	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1615 
1616 	/* There might be a situation where new messages arrive to a control
1617 	 * queue between processing the last message and clearing the
1618 	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1619 	 * ice_ctrlq_pending) and process new messages if any.
1620 	 */
1621 	if (ice_ctrlq_pending(hw, &hw->adminq))
1622 		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1623 
1624 	ice_flush(hw);
1625 }
1626 
1627 /**
1628  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1629  * @pf: board private structure
1630  */
ice_clean_mailboxq_subtask(struct ice_pf * pf)1631 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1632 {
1633 	struct ice_hw *hw = &pf->hw;
1634 
1635 	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1636 		return;
1637 
1638 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1639 		return;
1640 
1641 	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1642 
1643 	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1644 		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1645 
1646 	ice_flush(hw);
1647 }
1648 
1649 /**
1650  * ice_clean_sbq_subtask - clean the Sideband Queue rings
1651  * @pf: board private structure
1652  */
ice_clean_sbq_subtask(struct ice_pf * pf)1653 static void ice_clean_sbq_subtask(struct ice_pf *pf)
1654 {
1655 	struct ice_hw *hw = &pf->hw;
1656 
1657 	/* if mac_type is not generic, sideband is not supported
1658 	 * and there's nothing to do here
1659 	 */
1660 	if (!ice_is_generic_mac(hw)) {
1661 		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1662 		return;
1663 	}
1664 
1665 	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1666 		return;
1667 
1668 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1669 		return;
1670 
1671 	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1672 
1673 	if (ice_ctrlq_pending(hw, &hw->sbq))
1674 		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1675 
1676 	ice_flush(hw);
1677 }
1678 
1679 /**
1680  * ice_service_task_schedule - schedule the service task to wake up
1681  * @pf: board private structure
1682  *
1683  * If not already scheduled, this puts the task into the work queue.
1684  */
ice_service_task_schedule(struct ice_pf * pf)1685 void ice_service_task_schedule(struct ice_pf *pf)
1686 {
1687 	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1688 	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1689 	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1690 		queue_work(ice_wq, &pf->serv_task);
1691 }
1692 
1693 /**
1694  * ice_service_task_complete - finish up the service task
1695  * @pf: board private structure
1696  */
ice_service_task_complete(struct ice_pf * pf)1697 static void ice_service_task_complete(struct ice_pf *pf)
1698 {
1699 	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1700 
1701 	/* force memory (pf->state) to sync before next service task */
1702 	smp_mb__before_atomic();
1703 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1704 }
1705 
1706 /**
1707  * ice_service_task_stop - stop service task and cancel works
1708  * @pf: board private structure
1709  *
1710  * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1711  * 1 otherwise.
1712  */
ice_service_task_stop(struct ice_pf * pf)1713 static int ice_service_task_stop(struct ice_pf *pf)
1714 {
1715 	int ret;
1716 
1717 	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1718 
1719 	if (pf->serv_tmr.function)
1720 		del_timer_sync(&pf->serv_tmr);
1721 	if (pf->serv_task.func)
1722 		cancel_work_sync(&pf->serv_task);
1723 
1724 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1725 	return ret;
1726 }
1727 
1728 /**
1729  * ice_service_task_restart - restart service task and schedule works
1730  * @pf: board private structure
1731  *
1732  * This function is needed for suspend and resume works (e.g WoL scenario)
1733  */
ice_service_task_restart(struct ice_pf * pf)1734 static void ice_service_task_restart(struct ice_pf *pf)
1735 {
1736 	clear_bit(ICE_SERVICE_DIS, pf->state);
1737 	ice_service_task_schedule(pf);
1738 }
1739 
1740 /**
1741  * ice_service_timer - timer callback to schedule service task
1742  * @t: pointer to timer_list
1743  */
ice_service_timer(struct timer_list * t)1744 static void ice_service_timer(struct timer_list *t)
1745 {
1746 	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1747 
1748 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1749 	ice_service_task_schedule(pf);
1750 }
1751 
1752 /**
1753  * ice_mdd_maybe_reset_vf - reset VF after MDD event
1754  * @pf: pointer to the PF structure
1755  * @vf: pointer to the VF structure
1756  * @reset_vf_tx: whether Tx MDD has occurred
1757  * @reset_vf_rx: whether Rx MDD has occurred
1758  *
1759  * Since the queue can get stuck on VF MDD events, the PF can be configured to
1760  * automatically reset the VF by enabling the private ethtool flag
1761  * mdd-auto-reset-vf.
1762  */
ice_mdd_maybe_reset_vf(struct ice_pf * pf,struct ice_vf * vf,bool reset_vf_tx,bool reset_vf_rx)1763 static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf,
1764 				   bool reset_vf_tx, bool reset_vf_rx)
1765 {
1766 	struct device *dev = ice_pf_to_dev(pf);
1767 
1768 	if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
1769 		return;
1770 
1771 	/* VF MDD event counters will be cleared by reset, so print the event
1772 	 * prior to reset.
1773 	 */
1774 	if (reset_vf_tx)
1775 		ice_print_vf_tx_mdd_event(vf);
1776 
1777 	if (reset_vf_rx)
1778 		ice_print_vf_rx_mdd_event(vf);
1779 
1780 	dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n",
1781 		 pf->hw.pf_id, vf->vf_id);
1782 	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
1783 }
1784 
1785 /**
1786  * ice_handle_mdd_event - handle malicious driver detect event
1787  * @pf: pointer to the PF structure
1788  *
1789  * Called from service task. OICR interrupt handler indicates MDD event.
1790  * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1791  * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1792  * disable the queue, the PF can be configured to reset the VF using ethtool
1793  * private flag mdd-auto-reset-vf.
1794  */
ice_handle_mdd_event(struct ice_pf * pf)1795 static void ice_handle_mdd_event(struct ice_pf *pf)
1796 {
1797 	struct device *dev = ice_pf_to_dev(pf);
1798 	struct ice_hw *hw = &pf->hw;
1799 	struct ice_vf *vf;
1800 	unsigned int bkt;
1801 	u32 reg;
1802 
1803 	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1804 		/* Since the VF MDD event logging is rate limited, check if
1805 		 * there are pending MDD events.
1806 		 */
1807 		ice_print_vfs_mdd_events(pf);
1808 		return;
1809 	}
1810 
1811 	/* find what triggered an MDD event */
1812 	reg = rd32(hw, GL_MDET_TX_PQM);
1813 	if (reg & GL_MDET_TX_PQM_VALID_M) {
1814 		u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
1815 		u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
1816 		u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
1817 		u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);
1818 
1819 		if (netif_msg_tx_err(pf))
1820 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1821 				 event, queue, pf_num, vf_num);
1822 		ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num,
1823 				     event, queue);
1824 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1825 	}
1826 
1827 	reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
1828 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1829 		u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg);
1830 		u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg);
1831 		u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg);
1832 		u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg);
1833 
1834 		if (netif_msg_tx_err(pf))
1835 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1836 				 event, queue, pf_num, vf_num);
1837 		ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num,
1838 				     event, queue);
1839 		wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
1840 	}
1841 
1842 	reg = rd32(hw, GL_MDET_RX);
1843 	if (reg & GL_MDET_RX_VALID_M) {
1844 		u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg);
1845 		u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg);
1846 		u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg);
1847 		u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg);
1848 
1849 		if (netif_msg_rx_err(pf))
1850 			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1851 				 event, queue, pf_num, vf_num);
1852 		ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event,
1853 				     queue);
1854 		wr32(hw, GL_MDET_RX, 0xffffffff);
1855 	}
1856 
1857 	/* check to see if this PF caused an MDD event */
1858 	reg = rd32(hw, PF_MDET_TX_PQM);
1859 	if (reg & PF_MDET_TX_PQM_VALID_M) {
1860 		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1861 		if (netif_msg_tx_err(pf))
1862 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1863 	}
1864 
1865 	reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw));
1866 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1867 		wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff);
1868 		if (netif_msg_tx_err(pf))
1869 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1870 	}
1871 
1872 	reg = rd32(hw, PF_MDET_RX);
1873 	if (reg & PF_MDET_RX_VALID_M) {
1874 		wr32(hw, PF_MDET_RX, 0xFFFF);
1875 		if (netif_msg_rx_err(pf))
1876 			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1877 	}
1878 
1879 	/* Check to see if one of the VFs caused an MDD event, and then
1880 	 * increment counters and set print pending
1881 	 */
1882 	mutex_lock(&pf->vfs.table_lock);
1883 	ice_for_each_vf(pf, bkt, vf) {
1884 		bool reset_vf_tx = false, reset_vf_rx = false;
1885 
1886 		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
1887 		if (reg & VP_MDET_TX_PQM_VALID_M) {
1888 			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
1889 			vf->mdd_tx_events.count++;
1890 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1891 			if (netif_msg_tx_err(pf))
1892 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1893 					 vf->vf_id);
1894 
1895 			reset_vf_tx = true;
1896 		}
1897 
1898 		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
1899 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1900 			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
1901 			vf->mdd_tx_events.count++;
1902 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1903 			if (netif_msg_tx_err(pf))
1904 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1905 					 vf->vf_id);
1906 
1907 			reset_vf_tx = true;
1908 		}
1909 
1910 		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
1911 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1912 			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
1913 			vf->mdd_tx_events.count++;
1914 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1915 			if (netif_msg_tx_err(pf))
1916 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1917 					 vf->vf_id);
1918 
1919 			reset_vf_tx = true;
1920 		}
1921 
1922 		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1923 		if (reg & VP_MDET_RX_VALID_M) {
1924 			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1925 			vf->mdd_rx_events.count++;
1926 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1927 			if (netif_msg_rx_err(pf))
1928 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1929 					 vf->vf_id);
1930 
1931 			reset_vf_rx = true;
1932 		}
1933 
1934 		if (reset_vf_tx || reset_vf_rx)
1935 			ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx,
1936 					       reset_vf_rx);
1937 	}
1938 	mutex_unlock(&pf->vfs.table_lock);
1939 
1940 	ice_print_vfs_mdd_events(pf);
1941 }
1942 
1943 /**
1944  * ice_force_phys_link_state - Force the physical link state
1945  * @vsi: VSI to force the physical link state to up/down
1946  * @link_up: true/false indicates to set the physical link to up/down
1947  *
1948  * Force the physical link state by getting the current PHY capabilities from
1949  * hardware and setting the PHY config based on the determined capabilities. If
1950  * link changes a link event will be triggered because both the Enable Automatic
1951  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1952  *
1953  * Returns 0 on success, negative on failure
1954  */
ice_force_phys_link_state(struct ice_vsi * vsi,bool link_up)1955 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1956 {
1957 	struct ice_aqc_get_phy_caps_data *pcaps;
1958 	struct ice_aqc_set_phy_cfg_data *cfg;
1959 	struct ice_port_info *pi;
1960 	struct device *dev;
1961 	int retcode;
1962 
1963 	if (!vsi || !vsi->port_info || !vsi->back)
1964 		return -EINVAL;
1965 	if (vsi->type != ICE_VSI_PF)
1966 		return 0;
1967 
1968 	dev = ice_pf_to_dev(vsi->back);
1969 
1970 	pi = vsi->port_info;
1971 
1972 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1973 	if (!pcaps)
1974 		return -ENOMEM;
1975 
1976 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1977 				      NULL);
1978 	if (retcode) {
1979 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1980 			vsi->vsi_num, retcode);
1981 		retcode = -EIO;
1982 		goto out;
1983 	}
1984 
1985 	/* No change in link */
1986 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1987 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1988 		goto out;
1989 
1990 	/* Use the current user PHY configuration. The current user PHY
1991 	 * configuration is initialized during probe from PHY capabilities
1992 	 * software mode, and updated on set PHY configuration.
1993 	 */
1994 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1995 	if (!cfg) {
1996 		retcode = -ENOMEM;
1997 		goto out;
1998 	}
1999 
2000 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2001 	if (link_up)
2002 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
2003 	else
2004 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
2005 
2006 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
2007 	if (retcode) {
2008 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2009 			vsi->vsi_num, retcode);
2010 		retcode = -EIO;
2011 	}
2012 
2013 	kfree(cfg);
2014 out:
2015 	kfree(pcaps);
2016 	return retcode;
2017 }
2018 
2019 /**
2020  * ice_init_nvm_phy_type - Initialize the NVM PHY type
2021  * @pi: port info structure
2022  *
2023  * Initialize nvm_phy_type_[low|high] for link lenient mode support
2024  */
ice_init_nvm_phy_type(struct ice_port_info * pi)2025 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
2026 {
2027 	struct ice_aqc_get_phy_caps_data *pcaps;
2028 	struct ice_pf *pf = pi->hw->back;
2029 	int err;
2030 
2031 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2032 	if (!pcaps)
2033 		return -ENOMEM;
2034 
2035 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
2036 				  pcaps, NULL);
2037 
2038 	if (err) {
2039 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2040 		goto out;
2041 	}
2042 
2043 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
2044 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
2045 
2046 out:
2047 	kfree(pcaps);
2048 	return err;
2049 }
2050 
2051 /**
2052  * ice_init_link_dflt_override - Initialize link default override
2053  * @pi: port info structure
2054  *
2055  * Initialize link default override and PHY total port shutdown during probe
2056  */
ice_init_link_dflt_override(struct ice_port_info * pi)2057 static void ice_init_link_dflt_override(struct ice_port_info *pi)
2058 {
2059 	struct ice_link_default_override_tlv *ldo;
2060 	struct ice_pf *pf = pi->hw->back;
2061 
2062 	ldo = &pf->link_dflt_override;
2063 	if (ice_get_link_default_override(ldo, pi))
2064 		return;
2065 
2066 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
2067 		return;
2068 
2069 	/* Enable Total Port Shutdown (override/replace link-down-on-close
2070 	 * ethtool private flag) for ports with Port Disable bit set.
2071 	 */
2072 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
2073 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
2074 }
2075 
2076 /**
2077  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2078  * @pi: port info structure
2079  *
2080  * If default override is enabled, initialize the user PHY cfg speed and FEC
2081  * settings using the default override mask from the NVM.
2082  *
2083  * The PHY should only be configured with the default override settings the
2084  * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2085  * is used to indicate that the user PHY cfg default override is initialized
2086  * and the PHY has not been configured with the default override settings. The
2087  * state is set here, and cleared in ice_configure_phy the first time the PHY is
2088  * configured.
2089  *
2090  * This function should be called only if the FW doesn't support default
2091  * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2092  */
ice_init_phy_cfg_dflt_override(struct ice_port_info * pi)2093 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2094 {
2095 	struct ice_link_default_override_tlv *ldo;
2096 	struct ice_aqc_set_phy_cfg_data *cfg;
2097 	struct ice_phy_info *phy = &pi->phy;
2098 	struct ice_pf *pf = pi->hw->back;
2099 
2100 	ldo = &pf->link_dflt_override;
2101 
2102 	/* If link default override is enabled, use to mask NVM PHY capabilities
2103 	 * for speed and FEC default configuration.
2104 	 */
2105 	cfg = &phy->curr_user_phy_cfg;
2106 
2107 	if (ldo->phy_type_low || ldo->phy_type_high) {
2108 		cfg->phy_type_low = pf->nvm_phy_type_lo &
2109 				    cpu_to_le64(ldo->phy_type_low);
2110 		cfg->phy_type_high = pf->nvm_phy_type_hi &
2111 				     cpu_to_le64(ldo->phy_type_high);
2112 	}
2113 	cfg->link_fec_opt = ldo->fec_options;
2114 	phy->curr_user_fec_req = ICE_FEC_AUTO;
2115 
2116 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2117 }
2118 
2119 /**
2120  * ice_init_phy_user_cfg - Initialize the PHY user configuration
2121  * @pi: port info structure
2122  *
2123  * Initialize the current user PHY configuration, speed, FEC, and FC requested
2124  * mode to default. The PHY defaults are from get PHY capabilities topology
2125  * with media so call when media is first available. An error is returned if
2126  * called when media is not available. The PHY initialization completed state is
2127  * set here.
2128  *
2129  * These configurations are used when setting PHY
2130  * configuration. The user PHY configuration is updated on set PHY
2131  * configuration. Returns 0 on success, negative on failure
2132  */
ice_init_phy_user_cfg(struct ice_port_info * pi)2133 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2134 {
2135 	struct ice_aqc_get_phy_caps_data *pcaps;
2136 	struct ice_phy_info *phy = &pi->phy;
2137 	struct ice_pf *pf = pi->hw->back;
2138 	int err;
2139 
2140 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2141 		return -EIO;
2142 
2143 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2144 	if (!pcaps)
2145 		return -ENOMEM;
2146 
2147 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2148 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2149 					  pcaps, NULL);
2150 	else
2151 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2152 					  pcaps, NULL);
2153 	if (err) {
2154 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2155 		goto err_out;
2156 	}
2157 
2158 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2159 
2160 	/* check if lenient mode is supported and enabled */
2161 	if (ice_fw_supports_link_override(pi->hw) &&
2162 	    !(pcaps->module_compliance_enforcement &
2163 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2164 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2165 
2166 		/* if the FW supports default PHY configuration mode, then the driver
2167 		 * does not have to apply link override settings. If not,
2168 		 * initialize user PHY configuration with link override values
2169 		 */
2170 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2171 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2172 			ice_init_phy_cfg_dflt_override(pi);
2173 			goto out;
2174 		}
2175 	}
2176 
2177 	/* if link default override is not enabled, set user flow control and
2178 	 * FEC settings based on what get_phy_caps returned
2179 	 */
2180 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2181 						      pcaps->link_fec_options);
2182 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2183 
2184 out:
2185 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2186 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2187 err_out:
2188 	kfree(pcaps);
2189 	return err;
2190 }
2191 
2192 /**
2193  * ice_configure_phy - configure PHY
2194  * @vsi: VSI of PHY
2195  *
2196  * Set the PHY configuration. If the current PHY configuration is the same as
2197  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
2198  * configure the based get PHY capabilities for topology with media.
2199  */
ice_configure_phy(struct ice_vsi * vsi)2200 static int ice_configure_phy(struct ice_vsi *vsi)
2201 {
2202 	struct device *dev = ice_pf_to_dev(vsi->back);
2203 	struct ice_port_info *pi = vsi->port_info;
2204 	struct ice_aqc_get_phy_caps_data *pcaps;
2205 	struct ice_aqc_set_phy_cfg_data *cfg;
2206 	struct ice_phy_info *phy = &pi->phy;
2207 	struct ice_pf *pf = vsi->back;
2208 	int err;
2209 
2210 	/* Ensure we have media as we cannot configure a medialess port */
2211 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2212 		return -ENOMEDIUM;
2213 
2214 	ice_print_topo_conflict(vsi);
2215 
2216 	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2217 	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2218 		return -EPERM;
2219 
2220 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2221 		return ice_force_phys_link_state(vsi, true);
2222 
2223 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2224 	if (!pcaps)
2225 		return -ENOMEM;
2226 
2227 	/* Get current PHY config */
2228 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2229 				  NULL);
2230 	if (err) {
2231 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2232 			vsi->vsi_num, err);
2233 		goto done;
2234 	}
2235 
2236 	/* If PHY enable link is configured and configuration has not changed,
2237 	 * there's nothing to do
2238 	 */
2239 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2240 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2241 		goto done;
2242 
2243 	/* Use PHY topology as baseline for configuration */
2244 	memset(pcaps, 0, sizeof(*pcaps));
2245 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2246 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2247 					  pcaps, NULL);
2248 	else
2249 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2250 					  pcaps, NULL);
2251 	if (err) {
2252 		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2253 			vsi->vsi_num, err);
2254 		goto done;
2255 	}
2256 
2257 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2258 	if (!cfg) {
2259 		err = -ENOMEM;
2260 		goto done;
2261 	}
2262 
2263 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2264 
2265 	/* Speed - If default override pending, use curr_user_phy_cfg set in
2266 	 * ice_init_phy_user_cfg_ldo.
2267 	 */
2268 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2269 			       vsi->back->state)) {
2270 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2271 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2272 	} else {
2273 		u64 phy_low = 0, phy_high = 0;
2274 
2275 		ice_update_phy_type(&phy_low, &phy_high,
2276 				    pi->phy.curr_user_speed_req);
2277 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2278 		cfg->phy_type_high = pcaps->phy_type_high &
2279 				     cpu_to_le64(phy_high);
2280 	}
2281 
2282 	/* Can't provide what was requested; use PHY capabilities */
2283 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2284 		cfg->phy_type_low = pcaps->phy_type_low;
2285 		cfg->phy_type_high = pcaps->phy_type_high;
2286 	}
2287 
2288 	/* FEC */
2289 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2290 
2291 	/* Can't provide what was requested; use PHY capabilities */
2292 	if (cfg->link_fec_opt !=
2293 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2294 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2295 		cfg->link_fec_opt = pcaps->link_fec_options;
2296 	}
2297 
2298 	/* Flow Control - always supported; no need to check against
2299 	 * capabilities
2300 	 */
2301 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2302 
2303 	/* Enable link and link update */
2304 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2305 
2306 	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2307 	if (err)
2308 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2309 			vsi->vsi_num, err);
2310 
2311 	kfree(cfg);
2312 done:
2313 	kfree(pcaps);
2314 	return err;
2315 }
2316 
2317 /**
2318  * ice_check_media_subtask - Check for media
2319  * @pf: pointer to PF struct
2320  *
2321  * If media is available, then initialize PHY user configuration if it is not
2322  * been, and configure the PHY if the interface is up.
2323  */
ice_check_media_subtask(struct ice_pf * pf)2324 static void ice_check_media_subtask(struct ice_pf *pf)
2325 {
2326 	struct ice_port_info *pi;
2327 	struct ice_vsi *vsi;
2328 	int err;
2329 
2330 	/* No need to check for media if it's already present */
2331 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2332 		return;
2333 
2334 	vsi = ice_get_main_vsi(pf);
2335 	if (!vsi)
2336 		return;
2337 
2338 	/* Refresh link info and check if media is present */
2339 	pi = vsi->port_info;
2340 	err = ice_update_link_info(pi);
2341 	if (err)
2342 		return;
2343 
2344 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2345 
2346 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2347 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2348 			ice_init_phy_user_cfg(pi);
2349 
2350 		/* PHY settings are reset on media insertion, reconfigure
2351 		 * PHY to preserve settings.
2352 		 */
2353 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2354 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2355 			return;
2356 
2357 		err = ice_configure_phy(vsi);
2358 		if (!err)
2359 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2360 
2361 		/* A Link Status Event will be generated; the event handler
2362 		 * will complete bringing the interface up
2363 		 */
2364 	}
2365 }
2366 
ice_service_task_recovery_mode(struct work_struct * work)2367 static void ice_service_task_recovery_mode(struct work_struct *work)
2368 {
2369 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2370 
2371 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
2372 	ice_clean_adminq_subtask(pf);
2373 
2374 	ice_service_task_complete(pf);
2375 
2376 	mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100));
2377 }
2378 
2379 /**
2380  * ice_service_task - manage and run subtasks
2381  * @work: pointer to work_struct contained by the PF struct
2382  */
ice_service_task(struct work_struct * work)2383 static void ice_service_task(struct work_struct *work)
2384 {
2385 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2386 	unsigned long start_time = jiffies;
2387 
2388 	if (pf->health_reporters.tx_hang_buf.tx_ring) {
2389 		ice_report_tx_hang(pf);
2390 		pf->health_reporters.tx_hang_buf.tx_ring = NULL;
2391 	}
2392 
2393 	ice_reset_subtask(pf);
2394 
2395 	/* bail if a reset/recovery cycle is pending or rebuild failed */
2396 	if (ice_is_reset_in_progress(pf->state) ||
2397 	    test_bit(ICE_SUSPENDED, pf->state) ||
2398 	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2399 		ice_service_task_complete(pf);
2400 		return;
2401 	}
2402 
2403 	if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2404 		struct iidc_event *event;
2405 
2406 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2407 		if (event) {
2408 			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2409 			/* report the entire OICR value to AUX driver */
2410 			swap(event->reg, pf->oicr_err_reg);
2411 			ice_send_event_to_aux(pf, event);
2412 			kfree(event);
2413 		}
2414 	}
2415 
2416 	/* unplug aux dev per request, if an unplug request came in
2417 	 * while processing a plug request, this will handle it
2418 	 */
2419 	if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2420 		ice_unplug_aux_dev(pf);
2421 
2422 	/* Plug aux device per request */
2423 	if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2424 		ice_plug_aux_dev(pf);
2425 
2426 	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2427 		struct iidc_event *event;
2428 
2429 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2430 		if (event) {
2431 			set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2432 			ice_send_event_to_aux(pf, event);
2433 			kfree(event);
2434 		}
2435 	}
2436 
2437 	ice_clean_adminq_subtask(pf);
2438 	ice_check_media_subtask(pf);
2439 	ice_check_for_hang_subtask(pf);
2440 	ice_sync_fltr_subtask(pf);
2441 	ice_handle_mdd_event(pf);
2442 	ice_watchdog_subtask(pf);
2443 
2444 	if (ice_is_safe_mode(pf)) {
2445 		ice_service_task_complete(pf);
2446 		return;
2447 	}
2448 
2449 	ice_process_vflr_event(pf);
2450 	ice_clean_mailboxq_subtask(pf);
2451 	ice_clean_sbq_subtask(pf);
2452 	ice_sync_arfs_fltrs(pf);
2453 	ice_flush_fdir_ctx(pf);
2454 
2455 	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2456 	ice_service_task_complete(pf);
2457 
2458 	/* If the tasks have taken longer than one service timer period
2459 	 * or there is more work to be done, reset the service timer to
2460 	 * schedule the service task now.
2461 	 */
2462 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2463 	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2464 	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2465 	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2466 	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2467 	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2468 	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2469 		mod_timer(&pf->serv_tmr, jiffies);
2470 }
2471 
2472 /**
2473  * ice_set_ctrlq_len - helper function to set controlq length
2474  * @hw: pointer to the HW instance
2475  */
ice_set_ctrlq_len(struct ice_hw * hw)2476 static void ice_set_ctrlq_len(struct ice_hw *hw)
2477 {
2478 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2479 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2480 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2481 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2482 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2483 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2484 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2485 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2486 	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2487 	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2488 	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2489 	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2490 }
2491 
2492 /**
2493  * ice_schedule_reset - schedule a reset
2494  * @pf: board private structure
2495  * @reset: reset being requested
2496  */
ice_schedule_reset(struct ice_pf * pf,enum ice_reset_req reset)2497 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2498 {
2499 	struct device *dev = ice_pf_to_dev(pf);
2500 
2501 	/* bail out if earlier reset has failed */
2502 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2503 		dev_dbg(dev, "earlier reset has failed\n");
2504 		return -EIO;
2505 	}
2506 	/* bail if reset/recovery already in progress */
2507 	if (ice_is_reset_in_progress(pf->state)) {
2508 		dev_dbg(dev, "Reset already in progress\n");
2509 		return -EBUSY;
2510 	}
2511 
2512 	switch (reset) {
2513 	case ICE_RESET_PFR:
2514 		set_bit(ICE_PFR_REQ, pf->state);
2515 		break;
2516 	case ICE_RESET_CORER:
2517 		set_bit(ICE_CORER_REQ, pf->state);
2518 		break;
2519 	case ICE_RESET_GLOBR:
2520 		set_bit(ICE_GLOBR_REQ, pf->state);
2521 		break;
2522 	default:
2523 		return -EINVAL;
2524 	}
2525 
2526 	ice_service_task_schedule(pf);
2527 	return 0;
2528 }
2529 
2530 /**
2531  * ice_irq_affinity_notify - Callback for affinity changes
2532  * @notify: context as to what irq was changed
2533  * @mask: the new affinity mask
2534  *
2535  * This is a callback function used by the irq_set_affinity_notifier function
2536  * so that we may register to receive changes to the irq affinity masks.
2537  */
2538 static void
ice_irq_affinity_notify(struct irq_affinity_notify * notify,const cpumask_t * mask)2539 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2540 			const cpumask_t *mask)
2541 {
2542 	struct ice_q_vector *q_vector =
2543 		container_of(notify, struct ice_q_vector, affinity_notify);
2544 
2545 	cpumask_copy(&q_vector->affinity_mask, mask);
2546 }
2547 
2548 /**
2549  * ice_irq_affinity_release - Callback for affinity notifier release
2550  * @ref: internal core kernel usage
2551  *
2552  * This is a callback function used by the irq_set_affinity_notifier function
2553  * to inform the current notification subscriber that they will no longer
2554  * receive notifications.
2555  */
ice_irq_affinity_release(struct kref __always_unused * ref)2556 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2557 
2558 /**
2559  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2560  * @vsi: the VSI being configured
2561  */
ice_vsi_ena_irq(struct ice_vsi * vsi)2562 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2563 {
2564 	struct ice_hw *hw = &vsi->back->hw;
2565 	int i;
2566 
2567 	ice_for_each_q_vector(vsi, i)
2568 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2569 
2570 	ice_flush(hw);
2571 	return 0;
2572 }
2573 
2574 /**
2575  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2576  * @vsi: the VSI being configured
2577  * @basename: name for the vector
2578  */
ice_vsi_req_irq_msix(struct ice_vsi * vsi,char * basename)2579 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2580 {
2581 	int q_vectors = vsi->num_q_vectors;
2582 	struct ice_pf *pf = vsi->back;
2583 	struct device *dev;
2584 	int rx_int_idx = 0;
2585 	int tx_int_idx = 0;
2586 	int vector, err;
2587 	int irq_num;
2588 
2589 	dev = ice_pf_to_dev(pf);
2590 	for (vector = 0; vector < q_vectors; vector++) {
2591 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2592 
2593 		irq_num = q_vector->irq.virq;
2594 
2595 		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2596 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2597 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2598 			tx_int_idx++;
2599 		} else if (q_vector->rx.rx_ring) {
2600 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2601 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2602 		} else if (q_vector->tx.tx_ring) {
2603 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2604 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2605 		} else {
2606 			/* skip this unused q_vector */
2607 			continue;
2608 		}
2609 		if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2610 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2611 					       IRQF_SHARED, q_vector->name,
2612 					       q_vector);
2613 		else
2614 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2615 					       0, q_vector->name, q_vector);
2616 		if (err) {
2617 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2618 				   err);
2619 			goto free_q_irqs;
2620 		}
2621 
2622 		/* register for affinity change notifications */
2623 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2624 			struct irq_affinity_notify *affinity_notify;
2625 
2626 			affinity_notify = &q_vector->affinity_notify;
2627 			affinity_notify->notify = ice_irq_affinity_notify;
2628 			affinity_notify->release = ice_irq_affinity_release;
2629 			irq_set_affinity_notifier(irq_num, affinity_notify);
2630 		}
2631 
2632 		/* assign the mask for this irq */
2633 		irq_update_affinity_hint(irq_num, &q_vector->affinity_mask);
2634 	}
2635 
2636 	err = ice_set_cpu_rx_rmap(vsi);
2637 	if (err) {
2638 		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2639 			   vsi->vsi_num, ERR_PTR(err));
2640 		goto free_q_irqs;
2641 	}
2642 
2643 	vsi->irqs_ready = true;
2644 	return 0;
2645 
2646 free_q_irqs:
2647 	while (vector--) {
2648 		irq_num = vsi->q_vectors[vector]->irq.virq;
2649 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2650 			irq_set_affinity_notifier(irq_num, NULL);
2651 		irq_update_affinity_hint(irq_num, NULL);
2652 		devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
2653 	}
2654 	return err;
2655 }
2656 
2657 /**
2658  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2659  * @vsi: VSI to setup Tx rings used by XDP
2660  *
2661  * Return 0 on success and negative value on error
2662  */
ice_xdp_alloc_setup_rings(struct ice_vsi * vsi)2663 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2664 {
2665 	struct device *dev = ice_pf_to_dev(vsi->back);
2666 	struct ice_tx_desc *tx_desc;
2667 	int i, j;
2668 
2669 	ice_for_each_xdp_txq(vsi, i) {
2670 		u16 xdp_q_idx = vsi->alloc_txq + i;
2671 		struct ice_ring_stats *ring_stats;
2672 		struct ice_tx_ring *xdp_ring;
2673 
2674 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2675 		if (!xdp_ring)
2676 			goto free_xdp_rings;
2677 
2678 		ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2679 		if (!ring_stats) {
2680 			ice_free_tx_ring(xdp_ring);
2681 			goto free_xdp_rings;
2682 		}
2683 
2684 		xdp_ring->ring_stats = ring_stats;
2685 		xdp_ring->q_index = xdp_q_idx;
2686 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2687 		xdp_ring->vsi = vsi;
2688 		xdp_ring->netdev = NULL;
2689 		xdp_ring->dev = dev;
2690 		xdp_ring->count = vsi->num_tx_desc;
2691 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2692 		if (ice_setup_tx_ring(xdp_ring))
2693 			goto free_xdp_rings;
2694 		ice_set_ring_xdp(xdp_ring);
2695 		spin_lock_init(&xdp_ring->tx_lock);
2696 		for (j = 0; j < xdp_ring->count; j++) {
2697 			tx_desc = ICE_TX_DESC(xdp_ring, j);
2698 			tx_desc->cmd_type_offset_bsz = 0;
2699 		}
2700 	}
2701 
2702 	return 0;
2703 
2704 free_xdp_rings:
2705 	for (; i >= 0; i--) {
2706 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2707 			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2708 			vsi->xdp_rings[i]->ring_stats = NULL;
2709 			ice_free_tx_ring(vsi->xdp_rings[i]);
2710 		}
2711 	}
2712 	return -ENOMEM;
2713 }
2714 
2715 /**
2716  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2717  * @vsi: VSI to set the bpf prog on
2718  * @prog: the bpf prog pointer
2719  */
ice_vsi_assign_bpf_prog(struct ice_vsi * vsi,struct bpf_prog * prog)2720 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2721 {
2722 	struct bpf_prog *old_prog;
2723 	int i;
2724 
2725 	old_prog = xchg(&vsi->xdp_prog, prog);
2726 	ice_for_each_rxq(vsi, i)
2727 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2728 
2729 	if (old_prog)
2730 		bpf_prog_put(old_prog);
2731 }
2732 
ice_xdp_ring_from_qid(struct ice_vsi * vsi,int qid)2733 static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
2734 {
2735 	struct ice_q_vector *q_vector;
2736 	struct ice_tx_ring *ring;
2737 
2738 	if (static_key_enabled(&ice_xdp_locking_key))
2739 		return vsi->xdp_rings[qid % vsi->num_xdp_txq];
2740 
2741 	q_vector = vsi->rx_rings[qid]->q_vector;
2742 	ice_for_each_tx_ring(ring, q_vector->tx)
2743 		if (ice_ring_is_xdp(ring))
2744 			return ring;
2745 
2746 	return NULL;
2747 }
2748 
2749 /**
2750  * ice_map_xdp_rings - Map XDP rings to interrupt vectors
2751  * @vsi: the VSI with XDP rings being configured
2752  *
2753  * Map XDP rings to interrupt vectors and perform the configuration steps
2754  * dependent on the mapping.
2755  */
ice_map_xdp_rings(struct ice_vsi * vsi)2756 void ice_map_xdp_rings(struct ice_vsi *vsi)
2757 {
2758 	int xdp_rings_rem = vsi->num_xdp_txq;
2759 	int v_idx, q_idx;
2760 
2761 	/* follow the logic from ice_vsi_map_rings_to_vectors */
2762 	ice_for_each_q_vector(vsi, v_idx) {
2763 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2764 		int xdp_rings_per_v, q_id, q_base;
2765 
2766 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2767 					       vsi->num_q_vectors - v_idx);
2768 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2769 
2770 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2771 			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2772 
2773 			xdp_ring->q_vector = q_vector;
2774 			xdp_ring->next = q_vector->tx.tx_ring;
2775 			q_vector->tx.tx_ring = xdp_ring;
2776 		}
2777 		xdp_rings_rem -= xdp_rings_per_v;
2778 	}
2779 
2780 	ice_for_each_rxq(vsi, q_idx) {
2781 		vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
2782 								       q_idx);
2783 		ice_tx_xsk_pool(vsi, q_idx);
2784 	}
2785 }
2786 
2787 /**
2788  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2789  * @vsi: VSI to bring up Tx rings used by XDP
2790  * @prog: bpf program that will be assigned to VSI
2791  * @cfg_type: create from scratch or restore the existing configuration
2792  *
2793  * Return 0 on success and negative value on error
2794  */
ice_prepare_xdp_rings(struct ice_vsi * vsi,struct bpf_prog * prog,enum ice_xdp_cfg cfg_type)2795 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
2796 			  enum ice_xdp_cfg cfg_type)
2797 {
2798 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2799 	struct ice_pf *pf = vsi->back;
2800 	struct ice_qs_cfg xdp_qs_cfg = {
2801 		.qs_mutex = &pf->avail_q_mutex,
2802 		.pf_map = pf->avail_txqs,
2803 		.pf_map_size = pf->max_pf_txqs,
2804 		.q_count = vsi->num_xdp_txq,
2805 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2806 		.vsi_map = vsi->txq_map,
2807 		.vsi_map_offset = vsi->alloc_txq,
2808 		.mapping_mode = ICE_VSI_MAP_CONTIG
2809 	};
2810 	struct device *dev;
2811 	int status, i;
2812 
2813 	dev = ice_pf_to_dev(pf);
2814 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2815 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2816 	if (!vsi->xdp_rings)
2817 		return -ENOMEM;
2818 
2819 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2820 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2821 		goto err_map_xdp;
2822 
2823 	if (static_key_enabled(&ice_xdp_locking_key))
2824 		netdev_warn(vsi->netdev,
2825 			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2826 
2827 	if (ice_xdp_alloc_setup_rings(vsi))
2828 		goto clear_xdp_rings;
2829 
2830 	/* omit the scheduler update if in reset path; XDP queues will be
2831 	 * taken into account at the end of ice_vsi_rebuild, where
2832 	 * ice_cfg_vsi_lan is being called
2833 	 */
2834 	if (cfg_type == ICE_XDP_CFG_PART)
2835 		return 0;
2836 
2837 	ice_map_xdp_rings(vsi);
2838 
2839 	/* tell the Tx scheduler that right now we have
2840 	 * additional queues
2841 	 */
2842 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2843 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2844 
2845 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2846 				 max_txqs);
2847 	if (status) {
2848 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2849 			status);
2850 		goto clear_xdp_rings;
2851 	}
2852 
2853 	/* assign the prog only when it's not already present on VSI;
2854 	 * this flow is a subject of both ethtool -L and ndo_bpf flows;
2855 	 * VSI rebuild that happens under ethtool -L can expose us to
2856 	 * the bpf_prog refcount issues as we would be swapping same
2857 	 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2858 	 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2859 	 * this is not harmful as dev_xdp_install bumps the refcount
2860 	 * before calling the op exposed by the driver;
2861 	 */
2862 	if (!ice_is_xdp_ena_vsi(vsi))
2863 		ice_vsi_assign_bpf_prog(vsi, prog);
2864 
2865 	return 0;
2866 clear_xdp_rings:
2867 	ice_for_each_xdp_txq(vsi, i)
2868 		if (vsi->xdp_rings[i]) {
2869 			kfree_rcu(vsi->xdp_rings[i], rcu);
2870 			vsi->xdp_rings[i] = NULL;
2871 		}
2872 
2873 err_map_xdp:
2874 	mutex_lock(&pf->avail_q_mutex);
2875 	ice_for_each_xdp_txq(vsi, i) {
2876 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2877 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2878 	}
2879 	mutex_unlock(&pf->avail_q_mutex);
2880 
2881 	devm_kfree(dev, vsi->xdp_rings);
2882 	return -ENOMEM;
2883 }
2884 
2885 /**
2886  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2887  * @vsi: VSI to remove XDP rings
2888  * @cfg_type: disable XDP permanently or allow it to be restored later
2889  *
2890  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2891  * resources
2892  */
ice_destroy_xdp_rings(struct ice_vsi * vsi,enum ice_xdp_cfg cfg_type)2893 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
2894 {
2895 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2896 	struct ice_pf *pf = vsi->back;
2897 	int i, v_idx;
2898 
2899 	/* q_vectors are freed in reset path so there's no point in detaching
2900 	 * rings
2901 	 */
2902 	if (cfg_type == ICE_XDP_CFG_PART)
2903 		goto free_qmap;
2904 
2905 	ice_for_each_q_vector(vsi, v_idx) {
2906 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2907 		struct ice_tx_ring *ring;
2908 
2909 		ice_for_each_tx_ring(ring, q_vector->tx)
2910 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2911 				break;
2912 
2913 		/* restore the value of last node prior to XDP setup */
2914 		q_vector->tx.tx_ring = ring;
2915 	}
2916 
2917 free_qmap:
2918 	mutex_lock(&pf->avail_q_mutex);
2919 	ice_for_each_xdp_txq(vsi, i) {
2920 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2921 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2922 	}
2923 	mutex_unlock(&pf->avail_q_mutex);
2924 
2925 	ice_for_each_xdp_txq(vsi, i)
2926 		if (vsi->xdp_rings[i]) {
2927 			if (vsi->xdp_rings[i]->desc) {
2928 				synchronize_rcu();
2929 				ice_free_tx_ring(vsi->xdp_rings[i]);
2930 			}
2931 			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2932 			vsi->xdp_rings[i]->ring_stats = NULL;
2933 			kfree_rcu(vsi->xdp_rings[i], rcu);
2934 			vsi->xdp_rings[i] = NULL;
2935 		}
2936 
2937 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2938 	vsi->xdp_rings = NULL;
2939 
2940 	if (static_key_enabled(&ice_xdp_locking_key))
2941 		static_branch_dec(&ice_xdp_locking_key);
2942 
2943 	if (cfg_type == ICE_XDP_CFG_PART)
2944 		return 0;
2945 
2946 	ice_vsi_assign_bpf_prog(vsi, NULL);
2947 
2948 	/* notify Tx scheduler that we destroyed XDP queues and bring
2949 	 * back the old number of child nodes
2950 	 */
2951 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2952 		max_txqs[i] = vsi->num_txq;
2953 
2954 	/* change number of XDP Tx queues to 0 */
2955 	vsi->num_xdp_txq = 0;
2956 
2957 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2958 			       max_txqs);
2959 }
2960 
2961 /**
2962  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2963  * @vsi: VSI to schedule napi on
2964  */
ice_vsi_rx_napi_schedule(struct ice_vsi * vsi)2965 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2966 {
2967 	int i;
2968 
2969 	ice_for_each_rxq(vsi, i) {
2970 		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2971 
2972 		if (READ_ONCE(rx_ring->xsk_pool))
2973 			napi_schedule(&rx_ring->q_vector->napi);
2974 	}
2975 }
2976 
2977 /**
2978  * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
2979  * @vsi: VSI to determine the count of XDP Tx qs
2980  *
2981  * returns 0 if Tx qs count is higher than at least half of CPU count,
2982  * -ENOMEM otherwise
2983  */
ice_vsi_determine_xdp_res(struct ice_vsi * vsi)2984 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2985 {
2986 	u16 avail = ice_get_avail_txq_count(vsi->back);
2987 	u16 cpus = num_possible_cpus();
2988 
2989 	if (avail < cpus / 2)
2990 		return -ENOMEM;
2991 
2992 	if (vsi->type == ICE_VSI_SF)
2993 		avail = vsi->alloc_txq;
2994 
2995 	vsi->num_xdp_txq = min_t(u16, avail, cpus);
2996 
2997 	if (vsi->num_xdp_txq < cpus)
2998 		static_branch_inc(&ice_xdp_locking_key);
2999 
3000 	return 0;
3001 }
3002 
3003 /**
3004  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
3005  * @vsi: Pointer to VSI structure
3006  */
ice_max_xdp_frame_size(struct ice_vsi * vsi)3007 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
3008 {
3009 	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
3010 		return ICE_RXBUF_1664;
3011 	else
3012 		return ICE_RXBUF_3072;
3013 }
3014 
3015 /**
3016  * ice_xdp_setup_prog - Add or remove XDP eBPF program
3017  * @vsi: VSI to setup XDP for
3018  * @prog: XDP program
3019  * @extack: netlink extended ack
3020  */
3021 static int
ice_xdp_setup_prog(struct ice_vsi * vsi,struct bpf_prog * prog,struct netlink_ext_ack * extack)3022 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
3023 		   struct netlink_ext_ack *extack)
3024 {
3025 	unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
3026 	int ret = 0, xdp_ring_err = 0;
3027 	bool if_running;
3028 
3029 	if (prog && !prog->aux->xdp_has_frags) {
3030 		if (frame_size > ice_max_xdp_frame_size(vsi)) {
3031 			NL_SET_ERR_MSG_MOD(extack,
3032 					   "MTU is too large for linear frames and XDP prog does not support frags");
3033 			return -EOPNOTSUPP;
3034 		}
3035 	}
3036 
3037 	/* hot swap progs and avoid toggling link */
3038 	if (ice_is_xdp_ena_vsi(vsi) == !!prog ||
3039 	    test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) {
3040 		ice_vsi_assign_bpf_prog(vsi, prog);
3041 		return 0;
3042 	}
3043 
3044 	if_running = netif_running(vsi->netdev) &&
3045 		     !test_and_set_bit(ICE_VSI_DOWN, vsi->state);
3046 
3047 	/* need to stop netdev while setting up the program for Rx rings */
3048 	if (if_running) {
3049 		ret = ice_down(vsi);
3050 		if (ret) {
3051 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
3052 			return ret;
3053 		}
3054 	}
3055 
3056 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
3057 		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
3058 		if (xdp_ring_err) {
3059 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
3060 		} else {
3061 			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
3062 							     ICE_XDP_CFG_FULL);
3063 			if (xdp_ring_err)
3064 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
3065 		}
3066 		xdp_features_set_redirect_target(vsi->netdev, true);
3067 		/* reallocate Rx queues that are used for zero-copy */
3068 		xdp_ring_err = ice_realloc_zc_buf(vsi, true);
3069 		if (xdp_ring_err)
3070 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
3071 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
3072 		xdp_features_clear_redirect_target(vsi->netdev);
3073 		xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
3074 		if (xdp_ring_err)
3075 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
3076 		/* reallocate Rx queues that were used for zero-copy */
3077 		xdp_ring_err = ice_realloc_zc_buf(vsi, false);
3078 		if (xdp_ring_err)
3079 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
3080 	}
3081 
3082 	if (if_running)
3083 		ret = ice_up(vsi);
3084 
3085 	if (!ret && prog)
3086 		ice_vsi_rx_napi_schedule(vsi);
3087 
3088 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
3089 }
3090 
3091 /**
3092  * ice_xdp_safe_mode - XDP handler for safe mode
3093  * @dev: netdevice
3094  * @xdp: XDP command
3095  */
ice_xdp_safe_mode(struct net_device __always_unused * dev,struct netdev_bpf * xdp)3096 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
3097 			     struct netdev_bpf *xdp)
3098 {
3099 	NL_SET_ERR_MSG_MOD(xdp->extack,
3100 			   "Please provide working DDP firmware package in order to use XDP\n"
3101 			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
3102 	return -EOPNOTSUPP;
3103 }
3104 
3105 /**
3106  * ice_xdp - implements XDP handler
3107  * @dev: netdevice
3108  * @xdp: XDP command
3109  */
ice_xdp(struct net_device * dev,struct netdev_bpf * xdp)3110 int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3111 {
3112 	struct ice_netdev_priv *np = netdev_priv(dev);
3113 	struct ice_vsi *vsi = np->vsi;
3114 	int ret;
3115 
3116 	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) {
3117 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI");
3118 		return -EINVAL;
3119 	}
3120 
3121 	mutex_lock(&vsi->xdp_state_lock);
3122 
3123 	switch (xdp->command) {
3124 	case XDP_SETUP_PROG:
3125 		ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3126 		break;
3127 	case XDP_SETUP_XSK_POOL:
3128 		ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);
3129 		break;
3130 	default:
3131 		ret = -EINVAL;
3132 	}
3133 
3134 	mutex_unlock(&vsi->xdp_state_lock);
3135 	return ret;
3136 }
3137 
3138 /**
3139  * ice_ena_misc_vector - enable the non-queue interrupts
3140  * @pf: board private structure
3141  */
ice_ena_misc_vector(struct ice_pf * pf)3142 static void ice_ena_misc_vector(struct ice_pf *pf)
3143 {
3144 	struct ice_hw *hw = &pf->hw;
3145 	u32 pf_intr_start_offset;
3146 	u32 val;
3147 
3148 	/* Disable anti-spoof detection interrupt to prevent spurious event
3149 	 * interrupts during a function reset. Anti-spoof functionally is
3150 	 * still supported.
3151 	 */
3152 	val = rd32(hw, GL_MDCK_TX_TDPU);
3153 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3154 	wr32(hw, GL_MDCK_TX_TDPU, val);
3155 
3156 	/* clear things first */
3157 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
3158 	rd32(hw, PFINT_OICR);		/* read to clear */
3159 
3160 	val = (PFINT_OICR_ECC_ERR_M |
3161 	       PFINT_OICR_MAL_DETECT_M |
3162 	       PFINT_OICR_GRST_M |
3163 	       PFINT_OICR_PCI_EXCEPTION_M |
3164 	       PFINT_OICR_VFLR_M |
3165 	       PFINT_OICR_HMC_ERR_M |
3166 	       PFINT_OICR_PE_PUSH_M |
3167 	       PFINT_OICR_PE_CRITERR_M);
3168 
3169 	wr32(hw, PFINT_OICR_ENA, val);
3170 
3171 	/* SW_ITR_IDX = 0, but don't change INTENA */
3172 	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3173 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3174 
3175 	if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3176 		return;
3177 	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3178 	wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3179 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3180 }
3181 
3182 /**
3183  * ice_ll_ts_intr - ll_ts interrupt handler
3184  * @irq: interrupt number
3185  * @data: pointer to a q_vector
3186  */
ice_ll_ts_intr(int __always_unused irq,void * data)3187 static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
3188 {
3189 	struct ice_pf *pf = data;
3190 	u32 pf_intr_start_offset;
3191 	struct ice_ptp_tx *tx;
3192 	unsigned long flags;
3193 	struct ice_hw *hw;
3194 	u32 val;
3195 	u8 idx;
3196 
3197 	hw = &pf->hw;
3198 	tx = &pf->ptp.port.tx;
3199 	spin_lock_irqsave(&tx->lock, flags);
3200 	ice_ptp_complete_tx_single_tstamp(tx);
3201 
3202 	idx = find_next_bit_wrap(tx->in_use, tx->len,
3203 				 tx->last_ll_ts_idx_read + 1);
3204 	if (idx != tx->len)
3205 		ice_ptp_req_tx_single_tstamp(tx, idx);
3206 	spin_unlock_irqrestore(&tx->lock, flags);
3207 
3208 	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
3209 	      (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
3210 	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3211 	wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3212 	     val);
3213 
3214 	return IRQ_HANDLED;
3215 }
3216 
3217 /**
3218  * ice_misc_intr - misc interrupt handler
3219  * @irq: interrupt number
3220  * @data: pointer to a q_vector
3221  */
ice_misc_intr(int __always_unused irq,void * data)3222 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3223 {
3224 	struct ice_pf *pf = (struct ice_pf *)data;
3225 	irqreturn_t ret = IRQ_HANDLED;
3226 	struct ice_hw *hw = &pf->hw;
3227 	struct device *dev;
3228 	u32 oicr, ena_mask;
3229 
3230 	dev = ice_pf_to_dev(pf);
3231 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3232 	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3233 	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3234 
3235 	oicr = rd32(hw, PFINT_OICR);
3236 	ena_mask = rd32(hw, PFINT_OICR_ENA);
3237 
3238 	if (oicr & PFINT_OICR_SWINT_M) {
3239 		ena_mask &= ~PFINT_OICR_SWINT_M;
3240 		pf->sw_int_count++;
3241 	}
3242 
3243 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
3244 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3245 		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3246 	}
3247 	if (oicr & PFINT_OICR_VFLR_M) {
3248 		/* disable any further VFLR event notifications */
3249 		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3250 			u32 reg = rd32(hw, PFINT_OICR_ENA);
3251 
3252 			reg &= ~PFINT_OICR_VFLR_M;
3253 			wr32(hw, PFINT_OICR_ENA, reg);
3254 		} else {
3255 			ena_mask &= ~PFINT_OICR_VFLR_M;
3256 			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3257 		}
3258 	}
3259 
3260 	if (oicr & PFINT_OICR_GRST_M) {
3261 		u32 reset;
3262 
3263 		/* we have a reset warning */
3264 		ena_mask &= ~PFINT_OICR_GRST_M;
3265 		reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M,
3266 				  rd32(hw, GLGEN_RSTAT));
3267 
3268 		if (reset == ICE_RESET_CORER)
3269 			pf->corer_count++;
3270 		else if (reset == ICE_RESET_GLOBR)
3271 			pf->globr_count++;
3272 		else if (reset == ICE_RESET_EMPR)
3273 			pf->empr_count++;
3274 		else
3275 			dev_dbg(dev, "Invalid reset type %d\n", reset);
3276 
3277 		/* If a reset cycle isn't already in progress, we set a bit in
3278 		 * pf->state so that the service task can start a reset/rebuild.
3279 		 */
3280 		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3281 			if (reset == ICE_RESET_CORER)
3282 				set_bit(ICE_CORER_RECV, pf->state);
3283 			else if (reset == ICE_RESET_GLOBR)
3284 				set_bit(ICE_GLOBR_RECV, pf->state);
3285 			else
3286 				set_bit(ICE_EMPR_RECV, pf->state);
3287 
3288 			/* There are couple of different bits at play here.
3289 			 * hw->reset_ongoing indicates whether the hardware is
3290 			 * in reset. This is set to true when a reset interrupt
3291 			 * is received and set back to false after the driver
3292 			 * has determined that the hardware is out of reset.
3293 			 *
3294 			 * ICE_RESET_OICR_RECV in pf->state indicates
3295 			 * that a post reset rebuild is required before the
3296 			 * driver is operational again. This is set above.
3297 			 *
3298 			 * As this is the start of the reset/rebuild cycle, set
3299 			 * both to indicate that.
3300 			 */
3301 			hw->reset_ongoing = true;
3302 		}
3303 	}
3304 
3305 	if (oicr & PFINT_OICR_TSYN_TX_M) {
3306 		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3307 		if (ice_pf_state_is_nominal(pf) &&
3308 		    pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
3309 			struct ice_ptp_tx *tx = &pf->ptp.port.tx;
3310 			unsigned long flags;
3311 			u8 idx;
3312 
3313 			spin_lock_irqsave(&tx->lock, flags);
3314 			idx = find_next_bit_wrap(tx->in_use, tx->len,
3315 						 tx->last_ll_ts_idx_read + 1);
3316 			if (idx != tx->len)
3317 				ice_ptp_req_tx_single_tstamp(tx, idx);
3318 			spin_unlock_irqrestore(&tx->lock, flags);
3319 		} else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
3320 			set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
3321 			ret = IRQ_WAKE_THREAD;
3322 		}
3323 	}
3324 
3325 	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3326 		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3327 		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3328 
3329 		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3330 
3331 		if (ice_pf_src_tmr_owned(pf)) {
3332 			/* Save EVENTs from GLTSYN register */
3333 			pf->ptp.ext_ts_irq |= gltsyn_stat &
3334 					      (GLTSYN_STAT_EVENT0_M |
3335 					       GLTSYN_STAT_EVENT1_M |
3336 					       GLTSYN_STAT_EVENT2_M);
3337 
3338 			ice_ptp_extts_event(pf);
3339 		}
3340 	}
3341 
3342 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3343 	if (oicr & ICE_AUX_CRIT_ERR) {
3344 		pf->oicr_err_reg |= oicr;
3345 		set_bit(ICE_AUX_ERR_PENDING, pf->state);
3346 		ena_mask &= ~ICE_AUX_CRIT_ERR;
3347 	}
3348 
3349 	/* Report any remaining unexpected interrupts */
3350 	oicr &= ena_mask;
3351 	if (oicr) {
3352 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3353 		/* If a critical error is pending there is no choice but to
3354 		 * reset the device.
3355 		 */
3356 		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3357 			    PFINT_OICR_ECC_ERR_M)) {
3358 			set_bit(ICE_PFR_REQ, pf->state);
3359 		}
3360 	}
3361 	ice_service_task_schedule(pf);
3362 	if (ret == IRQ_HANDLED)
3363 		ice_irq_dynamic_ena(hw, NULL, NULL);
3364 
3365 	return ret;
3366 }
3367 
3368 /**
3369  * ice_misc_intr_thread_fn - misc interrupt thread function
3370  * @irq: interrupt number
3371  * @data: pointer to a q_vector
3372  */
ice_misc_intr_thread_fn(int __always_unused irq,void * data)3373 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3374 {
3375 	struct ice_pf *pf = data;
3376 	struct ice_hw *hw;
3377 
3378 	hw = &pf->hw;
3379 
3380 	if (ice_is_reset_in_progress(pf->state))
3381 		goto skip_irq;
3382 
3383 	if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
3384 		/* Process outstanding Tx timestamps. If there is more work,
3385 		 * re-arm the interrupt to trigger again.
3386 		 */
3387 		if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
3388 			wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
3389 			ice_flush(hw);
3390 		}
3391 	}
3392 
3393 skip_irq:
3394 	ice_irq_dynamic_ena(hw, NULL, NULL);
3395 
3396 	return IRQ_HANDLED;
3397 }
3398 
3399 /**
3400  * ice_dis_ctrlq_interrupts - disable control queue interrupts
3401  * @hw: pointer to HW structure
3402  */
ice_dis_ctrlq_interrupts(struct ice_hw * hw)3403 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3404 {
3405 	/* disable Admin queue Interrupt causes */
3406 	wr32(hw, PFINT_FW_CTL,
3407 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3408 
3409 	/* disable Mailbox queue Interrupt causes */
3410 	wr32(hw, PFINT_MBX_CTL,
3411 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3412 
3413 	wr32(hw, PFINT_SB_CTL,
3414 	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3415 
3416 	/* disable Control queue Interrupt causes */
3417 	wr32(hw, PFINT_OICR_CTL,
3418 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3419 
3420 	ice_flush(hw);
3421 }
3422 
3423 /**
3424  * ice_free_irq_msix_ll_ts- Unroll ll_ts vector setup
3425  * @pf: board private structure
3426  */
ice_free_irq_msix_ll_ts(struct ice_pf * pf)3427 static void ice_free_irq_msix_ll_ts(struct ice_pf *pf)
3428 {
3429 	int irq_num = pf->ll_ts_irq.virq;
3430 
3431 	synchronize_irq(irq_num);
3432 	devm_free_irq(ice_pf_to_dev(pf), irq_num, pf);
3433 
3434 	ice_free_irq(pf, pf->ll_ts_irq);
3435 }
3436 
3437 /**
3438  * ice_free_irq_msix_misc - Unroll misc vector setup
3439  * @pf: board private structure
3440  */
ice_free_irq_msix_misc(struct ice_pf * pf)3441 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3442 {
3443 	int misc_irq_num = pf->oicr_irq.virq;
3444 	struct ice_hw *hw = &pf->hw;
3445 
3446 	ice_dis_ctrlq_interrupts(hw);
3447 
3448 	/* disable OICR interrupt */
3449 	wr32(hw, PFINT_OICR_ENA, 0);
3450 	ice_flush(hw);
3451 
3452 	synchronize_irq(misc_irq_num);
3453 	devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3454 
3455 	ice_free_irq(pf, pf->oicr_irq);
3456 	if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3457 		ice_free_irq_msix_ll_ts(pf);
3458 }
3459 
3460 /**
3461  * ice_ena_ctrlq_interrupts - enable control queue interrupts
3462  * @hw: pointer to HW structure
3463  * @reg_idx: HW vector index to associate the control queue interrupts with
3464  */
ice_ena_ctrlq_interrupts(struct ice_hw * hw,u16 reg_idx)3465 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3466 {
3467 	u32 val;
3468 
3469 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3470 	       PFINT_OICR_CTL_CAUSE_ENA_M);
3471 	wr32(hw, PFINT_OICR_CTL, val);
3472 
3473 	/* enable Admin queue Interrupt causes */
3474 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3475 	       PFINT_FW_CTL_CAUSE_ENA_M);
3476 	wr32(hw, PFINT_FW_CTL, val);
3477 
3478 	/* enable Mailbox queue Interrupt causes */
3479 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3480 	       PFINT_MBX_CTL_CAUSE_ENA_M);
3481 	wr32(hw, PFINT_MBX_CTL, val);
3482 
3483 	if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) {
3484 		/* enable Sideband queue Interrupt causes */
3485 		val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3486 		       PFINT_SB_CTL_CAUSE_ENA_M);
3487 		wr32(hw, PFINT_SB_CTL, val);
3488 	}
3489 
3490 	ice_flush(hw);
3491 }
3492 
3493 /**
3494  * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3495  * @pf: board private structure
3496  *
3497  * This sets up the handler for MSIX 0, which is used to manage the
3498  * non-queue interrupts, e.g. AdminQ and errors. This is not used
3499  * when in MSI or Legacy interrupt mode.
3500  */
ice_req_irq_msix_misc(struct ice_pf * pf)3501 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3502 {
3503 	struct device *dev = ice_pf_to_dev(pf);
3504 	struct ice_hw *hw = &pf->hw;
3505 	u32 pf_intr_start_offset;
3506 	struct msi_map irq;
3507 	int err = 0;
3508 
3509 	if (!pf->int_name[0])
3510 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3511 			 dev_driver_string(dev), dev_name(dev));
3512 
3513 	if (!pf->int_name_ll_ts[0])
3514 		snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1,
3515 			 "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev));
3516 	/* Do not request IRQ but do enable OICR interrupt since settings are
3517 	 * lost during reset. Note that this function is called only during
3518 	 * rebuild path and not while reset is in progress.
3519 	 */
3520 	if (ice_is_reset_in_progress(pf->state))
3521 		goto skip_req_irq;
3522 
3523 	/* reserve one vector in irq_tracker for misc interrupts */
3524 	irq = ice_alloc_irq(pf, false);
3525 	if (irq.index < 0)
3526 		return irq.index;
3527 
3528 	pf->oicr_irq = irq;
3529 	err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3530 					ice_misc_intr_thread_fn, 0,
3531 					pf->int_name, pf);
3532 	if (err) {
3533 		dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3534 			pf->int_name, err);
3535 		ice_free_irq(pf, pf->oicr_irq);
3536 		return err;
3537 	}
3538 
3539 	/* reserve one vector in irq_tracker for ll_ts interrupt */
3540 	if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3541 		goto skip_req_irq;
3542 
3543 	irq = ice_alloc_irq(pf, false);
3544 	if (irq.index < 0)
3545 		return irq.index;
3546 
3547 	pf->ll_ts_irq = irq;
3548 	err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0,
3549 			       pf->int_name_ll_ts, pf);
3550 	if (err) {
3551 		dev_err(dev, "devm_request_irq for %s failed: %d\n",
3552 			pf->int_name_ll_ts, err);
3553 		ice_free_irq(pf, pf->ll_ts_irq);
3554 		return err;
3555 	}
3556 
3557 skip_req_irq:
3558 	ice_ena_misc_vector(pf);
3559 
3560 	ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3561 	/* This enables LL TS interrupt */
3562 	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3563 	if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3564 		wr32(hw, PFINT_SB_CTL,
3565 		     ((pf->ll_ts_irq.index + pf_intr_start_offset) &
3566 		      PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M);
3567 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3568 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3569 
3570 	ice_flush(hw);
3571 	ice_irq_dynamic_ena(hw, NULL, NULL);
3572 
3573 	return 0;
3574 }
3575 
3576 /**
3577  * ice_set_ops - set netdev and ethtools ops for the given netdev
3578  * @vsi: the VSI associated with the new netdev
3579  */
ice_set_ops(struct ice_vsi * vsi)3580 static void ice_set_ops(struct ice_vsi *vsi)
3581 {
3582 	struct net_device *netdev = vsi->netdev;
3583 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3584 
3585 	if (ice_is_safe_mode(pf)) {
3586 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3587 		ice_set_ethtool_safe_mode_ops(netdev);
3588 		return;
3589 	}
3590 
3591 	netdev->netdev_ops = &ice_netdev_ops;
3592 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3593 	netdev->xdp_metadata_ops = &ice_xdp_md_ops;
3594 	ice_set_ethtool_ops(netdev);
3595 
3596 	if (vsi->type != ICE_VSI_PF)
3597 		return;
3598 
3599 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
3600 			       NETDEV_XDP_ACT_XSK_ZEROCOPY |
3601 			       NETDEV_XDP_ACT_RX_SG;
3602 	netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
3603 }
3604 
3605 /**
3606  * ice_set_netdev_features - set features for the given netdev
3607  * @netdev: netdev instance
3608  */
ice_set_netdev_features(struct net_device * netdev)3609 void ice_set_netdev_features(struct net_device *netdev)
3610 {
3611 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3612 	bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3613 	netdev_features_t csumo_features;
3614 	netdev_features_t vlano_features;
3615 	netdev_features_t dflt_features;
3616 	netdev_features_t tso_features;
3617 
3618 	if (ice_is_safe_mode(pf)) {
3619 		/* safe mode */
3620 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3621 		netdev->hw_features = netdev->features;
3622 		return;
3623 	}
3624 
3625 	dflt_features = NETIF_F_SG	|
3626 			NETIF_F_HIGHDMA	|
3627 			NETIF_F_NTUPLE	|
3628 			NETIF_F_RXHASH;
3629 
3630 	csumo_features = NETIF_F_RXCSUM	  |
3631 			 NETIF_F_IP_CSUM  |
3632 			 NETIF_F_SCTP_CRC |
3633 			 NETIF_F_IPV6_CSUM;
3634 
3635 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3636 			 NETIF_F_HW_VLAN_CTAG_TX     |
3637 			 NETIF_F_HW_VLAN_CTAG_RX;
3638 
3639 	/* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3640 	if (is_dvm_ena)
3641 		vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3642 
3643 	tso_features = NETIF_F_TSO			|
3644 		       NETIF_F_TSO_ECN			|
3645 		       NETIF_F_TSO6			|
3646 		       NETIF_F_GSO_GRE			|
3647 		       NETIF_F_GSO_UDP_TUNNEL		|
3648 		       NETIF_F_GSO_GRE_CSUM		|
3649 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3650 		       NETIF_F_GSO_PARTIAL		|
3651 		       NETIF_F_GSO_IPXIP4		|
3652 		       NETIF_F_GSO_IPXIP6		|
3653 		       NETIF_F_GSO_UDP_L4;
3654 
3655 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3656 					NETIF_F_GSO_GRE_CSUM;
3657 	/* set features that user can change */
3658 	netdev->hw_features = dflt_features | csumo_features |
3659 			      vlano_features | tso_features;
3660 
3661 	/* add support for HW_CSUM on packets with MPLS header */
3662 	netdev->mpls_features =  NETIF_F_HW_CSUM |
3663 				 NETIF_F_TSO     |
3664 				 NETIF_F_TSO6;
3665 
3666 	/* enable features */
3667 	netdev->features |= netdev->hw_features;
3668 
3669 	netdev->hw_features |= NETIF_F_HW_TC;
3670 	netdev->hw_features |= NETIF_F_LOOPBACK;
3671 
3672 	/* encap and VLAN devices inherit default, csumo and tso features */
3673 	netdev->hw_enc_features |= dflt_features | csumo_features |
3674 				   tso_features;
3675 	netdev->vlan_features |= dflt_features | csumo_features |
3676 				 tso_features;
3677 
3678 	/* advertise support but don't enable by default since only one type of
3679 	 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3680 	 * type turns on the other has to be turned off. This is enforced by the
3681 	 * ice_fix_features() ndo callback.
3682 	 */
3683 	if (is_dvm_ena)
3684 		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3685 			NETIF_F_HW_VLAN_STAG_TX;
3686 
3687 	/* Leave CRC / FCS stripping enabled by default, but allow the value to
3688 	 * be changed at runtime
3689 	 */
3690 	netdev->hw_features |= NETIF_F_RXFCS;
3691 
3692 	netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
3693 }
3694 
3695 /**
3696  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3697  * @lut: Lookup table
3698  * @rss_table_size: Lookup table size
3699  * @rss_size: Range of queue number for hashing
3700  */
ice_fill_rss_lut(u8 * lut,u16 rss_table_size,u16 rss_size)3701 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3702 {
3703 	u16 i;
3704 
3705 	for (i = 0; i < rss_table_size; i++)
3706 		lut[i] = i % rss_size;
3707 }
3708 
3709 /**
3710  * ice_pf_vsi_setup - Set up a PF VSI
3711  * @pf: board private structure
3712  * @pi: pointer to the port_info instance
3713  *
3714  * Returns pointer to the successfully allocated VSI software struct
3715  * on success, otherwise returns NULL on failure.
3716  */
3717 static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf * pf,struct ice_port_info * pi)3718 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3719 {
3720 	struct ice_vsi_cfg_params params = {};
3721 
3722 	params.type = ICE_VSI_PF;
3723 	params.port_info = pi;
3724 	params.flags = ICE_VSI_FLAG_INIT;
3725 
3726 	return ice_vsi_setup(pf, &params);
3727 }
3728 
3729 static struct ice_vsi *
ice_chnl_vsi_setup(struct ice_pf * pf,struct ice_port_info * pi,struct ice_channel * ch)3730 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3731 		   struct ice_channel *ch)
3732 {
3733 	struct ice_vsi_cfg_params params = {};
3734 
3735 	params.type = ICE_VSI_CHNL;
3736 	params.port_info = pi;
3737 	params.ch = ch;
3738 	params.flags = ICE_VSI_FLAG_INIT;
3739 
3740 	return ice_vsi_setup(pf, &params);
3741 }
3742 
3743 /**
3744  * ice_ctrl_vsi_setup - Set up a control VSI
3745  * @pf: board private structure
3746  * @pi: pointer to the port_info instance
3747  *
3748  * Returns pointer to the successfully allocated VSI software struct
3749  * on success, otherwise returns NULL on failure.
3750  */
3751 static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf * pf,struct ice_port_info * pi)3752 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3753 {
3754 	struct ice_vsi_cfg_params params = {};
3755 
3756 	params.type = ICE_VSI_CTRL;
3757 	params.port_info = pi;
3758 	params.flags = ICE_VSI_FLAG_INIT;
3759 
3760 	return ice_vsi_setup(pf, &params);
3761 }
3762 
3763 /**
3764  * ice_lb_vsi_setup - Set up a loopback VSI
3765  * @pf: board private structure
3766  * @pi: pointer to the port_info instance
3767  *
3768  * Returns pointer to the successfully allocated VSI software struct
3769  * on success, otherwise returns NULL on failure.
3770  */
3771 struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf * pf,struct ice_port_info * pi)3772 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3773 {
3774 	struct ice_vsi_cfg_params params = {};
3775 
3776 	params.type = ICE_VSI_LB;
3777 	params.port_info = pi;
3778 	params.flags = ICE_VSI_FLAG_INIT;
3779 
3780 	return ice_vsi_setup(pf, &params);
3781 }
3782 
3783 /**
3784  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3785  * @netdev: network interface to be adjusted
3786  * @proto: VLAN TPID
3787  * @vid: VLAN ID to be added
3788  *
3789  * net_device_ops implementation for adding VLAN IDs
3790  */
ice_vlan_rx_add_vid(struct net_device * netdev,__be16 proto,u16 vid)3791 int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3792 {
3793 	struct ice_netdev_priv *np = netdev_priv(netdev);
3794 	struct ice_vsi_vlan_ops *vlan_ops;
3795 	struct ice_vsi *vsi = np->vsi;
3796 	struct ice_vlan vlan;
3797 	int ret;
3798 
3799 	/* VLAN 0 is added by default during load/reset */
3800 	if (!vid)
3801 		return 0;
3802 
3803 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3804 		usleep_range(1000, 2000);
3805 
3806 	/* Add multicast promisc rule for the VLAN ID to be added if
3807 	 * all-multicast is currently enabled.
3808 	 */
3809 	if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3810 		ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3811 					       ICE_MCAST_VLAN_PROMISC_BITS,
3812 					       vid);
3813 		if (ret)
3814 			goto finish;
3815 	}
3816 
3817 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3818 
3819 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3820 	 * packets aren't pruned by the device's internal switch on Rx
3821 	 */
3822 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3823 	ret = vlan_ops->add_vlan(vsi, &vlan);
3824 	if (ret)
3825 		goto finish;
3826 
3827 	/* If all-multicast is currently enabled and this VLAN ID is only one
3828 	 * besides VLAN-0 we have to update look-up type of multicast promisc
3829 	 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3830 	 */
3831 	if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3832 	    ice_vsi_num_non_zero_vlans(vsi) == 1) {
3833 		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3834 					   ICE_MCAST_PROMISC_BITS, 0);
3835 		ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3836 					 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3837 	}
3838 
3839 finish:
3840 	clear_bit(ICE_CFG_BUSY, vsi->state);
3841 
3842 	return ret;
3843 }
3844 
3845 /**
3846  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3847  * @netdev: network interface to be adjusted
3848  * @proto: VLAN TPID
3849  * @vid: VLAN ID to be removed
3850  *
3851  * net_device_ops implementation for removing VLAN IDs
3852  */
ice_vlan_rx_kill_vid(struct net_device * netdev,__be16 proto,u16 vid)3853 int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3854 {
3855 	struct ice_netdev_priv *np = netdev_priv(netdev);
3856 	struct ice_vsi_vlan_ops *vlan_ops;
3857 	struct ice_vsi *vsi = np->vsi;
3858 	struct ice_vlan vlan;
3859 	int ret;
3860 
3861 	/* don't allow removal of VLAN 0 */
3862 	if (!vid)
3863 		return 0;
3864 
3865 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3866 		usleep_range(1000, 2000);
3867 
3868 	ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3869 				    ICE_MCAST_VLAN_PROMISC_BITS, vid);
3870 	if (ret) {
3871 		netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3872 			   vsi->vsi_num);
3873 		vsi->current_netdev_flags |= IFF_ALLMULTI;
3874 	}
3875 
3876 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3877 
3878 	/* Make sure VLAN delete is successful before updating VLAN
3879 	 * information
3880 	 */
3881 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3882 	ret = vlan_ops->del_vlan(vsi, &vlan);
3883 	if (ret)
3884 		goto finish;
3885 
3886 	/* Remove multicast promisc rule for the removed VLAN ID if
3887 	 * all-multicast is enabled.
3888 	 */
3889 	if (vsi->current_netdev_flags & IFF_ALLMULTI)
3890 		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3891 					   ICE_MCAST_VLAN_PROMISC_BITS, vid);
3892 
3893 	if (!ice_vsi_has_non_zero_vlans(vsi)) {
3894 		/* Update look-up type of multicast promisc rule for VLAN 0
3895 		 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3896 		 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3897 		 */
3898 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3899 			ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3900 						   ICE_MCAST_VLAN_PROMISC_BITS,
3901 						   0);
3902 			ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3903 						 ICE_MCAST_PROMISC_BITS, 0);
3904 		}
3905 	}
3906 
3907 finish:
3908 	clear_bit(ICE_CFG_BUSY, vsi->state);
3909 
3910 	return ret;
3911 }
3912 
3913 /**
3914  * ice_rep_indr_tc_block_unbind
3915  * @cb_priv: indirection block private data
3916  */
ice_rep_indr_tc_block_unbind(void * cb_priv)3917 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3918 {
3919 	struct ice_indr_block_priv *indr_priv = cb_priv;
3920 
3921 	list_del(&indr_priv->list);
3922 	kfree(indr_priv);
3923 }
3924 
3925 /**
3926  * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3927  * @vsi: VSI struct which has the netdev
3928  */
ice_tc_indir_block_unregister(struct ice_vsi * vsi)3929 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3930 {
3931 	struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3932 
3933 	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3934 				 ice_rep_indr_tc_block_unbind);
3935 }
3936 
3937 /**
3938  * ice_tc_indir_block_register - Register TC indirect block notifications
3939  * @vsi: VSI struct which has the netdev
3940  *
3941  * Returns 0 on success, negative value on failure
3942  */
ice_tc_indir_block_register(struct ice_vsi * vsi)3943 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3944 {
3945 	struct ice_netdev_priv *np;
3946 
3947 	if (!vsi || !vsi->netdev)
3948 		return -EINVAL;
3949 
3950 	np = netdev_priv(vsi->netdev);
3951 
3952 	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3953 	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3954 }
3955 
3956 /**
3957  * ice_get_avail_q_count - Get count of queues in use
3958  * @pf_qmap: bitmap to get queue use count from
3959  * @lock: pointer to a mutex that protects access to pf_qmap
3960  * @size: size of the bitmap
3961  */
3962 static u16
ice_get_avail_q_count(unsigned long * pf_qmap,struct mutex * lock,u16 size)3963 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3964 {
3965 	unsigned long bit;
3966 	u16 count = 0;
3967 
3968 	mutex_lock(lock);
3969 	for_each_clear_bit(bit, pf_qmap, size)
3970 		count++;
3971 	mutex_unlock(lock);
3972 
3973 	return count;
3974 }
3975 
3976 /**
3977  * ice_get_avail_txq_count - Get count of Tx queues in use
3978  * @pf: pointer to an ice_pf instance
3979  */
ice_get_avail_txq_count(struct ice_pf * pf)3980 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3981 {
3982 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3983 				     pf->max_pf_txqs);
3984 }
3985 
3986 /**
3987  * ice_get_avail_rxq_count - Get count of Rx queues in use
3988  * @pf: pointer to an ice_pf instance
3989  */
ice_get_avail_rxq_count(struct ice_pf * pf)3990 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3991 {
3992 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3993 				     pf->max_pf_rxqs);
3994 }
3995 
3996 /**
3997  * ice_deinit_pf - Unrolls initialziations done by ice_init_pf
3998  * @pf: board private structure to initialize
3999  */
ice_deinit_pf(struct ice_pf * pf)4000 static void ice_deinit_pf(struct ice_pf *pf)
4001 {
4002 	ice_service_task_stop(pf);
4003 	mutex_destroy(&pf->lag_mutex);
4004 	mutex_destroy(&pf->adev_mutex);
4005 	mutex_destroy(&pf->sw_mutex);
4006 	mutex_destroy(&pf->tc_mutex);
4007 	mutex_destroy(&pf->avail_q_mutex);
4008 	mutex_destroy(&pf->vfs.table_lock);
4009 
4010 	if (pf->avail_txqs) {
4011 		bitmap_free(pf->avail_txqs);
4012 		pf->avail_txqs = NULL;
4013 	}
4014 
4015 	if (pf->avail_rxqs) {
4016 		bitmap_free(pf->avail_rxqs);
4017 		pf->avail_rxqs = NULL;
4018 	}
4019 
4020 	if (pf->ptp.clock)
4021 		ptp_clock_unregister(pf->ptp.clock);
4022 
4023 	xa_destroy(&pf->dyn_ports);
4024 	xa_destroy(&pf->sf_nums);
4025 }
4026 
4027 /**
4028  * ice_set_pf_caps - set PFs capability flags
4029  * @pf: pointer to the PF instance
4030  */
ice_set_pf_caps(struct ice_pf * pf)4031 static void ice_set_pf_caps(struct ice_pf *pf)
4032 {
4033 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
4034 
4035 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
4036 	if (func_caps->common_cap.rdma)
4037 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
4038 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4039 	if (func_caps->common_cap.dcb)
4040 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4041 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
4042 	if (func_caps->common_cap.sr_iov_1_1) {
4043 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
4044 		pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
4045 					      ICE_MAX_SRIOV_VFS);
4046 	}
4047 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
4048 	if (func_caps->common_cap.rss_table_size)
4049 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
4050 
4051 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
4052 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
4053 		u16 unused;
4054 
4055 		/* ctrl_vsi_idx will be set to a valid value when flow director
4056 		 * is setup by ice_init_fdir
4057 		 */
4058 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4059 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
4060 		/* force guaranteed filter pool for PF */
4061 		ice_alloc_fd_guar_item(&pf->hw, &unused,
4062 				       func_caps->fd_fltr_guar);
4063 		/* force shared filter pool for PF */
4064 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
4065 				       func_caps->fd_fltr_best_effort);
4066 	}
4067 
4068 	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
4069 	if (func_caps->common_cap.ieee_1588 &&
4070 	    !(pf->hw.mac_type == ICE_MAC_E830))
4071 		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
4072 
4073 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
4074 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
4075 }
4076 
4077 /**
4078  * ice_init_pf - Initialize general software structures (struct ice_pf)
4079  * @pf: board private structure to initialize
4080  */
ice_init_pf(struct ice_pf * pf)4081 static int ice_init_pf(struct ice_pf *pf)
4082 {
4083 	ice_set_pf_caps(pf);
4084 
4085 	mutex_init(&pf->sw_mutex);
4086 	mutex_init(&pf->tc_mutex);
4087 	mutex_init(&pf->adev_mutex);
4088 	mutex_init(&pf->lag_mutex);
4089 
4090 	INIT_HLIST_HEAD(&pf->aq_wait_list);
4091 	spin_lock_init(&pf->aq_wait_lock);
4092 	init_waitqueue_head(&pf->aq_wait_queue);
4093 
4094 	init_waitqueue_head(&pf->reset_wait_queue);
4095 
4096 	/* setup service timer and periodic service task */
4097 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
4098 	pf->serv_tmr_period = HZ;
4099 	INIT_WORK(&pf->serv_task, ice_service_task);
4100 	clear_bit(ICE_SERVICE_SCHED, pf->state);
4101 
4102 	mutex_init(&pf->avail_q_mutex);
4103 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
4104 	if (!pf->avail_txqs)
4105 		return -ENOMEM;
4106 
4107 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
4108 	if (!pf->avail_rxqs) {
4109 		bitmap_free(pf->avail_txqs);
4110 		pf->avail_txqs = NULL;
4111 		return -ENOMEM;
4112 	}
4113 
4114 	mutex_init(&pf->vfs.table_lock);
4115 	hash_init(pf->vfs.table);
4116 	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
4117 		wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH,
4118 		     ICE_MBX_OVERFLOW_WATERMARK);
4119 	else
4120 		ice_mbx_init_snapshot(&pf->hw);
4121 
4122 	xa_init(&pf->dyn_ports);
4123 	xa_init(&pf->sf_nums);
4124 
4125 	return 0;
4126 }
4127 
4128 /**
4129  * ice_is_wol_supported - check if WoL is supported
4130  * @hw: pointer to hardware info
4131  *
4132  * Check if WoL is supported based on the HW configuration.
4133  * Returns true if NVM supports and enables WoL for this port, false otherwise
4134  */
ice_is_wol_supported(struct ice_hw * hw)4135 bool ice_is_wol_supported(struct ice_hw *hw)
4136 {
4137 	u16 wol_ctrl;
4138 
4139 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
4140 	 * word) indicates WoL is not supported on the corresponding PF ID.
4141 	 */
4142 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
4143 		return false;
4144 
4145 	return !(BIT(hw->port_info->lport) & wol_ctrl);
4146 }
4147 
4148 /**
4149  * ice_vsi_recfg_qs - Change the number of queues on a VSI
4150  * @vsi: VSI being changed
4151  * @new_rx: new number of Rx queues
4152  * @new_tx: new number of Tx queues
4153  * @locked: is adev device_lock held
4154  *
4155  * Only change the number of queues if new_tx, or new_rx is non-0.
4156  *
4157  * Returns 0 on success.
4158  */
ice_vsi_recfg_qs(struct ice_vsi * vsi,int new_rx,int new_tx,bool locked)4159 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
4160 {
4161 	struct ice_pf *pf = vsi->back;
4162 	int i, err = 0, timeout = 50;
4163 
4164 	if (!new_rx && !new_tx)
4165 		return -EINVAL;
4166 
4167 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
4168 		timeout--;
4169 		if (!timeout)
4170 			return -EBUSY;
4171 		usleep_range(1000, 2000);
4172 	}
4173 
4174 	if (new_tx)
4175 		vsi->req_txq = (u16)new_tx;
4176 	if (new_rx)
4177 		vsi->req_rxq = (u16)new_rx;
4178 
4179 	/* set for the next time the netdev is started */
4180 	if (!netif_running(vsi->netdev)) {
4181 		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4182 		if (err)
4183 			goto rebuild_err;
4184 		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
4185 		goto done;
4186 	}
4187 
4188 	ice_vsi_close(vsi);
4189 	err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4190 	if (err)
4191 		goto rebuild_err;
4192 
4193 	ice_for_each_traffic_class(i) {
4194 		if (vsi->tc_cfg.ena_tc & BIT(i))
4195 			netdev_set_tc_queue(vsi->netdev,
4196 					    vsi->tc_cfg.tc_info[i].netdev_tc,
4197 					    vsi->tc_cfg.tc_info[i].qcount_tx,
4198 					    vsi->tc_cfg.tc_info[i].qoffset);
4199 	}
4200 	ice_pf_dcb_recfg(pf, locked);
4201 	ice_vsi_open(vsi);
4202 	goto done;
4203 
4204 rebuild_err:
4205 	dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n",
4206 		err);
4207 done:
4208 	clear_bit(ICE_CFG_BUSY, pf->state);
4209 	return err;
4210 }
4211 
4212 /**
4213  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4214  * @pf: PF to configure
4215  *
4216  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4217  * VSI can still Tx/Rx VLAN tagged packets.
4218  */
ice_set_safe_mode_vlan_cfg(struct ice_pf * pf)4219 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4220 {
4221 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4222 	struct ice_vsi_ctx *ctxt;
4223 	struct ice_hw *hw;
4224 	int status;
4225 
4226 	if (!vsi)
4227 		return;
4228 
4229 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4230 	if (!ctxt)
4231 		return;
4232 
4233 	hw = &pf->hw;
4234 	ctxt->info = vsi->info;
4235 
4236 	ctxt->info.valid_sections =
4237 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4238 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
4239 			    ICE_AQ_VSI_PROP_SW_VALID);
4240 
4241 	/* disable VLAN anti-spoof */
4242 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4243 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4244 
4245 	/* disable VLAN pruning and keep all other settings */
4246 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4247 
4248 	/* allow all VLANs on Tx and don't strip on Rx */
4249 	ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4250 		ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4251 
4252 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4253 	if (status) {
4254 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4255 			status, ice_aq_str(hw->adminq.sq_last_status));
4256 	} else {
4257 		vsi->info.sec_flags = ctxt->info.sec_flags;
4258 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4259 		vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4260 	}
4261 
4262 	kfree(ctxt);
4263 }
4264 
4265 /**
4266  * ice_log_pkg_init - log result of DDP package load
4267  * @hw: pointer to hardware info
4268  * @state: state of package load
4269  */
ice_log_pkg_init(struct ice_hw * hw,enum ice_ddp_state state)4270 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4271 {
4272 	struct ice_pf *pf = hw->back;
4273 	struct device *dev;
4274 
4275 	dev = ice_pf_to_dev(pf);
4276 
4277 	switch (state) {
4278 	case ICE_DDP_PKG_SUCCESS:
4279 		dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4280 			 hw->active_pkg_name,
4281 			 hw->active_pkg_ver.major,
4282 			 hw->active_pkg_ver.minor,
4283 			 hw->active_pkg_ver.update,
4284 			 hw->active_pkg_ver.draft);
4285 		break;
4286 	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4287 		dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4288 			 hw->active_pkg_name,
4289 			 hw->active_pkg_ver.major,
4290 			 hw->active_pkg_ver.minor,
4291 			 hw->active_pkg_ver.update,
4292 			 hw->active_pkg_ver.draft);
4293 		break;
4294 	case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4295 		dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
4296 			hw->active_pkg_name,
4297 			hw->active_pkg_ver.major,
4298 			hw->active_pkg_ver.minor,
4299 			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4300 		break;
4301 	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4302 		dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4303 			 hw->active_pkg_name,
4304 			 hw->active_pkg_ver.major,
4305 			 hw->active_pkg_ver.minor,
4306 			 hw->active_pkg_ver.update,
4307 			 hw->active_pkg_ver.draft,
4308 			 hw->pkg_name,
4309 			 hw->pkg_ver.major,
4310 			 hw->pkg_ver.minor,
4311 			 hw->pkg_ver.update,
4312 			 hw->pkg_ver.draft);
4313 		break;
4314 	case ICE_DDP_PKG_FW_MISMATCH:
4315 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
4316 		break;
4317 	case ICE_DDP_PKG_INVALID_FILE:
4318 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4319 		break;
4320 	case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4321 		dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
4322 		break;
4323 	case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4324 		dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
4325 			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4326 		break;
4327 	case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4328 		dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
4329 		break;
4330 	case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4331 		dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
4332 		break;
4333 	case ICE_DDP_PKG_LOAD_ERROR:
4334 		dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
4335 		/* poll for reset to complete */
4336 		if (ice_check_reset(hw))
4337 			dev_err(dev, "Error resetting device. Please reload the driver\n");
4338 		break;
4339 	case ICE_DDP_PKG_ERR:
4340 	default:
4341 		dev_err(dev, "An unknown error occurred when loading the DDP package.  Entering Safe Mode.\n");
4342 		break;
4343 	}
4344 }
4345 
4346 /**
4347  * ice_load_pkg - load/reload the DDP Package file
4348  * @firmware: firmware structure when firmware requested or NULL for reload
4349  * @pf: pointer to the PF instance
4350  *
4351  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4352  * initialize HW tables.
4353  */
4354 static void
ice_load_pkg(const struct firmware * firmware,struct ice_pf * pf)4355 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4356 {
4357 	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4358 	struct device *dev = ice_pf_to_dev(pf);
4359 	struct ice_hw *hw = &pf->hw;
4360 
4361 	/* Load DDP Package */
4362 	if (firmware && !hw->pkg_copy) {
4363 		state = ice_copy_and_init_pkg(hw, firmware->data,
4364 					      firmware->size);
4365 		ice_log_pkg_init(hw, state);
4366 	} else if (!firmware && hw->pkg_copy) {
4367 		/* Reload package during rebuild after CORER/GLOBR reset */
4368 		state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4369 		ice_log_pkg_init(hw, state);
4370 	} else {
4371 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4372 	}
4373 
4374 	if (!ice_is_init_pkg_successful(state)) {
4375 		/* Safe Mode */
4376 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4377 		return;
4378 	}
4379 
4380 	/* Successful download package is the precondition for advanced
4381 	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4382 	 */
4383 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4384 }
4385 
4386 /**
4387  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4388  * @pf: pointer to the PF structure
4389  *
4390  * There is no error returned here because the driver should be able to handle
4391  * 128 Byte cache lines, so we only print a warning in case issues are seen,
4392  * specifically with Tx.
4393  */
ice_verify_cacheline_size(struct ice_pf * pf)4394 static void ice_verify_cacheline_size(struct ice_pf *pf)
4395 {
4396 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4397 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4398 			 ICE_CACHE_LINE_BYTES);
4399 }
4400 
4401 /**
4402  * ice_send_version - update firmware with driver version
4403  * @pf: PF struct
4404  *
4405  * Returns 0 on success, else error code
4406  */
ice_send_version(struct ice_pf * pf)4407 static int ice_send_version(struct ice_pf *pf)
4408 {
4409 	struct ice_driver_ver dv;
4410 
4411 	dv.major_ver = 0xff;
4412 	dv.minor_ver = 0xff;
4413 	dv.build_ver = 0xff;
4414 	dv.subbuild_ver = 0;
4415 	strscpy((char *)dv.driver_string, UTS_RELEASE,
4416 		sizeof(dv.driver_string));
4417 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4418 }
4419 
4420 /**
4421  * ice_init_fdir - Initialize flow director VSI and configuration
4422  * @pf: pointer to the PF instance
4423  *
4424  * returns 0 on success, negative on error
4425  */
ice_init_fdir(struct ice_pf * pf)4426 static int ice_init_fdir(struct ice_pf *pf)
4427 {
4428 	struct device *dev = ice_pf_to_dev(pf);
4429 	struct ice_vsi *ctrl_vsi;
4430 	int err;
4431 
4432 	/* Side Band Flow Director needs to have a control VSI.
4433 	 * Allocate it and store it in the PF.
4434 	 */
4435 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4436 	if (!ctrl_vsi) {
4437 		dev_dbg(dev, "could not create control VSI\n");
4438 		return -ENOMEM;
4439 	}
4440 
4441 	err = ice_vsi_open_ctrl(ctrl_vsi);
4442 	if (err) {
4443 		dev_dbg(dev, "could not open control VSI\n");
4444 		goto err_vsi_open;
4445 	}
4446 
4447 	mutex_init(&pf->hw.fdir_fltr_lock);
4448 
4449 	err = ice_fdir_create_dflt_rules(pf);
4450 	if (err)
4451 		goto err_fdir_rule;
4452 
4453 	return 0;
4454 
4455 err_fdir_rule:
4456 	ice_fdir_release_flows(&pf->hw);
4457 	ice_vsi_close(ctrl_vsi);
4458 err_vsi_open:
4459 	ice_vsi_release(ctrl_vsi);
4460 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4461 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4462 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4463 	}
4464 	return err;
4465 }
4466 
ice_deinit_fdir(struct ice_pf * pf)4467 static void ice_deinit_fdir(struct ice_pf *pf)
4468 {
4469 	struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4470 
4471 	if (!vsi)
4472 		return;
4473 
4474 	ice_vsi_manage_fdir(vsi, false);
4475 	ice_vsi_release(vsi);
4476 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4477 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4478 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4479 	}
4480 
4481 	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
4482 }
4483 
4484 /**
4485  * ice_get_opt_fw_name - return optional firmware file name or NULL
4486  * @pf: pointer to the PF instance
4487  */
ice_get_opt_fw_name(struct ice_pf * pf)4488 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4489 {
4490 	/* Optional firmware name same as default with additional dash
4491 	 * followed by a EUI-64 identifier (PCIe Device Serial Number)
4492 	 */
4493 	struct pci_dev *pdev = pf->pdev;
4494 	char *opt_fw_filename;
4495 	u64 dsn;
4496 
4497 	/* Determine the name of the optional file using the DSN (two
4498 	 * dwords following the start of the DSN Capability).
4499 	 */
4500 	dsn = pci_get_dsn(pdev);
4501 	if (!dsn)
4502 		return NULL;
4503 
4504 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4505 	if (!opt_fw_filename)
4506 		return NULL;
4507 
4508 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4509 		 ICE_DDP_PKG_PATH, dsn);
4510 
4511 	return opt_fw_filename;
4512 }
4513 
4514 /**
4515  * ice_request_fw - Device initialization routine
4516  * @pf: pointer to the PF instance
4517  * @firmware: double pointer to firmware struct
4518  *
4519  * Return: zero when successful, negative values otherwise.
4520  */
ice_request_fw(struct ice_pf * pf,const struct firmware ** firmware)4521 static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware)
4522 {
4523 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4524 	struct device *dev = ice_pf_to_dev(pf);
4525 	int err = 0;
4526 
4527 	/* optional device-specific DDP (if present) overrides the default DDP
4528 	 * package file. kernel logs a debug message if the file doesn't exist,
4529 	 * and warning messages for other errors.
4530 	 */
4531 	if (opt_fw_filename) {
4532 		err = firmware_request_nowarn(firmware, opt_fw_filename, dev);
4533 		kfree(opt_fw_filename);
4534 		if (!err)
4535 			return err;
4536 	}
4537 	err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev);
4538 	if (err)
4539 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4540 
4541 	return err;
4542 }
4543 
4544 /**
4545  * ice_init_tx_topology - performs Tx topology initialization
4546  * @hw: pointer to the hardware structure
4547  * @firmware: pointer to firmware structure
4548  *
4549  * Return: zero when init was successful, negative values otherwise.
4550  */
4551 static int
ice_init_tx_topology(struct ice_hw * hw,const struct firmware * firmware)4552 ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
4553 {
4554 	u8 num_tx_sched_layers = hw->num_tx_sched_layers;
4555 	struct ice_pf *pf = hw->back;
4556 	struct device *dev;
4557 	int err;
4558 
4559 	dev = ice_pf_to_dev(pf);
4560 	err = ice_cfg_tx_topo(hw, firmware->data, firmware->size);
4561 	if (!err) {
4562 		if (hw->num_tx_sched_layers > num_tx_sched_layers)
4563 			dev_info(dev, "Tx scheduling layers switching feature disabled\n");
4564 		else
4565 			dev_info(dev, "Tx scheduling layers switching feature enabled\n");
4566 		/* if there was a change in topology ice_cfg_tx_topo triggered
4567 		 * a CORER and we need to re-init hw
4568 		 */
4569 		ice_deinit_hw(hw);
4570 		err = ice_init_hw(hw);
4571 
4572 		return err;
4573 	} else if (err == -EIO) {
4574 		dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
4575 	}
4576 
4577 	return 0;
4578 }
4579 
4580 /**
4581  * ice_init_supported_rxdids - Initialize supported Rx descriptor IDs
4582  * @hw: pointer to the hardware structure
4583  * @pf: pointer to pf structure
4584  *
4585  * The pf->supported_rxdids bitmap is used to indicate to VFs which descriptor
4586  * formats the PF hardware supports. The exact list of supported RXDIDs
4587  * depends on the loaded DDP package. The IDs can be determined by reading the
4588  * GLFLXP_RXDID_FLAGS register after the DDP package is loaded.
4589  *
4590  * Note that the legacy 32-byte RXDID 0 is always supported but is not listed
4591  * in the DDP package. The 16-byte legacy descriptor is never supported by
4592  * VFs.
4593  */
ice_init_supported_rxdids(struct ice_hw * hw,struct ice_pf * pf)4594 static void ice_init_supported_rxdids(struct ice_hw *hw, struct ice_pf *pf)
4595 {
4596 	pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1);
4597 
4598 	for (int i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
4599 		u32 regval;
4600 
4601 		regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
4602 		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
4603 			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
4604 			pf->supported_rxdids |= BIT(i);
4605 	}
4606 }
4607 
4608 /**
4609  * ice_init_ddp_config - DDP related configuration
4610  * @hw: pointer to the hardware structure
4611  * @pf: pointer to pf structure
4612  *
4613  * This function loads DDP file from the disk, then initializes Tx
4614  * topology. At the end DDP package is loaded on the card.
4615  *
4616  * Return: zero when init was successful, negative values otherwise.
4617  */
ice_init_ddp_config(struct ice_hw * hw,struct ice_pf * pf)4618 static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
4619 {
4620 	struct device *dev = ice_pf_to_dev(pf);
4621 	const struct firmware *firmware = NULL;
4622 	int err;
4623 
4624 	err = ice_request_fw(pf, &firmware);
4625 	if (err) {
4626 		dev_err(dev, "Fail during requesting FW: %d\n", err);
4627 		return err;
4628 	}
4629 
4630 	err = ice_init_tx_topology(hw, firmware);
4631 	if (err) {
4632 		dev_err(dev, "Fail during initialization of Tx topology: %d\n",
4633 			err);
4634 		release_firmware(firmware);
4635 		return err;
4636 	}
4637 
4638 	/* Download firmware to device */
4639 	ice_load_pkg(firmware, pf);
4640 	release_firmware(firmware);
4641 
4642 	/* Initialize the supported Rx descriptor IDs after loading DDP */
4643 	ice_init_supported_rxdids(hw, pf);
4644 
4645 	return 0;
4646 }
4647 
4648 /**
4649  * ice_print_wake_reason - show the wake up cause in the log
4650  * @pf: pointer to the PF struct
4651  */
ice_print_wake_reason(struct ice_pf * pf)4652 static void ice_print_wake_reason(struct ice_pf *pf)
4653 {
4654 	u32 wus = pf->wakeup_reason;
4655 	const char *wake_str;
4656 
4657 	/* if no wake event, nothing to print */
4658 	if (!wus)
4659 		return;
4660 
4661 	if (wus & PFPM_WUS_LNKC_M)
4662 		wake_str = "Link\n";
4663 	else if (wus & PFPM_WUS_MAG_M)
4664 		wake_str = "Magic Packet\n";
4665 	else if (wus & PFPM_WUS_MNG_M)
4666 		wake_str = "Management\n";
4667 	else if (wus & PFPM_WUS_FW_RST_WK_M)
4668 		wake_str = "Firmware Reset\n";
4669 	else
4670 		wake_str = "Unknown\n";
4671 
4672 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4673 }
4674 
4675 /**
4676  * ice_pf_fwlog_update_module - update 1 module
4677  * @pf: pointer to the PF struct
4678  * @log_level: log_level to use for the @module
4679  * @module: module to update
4680  */
ice_pf_fwlog_update_module(struct ice_pf * pf,int log_level,int module)4681 void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
4682 {
4683 	struct ice_hw *hw = &pf->hw;
4684 
4685 	hw->fwlog_cfg.module_entries[module].log_level = log_level;
4686 }
4687 
4688 /**
4689  * ice_register_netdev - register netdev
4690  * @vsi: pointer to the VSI struct
4691  */
ice_register_netdev(struct ice_vsi * vsi)4692 static int ice_register_netdev(struct ice_vsi *vsi)
4693 {
4694 	int err;
4695 
4696 	if (!vsi || !vsi->netdev)
4697 		return -EIO;
4698 
4699 	err = register_netdev(vsi->netdev);
4700 	if (err)
4701 		return err;
4702 
4703 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4704 	netif_carrier_off(vsi->netdev);
4705 	netif_tx_stop_all_queues(vsi->netdev);
4706 
4707 	return 0;
4708 }
4709 
ice_unregister_netdev(struct ice_vsi * vsi)4710 static void ice_unregister_netdev(struct ice_vsi *vsi)
4711 {
4712 	if (!vsi || !vsi->netdev)
4713 		return;
4714 
4715 	unregister_netdev(vsi->netdev);
4716 	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4717 }
4718 
4719 /**
4720  * ice_cfg_netdev - Allocate, configure and register a netdev
4721  * @vsi: the VSI associated with the new netdev
4722  *
4723  * Returns 0 on success, negative value on failure
4724  */
ice_cfg_netdev(struct ice_vsi * vsi)4725 static int ice_cfg_netdev(struct ice_vsi *vsi)
4726 {
4727 	struct ice_netdev_priv *np;
4728 	struct net_device *netdev;
4729 	u8 mac_addr[ETH_ALEN];
4730 
4731 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4732 				    vsi->alloc_rxq);
4733 	if (!netdev)
4734 		return -ENOMEM;
4735 
4736 	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4737 	vsi->netdev = netdev;
4738 	np = netdev_priv(netdev);
4739 	np->vsi = vsi;
4740 
4741 	ice_set_netdev_features(netdev);
4742 	ice_set_ops(vsi);
4743 
4744 	if (vsi->type == ICE_VSI_PF) {
4745 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4746 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4747 		eth_hw_addr_set(netdev, mac_addr);
4748 	}
4749 
4750 	netdev->priv_flags |= IFF_UNICAST_FLT;
4751 
4752 	/* Setup netdev TC information */
4753 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4754 
4755 	netdev->max_mtu = ICE_MAX_MTU;
4756 
4757 	return 0;
4758 }
4759 
ice_decfg_netdev(struct ice_vsi * vsi)4760 static void ice_decfg_netdev(struct ice_vsi *vsi)
4761 {
4762 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4763 	free_netdev(vsi->netdev);
4764 	vsi->netdev = NULL;
4765 }
4766 
ice_init_dev(struct ice_pf * pf)4767 int ice_init_dev(struct ice_pf *pf)
4768 {
4769 	struct device *dev = ice_pf_to_dev(pf);
4770 	struct ice_hw *hw = &pf->hw;
4771 	int err;
4772 
4773 	ice_init_feature_support(pf);
4774 
4775 	err = ice_init_ddp_config(hw, pf);
4776 
4777 	/* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be
4778 	 * set in pf->state, which will cause ice_is_safe_mode to return
4779 	 * true
4780 	 */
4781 	if (err || ice_is_safe_mode(pf)) {
4782 		/* we already got function/device capabilities but these don't
4783 		 * reflect what the driver needs to do in safe mode. Instead of
4784 		 * adding conditional logic everywhere to ignore these
4785 		 * device/function capabilities, override them.
4786 		 */
4787 		ice_set_safe_mode_caps(hw);
4788 	}
4789 
4790 	err = ice_init_pf(pf);
4791 	if (err) {
4792 		dev_err(dev, "ice_init_pf failed: %d\n", err);
4793 		return err;
4794 	}
4795 
4796 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4797 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4798 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4799 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4800 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4801 		pf->hw.udp_tunnel_nic.tables[0].n_entries =
4802 			pf->hw.tnl.valid_count[TNL_VXLAN];
4803 		pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
4804 			UDP_TUNNEL_TYPE_VXLAN;
4805 	}
4806 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4807 		pf->hw.udp_tunnel_nic.tables[1].n_entries =
4808 			pf->hw.tnl.valid_count[TNL_GENEVE];
4809 		pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
4810 			UDP_TUNNEL_TYPE_GENEVE;
4811 	}
4812 
4813 	err = ice_init_interrupt_scheme(pf);
4814 	if (err) {
4815 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4816 		err = -EIO;
4817 		goto unroll_pf_init;
4818 	}
4819 
4820 	/* In case of MSIX we are going to setup the misc vector right here
4821 	 * to handle admin queue events etc. In case of legacy and MSI
4822 	 * the misc functionality and queue processing is combined in
4823 	 * the same vector and that gets setup at open.
4824 	 */
4825 	err = ice_req_irq_msix_misc(pf);
4826 	if (err) {
4827 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4828 		goto unroll_irq_scheme_init;
4829 	}
4830 
4831 	return 0;
4832 
4833 unroll_irq_scheme_init:
4834 	ice_clear_interrupt_scheme(pf);
4835 unroll_pf_init:
4836 	ice_deinit_pf(pf);
4837 	return err;
4838 }
4839 
ice_deinit_dev(struct ice_pf * pf)4840 void ice_deinit_dev(struct ice_pf *pf)
4841 {
4842 	ice_free_irq_msix_misc(pf);
4843 	ice_deinit_pf(pf);
4844 	ice_deinit_hw(&pf->hw);
4845 
4846 	/* Service task is already stopped, so call reset directly. */
4847 	ice_reset(&pf->hw, ICE_RESET_PFR);
4848 	pci_wait_for_pending_transaction(pf->pdev);
4849 	ice_clear_interrupt_scheme(pf);
4850 }
4851 
ice_init_features(struct ice_pf * pf)4852 static void ice_init_features(struct ice_pf *pf)
4853 {
4854 	struct device *dev = ice_pf_to_dev(pf);
4855 
4856 	if (ice_is_safe_mode(pf))
4857 		return;
4858 
4859 	/* initialize DDP driven features */
4860 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4861 		ice_ptp_init(pf);
4862 
4863 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4864 		ice_gnss_init(pf);
4865 
4866 	if (ice_is_feature_supported(pf, ICE_F_CGU) ||
4867 	    ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
4868 		ice_dpll_init(pf);
4869 
4870 	/* Note: Flow director init failure is non-fatal to load */
4871 	if (ice_init_fdir(pf))
4872 		dev_err(dev, "could not initialize flow director\n");
4873 
4874 	/* Note: DCB init failure is non-fatal to load */
4875 	if (ice_init_pf_dcb(pf, false)) {
4876 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4877 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4878 	} else {
4879 		ice_cfg_lldp_mib_change(&pf->hw, true);
4880 	}
4881 
4882 	if (ice_init_lag(pf))
4883 		dev_warn(dev, "Failed to init link aggregation support\n");
4884 
4885 	ice_hwmon_init(pf);
4886 }
4887 
ice_deinit_features(struct ice_pf * pf)4888 static void ice_deinit_features(struct ice_pf *pf)
4889 {
4890 	if (ice_is_safe_mode(pf))
4891 		return;
4892 
4893 	ice_deinit_lag(pf);
4894 	if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4895 		ice_cfg_lldp_mib_change(&pf->hw, false);
4896 	ice_deinit_fdir(pf);
4897 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4898 		ice_gnss_exit(pf);
4899 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4900 		ice_ptp_release(pf);
4901 	if (test_bit(ICE_FLAG_DPLL, pf->flags))
4902 		ice_dpll_deinit(pf);
4903 	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
4904 		xa_destroy(&pf->eswitch.reprs);
4905 }
4906 
ice_init_wakeup(struct ice_pf * pf)4907 static void ice_init_wakeup(struct ice_pf *pf)
4908 {
4909 	/* Save wakeup reason register for later use */
4910 	pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4911 
4912 	/* check for a power management event */
4913 	ice_print_wake_reason(pf);
4914 
4915 	/* clear wake status, all bits */
4916 	wr32(&pf->hw, PFPM_WUS, U32_MAX);
4917 
4918 	/* Disable WoL at init, wait for user to enable */
4919 	device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4920 }
4921 
ice_init_link(struct ice_pf * pf)4922 static int ice_init_link(struct ice_pf *pf)
4923 {
4924 	struct device *dev = ice_pf_to_dev(pf);
4925 	int err;
4926 
4927 	err = ice_init_link_events(pf->hw.port_info);
4928 	if (err) {
4929 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4930 		return err;
4931 	}
4932 
4933 	/* not a fatal error if this fails */
4934 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4935 	if (err)
4936 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4937 
4938 	/* not a fatal error if this fails */
4939 	err = ice_update_link_info(pf->hw.port_info);
4940 	if (err)
4941 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4942 
4943 	ice_init_link_dflt_override(pf->hw.port_info);
4944 
4945 	ice_check_link_cfg_err(pf,
4946 			       pf->hw.port_info->phy.link_info.link_cfg_err);
4947 
4948 	/* if media available, initialize PHY settings */
4949 	if (pf->hw.port_info->phy.link_info.link_info &
4950 	    ICE_AQ_MEDIA_AVAILABLE) {
4951 		/* not a fatal error if this fails */
4952 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4953 		if (err)
4954 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4955 
4956 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4957 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4958 
4959 			if (vsi)
4960 				ice_configure_phy(vsi);
4961 		}
4962 	} else {
4963 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4964 	}
4965 
4966 	return err;
4967 }
4968 
ice_init_pf_sw(struct ice_pf * pf)4969 static int ice_init_pf_sw(struct ice_pf *pf)
4970 {
4971 	bool dvm = ice_is_dvm_ena(&pf->hw);
4972 	struct ice_vsi *vsi;
4973 	int err;
4974 
4975 	/* create switch struct for the switch element created by FW on boot */
4976 	pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
4977 	if (!pf->first_sw)
4978 		return -ENOMEM;
4979 
4980 	if (pf->hw.evb_veb)
4981 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4982 	else
4983 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4984 
4985 	pf->first_sw->pf = pf;
4986 
4987 	/* record the sw_id available for later use */
4988 	pf->first_sw->sw_id = pf->hw.port_info->sw_id;
4989 
4990 	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
4991 	if (err)
4992 		goto err_aq_set_port_params;
4993 
4994 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
4995 	if (!vsi) {
4996 		err = -ENOMEM;
4997 		goto err_pf_vsi_setup;
4998 	}
4999 
5000 	return 0;
5001 
5002 err_pf_vsi_setup:
5003 err_aq_set_port_params:
5004 	kfree(pf->first_sw);
5005 	return err;
5006 }
5007 
ice_deinit_pf_sw(struct ice_pf * pf)5008 static void ice_deinit_pf_sw(struct ice_pf *pf)
5009 {
5010 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
5011 
5012 	if (!vsi)
5013 		return;
5014 
5015 	ice_vsi_release(vsi);
5016 	kfree(pf->first_sw);
5017 }
5018 
ice_alloc_vsis(struct ice_pf * pf)5019 static int ice_alloc_vsis(struct ice_pf *pf)
5020 {
5021 	struct device *dev = ice_pf_to_dev(pf);
5022 
5023 	pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
5024 	if (!pf->num_alloc_vsi)
5025 		return -EIO;
5026 
5027 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
5028 		dev_warn(dev,
5029 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
5030 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
5031 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
5032 	}
5033 
5034 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
5035 			       GFP_KERNEL);
5036 	if (!pf->vsi)
5037 		return -ENOMEM;
5038 
5039 	pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
5040 				     sizeof(*pf->vsi_stats), GFP_KERNEL);
5041 	if (!pf->vsi_stats) {
5042 		devm_kfree(dev, pf->vsi);
5043 		return -ENOMEM;
5044 	}
5045 
5046 	return 0;
5047 }
5048 
ice_dealloc_vsis(struct ice_pf * pf)5049 static void ice_dealloc_vsis(struct ice_pf *pf)
5050 {
5051 	devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
5052 	pf->vsi_stats = NULL;
5053 
5054 	pf->num_alloc_vsi = 0;
5055 	devm_kfree(ice_pf_to_dev(pf), pf->vsi);
5056 	pf->vsi = NULL;
5057 }
5058 
ice_init_devlink(struct ice_pf * pf)5059 static int ice_init_devlink(struct ice_pf *pf)
5060 {
5061 	int err;
5062 
5063 	err = ice_devlink_register_params(pf);
5064 	if (err)
5065 		return err;
5066 
5067 	ice_devlink_init_regions(pf);
5068 	ice_health_init(pf);
5069 	ice_devlink_register(pf);
5070 
5071 	return 0;
5072 }
5073 
ice_deinit_devlink(struct ice_pf * pf)5074 static void ice_deinit_devlink(struct ice_pf *pf)
5075 {
5076 	ice_devlink_unregister(pf);
5077 	ice_health_deinit(pf);
5078 	ice_devlink_destroy_regions(pf);
5079 	ice_devlink_unregister_params(pf);
5080 }
5081 
ice_init(struct ice_pf * pf)5082 static int ice_init(struct ice_pf *pf)
5083 {
5084 	int err;
5085 
5086 	err = ice_init_dev(pf);
5087 	if (err)
5088 		return err;
5089 
5090 	err = ice_alloc_vsis(pf);
5091 	if (err)
5092 		goto err_alloc_vsis;
5093 
5094 	err = ice_init_pf_sw(pf);
5095 	if (err)
5096 		goto err_init_pf_sw;
5097 
5098 	ice_init_wakeup(pf);
5099 
5100 	err = ice_init_link(pf);
5101 	if (err)
5102 		goto err_init_link;
5103 
5104 	err = ice_send_version(pf);
5105 	if (err)
5106 		goto err_init_link;
5107 
5108 	ice_verify_cacheline_size(pf);
5109 
5110 	if (ice_is_safe_mode(pf))
5111 		ice_set_safe_mode_vlan_cfg(pf);
5112 	else
5113 		/* print PCI link speed and width */
5114 		pcie_print_link_status(pf->pdev);
5115 
5116 	/* ready to go, so clear down state bit */
5117 	clear_bit(ICE_DOWN, pf->state);
5118 	clear_bit(ICE_SERVICE_DIS, pf->state);
5119 
5120 	/* since everything is good, start the service timer */
5121 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5122 
5123 	return 0;
5124 
5125 err_init_link:
5126 	ice_deinit_pf_sw(pf);
5127 err_init_pf_sw:
5128 	ice_dealloc_vsis(pf);
5129 err_alloc_vsis:
5130 	ice_deinit_dev(pf);
5131 	return err;
5132 }
5133 
ice_deinit(struct ice_pf * pf)5134 static void ice_deinit(struct ice_pf *pf)
5135 {
5136 	set_bit(ICE_SERVICE_DIS, pf->state);
5137 	set_bit(ICE_DOWN, pf->state);
5138 
5139 	ice_deinit_pf_sw(pf);
5140 	ice_dealloc_vsis(pf);
5141 	ice_deinit_dev(pf);
5142 }
5143 
5144 /**
5145  * ice_load - load pf by init hw and starting VSI
5146  * @pf: pointer to the pf instance
5147  *
5148  * This function has to be called under devl_lock.
5149  */
ice_load(struct ice_pf * pf)5150 int ice_load(struct ice_pf *pf)
5151 {
5152 	struct ice_vsi *vsi;
5153 	int err;
5154 
5155 	devl_assert_locked(priv_to_devlink(pf));
5156 
5157 	vsi = ice_get_main_vsi(pf);
5158 
5159 	/* init channel list */
5160 	INIT_LIST_HEAD(&vsi->ch_list);
5161 
5162 	err = ice_cfg_netdev(vsi);
5163 	if (err)
5164 		return err;
5165 
5166 	/* Setup DCB netlink interface */
5167 	ice_dcbnl_setup(vsi);
5168 
5169 	err = ice_init_mac_fltr(pf);
5170 	if (err)
5171 		goto err_init_mac_fltr;
5172 
5173 	err = ice_devlink_create_pf_port(pf);
5174 	if (err)
5175 		goto err_devlink_create_pf_port;
5176 
5177 	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
5178 
5179 	err = ice_register_netdev(vsi);
5180 	if (err)
5181 		goto err_register_netdev;
5182 
5183 	err = ice_tc_indir_block_register(vsi);
5184 	if (err)
5185 		goto err_tc_indir_block_register;
5186 
5187 	ice_napi_add(vsi);
5188 
5189 	err = ice_init_rdma(pf);
5190 	if (err)
5191 		goto err_init_rdma;
5192 
5193 	ice_init_features(pf);
5194 	ice_service_task_restart(pf);
5195 
5196 	clear_bit(ICE_DOWN, pf->state);
5197 
5198 	return 0;
5199 
5200 err_init_rdma:
5201 	ice_tc_indir_block_unregister(vsi);
5202 err_tc_indir_block_register:
5203 	ice_unregister_netdev(vsi);
5204 err_register_netdev:
5205 	ice_devlink_destroy_pf_port(pf);
5206 err_devlink_create_pf_port:
5207 err_init_mac_fltr:
5208 	ice_decfg_netdev(vsi);
5209 	return err;
5210 }
5211 
5212 /**
5213  * ice_unload - unload pf by stopping VSI and deinit hw
5214  * @pf: pointer to the pf instance
5215  *
5216  * This function has to be called under devl_lock.
5217  */
ice_unload(struct ice_pf * pf)5218 void ice_unload(struct ice_pf *pf)
5219 {
5220 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
5221 
5222 	devl_assert_locked(priv_to_devlink(pf));
5223 
5224 	ice_deinit_features(pf);
5225 	ice_deinit_rdma(pf);
5226 	ice_tc_indir_block_unregister(vsi);
5227 	ice_unregister_netdev(vsi);
5228 	ice_devlink_destroy_pf_port(pf);
5229 	ice_decfg_netdev(vsi);
5230 }
5231 
ice_probe_recovery_mode(struct ice_pf * pf)5232 static int ice_probe_recovery_mode(struct ice_pf *pf)
5233 {
5234 	struct device *dev = ice_pf_to_dev(pf);
5235 	int err;
5236 
5237 	dev_err(dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode\n");
5238 
5239 	INIT_HLIST_HEAD(&pf->aq_wait_list);
5240 	spin_lock_init(&pf->aq_wait_lock);
5241 	init_waitqueue_head(&pf->aq_wait_queue);
5242 
5243 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
5244 	pf->serv_tmr_period = HZ;
5245 	INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode);
5246 	clear_bit(ICE_SERVICE_SCHED, pf->state);
5247 	err = ice_create_all_ctrlq(&pf->hw);
5248 	if (err)
5249 		return err;
5250 
5251 	scoped_guard(devl, priv_to_devlink(pf)) {
5252 		err = ice_init_devlink(pf);
5253 		if (err)
5254 			return err;
5255 	}
5256 
5257 	ice_service_task_restart(pf);
5258 
5259 	return 0;
5260 }
5261 
5262 /**
5263  * ice_probe - Device initialization routine
5264  * @pdev: PCI device information struct
5265  * @ent: entry in ice_pci_tbl
5266  *
5267  * Returns 0 on success, negative on failure
5268  */
5269 static int
ice_probe(struct pci_dev * pdev,const struct pci_device_id __always_unused * ent)5270 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
5271 {
5272 	struct device *dev = &pdev->dev;
5273 	struct ice_adapter *adapter;
5274 	struct ice_pf *pf;
5275 	struct ice_hw *hw;
5276 	int err;
5277 
5278 	if (pdev->is_virtfn) {
5279 		dev_err(dev, "can't probe a virtual function\n");
5280 		return -EINVAL;
5281 	}
5282 
5283 	/* when under a kdump kernel initiate a reset before enabling the
5284 	 * device in order to clear out any pending DMA transactions. These
5285 	 * transactions can cause some systems to machine check when doing
5286 	 * the pcim_enable_device() below.
5287 	 */
5288 	if (is_kdump_kernel()) {
5289 		pci_save_state(pdev);
5290 		pci_clear_master(pdev);
5291 		err = pcie_flr(pdev);
5292 		if (err)
5293 			return err;
5294 		pci_restore_state(pdev);
5295 	}
5296 
5297 	/* this driver uses devres, see
5298 	 * Documentation/driver-api/driver-model/devres.rst
5299 	 */
5300 	err = pcim_enable_device(pdev);
5301 	if (err)
5302 		return err;
5303 
5304 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
5305 	if (err) {
5306 		dev_err(dev, "BAR0 I/O map error %d\n", err);
5307 		return err;
5308 	}
5309 
5310 	pf = ice_allocate_pf(dev);
5311 	if (!pf)
5312 		return -ENOMEM;
5313 
5314 	/* initialize Auxiliary index to invalid value */
5315 	pf->aux_idx = -1;
5316 
5317 	/* set up for high or low DMA */
5318 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
5319 	if (err) {
5320 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
5321 		return err;
5322 	}
5323 
5324 	pci_set_master(pdev);
5325 	pf->pdev = pdev;
5326 	pci_set_drvdata(pdev, pf);
5327 	set_bit(ICE_DOWN, pf->state);
5328 	/* Disable service task until DOWN bit is cleared */
5329 	set_bit(ICE_SERVICE_DIS, pf->state);
5330 
5331 	hw = &pf->hw;
5332 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
5333 	pci_save_state(pdev);
5334 
5335 	hw->back = pf;
5336 	hw->port_info = NULL;
5337 	hw->vendor_id = pdev->vendor;
5338 	hw->device_id = pdev->device;
5339 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
5340 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
5341 	hw->subsystem_device_id = pdev->subsystem_device;
5342 	hw->bus.device = PCI_SLOT(pdev->devfn);
5343 	hw->bus.func = PCI_FUNC(pdev->devfn);
5344 	ice_set_ctrlq_len(hw);
5345 
5346 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
5347 
5348 #ifndef CONFIG_DYNAMIC_DEBUG
5349 	if (debug < -1)
5350 		hw->debug_mask = debug;
5351 #endif
5352 
5353 	if (ice_is_recovery_mode(hw))
5354 		return ice_probe_recovery_mode(pf);
5355 
5356 	err = ice_init_hw(hw);
5357 	if (err) {
5358 		dev_err(dev, "ice_init_hw failed: %d\n", err);
5359 		return err;
5360 	}
5361 
5362 	adapter = ice_adapter_get(pdev);
5363 	if (IS_ERR(adapter)) {
5364 		err = PTR_ERR(adapter);
5365 		goto unroll_hw_init;
5366 	}
5367 	pf->adapter = adapter;
5368 
5369 	err = ice_init(pf);
5370 	if (err)
5371 		goto unroll_adapter;
5372 
5373 	devl_lock(priv_to_devlink(pf));
5374 	err = ice_load(pf);
5375 	if (err)
5376 		goto unroll_init;
5377 
5378 	err = ice_init_devlink(pf);
5379 	if (err)
5380 		goto unroll_load;
5381 	devl_unlock(priv_to_devlink(pf));
5382 
5383 	return 0;
5384 
5385 unroll_load:
5386 	ice_unload(pf);
5387 unroll_init:
5388 	devl_unlock(priv_to_devlink(pf));
5389 	ice_deinit(pf);
5390 unroll_adapter:
5391 	ice_adapter_put(pdev);
5392 unroll_hw_init:
5393 	ice_deinit_hw(hw);
5394 	return err;
5395 }
5396 
5397 /**
5398  * ice_set_wake - enable or disable Wake on LAN
5399  * @pf: pointer to the PF struct
5400  *
5401  * Simple helper for WoL control
5402  */
ice_set_wake(struct ice_pf * pf)5403 static void ice_set_wake(struct ice_pf *pf)
5404 {
5405 	struct ice_hw *hw = &pf->hw;
5406 	bool wol = pf->wol_ena;
5407 
5408 	/* clear wake state, otherwise new wake events won't fire */
5409 	wr32(hw, PFPM_WUS, U32_MAX);
5410 
5411 	/* enable / disable APM wake up, no RMW needed */
5412 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5413 
5414 	/* set magic packet filter enabled */
5415 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5416 }
5417 
5418 /**
5419  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5420  * @pf: pointer to the PF struct
5421  *
5422  * Issue firmware command to enable multicast magic wake, making
5423  * sure that any locally administered address (LAA) is used for
5424  * wake, and that PF reset doesn't undo the LAA.
5425  */
ice_setup_mc_magic_wake(struct ice_pf * pf)5426 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5427 {
5428 	struct device *dev = ice_pf_to_dev(pf);
5429 	struct ice_hw *hw = &pf->hw;
5430 	u8 mac_addr[ETH_ALEN];
5431 	struct ice_vsi *vsi;
5432 	int status;
5433 	u8 flags;
5434 
5435 	if (!pf->wol_ena)
5436 		return;
5437 
5438 	vsi = ice_get_main_vsi(pf);
5439 	if (!vsi)
5440 		return;
5441 
5442 	/* Get current MAC address in case it's an LAA */
5443 	if (vsi->netdev)
5444 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5445 	else
5446 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5447 
5448 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5449 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5450 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5451 
5452 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5453 	if (status)
5454 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5455 			status, ice_aq_str(hw->adminq.sq_last_status));
5456 }
5457 
5458 /**
5459  * ice_remove - Device removal routine
5460  * @pdev: PCI device information struct
5461  */
ice_remove(struct pci_dev * pdev)5462 static void ice_remove(struct pci_dev *pdev)
5463 {
5464 	struct ice_pf *pf = pci_get_drvdata(pdev);
5465 	int i;
5466 
5467 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5468 		if (!ice_is_reset_in_progress(pf->state))
5469 			break;
5470 		msleep(100);
5471 	}
5472 
5473 	if (ice_is_recovery_mode(&pf->hw)) {
5474 		ice_service_task_stop(pf);
5475 		scoped_guard(devl, priv_to_devlink(pf)) {
5476 			ice_deinit_devlink(pf);
5477 		}
5478 		return;
5479 	}
5480 
5481 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5482 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5483 		ice_free_vfs(pf);
5484 	}
5485 
5486 	ice_hwmon_exit(pf);
5487 
5488 	ice_service_task_stop(pf);
5489 	ice_aq_cancel_waiting_tasks(pf);
5490 	set_bit(ICE_DOWN, pf->state);
5491 
5492 	if (!ice_is_safe_mode(pf))
5493 		ice_remove_arfs(pf);
5494 
5495 	devl_lock(priv_to_devlink(pf));
5496 	ice_dealloc_all_dynamic_ports(pf);
5497 	ice_deinit_devlink(pf);
5498 
5499 	ice_unload(pf);
5500 	devl_unlock(priv_to_devlink(pf));
5501 
5502 	ice_deinit(pf);
5503 	ice_vsi_release_all(pf);
5504 
5505 	ice_setup_mc_magic_wake(pf);
5506 	ice_set_wake(pf);
5507 
5508 	ice_adapter_put(pdev);
5509 }
5510 
5511 /**
5512  * ice_shutdown - PCI callback for shutting down device
5513  * @pdev: PCI device information struct
5514  */
ice_shutdown(struct pci_dev * pdev)5515 static void ice_shutdown(struct pci_dev *pdev)
5516 {
5517 	struct ice_pf *pf = pci_get_drvdata(pdev);
5518 
5519 	ice_remove(pdev);
5520 
5521 	if (system_state == SYSTEM_POWER_OFF) {
5522 		pci_wake_from_d3(pdev, pf->wol_ena);
5523 		pci_set_power_state(pdev, PCI_D3hot);
5524 	}
5525 }
5526 
5527 /**
5528  * ice_prepare_for_shutdown - prep for PCI shutdown
5529  * @pf: board private structure
5530  *
5531  * Inform or close all dependent features in prep for PCI device shutdown
5532  */
ice_prepare_for_shutdown(struct ice_pf * pf)5533 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5534 {
5535 	struct ice_hw *hw = &pf->hw;
5536 	u32 v;
5537 
5538 	/* Notify VFs of impending reset */
5539 	if (ice_check_sq_alive(hw, &hw->mailboxq))
5540 		ice_vc_notify_reset(pf);
5541 
5542 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5543 
5544 	/* disable the VSIs and their queues that are not already DOWN */
5545 	ice_pf_dis_all_vsi(pf, false);
5546 
5547 	ice_for_each_vsi(pf, v)
5548 		if (pf->vsi[v])
5549 			pf->vsi[v]->vsi_num = 0;
5550 
5551 	ice_shutdown_all_ctrlq(hw, true);
5552 }
5553 
5554 /**
5555  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5556  * @pf: board private structure to reinitialize
5557  *
5558  * This routine reinitialize interrupt scheme that was cleared during
5559  * power management suspend callback.
5560  *
5561  * This should be called during resume routine to re-allocate the q_vectors
5562  * and reacquire interrupts.
5563  */
ice_reinit_interrupt_scheme(struct ice_pf * pf)5564 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5565 {
5566 	struct device *dev = ice_pf_to_dev(pf);
5567 	int ret, v;
5568 
5569 	/* Since we clear MSIX flag during suspend, we need to
5570 	 * set it back during resume...
5571 	 */
5572 
5573 	ret = ice_init_interrupt_scheme(pf);
5574 	if (ret) {
5575 		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5576 		return ret;
5577 	}
5578 
5579 	/* Remap vectors and rings, after successful re-init interrupts */
5580 	ice_for_each_vsi(pf, v) {
5581 		if (!pf->vsi[v])
5582 			continue;
5583 
5584 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5585 		if (ret)
5586 			goto err_reinit;
5587 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5588 		rtnl_lock();
5589 		ice_vsi_set_napi_queues(pf->vsi[v]);
5590 		rtnl_unlock();
5591 	}
5592 
5593 	ret = ice_req_irq_msix_misc(pf);
5594 	if (ret) {
5595 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5596 			ret);
5597 		goto err_reinit;
5598 	}
5599 
5600 	return 0;
5601 
5602 err_reinit:
5603 	while (v--)
5604 		if (pf->vsi[v]) {
5605 			rtnl_lock();
5606 			ice_vsi_clear_napi_queues(pf->vsi[v]);
5607 			rtnl_unlock();
5608 			ice_vsi_free_q_vectors(pf->vsi[v]);
5609 		}
5610 
5611 	return ret;
5612 }
5613 
5614 /**
5615  * ice_suspend
5616  * @dev: generic device information structure
5617  *
5618  * Power Management callback to quiesce the device and prepare
5619  * for D3 transition.
5620  */
ice_suspend(struct device * dev)5621 static int ice_suspend(struct device *dev)
5622 {
5623 	struct pci_dev *pdev = to_pci_dev(dev);
5624 	struct ice_pf *pf;
5625 	int disabled, v;
5626 
5627 	pf = pci_get_drvdata(pdev);
5628 
5629 	if (!ice_pf_state_is_nominal(pf)) {
5630 		dev_err(dev, "Device is not ready, no need to suspend it\n");
5631 		return -EBUSY;
5632 	}
5633 
5634 	/* Stop watchdog tasks until resume completion.
5635 	 * Even though it is most likely that the service task is
5636 	 * disabled if the device is suspended or down, the service task's
5637 	 * state is controlled by a different state bit, and we should
5638 	 * store and honor whatever state that bit is in at this point.
5639 	 */
5640 	disabled = ice_service_task_stop(pf);
5641 
5642 	ice_deinit_rdma(pf);
5643 
5644 	/* Already suspended?, then there is nothing to do */
5645 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5646 		if (!disabled)
5647 			ice_service_task_restart(pf);
5648 		return 0;
5649 	}
5650 
5651 	if (test_bit(ICE_DOWN, pf->state) ||
5652 	    ice_is_reset_in_progress(pf->state)) {
5653 		dev_err(dev, "can't suspend device in reset or already down\n");
5654 		if (!disabled)
5655 			ice_service_task_restart(pf);
5656 		return 0;
5657 	}
5658 
5659 	ice_setup_mc_magic_wake(pf);
5660 
5661 	ice_prepare_for_shutdown(pf);
5662 
5663 	ice_set_wake(pf);
5664 
5665 	/* Free vectors, clear the interrupt scheme and release IRQs
5666 	 * for proper hibernation, especially with large number of CPUs.
5667 	 * Otherwise hibernation might fail when mapping all the vectors back
5668 	 * to CPU0.
5669 	 */
5670 	ice_free_irq_msix_misc(pf);
5671 	ice_for_each_vsi(pf, v) {
5672 		if (!pf->vsi[v])
5673 			continue;
5674 		rtnl_lock();
5675 		ice_vsi_clear_napi_queues(pf->vsi[v]);
5676 		rtnl_unlock();
5677 		ice_vsi_free_q_vectors(pf->vsi[v]);
5678 	}
5679 	ice_clear_interrupt_scheme(pf);
5680 
5681 	pci_save_state(pdev);
5682 	pci_wake_from_d3(pdev, pf->wol_ena);
5683 	pci_set_power_state(pdev, PCI_D3hot);
5684 	return 0;
5685 }
5686 
5687 /**
5688  * ice_resume - PM callback for waking up from D3
5689  * @dev: generic device information structure
5690  */
ice_resume(struct device * dev)5691 static int ice_resume(struct device *dev)
5692 {
5693 	struct pci_dev *pdev = to_pci_dev(dev);
5694 	enum ice_reset_req reset_type;
5695 	struct ice_pf *pf;
5696 	struct ice_hw *hw;
5697 	int ret;
5698 
5699 	pci_set_power_state(pdev, PCI_D0);
5700 	pci_restore_state(pdev);
5701 	pci_save_state(pdev);
5702 
5703 	if (!pci_device_is_present(pdev))
5704 		return -ENODEV;
5705 
5706 	ret = pci_enable_device_mem(pdev);
5707 	if (ret) {
5708 		dev_err(dev, "Cannot enable device after suspend\n");
5709 		return ret;
5710 	}
5711 
5712 	pf = pci_get_drvdata(pdev);
5713 	hw = &pf->hw;
5714 
5715 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
5716 	ice_print_wake_reason(pf);
5717 
5718 	/* We cleared the interrupt scheme when we suspended, so we need to
5719 	 * restore it now to resume device functionality.
5720 	 */
5721 	ret = ice_reinit_interrupt_scheme(pf);
5722 	if (ret)
5723 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5724 
5725 	ret = ice_init_rdma(pf);
5726 	if (ret)
5727 		dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
5728 			ret);
5729 
5730 	clear_bit(ICE_DOWN, pf->state);
5731 	/* Now perform PF reset and rebuild */
5732 	reset_type = ICE_RESET_PFR;
5733 	/* re-enable service task for reset, but allow reset to schedule it */
5734 	clear_bit(ICE_SERVICE_DIS, pf->state);
5735 
5736 	if (ice_schedule_reset(pf, reset_type))
5737 		dev_err(dev, "Reset during resume failed.\n");
5738 
5739 	clear_bit(ICE_SUSPENDED, pf->state);
5740 	ice_service_task_restart(pf);
5741 
5742 	/* Restart the service task */
5743 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5744 
5745 	return 0;
5746 }
5747 
5748 /**
5749  * ice_pci_err_detected - warning that PCI error has been detected
5750  * @pdev: PCI device information struct
5751  * @err: the type of PCI error
5752  *
5753  * Called to warn that something happened on the PCI bus and the error handling
5754  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
5755  */
5756 static pci_ers_result_t
ice_pci_err_detected(struct pci_dev * pdev,pci_channel_state_t err)5757 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5758 {
5759 	struct ice_pf *pf = pci_get_drvdata(pdev);
5760 
5761 	if (!pf) {
5762 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5763 			__func__, err);
5764 		return PCI_ERS_RESULT_DISCONNECT;
5765 	}
5766 
5767 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5768 		ice_service_task_stop(pf);
5769 
5770 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5771 			set_bit(ICE_PFR_REQ, pf->state);
5772 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5773 		}
5774 	}
5775 
5776 	return PCI_ERS_RESULT_NEED_RESET;
5777 }
5778 
5779 /**
5780  * ice_pci_err_slot_reset - a PCI slot reset has just happened
5781  * @pdev: PCI device information struct
5782  *
5783  * Called to determine if the driver can recover from the PCI slot reset by
5784  * using a register read to determine if the device is recoverable.
5785  */
ice_pci_err_slot_reset(struct pci_dev * pdev)5786 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5787 {
5788 	struct ice_pf *pf = pci_get_drvdata(pdev);
5789 	pci_ers_result_t result;
5790 	int err;
5791 	u32 reg;
5792 
5793 	err = pci_enable_device_mem(pdev);
5794 	if (err) {
5795 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5796 			err);
5797 		result = PCI_ERS_RESULT_DISCONNECT;
5798 	} else {
5799 		pci_set_master(pdev);
5800 		pci_restore_state(pdev);
5801 		pci_save_state(pdev);
5802 		pci_wake_from_d3(pdev, false);
5803 
5804 		/* Check for life */
5805 		reg = rd32(&pf->hw, GLGEN_RTRIG);
5806 		if (!reg)
5807 			result = PCI_ERS_RESULT_RECOVERED;
5808 		else
5809 			result = PCI_ERS_RESULT_DISCONNECT;
5810 	}
5811 
5812 	return result;
5813 }
5814 
5815 /**
5816  * ice_pci_err_resume - restart operations after PCI error recovery
5817  * @pdev: PCI device information struct
5818  *
5819  * Called to allow the driver to bring things back up after PCI error and/or
5820  * reset recovery have finished
5821  */
ice_pci_err_resume(struct pci_dev * pdev)5822 static void ice_pci_err_resume(struct pci_dev *pdev)
5823 {
5824 	struct ice_pf *pf = pci_get_drvdata(pdev);
5825 
5826 	if (!pf) {
5827 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5828 			__func__);
5829 		return;
5830 	}
5831 
5832 	if (test_bit(ICE_SUSPENDED, pf->state)) {
5833 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5834 			__func__);
5835 		return;
5836 	}
5837 
5838 	ice_restore_all_vfs_msi_state(pf);
5839 
5840 	ice_do_reset(pf, ICE_RESET_PFR);
5841 	ice_service_task_restart(pf);
5842 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5843 }
5844 
5845 /**
5846  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5847  * @pdev: PCI device information struct
5848  */
ice_pci_err_reset_prepare(struct pci_dev * pdev)5849 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5850 {
5851 	struct ice_pf *pf = pci_get_drvdata(pdev);
5852 
5853 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5854 		ice_service_task_stop(pf);
5855 
5856 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5857 			set_bit(ICE_PFR_REQ, pf->state);
5858 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5859 		}
5860 	}
5861 }
5862 
5863 /**
5864  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5865  * @pdev: PCI device information struct
5866  */
ice_pci_err_reset_done(struct pci_dev * pdev)5867 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5868 {
5869 	ice_pci_err_resume(pdev);
5870 }
5871 
5872 /* ice_pci_tbl - PCI Device ID Table
5873  *
5874  * Wildcard entries (PCI_ANY_ID) should come last
5875  * Last entry must be all 0s
5876  *
5877  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5878  *   Class, Class Mask, private data (not used) }
5879  */
5880 static const struct pci_device_id ice_pci_tbl[] = {
5881 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) },
5882 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) },
5883 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) },
5884 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) },
5885 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) },
5886 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) },
5887 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) },
5888 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) },
5889 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) },
5890 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) },
5891 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) },
5892 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) },
5893 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) },
5894 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) },
5895 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) },
5896 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) },
5897 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) },
5898 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) },
5899 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) },
5900 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) },
5901 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) },
5902 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) },
5903 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) },
5904 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
5905 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
5906 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
5907 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), },
5908 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), },
5909 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), },
5910 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), },
5911 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) },
5912 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) },
5913 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) },
5914 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) },
5915 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), },
5916 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), },
5917 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), },
5918 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), },
5919 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), },
5920 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), },
5921 	/* required last entry */
5922 	{}
5923 };
5924 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5925 
5926 static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5927 
5928 static const struct pci_error_handlers ice_pci_err_handler = {
5929 	.error_detected = ice_pci_err_detected,
5930 	.slot_reset = ice_pci_err_slot_reset,
5931 	.reset_prepare = ice_pci_err_reset_prepare,
5932 	.reset_done = ice_pci_err_reset_done,
5933 	.resume = ice_pci_err_resume
5934 };
5935 
5936 static struct pci_driver ice_driver = {
5937 	.name = KBUILD_MODNAME,
5938 	.id_table = ice_pci_tbl,
5939 	.probe = ice_probe,
5940 	.remove = ice_remove,
5941 	.driver.pm = pm_sleep_ptr(&ice_pm_ops),
5942 	.shutdown = ice_shutdown,
5943 	.sriov_configure = ice_sriov_configure,
5944 	.sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
5945 	.sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count,
5946 	.err_handler = &ice_pci_err_handler
5947 };
5948 
5949 /**
5950  * ice_module_init - Driver registration routine
5951  *
5952  * ice_module_init is the first routine called when the driver is
5953  * loaded. All it does is register with the PCI subsystem.
5954  */
ice_module_init(void)5955 static int __init ice_module_init(void)
5956 {
5957 	int status = -ENOMEM;
5958 
5959 	pr_info("%s\n", ice_driver_string);
5960 	pr_info("%s\n", ice_copyright);
5961 
5962 	ice_adv_lnk_speed_maps_init();
5963 
5964 	ice_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, KBUILD_MODNAME);
5965 	if (!ice_wq) {
5966 		pr_err("Failed to create workqueue\n");
5967 		return status;
5968 	}
5969 
5970 	ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
5971 	if (!ice_lag_wq) {
5972 		pr_err("Failed to create LAG workqueue\n");
5973 		goto err_dest_wq;
5974 	}
5975 
5976 	ice_debugfs_init();
5977 
5978 	status = pci_register_driver(&ice_driver);
5979 	if (status) {
5980 		pr_err("failed to register PCI driver, err %d\n", status);
5981 		goto err_dest_lag_wq;
5982 	}
5983 
5984 	status = ice_sf_driver_register();
5985 	if (status) {
5986 		pr_err("Failed to register SF driver, err %d\n", status);
5987 		goto err_sf_driver;
5988 	}
5989 
5990 	return 0;
5991 
5992 err_sf_driver:
5993 	pci_unregister_driver(&ice_driver);
5994 err_dest_lag_wq:
5995 	destroy_workqueue(ice_lag_wq);
5996 	ice_debugfs_exit();
5997 err_dest_wq:
5998 	destroy_workqueue(ice_wq);
5999 	return status;
6000 }
6001 module_init(ice_module_init);
6002 
6003 /**
6004  * ice_module_exit - Driver exit cleanup routine
6005  *
6006  * ice_module_exit is called just before the driver is removed
6007  * from memory.
6008  */
ice_module_exit(void)6009 static void __exit ice_module_exit(void)
6010 {
6011 	ice_sf_driver_unregister();
6012 	pci_unregister_driver(&ice_driver);
6013 	ice_debugfs_exit();
6014 	destroy_workqueue(ice_wq);
6015 	destroy_workqueue(ice_lag_wq);
6016 	pr_info("module unloaded\n");
6017 }
6018 module_exit(ice_module_exit);
6019 
6020 /**
6021  * ice_set_mac_address - NDO callback to set MAC address
6022  * @netdev: network interface device structure
6023  * @pi: pointer to an address structure
6024  *
6025  * Returns 0 on success, negative on failure
6026  */
ice_set_mac_address(struct net_device * netdev,void * pi)6027 static int ice_set_mac_address(struct net_device *netdev, void *pi)
6028 {
6029 	struct ice_netdev_priv *np = netdev_priv(netdev);
6030 	struct ice_vsi *vsi = np->vsi;
6031 	struct ice_pf *pf = vsi->back;
6032 	struct ice_hw *hw = &pf->hw;
6033 	struct sockaddr *addr = pi;
6034 	u8 old_mac[ETH_ALEN];
6035 	u8 flags = 0;
6036 	u8 *mac;
6037 	int err;
6038 
6039 	mac = (u8 *)addr->sa_data;
6040 
6041 	if (!is_valid_ether_addr(mac))
6042 		return -EADDRNOTAVAIL;
6043 
6044 	if (test_bit(ICE_DOWN, pf->state) ||
6045 	    ice_is_reset_in_progress(pf->state)) {
6046 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
6047 			   mac);
6048 		return -EBUSY;
6049 	}
6050 
6051 	if (ice_chnl_dmac_fltr_cnt(pf)) {
6052 		netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
6053 			   mac);
6054 		return -EAGAIN;
6055 	}
6056 
6057 	netif_addr_lock_bh(netdev);
6058 	ether_addr_copy(old_mac, netdev->dev_addr);
6059 	/* change the netdev's MAC address */
6060 	eth_hw_addr_set(netdev, mac);
6061 	netif_addr_unlock_bh(netdev);
6062 
6063 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
6064 	err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
6065 	if (err && err != -ENOENT) {
6066 		err = -EADDRNOTAVAIL;
6067 		goto err_update_filters;
6068 	}
6069 
6070 	/* Add filter for new MAC. If filter exists, return success */
6071 	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
6072 	if (err == -EEXIST) {
6073 		/* Although this MAC filter is already present in hardware it's
6074 		 * possible in some cases (e.g. bonding) that dev_addr was
6075 		 * modified outside of the driver and needs to be restored back
6076 		 * to this value.
6077 		 */
6078 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
6079 
6080 		return 0;
6081 	} else if (err) {
6082 		/* error if the new filter addition failed */
6083 		err = -EADDRNOTAVAIL;
6084 	}
6085 
6086 err_update_filters:
6087 	if (err) {
6088 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
6089 			   mac);
6090 		netif_addr_lock_bh(netdev);
6091 		eth_hw_addr_set(netdev, old_mac);
6092 		netif_addr_unlock_bh(netdev);
6093 		return err;
6094 	}
6095 
6096 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
6097 		   netdev->dev_addr);
6098 
6099 	/* write new MAC address to the firmware */
6100 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
6101 	err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
6102 	if (err) {
6103 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
6104 			   mac, err);
6105 	}
6106 	return 0;
6107 }
6108 
6109 /**
6110  * ice_set_rx_mode - NDO callback to set the netdev filters
6111  * @netdev: network interface device structure
6112  */
ice_set_rx_mode(struct net_device * netdev)6113 static void ice_set_rx_mode(struct net_device *netdev)
6114 {
6115 	struct ice_netdev_priv *np = netdev_priv(netdev);
6116 	struct ice_vsi *vsi = np->vsi;
6117 
6118 	if (!vsi || ice_is_switchdev_running(vsi->back))
6119 		return;
6120 
6121 	/* Set the flags to synchronize filters
6122 	 * ndo_set_rx_mode may be triggered even without a change in netdev
6123 	 * flags
6124 	 */
6125 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
6126 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
6127 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
6128 
6129 	/* schedule our worker thread which will take care of
6130 	 * applying the new filter changes
6131 	 */
6132 	ice_service_task_schedule(vsi->back);
6133 }
6134 
6135 /**
6136  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
6137  * @netdev: network interface device structure
6138  * @queue_index: Queue ID
6139  * @maxrate: maximum bandwidth in Mbps
6140  */
6141 static int
ice_set_tx_maxrate(struct net_device * netdev,int queue_index,u32 maxrate)6142 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
6143 {
6144 	struct ice_netdev_priv *np = netdev_priv(netdev);
6145 	struct ice_vsi *vsi = np->vsi;
6146 	u16 q_handle;
6147 	int status;
6148 	u8 tc;
6149 
6150 	/* Validate maxrate requested is within permitted range */
6151 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
6152 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
6153 			   maxrate, queue_index);
6154 		return -EINVAL;
6155 	}
6156 
6157 	q_handle = vsi->tx_rings[queue_index]->q_handle;
6158 	tc = ice_dcb_get_tc(vsi, queue_index);
6159 
6160 	vsi = ice_locate_vsi_using_queue(vsi, queue_index);
6161 	if (!vsi) {
6162 		netdev_err(netdev, "Invalid VSI for given queue %d\n",
6163 			   queue_index);
6164 		return -EINVAL;
6165 	}
6166 
6167 	/* Set BW back to default, when user set maxrate to 0 */
6168 	if (!maxrate)
6169 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
6170 					       q_handle, ICE_MAX_BW);
6171 	else
6172 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
6173 					  q_handle, ICE_MAX_BW, maxrate * 1000);
6174 	if (status)
6175 		netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
6176 			   status);
6177 
6178 	return status;
6179 }
6180 
6181 /**
6182  * ice_fdb_add - add an entry to the hardware database
6183  * @ndm: the input from the stack
6184  * @tb: pointer to array of nladdr (unused)
6185  * @dev: the net device pointer
6186  * @addr: the MAC address entry being added
6187  * @vid: VLAN ID
6188  * @flags: instructions from stack about fdb operation
6189  * @notified: whether notification was emitted
6190  * @extack: netlink extended ack
6191  */
6192 static int
ice_fdb_add(struct ndmsg * ndm,struct nlattr __always_unused * tb[],struct net_device * dev,const unsigned char * addr,u16 vid,u16 flags,bool * notified,struct netlink_ext_ack __always_unused * extack)6193 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
6194 	    struct net_device *dev, const unsigned char *addr, u16 vid,
6195 	    u16 flags, bool *notified,
6196 	    struct netlink_ext_ack __always_unused *extack)
6197 {
6198 	int err;
6199 
6200 	if (vid) {
6201 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
6202 		return -EINVAL;
6203 	}
6204 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
6205 		netdev_err(dev, "FDB only supports static addresses\n");
6206 		return -EINVAL;
6207 	}
6208 
6209 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
6210 		err = dev_uc_add_excl(dev, addr);
6211 	else if (is_multicast_ether_addr(addr))
6212 		err = dev_mc_add_excl(dev, addr);
6213 	else
6214 		err = -EINVAL;
6215 
6216 	/* Only return duplicate errors if NLM_F_EXCL is set */
6217 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
6218 		err = 0;
6219 
6220 	return err;
6221 }
6222 
6223 /**
6224  * ice_fdb_del - delete an entry from the hardware database
6225  * @ndm: the input from the stack
6226  * @tb: pointer to array of nladdr (unused)
6227  * @dev: the net device pointer
6228  * @addr: the MAC address entry being added
6229  * @vid: VLAN ID
6230  * @notified: whether notification was emitted
6231  * @extack: netlink extended ack
6232  */
6233 static int
ice_fdb_del(struct ndmsg * ndm,__always_unused struct nlattr * tb[],struct net_device * dev,const unsigned char * addr,__always_unused u16 vid,bool * notified,struct netlink_ext_ack * extack)6234 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
6235 	    struct net_device *dev, const unsigned char *addr,
6236 	    __always_unused u16 vid, bool *notified,
6237 	    struct netlink_ext_ack *extack)
6238 {
6239 	int err;
6240 
6241 	if (ndm->ndm_state & NUD_PERMANENT) {
6242 		netdev_err(dev, "FDB only supports static addresses\n");
6243 		return -EINVAL;
6244 	}
6245 
6246 	if (is_unicast_ether_addr(addr))
6247 		err = dev_uc_del(dev, addr);
6248 	else if (is_multicast_ether_addr(addr))
6249 		err = dev_mc_del(dev, addr);
6250 	else
6251 		err = -EINVAL;
6252 
6253 	return err;
6254 }
6255 
6256 #define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
6257 					 NETIF_F_HW_VLAN_CTAG_TX | \
6258 					 NETIF_F_HW_VLAN_STAG_RX | \
6259 					 NETIF_F_HW_VLAN_STAG_TX)
6260 
6261 #define NETIF_VLAN_STRIPPING_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
6262 					 NETIF_F_HW_VLAN_STAG_RX)
6263 
6264 #define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | \
6265 					 NETIF_F_HW_VLAN_STAG_FILTER)
6266 
6267 /**
6268  * ice_fix_features - fix the netdev features flags based on device limitations
6269  * @netdev: ptr to the netdev that flags are being fixed on
6270  * @features: features that need to be checked and possibly fixed
6271  *
6272  * Make sure any fixups are made to features in this callback. This enables the
6273  * driver to not have to check unsupported configurations throughout the driver
6274  * because that's the responsiblity of this callback.
6275  *
6276  * Single VLAN Mode (SVM) Supported Features:
6277  *	NETIF_F_HW_VLAN_CTAG_FILTER
6278  *	NETIF_F_HW_VLAN_CTAG_RX
6279  *	NETIF_F_HW_VLAN_CTAG_TX
6280  *
6281  * Double VLAN Mode (DVM) Supported Features:
6282  *	NETIF_F_HW_VLAN_CTAG_FILTER
6283  *	NETIF_F_HW_VLAN_CTAG_RX
6284  *	NETIF_F_HW_VLAN_CTAG_TX
6285  *
6286  *	NETIF_F_HW_VLAN_STAG_FILTER
6287  *	NETIF_HW_VLAN_STAG_RX
6288  *	NETIF_HW_VLAN_STAG_TX
6289  *
6290  * Features that need fixing:
6291  *	Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
6292  *	These are mutually exlusive as the VSI context cannot support multiple
6293  *	VLAN ethertypes simultaneously for stripping and/or insertion. If this
6294  *	is not done, then default to clearing the requested STAG offload
6295  *	settings.
6296  *
6297  *	All supported filtering has to be enabled or disabled together. For
6298  *	example, in DVM, CTAG and STAG filtering have to be enabled and disabled
6299  *	together. If this is not done, then default to VLAN filtering disabled.
6300  *	These are mutually exclusive as there is currently no way to
6301  *	enable/disable VLAN filtering based on VLAN ethertype when using VLAN
6302  *	prune rules.
6303  */
6304 static netdev_features_t
ice_fix_features(struct net_device * netdev,netdev_features_t features)6305 ice_fix_features(struct net_device *netdev, netdev_features_t features)
6306 {
6307 	struct ice_netdev_priv *np = netdev_priv(netdev);
6308 	netdev_features_t req_vlan_fltr, cur_vlan_fltr;
6309 	bool cur_ctag, cur_stag, req_ctag, req_stag;
6310 
6311 	cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
6312 	cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6313 	cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6314 
6315 	req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
6316 	req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6317 	req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6318 
6319 	if (req_vlan_fltr != cur_vlan_fltr) {
6320 		if (ice_is_dvm_ena(&np->vsi->back->hw)) {
6321 			if (req_ctag && req_stag) {
6322 				features |= NETIF_VLAN_FILTERING_FEATURES;
6323 			} else if (!req_ctag && !req_stag) {
6324 				features &= ~NETIF_VLAN_FILTERING_FEATURES;
6325 			} else if ((!cur_ctag && req_ctag && !cur_stag) ||
6326 				   (!cur_stag && req_stag && !cur_ctag)) {
6327 				features |= NETIF_VLAN_FILTERING_FEATURES;
6328 				netdev_warn(netdev,  "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
6329 			} else if ((cur_ctag && !req_ctag && cur_stag) ||
6330 				   (cur_stag && !req_stag && cur_ctag)) {
6331 				features &= ~NETIF_VLAN_FILTERING_FEATURES;
6332 				netdev_warn(netdev,  "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
6333 			}
6334 		} else {
6335 			if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
6336 				netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
6337 
6338 			if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
6339 				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6340 		}
6341 	}
6342 
6343 	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
6344 	    (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
6345 		netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
6346 		features &= ~(NETIF_F_HW_VLAN_STAG_RX |
6347 			      NETIF_F_HW_VLAN_STAG_TX);
6348 	}
6349 
6350 	if (!(netdev->features & NETIF_F_RXFCS) &&
6351 	    (features & NETIF_F_RXFCS) &&
6352 	    (features & NETIF_VLAN_STRIPPING_FEATURES) &&
6353 	    !ice_vsi_has_non_zero_vlans(np->vsi)) {
6354 		netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
6355 		features &= ~NETIF_VLAN_STRIPPING_FEATURES;
6356 	}
6357 
6358 	return features;
6359 }
6360 
6361 /**
6362  * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
6363  * @vsi: PF's VSI
6364  * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order
6365  *
6366  * Store current stripped VLAN proto in ring packet context,
6367  * so it can be accessed more efficiently by packet processing code.
6368  */
6369 static void
ice_set_rx_rings_vlan_proto(struct ice_vsi * vsi,__be16 vlan_ethertype)6370 ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype)
6371 {
6372 	u16 i;
6373 
6374 	ice_for_each_alloc_rxq(vsi, i)
6375 		vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype;
6376 }
6377 
6378 /**
6379  * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6380  * @vsi: PF's VSI
6381  * @features: features used to determine VLAN offload settings
6382  *
6383  * First, determine the vlan_ethertype based on the VLAN offload bits in
6384  * features. Then determine if stripping and insertion should be enabled or
6385  * disabled. Finally enable or disable VLAN stripping and insertion.
6386  */
6387 static int
ice_set_vlan_offload_features(struct ice_vsi * vsi,netdev_features_t features)6388 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6389 {
6390 	bool enable_stripping = true, enable_insertion = true;
6391 	struct ice_vsi_vlan_ops *vlan_ops;
6392 	int strip_err = 0, insert_err = 0;
6393 	u16 vlan_ethertype = 0;
6394 
6395 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6396 
6397 	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
6398 		vlan_ethertype = ETH_P_8021AD;
6399 	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
6400 		vlan_ethertype = ETH_P_8021Q;
6401 
6402 	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
6403 		enable_stripping = false;
6404 	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
6405 		enable_insertion = false;
6406 
6407 	if (enable_stripping)
6408 		strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6409 	else
6410 		strip_err = vlan_ops->dis_stripping(vsi);
6411 
6412 	if (enable_insertion)
6413 		insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6414 	else
6415 		insert_err = vlan_ops->dis_insertion(vsi);
6416 
6417 	if (strip_err || insert_err)
6418 		return -EIO;
6419 
6420 	ice_set_rx_rings_vlan_proto(vsi, enable_stripping ?
6421 				    htons(vlan_ethertype) : 0);
6422 
6423 	return 0;
6424 }
6425 
6426 /**
6427  * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6428  * @vsi: PF's VSI
6429  * @features: features used to determine VLAN filtering settings
6430  *
6431  * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
6432  * features.
6433  */
6434 static int
ice_set_vlan_filtering_features(struct ice_vsi * vsi,netdev_features_t features)6435 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6436 {
6437 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6438 	int err = 0;
6439 
6440 	/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
6441 	 * if either bit is set. In switchdev mode Rx filtering should never be
6442 	 * enabled.
6443 	 */
6444 	if ((features &
6445 	     (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) &&
6446 	     !ice_is_eswitch_mode_switchdev(vsi->back))
6447 		err = vlan_ops->ena_rx_filtering(vsi);
6448 	else
6449 		err = vlan_ops->dis_rx_filtering(vsi);
6450 
6451 	return err;
6452 }
6453 
6454 /**
6455  * ice_set_vlan_features - set VLAN settings based on suggested feature set
6456  * @netdev: ptr to the netdev being adjusted
6457  * @features: the feature set that the stack is suggesting
6458  *
6459  * Only update VLAN settings if the requested_vlan_features are different than
6460  * the current_vlan_features.
6461  */
6462 static int
ice_set_vlan_features(struct net_device * netdev,netdev_features_t features)6463 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6464 {
6465 	netdev_features_t current_vlan_features, requested_vlan_features;
6466 	struct ice_netdev_priv *np = netdev_priv(netdev);
6467 	struct ice_vsi *vsi = np->vsi;
6468 	int err;
6469 
6470 	current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6471 	requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6472 	if (current_vlan_features ^ requested_vlan_features) {
6473 		if ((features & NETIF_F_RXFCS) &&
6474 		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6475 			dev_err(ice_pf_to_dev(vsi->back),
6476 				"To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6477 			return -EIO;
6478 		}
6479 
6480 		err = ice_set_vlan_offload_features(vsi, features);
6481 		if (err)
6482 			return err;
6483 	}
6484 
6485 	current_vlan_features = netdev->features &
6486 		NETIF_VLAN_FILTERING_FEATURES;
6487 	requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6488 	if (current_vlan_features ^ requested_vlan_features) {
6489 		err = ice_set_vlan_filtering_features(vsi, features);
6490 		if (err)
6491 			return err;
6492 	}
6493 
6494 	return 0;
6495 }
6496 
6497 /**
6498  * ice_set_loopback - turn on/off loopback mode on underlying PF
6499  * @vsi: ptr to VSI
6500  * @ena: flag to indicate the on/off setting
6501  */
ice_set_loopback(struct ice_vsi * vsi,bool ena)6502 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6503 {
6504 	bool if_running = netif_running(vsi->netdev);
6505 	int ret;
6506 
6507 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6508 		ret = ice_down(vsi);
6509 		if (ret) {
6510 			netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6511 			return ret;
6512 		}
6513 	}
6514 	ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6515 	if (ret)
6516 		netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6517 	if (if_running)
6518 		ret = ice_up(vsi);
6519 
6520 	return ret;
6521 }
6522 
6523 /**
6524  * ice_set_features - set the netdev feature flags
6525  * @netdev: ptr to the netdev being adjusted
6526  * @features: the feature set that the stack is suggesting
6527  */
6528 static int
ice_set_features(struct net_device * netdev,netdev_features_t features)6529 ice_set_features(struct net_device *netdev, netdev_features_t features)
6530 {
6531 	netdev_features_t changed = netdev->features ^ features;
6532 	struct ice_netdev_priv *np = netdev_priv(netdev);
6533 	struct ice_vsi *vsi = np->vsi;
6534 	struct ice_pf *pf = vsi->back;
6535 	int ret = 0;
6536 
6537 	/* Don't set any netdev advanced features with device in Safe Mode */
6538 	if (ice_is_safe_mode(pf)) {
6539 		dev_err(ice_pf_to_dev(pf),
6540 			"Device is in Safe Mode - not enabling advanced netdev features\n");
6541 		return ret;
6542 	}
6543 
6544 	/* Do not change setting during reset */
6545 	if (ice_is_reset_in_progress(pf->state)) {
6546 		dev_err(ice_pf_to_dev(pf),
6547 			"Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6548 		return -EBUSY;
6549 	}
6550 
6551 	/* Multiple features can be changed in one call so keep features in
6552 	 * separate if/else statements to guarantee each feature is checked
6553 	 */
6554 	if (changed & NETIF_F_RXHASH)
6555 		ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6556 
6557 	ret = ice_set_vlan_features(netdev, features);
6558 	if (ret)
6559 		return ret;
6560 
6561 	/* Turn on receive of FCS aka CRC, and after setting this
6562 	 * flag the packet data will have the 4 byte CRC appended
6563 	 */
6564 	if (changed & NETIF_F_RXFCS) {
6565 		if ((features & NETIF_F_RXFCS) &&
6566 		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6567 			dev_err(ice_pf_to_dev(vsi->back),
6568 				"To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6569 			return -EIO;
6570 		}
6571 
6572 		ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6573 		ret = ice_down_up(vsi);
6574 		if (ret)
6575 			return ret;
6576 	}
6577 
6578 	if (changed & NETIF_F_NTUPLE) {
6579 		bool ena = !!(features & NETIF_F_NTUPLE);
6580 
6581 		ice_vsi_manage_fdir(vsi, ena);
6582 		ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6583 	}
6584 
6585 	/* don't turn off hw_tc_offload when ADQ is already enabled */
6586 	if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6587 		dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6588 		return -EACCES;
6589 	}
6590 
6591 	if (changed & NETIF_F_HW_TC) {
6592 		bool ena = !!(features & NETIF_F_HW_TC);
6593 
6594 		assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena);
6595 	}
6596 
6597 	if (changed & NETIF_F_LOOPBACK)
6598 		ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6599 
6600 	return ret;
6601 }
6602 
6603 /**
6604  * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6605  * @vsi: VSI to setup VLAN properties for
6606  */
ice_vsi_vlan_setup(struct ice_vsi * vsi)6607 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6608 {
6609 	int err;
6610 
6611 	err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6612 	if (err)
6613 		return err;
6614 
6615 	err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6616 	if (err)
6617 		return err;
6618 
6619 	return ice_vsi_add_vlan_zero(vsi);
6620 }
6621 
6622 /**
6623  * ice_vsi_cfg_lan - Setup the VSI lan related config
6624  * @vsi: the VSI being configured
6625  *
6626  * Return 0 on success and negative value on error
6627  */
ice_vsi_cfg_lan(struct ice_vsi * vsi)6628 int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6629 {
6630 	int err;
6631 
6632 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6633 		ice_set_rx_mode(vsi->netdev);
6634 
6635 		err = ice_vsi_vlan_setup(vsi);
6636 		if (err)
6637 			return err;
6638 	}
6639 	ice_vsi_cfg_dcb_rings(vsi);
6640 
6641 	err = ice_vsi_cfg_lan_txqs(vsi);
6642 	if (!err && ice_is_xdp_ena_vsi(vsi))
6643 		err = ice_vsi_cfg_xdp_txqs(vsi);
6644 	if (!err)
6645 		err = ice_vsi_cfg_rxqs(vsi);
6646 
6647 	return err;
6648 }
6649 
6650 /* THEORY OF MODERATION:
6651  * The ice driver hardware works differently than the hardware that DIMLIB was
6652  * originally made for. ice hardware doesn't have packet count limits that
6653  * can trigger an interrupt, but it *does* have interrupt rate limit support,
6654  * which is hard-coded to a limit of 250,000 ints/second.
6655  * If not using dynamic moderation, the INTRL value can be modified
6656  * by ethtool rx-usecs-high.
6657  */
6658 struct ice_dim {
6659 	/* the throttle rate for interrupts, basically worst case delay before
6660 	 * an initial interrupt fires, value is stored in microseconds.
6661 	 */
6662 	u16 itr;
6663 };
6664 
6665 /* Make a different profile for Rx that doesn't allow quite so aggressive
6666  * moderation at the high end (it maxes out at 126us or about 8k interrupts a
6667  * second.
6668  */
6669 static const struct ice_dim rx_profile[] = {
6670 	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6671 	{8},    /* 125,000 ints/s */
6672 	{16},   /*  62,500 ints/s */
6673 	{62},   /*  16,129 ints/s */
6674 	{126}   /*   7,936 ints/s */
6675 };
6676 
6677 /* The transmit profile, which has the same sorts of values
6678  * as the previous struct
6679  */
6680 static const struct ice_dim tx_profile[] = {
6681 	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6682 	{8},    /* 125,000 ints/s */
6683 	{40},   /*  16,125 ints/s */
6684 	{128},  /*   7,812 ints/s */
6685 	{256}   /*   3,906 ints/s */
6686 };
6687 
ice_tx_dim_work(struct work_struct * work)6688 static void ice_tx_dim_work(struct work_struct *work)
6689 {
6690 	struct ice_ring_container *rc;
6691 	struct dim *dim;
6692 	u16 itr;
6693 
6694 	dim = container_of(work, struct dim, work);
6695 	rc = dim->priv;
6696 
6697 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6698 
6699 	/* look up the values in our local table */
6700 	itr = tx_profile[dim->profile_ix].itr;
6701 
6702 	ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6703 	ice_write_itr(rc, itr);
6704 
6705 	dim->state = DIM_START_MEASURE;
6706 }
6707 
ice_rx_dim_work(struct work_struct * work)6708 static void ice_rx_dim_work(struct work_struct *work)
6709 {
6710 	struct ice_ring_container *rc;
6711 	struct dim *dim;
6712 	u16 itr;
6713 
6714 	dim = container_of(work, struct dim, work);
6715 	rc = dim->priv;
6716 
6717 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6718 
6719 	/* look up the values in our local table */
6720 	itr = rx_profile[dim->profile_ix].itr;
6721 
6722 	ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6723 	ice_write_itr(rc, itr);
6724 
6725 	dim->state = DIM_START_MEASURE;
6726 }
6727 
6728 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6729 
6730 /**
6731  * ice_init_moderation - set up interrupt moderation
6732  * @q_vector: the vector containing rings to be configured
6733  *
6734  * Set up interrupt moderation registers, with the intent to do the right thing
6735  * when called from reset or from probe, and whether or not dynamic moderation
6736  * is enabled or not. Take special care to write all the registers in both
6737  * dynamic moderation mode or not in order to make sure hardware is in a known
6738  * state.
6739  */
ice_init_moderation(struct ice_q_vector * q_vector)6740 static void ice_init_moderation(struct ice_q_vector *q_vector)
6741 {
6742 	struct ice_ring_container *rc;
6743 	bool tx_dynamic, rx_dynamic;
6744 
6745 	rc = &q_vector->tx;
6746 	INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6747 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6748 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6749 	rc->dim.priv = rc;
6750 	tx_dynamic = ITR_IS_DYNAMIC(rc);
6751 
6752 	/* set the initial TX ITR to match the above */
6753 	ice_write_itr(rc, tx_dynamic ?
6754 		      tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6755 
6756 	rc = &q_vector->rx;
6757 	INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6758 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6759 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6760 	rc->dim.priv = rc;
6761 	rx_dynamic = ITR_IS_DYNAMIC(rc);
6762 
6763 	/* set the initial RX ITR to match the above */
6764 	ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6765 				       rc->itr_setting);
6766 
6767 	ice_set_q_vector_intrl(q_vector);
6768 }
6769 
6770 /**
6771  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6772  * @vsi: the VSI being configured
6773  */
ice_napi_enable_all(struct ice_vsi * vsi)6774 static void ice_napi_enable_all(struct ice_vsi *vsi)
6775 {
6776 	int q_idx;
6777 
6778 	if (!vsi->netdev)
6779 		return;
6780 
6781 	ice_for_each_q_vector(vsi, q_idx) {
6782 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6783 
6784 		ice_init_moderation(q_vector);
6785 
6786 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6787 			napi_enable(&q_vector->napi);
6788 	}
6789 }
6790 
6791 /**
6792  * ice_up_complete - Finish the last steps of bringing up a connection
6793  * @vsi: The VSI being configured
6794  *
6795  * Return 0 on success and negative value on error
6796  */
ice_up_complete(struct ice_vsi * vsi)6797 static int ice_up_complete(struct ice_vsi *vsi)
6798 {
6799 	struct ice_pf *pf = vsi->back;
6800 	int err;
6801 
6802 	ice_vsi_cfg_msix(vsi);
6803 
6804 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
6805 	 * Tx queue group list was configured and the context bits were
6806 	 * programmed using ice_vsi_cfg_txqs
6807 	 */
6808 	err = ice_vsi_start_all_rx_rings(vsi);
6809 	if (err)
6810 		return err;
6811 
6812 	clear_bit(ICE_VSI_DOWN, vsi->state);
6813 	ice_napi_enable_all(vsi);
6814 	ice_vsi_ena_irq(vsi);
6815 
6816 	if (vsi->port_info &&
6817 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6818 	    ((vsi->netdev && (vsi->type == ICE_VSI_PF ||
6819 			      vsi->type == ICE_VSI_SF)))) {
6820 		ice_print_link_msg(vsi, true);
6821 		netif_tx_start_all_queues(vsi->netdev);
6822 		netif_carrier_on(vsi->netdev);
6823 		ice_ptp_link_change(pf, true);
6824 	}
6825 
6826 	/* Perform an initial read of the statistics registers now to
6827 	 * set the baseline so counters are ready when interface is up
6828 	 */
6829 	ice_update_eth_stats(vsi);
6830 
6831 	if (vsi->type == ICE_VSI_PF)
6832 		ice_service_task_schedule(pf);
6833 
6834 	return 0;
6835 }
6836 
6837 /**
6838  * ice_up - Bring the connection back up after being down
6839  * @vsi: VSI being configured
6840  */
ice_up(struct ice_vsi * vsi)6841 int ice_up(struct ice_vsi *vsi)
6842 {
6843 	int err;
6844 
6845 	err = ice_vsi_cfg_lan(vsi);
6846 	if (!err)
6847 		err = ice_up_complete(vsi);
6848 
6849 	return err;
6850 }
6851 
6852 /**
6853  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6854  * @syncp: pointer to u64_stats_sync
6855  * @stats: stats that pkts and bytes count will be taken from
6856  * @pkts: packets stats counter
6857  * @bytes: bytes stats counter
6858  *
6859  * This function fetches stats from the ring considering the atomic operations
6860  * that needs to be performed to read u64 values in 32 bit machine.
6861  */
6862 void
ice_fetch_u64_stats_per_ring(struct u64_stats_sync * syncp,struct ice_q_stats stats,u64 * pkts,u64 * bytes)6863 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6864 			     struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6865 {
6866 	unsigned int start;
6867 
6868 	do {
6869 		start = u64_stats_fetch_begin(syncp);
6870 		*pkts = stats.pkts;
6871 		*bytes = stats.bytes;
6872 	} while (u64_stats_fetch_retry(syncp, start));
6873 }
6874 
6875 /**
6876  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6877  * @vsi: the VSI to be updated
6878  * @vsi_stats: the stats struct to be updated
6879  * @rings: rings to work on
6880  * @count: number of rings
6881  */
6882 static void
ice_update_vsi_tx_ring_stats(struct ice_vsi * vsi,struct rtnl_link_stats64 * vsi_stats,struct ice_tx_ring ** rings,u16 count)6883 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6884 			     struct rtnl_link_stats64 *vsi_stats,
6885 			     struct ice_tx_ring **rings, u16 count)
6886 {
6887 	u16 i;
6888 
6889 	for (i = 0; i < count; i++) {
6890 		struct ice_tx_ring *ring;
6891 		u64 pkts = 0, bytes = 0;
6892 
6893 		ring = READ_ONCE(rings[i]);
6894 		if (!ring || !ring->ring_stats)
6895 			continue;
6896 		ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6897 					     ring->ring_stats->stats, &pkts,
6898 					     &bytes);
6899 		vsi_stats->tx_packets += pkts;
6900 		vsi_stats->tx_bytes += bytes;
6901 		vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6902 		vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6903 		vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6904 	}
6905 }
6906 
6907 /**
6908  * ice_update_vsi_ring_stats - Update VSI stats counters
6909  * @vsi: the VSI to be updated
6910  */
ice_update_vsi_ring_stats(struct ice_vsi * vsi)6911 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6912 {
6913 	struct rtnl_link_stats64 *net_stats, *stats_prev;
6914 	struct rtnl_link_stats64 *vsi_stats;
6915 	struct ice_pf *pf = vsi->back;
6916 	u64 pkts, bytes;
6917 	int i;
6918 
6919 	vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6920 	if (!vsi_stats)
6921 		return;
6922 
6923 	/* reset non-netdev (extended) stats */
6924 	vsi->tx_restart = 0;
6925 	vsi->tx_busy = 0;
6926 	vsi->tx_linearize = 0;
6927 	vsi->rx_buf_failed = 0;
6928 	vsi->rx_page_failed = 0;
6929 
6930 	rcu_read_lock();
6931 
6932 	/* update Tx rings counters */
6933 	ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6934 				     vsi->num_txq);
6935 
6936 	/* update Rx rings counters */
6937 	ice_for_each_rxq(vsi, i) {
6938 		struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6939 		struct ice_ring_stats *ring_stats;
6940 
6941 		ring_stats = ring->ring_stats;
6942 		ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6943 					     ring_stats->stats, &pkts,
6944 					     &bytes);
6945 		vsi_stats->rx_packets += pkts;
6946 		vsi_stats->rx_bytes += bytes;
6947 		vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6948 		vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6949 	}
6950 
6951 	/* update XDP Tx rings counters */
6952 	if (ice_is_xdp_ena_vsi(vsi))
6953 		ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6954 					     vsi->num_xdp_txq);
6955 
6956 	rcu_read_unlock();
6957 
6958 	net_stats = &vsi->net_stats;
6959 	stats_prev = &vsi->net_stats_prev;
6960 
6961 	/* Update netdev counters, but keep in mind that values could start at
6962 	 * random value after PF reset. And as we increase the reported stat by
6963 	 * diff of Prev-Cur, we need to be sure that Prev is valid. If it's not,
6964 	 * let's skip this round.
6965 	 */
6966 	if (likely(pf->stat_prev_loaded)) {
6967 		net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6968 		net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6969 		net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6970 		net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6971 	}
6972 
6973 	stats_prev->tx_packets = vsi_stats->tx_packets;
6974 	stats_prev->tx_bytes = vsi_stats->tx_bytes;
6975 	stats_prev->rx_packets = vsi_stats->rx_packets;
6976 	stats_prev->rx_bytes = vsi_stats->rx_bytes;
6977 
6978 	kfree(vsi_stats);
6979 }
6980 
6981 /**
6982  * ice_update_vsi_stats - Update VSI stats counters
6983  * @vsi: the VSI to be updated
6984  */
ice_update_vsi_stats(struct ice_vsi * vsi)6985 void ice_update_vsi_stats(struct ice_vsi *vsi)
6986 {
6987 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6988 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
6989 	struct ice_pf *pf = vsi->back;
6990 
6991 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6992 	    test_bit(ICE_CFG_BUSY, pf->state))
6993 		return;
6994 
6995 	/* get stats as recorded by Tx/Rx rings */
6996 	ice_update_vsi_ring_stats(vsi);
6997 
6998 	/* get VSI stats as recorded by the hardware */
6999 	ice_update_eth_stats(vsi);
7000 
7001 	cur_ns->tx_errors = cur_es->tx_errors;
7002 	cur_ns->rx_dropped = cur_es->rx_discards;
7003 	cur_ns->tx_dropped = cur_es->tx_discards;
7004 	cur_ns->multicast = cur_es->rx_multicast;
7005 
7006 	/* update some more netdev stats if this is main VSI */
7007 	if (vsi->type == ICE_VSI_PF) {
7008 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
7009 		cur_ns->rx_errors = pf->stats.crc_errors +
7010 				    pf->stats.illegal_bytes +
7011 				    pf->stats.rx_undersize +
7012 				    pf->hw_csum_rx_error +
7013 				    pf->stats.rx_jabber +
7014 				    pf->stats.rx_fragments +
7015 				    pf->stats.rx_oversize;
7016 		/* record drops from the port level */
7017 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
7018 	}
7019 }
7020 
7021 /**
7022  * ice_update_pf_stats - Update PF port stats counters
7023  * @pf: PF whose stats needs to be updated
7024  */
ice_update_pf_stats(struct ice_pf * pf)7025 void ice_update_pf_stats(struct ice_pf *pf)
7026 {
7027 	struct ice_hw_port_stats *prev_ps, *cur_ps;
7028 	struct ice_hw *hw = &pf->hw;
7029 	u16 fd_ctr_base;
7030 	u8 port;
7031 
7032 	port = hw->port_info->lport;
7033 	prev_ps = &pf->stats_prev;
7034 	cur_ps = &pf->stats;
7035 
7036 	if (ice_is_reset_in_progress(pf->state))
7037 		pf->stat_prev_loaded = false;
7038 
7039 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
7040 			  &prev_ps->eth.rx_bytes,
7041 			  &cur_ps->eth.rx_bytes);
7042 
7043 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
7044 			  &prev_ps->eth.rx_unicast,
7045 			  &cur_ps->eth.rx_unicast);
7046 
7047 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
7048 			  &prev_ps->eth.rx_multicast,
7049 			  &cur_ps->eth.rx_multicast);
7050 
7051 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
7052 			  &prev_ps->eth.rx_broadcast,
7053 			  &cur_ps->eth.rx_broadcast);
7054 
7055 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
7056 			  &prev_ps->eth.rx_discards,
7057 			  &cur_ps->eth.rx_discards);
7058 
7059 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
7060 			  &prev_ps->eth.tx_bytes,
7061 			  &cur_ps->eth.tx_bytes);
7062 
7063 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
7064 			  &prev_ps->eth.tx_unicast,
7065 			  &cur_ps->eth.tx_unicast);
7066 
7067 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
7068 			  &prev_ps->eth.tx_multicast,
7069 			  &cur_ps->eth.tx_multicast);
7070 
7071 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
7072 			  &prev_ps->eth.tx_broadcast,
7073 			  &cur_ps->eth.tx_broadcast);
7074 
7075 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
7076 			  &prev_ps->tx_dropped_link_down,
7077 			  &cur_ps->tx_dropped_link_down);
7078 
7079 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
7080 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
7081 
7082 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
7083 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
7084 
7085 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
7086 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
7087 
7088 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
7089 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
7090 
7091 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
7092 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
7093 
7094 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
7095 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
7096 
7097 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
7098 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
7099 
7100 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
7101 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
7102 
7103 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
7104 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
7105 
7106 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
7107 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
7108 
7109 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
7110 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
7111 
7112 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
7113 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
7114 
7115 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
7116 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
7117 
7118 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
7119 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
7120 
7121 	fd_ctr_base = hw->fd_ctr_base;
7122 
7123 	ice_stat_update40(hw,
7124 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
7125 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
7126 			  &cur_ps->fd_sb_match);
7127 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
7128 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
7129 
7130 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
7131 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
7132 
7133 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
7134 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
7135 
7136 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
7137 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
7138 
7139 	ice_update_dcb_stats(pf);
7140 
7141 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
7142 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
7143 
7144 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
7145 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
7146 
7147 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
7148 			  &prev_ps->mac_local_faults,
7149 			  &cur_ps->mac_local_faults);
7150 
7151 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
7152 			  &prev_ps->mac_remote_faults,
7153 			  &cur_ps->mac_remote_faults);
7154 
7155 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
7156 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
7157 
7158 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
7159 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
7160 
7161 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
7162 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
7163 
7164 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
7165 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
7166 
7167 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
7168 
7169 	pf->stat_prev_loaded = true;
7170 }
7171 
7172 /**
7173  * ice_get_stats64 - get statistics for network device structure
7174  * @netdev: network interface device structure
7175  * @stats: main device statistics structure
7176  */
ice_get_stats64(struct net_device * netdev,struct rtnl_link_stats64 * stats)7177 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
7178 {
7179 	struct ice_netdev_priv *np = netdev_priv(netdev);
7180 	struct rtnl_link_stats64 *vsi_stats;
7181 	struct ice_vsi *vsi = np->vsi;
7182 
7183 	vsi_stats = &vsi->net_stats;
7184 
7185 	if (!vsi->num_txq || !vsi->num_rxq)
7186 		return;
7187 
7188 	/* netdev packet/byte stats come from ring counter. These are obtained
7189 	 * by summing up ring counters (done by ice_update_vsi_ring_stats).
7190 	 * But, only call the update routine and read the registers if VSI is
7191 	 * not down.
7192 	 */
7193 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
7194 		ice_update_vsi_ring_stats(vsi);
7195 	stats->tx_packets = vsi_stats->tx_packets;
7196 	stats->tx_bytes = vsi_stats->tx_bytes;
7197 	stats->rx_packets = vsi_stats->rx_packets;
7198 	stats->rx_bytes = vsi_stats->rx_bytes;
7199 
7200 	/* The rest of the stats can be read from the hardware but instead we
7201 	 * just return values that the watchdog task has already obtained from
7202 	 * the hardware.
7203 	 */
7204 	stats->multicast = vsi_stats->multicast;
7205 	stats->tx_errors = vsi_stats->tx_errors;
7206 	stats->tx_dropped = vsi_stats->tx_dropped;
7207 	stats->rx_errors = vsi_stats->rx_errors;
7208 	stats->rx_dropped = vsi_stats->rx_dropped;
7209 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
7210 	stats->rx_length_errors = vsi_stats->rx_length_errors;
7211 }
7212 
7213 /**
7214  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
7215  * @vsi: VSI having NAPI disabled
7216  */
ice_napi_disable_all(struct ice_vsi * vsi)7217 static void ice_napi_disable_all(struct ice_vsi *vsi)
7218 {
7219 	int q_idx;
7220 
7221 	if (!vsi->netdev)
7222 		return;
7223 
7224 	ice_for_each_q_vector(vsi, q_idx) {
7225 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
7226 
7227 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
7228 			napi_disable(&q_vector->napi);
7229 
7230 		cancel_work_sync(&q_vector->tx.dim.work);
7231 		cancel_work_sync(&q_vector->rx.dim.work);
7232 	}
7233 }
7234 
7235 /**
7236  * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
7237  * @vsi: the VSI being un-configured
7238  */
ice_vsi_dis_irq(struct ice_vsi * vsi)7239 static void ice_vsi_dis_irq(struct ice_vsi *vsi)
7240 {
7241 	struct ice_pf *pf = vsi->back;
7242 	struct ice_hw *hw = &pf->hw;
7243 	u32 val;
7244 	int i;
7245 
7246 	/* disable interrupt causation from each Rx queue; Tx queues are
7247 	 * handled in ice_vsi_stop_tx_ring()
7248 	 */
7249 	if (vsi->rx_rings) {
7250 		ice_for_each_rxq(vsi, i) {
7251 			if (vsi->rx_rings[i]) {
7252 				u16 reg;
7253 
7254 				reg = vsi->rx_rings[i]->reg_idx;
7255 				val = rd32(hw, QINT_RQCTL(reg));
7256 				val &= ~QINT_RQCTL_CAUSE_ENA_M;
7257 				wr32(hw, QINT_RQCTL(reg), val);
7258 			}
7259 		}
7260 	}
7261 
7262 	/* disable each interrupt */
7263 	ice_for_each_q_vector(vsi, i) {
7264 		if (!vsi->q_vectors[i])
7265 			continue;
7266 		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
7267 	}
7268 
7269 	ice_flush(hw);
7270 
7271 	/* don't call synchronize_irq() for VF's from the host */
7272 	if (vsi->type == ICE_VSI_VF)
7273 		return;
7274 
7275 	ice_for_each_q_vector(vsi, i)
7276 		synchronize_irq(vsi->q_vectors[i]->irq.virq);
7277 }
7278 
7279 /**
7280  * ice_down - Shutdown the connection
7281  * @vsi: The VSI being stopped
7282  *
7283  * Caller of this function is expected to set the vsi->state ICE_DOWN bit
7284  */
ice_down(struct ice_vsi * vsi)7285 int ice_down(struct ice_vsi *vsi)
7286 {
7287 	int i, tx_err, rx_err, vlan_err = 0;
7288 
7289 	WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
7290 
7291 	if (vsi->netdev) {
7292 		vlan_err = ice_vsi_del_vlan_zero(vsi);
7293 		ice_ptp_link_change(vsi->back, false);
7294 		netif_carrier_off(vsi->netdev);
7295 		netif_tx_disable(vsi->netdev);
7296 	}
7297 
7298 	ice_vsi_dis_irq(vsi);
7299 
7300 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
7301 	if (tx_err)
7302 		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
7303 			   vsi->vsi_num, tx_err);
7304 	if (!tx_err && vsi->xdp_rings) {
7305 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
7306 		if (tx_err)
7307 			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
7308 				   vsi->vsi_num, tx_err);
7309 	}
7310 
7311 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
7312 	if (rx_err)
7313 		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
7314 			   vsi->vsi_num, rx_err);
7315 
7316 	ice_napi_disable_all(vsi);
7317 
7318 	ice_for_each_txq(vsi, i)
7319 		ice_clean_tx_ring(vsi->tx_rings[i]);
7320 
7321 	if (vsi->xdp_rings)
7322 		ice_for_each_xdp_txq(vsi, i)
7323 			ice_clean_tx_ring(vsi->xdp_rings[i]);
7324 
7325 	ice_for_each_rxq(vsi, i)
7326 		ice_clean_rx_ring(vsi->rx_rings[i]);
7327 
7328 	if (tx_err || rx_err || vlan_err) {
7329 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
7330 			   vsi->vsi_num, vsi->vsw->sw_id);
7331 		return -EIO;
7332 	}
7333 
7334 	return 0;
7335 }
7336 
7337 /**
7338  * ice_down_up - shutdown the VSI connection and bring it up
7339  * @vsi: the VSI to be reconnected
7340  */
ice_down_up(struct ice_vsi * vsi)7341 int ice_down_up(struct ice_vsi *vsi)
7342 {
7343 	int ret;
7344 
7345 	/* if DOWN already set, nothing to do */
7346 	if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
7347 		return 0;
7348 
7349 	ret = ice_down(vsi);
7350 	if (ret)
7351 		return ret;
7352 
7353 	ret = ice_up(vsi);
7354 	if (ret) {
7355 		netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
7356 		return ret;
7357 	}
7358 
7359 	return 0;
7360 }
7361 
7362 /**
7363  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
7364  * @vsi: VSI having resources allocated
7365  *
7366  * Return 0 on success, negative on failure
7367  */
ice_vsi_setup_tx_rings(struct ice_vsi * vsi)7368 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
7369 {
7370 	int i, err = 0;
7371 
7372 	if (!vsi->num_txq) {
7373 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
7374 			vsi->vsi_num);
7375 		return -EINVAL;
7376 	}
7377 
7378 	ice_for_each_txq(vsi, i) {
7379 		struct ice_tx_ring *ring = vsi->tx_rings[i];
7380 
7381 		if (!ring)
7382 			return -EINVAL;
7383 
7384 		if (vsi->netdev)
7385 			ring->netdev = vsi->netdev;
7386 		err = ice_setup_tx_ring(ring);
7387 		if (err)
7388 			break;
7389 	}
7390 
7391 	return err;
7392 }
7393 
7394 /**
7395  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
7396  * @vsi: VSI having resources allocated
7397  *
7398  * Return 0 on success, negative on failure
7399  */
ice_vsi_setup_rx_rings(struct ice_vsi * vsi)7400 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
7401 {
7402 	int i, err = 0;
7403 
7404 	if (!vsi->num_rxq) {
7405 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
7406 			vsi->vsi_num);
7407 		return -EINVAL;
7408 	}
7409 
7410 	ice_for_each_rxq(vsi, i) {
7411 		struct ice_rx_ring *ring = vsi->rx_rings[i];
7412 
7413 		if (!ring)
7414 			return -EINVAL;
7415 
7416 		if (vsi->netdev)
7417 			ring->netdev = vsi->netdev;
7418 		err = ice_setup_rx_ring(ring);
7419 		if (err)
7420 			break;
7421 	}
7422 
7423 	return err;
7424 }
7425 
7426 /**
7427  * ice_vsi_open_ctrl - open control VSI for use
7428  * @vsi: the VSI to open
7429  *
7430  * Initialization of the Control VSI
7431  *
7432  * Returns 0 on success, negative value on error
7433  */
ice_vsi_open_ctrl(struct ice_vsi * vsi)7434 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
7435 {
7436 	char int_name[ICE_INT_NAME_STR_LEN];
7437 	struct ice_pf *pf = vsi->back;
7438 	struct device *dev;
7439 	int err;
7440 
7441 	dev = ice_pf_to_dev(pf);
7442 	/* allocate descriptors */
7443 	err = ice_vsi_setup_tx_rings(vsi);
7444 	if (err)
7445 		goto err_setup_tx;
7446 
7447 	err = ice_vsi_setup_rx_rings(vsi);
7448 	if (err)
7449 		goto err_setup_rx;
7450 
7451 	err = ice_vsi_cfg_lan(vsi);
7452 	if (err)
7453 		goto err_setup_rx;
7454 
7455 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
7456 		 dev_driver_string(dev), dev_name(dev));
7457 	err = ice_vsi_req_irq_msix(vsi, int_name);
7458 	if (err)
7459 		goto err_setup_rx;
7460 
7461 	ice_vsi_cfg_msix(vsi);
7462 
7463 	err = ice_vsi_start_all_rx_rings(vsi);
7464 	if (err)
7465 		goto err_up_complete;
7466 
7467 	clear_bit(ICE_VSI_DOWN, vsi->state);
7468 	ice_vsi_ena_irq(vsi);
7469 
7470 	return 0;
7471 
7472 err_up_complete:
7473 	ice_down(vsi);
7474 err_setup_rx:
7475 	ice_vsi_free_rx_rings(vsi);
7476 err_setup_tx:
7477 	ice_vsi_free_tx_rings(vsi);
7478 
7479 	return err;
7480 }
7481 
7482 /**
7483  * ice_vsi_open - Called when a network interface is made active
7484  * @vsi: the VSI to open
7485  *
7486  * Initialization of the VSI
7487  *
7488  * Returns 0 on success, negative value on error
7489  */
ice_vsi_open(struct ice_vsi * vsi)7490 int ice_vsi_open(struct ice_vsi *vsi)
7491 {
7492 	char int_name[ICE_INT_NAME_STR_LEN];
7493 	struct ice_pf *pf = vsi->back;
7494 	int err;
7495 
7496 	/* allocate descriptors */
7497 	err = ice_vsi_setup_tx_rings(vsi);
7498 	if (err)
7499 		goto err_setup_tx;
7500 
7501 	err = ice_vsi_setup_rx_rings(vsi);
7502 	if (err)
7503 		goto err_setup_rx;
7504 
7505 	err = ice_vsi_cfg_lan(vsi);
7506 	if (err)
7507 		goto err_setup_rx;
7508 
7509 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7510 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7511 	err = ice_vsi_req_irq_msix(vsi, int_name);
7512 	if (err)
7513 		goto err_setup_rx;
7514 
7515 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7516 
7517 	if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) {
7518 		/* Notify the stack of the actual queue counts. */
7519 		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7520 		if (err)
7521 			goto err_set_qs;
7522 
7523 		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7524 		if (err)
7525 			goto err_set_qs;
7526 
7527 		ice_vsi_set_napi_queues(vsi);
7528 	}
7529 
7530 	err = ice_up_complete(vsi);
7531 	if (err)
7532 		goto err_up_complete;
7533 
7534 	return 0;
7535 
7536 err_up_complete:
7537 	ice_down(vsi);
7538 err_set_qs:
7539 	ice_vsi_free_irq(vsi);
7540 err_setup_rx:
7541 	ice_vsi_free_rx_rings(vsi);
7542 err_setup_tx:
7543 	ice_vsi_free_tx_rings(vsi);
7544 
7545 	return err;
7546 }
7547 
7548 /**
7549  * ice_vsi_release_all - Delete all VSIs
7550  * @pf: PF from which all VSIs are being removed
7551  */
ice_vsi_release_all(struct ice_pf * pf)7552 static void ice_vsi_release_all(struct ice_pf *pf)
7553 {
7554 	int err, i;
7555 
7556 	if (!pf->vsi)
7557 		return;
7558 
7559 	ice_for_each_vsi(pf, i) {
7560 		if (!pf->vsi[i])
7561 			continue;
7562 
7563 		if (pf->vsi[i]->type == ICE_VSI_CHNL)
7564 			continue;
7565 
7566 		err = ice_vsi_release(pf->vsi[i]);
7567 		if (err)
7568 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7569 				i, err, pf->vsi[i]->vsi_num);
7570 	}
7571 }
7572 
7573 /**
7574  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7575  * @pf: pointer to the PF instance
7576  * @type: VSI type to rebuild
7577  *
7578  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7579  */
ice_vsi_rebuild_by_type(struct ice_pf * pf,enum ice_vsi_type type)7580 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7581 {
7582 	struct device *dev = ice_pf_to_dev(pf);
7583 	int i, err;
7584 
7585 	ice_for_each_vsi(pf, i) {
7586 		struct ice_vsi *vsi = pf->vsi[i];
7587 
7588 		if (!vsi || vsi->type != type)
7589 			continue;
7590 
7591 		/* rebuild the VSI */
7592 		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7593 		if (err) {
7594 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7595 				err, vsi->idx, ice_vsi_type_str(type));
7596 			return err;
7597 		}
7598 
7599 		/* replay filters for the VSI */
7600 		err = ice_replay_vsi(&pf->hw, vsi->idx);
7601 		if (err) {
7602 			dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7603 				err, vsi->idx, ice_vsi_type_str(type));
7604 			return err;
7605 		}
7606 
7607 		/* Re-map HW VSI number, using VSI handle that has been
7608 		 * previously validated in ice_replay_vsi() call above
7609 		 */
7610 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7611 
7612 		/* enable the VSI */
7613 		err = ice_ena_vsi(vsi, false);
7614 		if (err) {
7615 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7616 				err, vsi->idx, ice_vsi_type_str(type));
7617 			return err;
7618 		}
7619 
7620 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7621 			 ice_vsi_type_str(type));
7622 	}
7623 
7624 	return 0;
7625 }
7626 
7627 /**
7628  * ice_update_pf_netdev_link - Update PF netdev link status
7629  * @pf: pointer to the PF instance
7630  */
ice_update_pf_netdev_link(struct ice_pf * pf)7631 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7632 {
7633 	bool link_up;
7634 	int i;
7635 
7636 	ice_for_each_vsi(pf, i) {
7637 		struct ice_vsi *vsi = pf->vsi[i];
7638 
7639 		if (!vsi || vsi->type != ICE_VSI_PF)
7640 			return;
7641 
7642 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7643 		if (link_up) {
7644 			netif_carrier_on(pf->vsi[i]->netdev);
7645 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7646 		} else {
7647 			netif_carrier_off(pf->vsi[i]->netdev);
7648 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7649 		}
7650 	}
7651 }
7652 
7653 /**
7654  * ice_rebuild - rebuild after reset
7655  * @pf: PF to rebuild
7656  * @reset_type: type of reset
7657  *
7658  * Do not rebuild VF VSI in this flow because that is already handled via
7659  * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
7660  * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want
7661  * to reset/rebuild all the VF VSI twice.
7662  */
ice_rebuild(struct ice_pf * pf,enum ice_reset_req reset_type)7663 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7664 {
7665 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
7666 	struct device *dev = ice_pf_to_dev(pf);
7667 	struct ice_hw *hw = &pf->hw;
7668 	bool dvm;
7669 	int err;
7670 
7671 	if (test_bit(ICE_DOWN, pf->state))
7672 		goto clear_recovery;
7673 
7674 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7675 
7676 #define ICE_EMP_RESET_SLEEP_MS 5000
7677 	if (reset_type == ICE_RESET_EMPR) {
7678 		/* If an EMP reset has occurred, any previously pending flash
7679 		 * update will have completed. We no longer know whether or
7680 		 * not the NVM update EMP reset is restricted.
7681 		 */
7682 		pf->fw_emp_reset_disabled = false;
7683 
7684 		msleep(ICE_EMP_RESET_SLEEP_MS);
7685 	}
7686 
7687 	err = ice_init_all_ctrlq(hw);
7688 	if (err) {
7689 		dev_err(dev, "control queues init failed %d\n", err);
7690 		goto err_init_ctrlq;
7691 	}
7692 
7693 	/* if DDP was previously loaded successfully */
7694 	if (!ice_is_safe_mode(pf)) {
7695 		/* reload the SW DB of filter tables */
7696 		if (reset_type == ICE_RESET_PFR)
7697 			ice_fill_blk_tbls(hw);
7698 		else
7699 			/* Reload DDP Package after CORER/GLOBR reset */
7700 			ice_load_pkg(NULL, pf);
7701 	}
7702 
7703 	err = ice_clear_pf_cfg(hw);
7704 	if (err) {
7705 		dev_err(dev, "clear PF configuration failed %d\n", err);
7706 		goto err_init_ctrlq;
7707 	}
7708 
7709 	ice_clear_pxe_mode(hw);
7710 
7711 	err = ice_init_nvm(hw);
7712 	if (err) {
7713 		dev_err(dev, "ice_init_nvm failed %d\n", err);
7714 		goto err_init_ctrlq;
7715 	}
7716 
7717 	err = ice_get_caps(hw);
7718 	if (err) {
7719 		dev_err(dev, "ice_get_caps failed %d\n", err);
7720 		goto err_init_ctrlq;
7721 	}
7722 
7723 	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7724 	if (err) {
7725 		dev_err(dev, "set_mac_cfg failed %d\n", err);
7726 		goto err_init_ctrlq;
7727 	}
7728 
7729 	dvm = ice_is_dvm_ena(hw);
7730 
7731 	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7732 	if (err)
7733 		goto err_init_ctrlq;
7734 
7735 	err = ice_sched_init_port(hw->port_info);
7736 	if (err)
7737 		goto err_sched_init_port;
7738 
7739 	/* start misc vector */
7740 	err = ice_req_irq_msix_misc(pf);
7741 	if (err) {
7742 		dev_err(dev, "misc vector setup failed: %d\n", err);
7743 		goto err_sched_init_port;
7744 	}
7745 
7746 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7747 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7748 		if (!rd32(hw, PFQF_FD_SIZE)) {
7749 			u16 unused, guar, b_effort;
7750 
7751 			guar = hw->func_caps.fd_fltr_guar;
7752 			b_effort = hw->func_caps.fd_fltr_best_effort;
7753 
7754 			/* force guaranteed filter pool for PF */
7755 			ice_alloc_fd_guar_item(hw, &unused, guar);
7756 			/* force shared filter pool for PF */
7757 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7758 		}
7759 	}
7760 
7761 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7762 		ice_dcb_rebuild(pf);
7763 
7764 	/* If the PF previously had enabled PTP, PTP init needs to happen before
7765 	 * the VSI rebuild. If not, this causes the PTP link status events to
7766 	 * fail.
7767 	 */
7768 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7769 		ice_ptp_rebuild(pf, reset_type);
7770 
7771 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
7772 		ice_gnss_init(pf);
7773 
7774 	/* rebuild PF VSI */
7775 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7776 	if (err) {
7777 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7778 		goto err_vsi_rebuild;
7779 	}
7780 
7781 	if (reset_type == ICE_RESET_PFR) {
7782 		err = ice_rebuild_channels(pf);
7783 		if (err) {
7784 			dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7785 				err);
7786 			goto err_vsi_rebuild;
7787 		}
7788 	}
7789 
7790 	/* If Flow Director is active */
7791 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7792 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7793 		if (err) {
7794 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
7795 			goto err_vsi_rebuild;
7796 		}
7797 
7798 		/* replay HW Flow Director recipes */
7799 		if (hw->fdir_prof)
7800 			ice_fdir_replay_flows(hw);
7801 
7802 		/* replay Flow Director filters */
7803 		ice_fdir_replay_fltrs(pf);
7804 
7805 		ice_rebuild_arfs(pf);
7806 	}
7807 
7808 	if (vsi && vsi->netdev)
7809 		netif_device_attach(vsi->netdev);
7810 
7811 	ice_update_pf_netdev_link(pf);
7812 
7813 	/* tell the firmware we are up */
7814 	err = ice_send_version(pf);
7815 	if (err) {
7816 		dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7817 			err);
7818 		goto err_vsi_rebuild;
7819 	}
7820 
7821 	ice_replay_post(hw);
7822 
7823 	/* if we get here, reset flow is successful */
7824 	clear_bit(ICE_RESET_FAILED, pf->state);
7825 
7826 	ice_health_clear(pf);
7827 
7828 	ice_plug_aux_dev(pf);
7829 	if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
7830 		ice_lag_rebuild(pf);
7831 
7832 	/* Restore timestamp mode settings after VSI rebuild */
7833 	ice_ptp_restore_timestamp_mode(pf);
7834 	return;
7835 
7836 err_vsi_rebuild:
7837 err_sched_init_port:
7838 	ice_sched_cleanup_all(hw);
7839 err_init_ctrlq:
7840 	ice_shutdown_all_ctrlq(hw, false);
7841 	set_bit(ICE_RESET_FAILED, pf->state);
7842 clear_recovery:
7843 	/* set this bit in PF state to control service task scheduling */
7844 	set_bit(ICE_NEEDS_RESTART, pf->state);
7845 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
7846 }
7847 
7848 /**
7849  * ice_change_mtu - NDO callback to change the MTU
7850  * @netdev: network interface device structure
7851  * @new_mtu: new value for maximum frame size
7852  *
7853  * Returns 0 on success, negative on failure
7854  */
ice_change_mtu(struct net_device * netdev,int new_mtu)7855 int ice_change_mtu(struct net_device *netdev, int new_mtu)
7856 {
7857 	struct ice_netdev_priv *np = netdev_priv(netdev);
7858 	struct ice_vsi *vsi = np->vsi;
7859 	struct ice_pf *pf = vsi->back;
7860 	struct bpf_prog *prog;
7861 	u8 count = 0;
7862 	int err = 0;
7863 
7864 	if (new_mtu == (int)netdev->mtu) {
7865 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7866 		return 0;
7867 	}
7868 
7869 	prog = vsi->xdp_prog;
7870 	if (prog && !prog->aux->xdp_has_frags) {
7871 		int frame_size = ice_max_xdp_frame_size(vsi);
7872 
7873 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7874 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
7875 				   frame_size - ICE_ETH_PKT_HDR_PAD);
7876 			return -EINVAL;
7877 		}
7878 	} else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
7879 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
7880 			netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
7881 				   ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
7882 			return -EINVAL;
7883 		}
7884 	}
7885 
7886 	/* if a reset is in progress, wait for some time for it to complete */
7887 	do {
7888 		if (ice_is_reset_in_progress(pf->state)) {
7889 			count++;
7890 			usleep_range(1000, 2000);
7891 		} else {
7892 			break;
7893 		}
7894 
7895 	} while (count < 100);
7896 
7897 	if (count == 100) {
7898 		netdev_err(netdev, "can't change MTU. Device is busy\n");
7899 		return -EBUSY;
7900 	}
7901 
7902 	WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu);
7903 	err = ice_down_up(vsi);
7904 	if (err)
7905 		return err;
7906 
7907 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7908 	set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7909 
7910 	return err;
7911 }
7912 
7913 /**
7914  * ice_eth_ioctl - Access the hwtstamp interface
7915  * @netdev: network interface device structure
7916  * @ifr: interface request data
7917  * @cmd: ioctl command
7918  */
ice_eth_ioctl(struct net_device * netdev,struct ifreq * ifr,int cmd)7919 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7920 {
7921 	struct ice_netdev_priv *np = netdev_priv(netdev);
7922 	struct ice_pf *pf = np->vsi->back;
7923 
7924 	switch (cmd) {
7925 	case SIOCGHWTSTAMP:
7926 		return ice_ptp_get_ts_config(pf, ifr);
7927 	case SIOCSHWTSTAMP:
7928 		return ice_ptp_set_ts_config(pf, ifr);
7929 	default:
7930 		return -EOPNOTSUPP;
7931 	}
7932 }
7933 
7934 /**
7935  * ice_aq_str - convert AQ err code to a string
7936  * @aq_err: the AQ error code to convert
7937  */
ice_aq_str(enum ice_aq_err aq_err)7938 const char *ice_aq_str(enum ice_aq_err aq_err)
7939 {
7940 	switch (aq_err) {
7941 	case ICE_AQ_RC_OK:
7942 		return "OK";
7943 	case ICE_AQ_RC_EPERM:
7944 		return "ICE_AQ_RC_EPERM";
7945 	case ICE_AQ_RC_ENOENT:
7946 		return "ICE_AQ_RC_ENOENT";
7947 	case ICE_AQ_RC_ENOMEM:
7948 		return "ICE_AQ_RC_ENOMEM";
7949 	case ICE_AQ_RC_EBUSY:
7950 		return "ICE_AQ_RC_EBUSY";
7951 	case ICE_AQ_RC_EEXIST:
7952 		return "ICE_AQ_RC_EEXIST";
7953 	case ICE_AQ_RC_EINVAL:
7954 		return "ICE_AQ_RC_EINVAL";
7955 	case ICE_AQ_RC_ENOSPC:
7956 		return "ICE_AQ_RC_ENOSPC";
7957 	case ICE_AQ_RC_ENOSYS:
7958 		return "ICE_AQ_RC_ENOSYS";
7959 	case ICE_AQ_RC_EMODE:
7960 		return "ICE_AQ_RC_EMODE";
7961 	case ICE_AQ_RC_ENOSEC:
7962 		return "ICE_AQ_RC_ENOSEC";
7963 	case ICE_AQ_RC_EBADSIG:
7964 		return "ICE_AQ_RC_EBADSIG";
7965 	case ICE_AQ_RC_ESVN:
7966 		return "ICE_AQ_RC_ESVN";
7967 	case ICE_AQ_RC_EBADMAN:
7968 		return "ICE_AQ_RC_EBADMAN";
7969 	case ICE_AQ_RC_EBADBUF:
7970 		return "ICE_AQ_RC_EBADBUF";
7971 	}
7972 
7973 	return "ICE_AQ_RC_UNKNOWN";
7974 }
7975 
7976 /**
7977  * ice_set_rss_lut - Set RSS LUT
7978  * @vsi: Pointer to VSI structure
7979  * @lut: Lookup table
7980  * @lut_size: Lookup table size
7981  *
7982  * Returns 0 on success, negative on failure
7983  */
ice_set_rss_lut(struct ice_vsi * vsi,u8 * lut,u16 lut_size)7984 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7985 {
7986 	struct ice_aq_get_set_rss_lut_params params = {};
7987 	struct ice_hw *hw = &vsi->back->hw;
7988 	int status;
7989 
7990 	if (!lut)
7991 		return -EINVAL;
7992 
7993 	params.vsi_handle = vsi->idx;
7994 	params.lut_size = lut_size;
7995 	params.lut_type = vsi->rss_lut_type;
7996 	params.lut = lut;
7997 
7998 	status = ice_aq_set_rss_lut(hw, &params);
7999 	if (status)
8000 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
8001 			status, ice_aq_str(hw->adminq.sq_last_status));
8002 
8003 	return status;
8004 }
8005 
8006 /**
8007  * ice_set_rss_key - Set RSS key
8008  * @vsi: Pointer to the VSI structure
8009  * @seed: RSS hash seed
8010  *
8011  * Returns 0 on success, negative on failure
8012  */
ice_set_rss_key(struct ice_vsi * vsi,u8 * seed)8013 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
8014 {
8015 	struct ice_hw *hw = &vsi->back->hw;
8016 	int status;
8017 
8018 	if (!seed)
8019 		return -EINVAL;
8020 
8021 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
8022 	if (status)
8023 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
8024 			status, ice_aq_str(hw->adminq.sq_last_status));
8025 
8026 	return status;
8027 }
8028 
8029 /**
8030  * ice_get_rss_lut - Get RSS LUT
8031  * @vsi: Pointer to VSI structure
8032  * @lut: Buffer to store the lookup table entries
8033  * @lut_size: Size of buffer to store the lookup table entries
8034  *
8035  * Returns 0 on success, negative on failure
8036  */
ice_get_rss_lut(struct ice_vsi * vsi,u8 * lut,u16 lut_size)8037 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
8038 {
8039 	struct ice_aq_get_set_rss_lut_params params = {};
8040 	struct ice_hw *hw = &vsi->back->hw;
8041 	int status;
8042 
8043 	if (!lut)
8044 		return -EINVAL;
8045 
8046 	params.vsi_handle = vsi->idx;
8047 	params.lut_size = lut_size;
8048 	params.lut_type = vsi->rss_lut_type;
8049 	params.lut = lut;
8050 
8051 	status = ice_aq_get_rss_lut(hw, &params);
8052 	if (status)
8053 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
8054 			status, ice_aq_str(hw->adminq.sq_last_status));
8055 
8056 	return status;
8057 }
8058 
8059 /**
8060  * ice_get_rss_key - Get RSS key
8061  * @vsi: Pointer to VSI structure
8062  * @seed: Buffer to store the key in
8063  *
8064  * Returns 0 on success, negative on failure
8065  */
ice_get_rss_key(struct ice_vsi * vsi,u8 * seed)8066 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
8067 {
8068 	struct ice_hw *hw = &vsi->back->hw;
8069 	int status;
8070 
8071 	if (!seed)
8072 		return -EINVAL;
8073 
8074 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
8075 	if (status)
8076 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
8077 			status, ice_aq_str(hw->adminq.sq_last_status));
8078 
8079 	return status;
8080 }
8081 
8082 /**
8083  * ice_set_rss_hfunc - Set RSS HASH function
8084  * @vsi: Pointer to VSI structure
8085  * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*)
8086  *
8087  * Returns 0 on success, negative on failure
8088  */
ice_set_rss_hfunc(struct ice_vsi * vsi,u8 hfunc)8089 int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc)
8090 {
8091 	struct ice_hw *hw = &vsi->back->hw;
8092 	struct ice_vsi_ctx *ctx;
8093 	bool symm;
8094 	int err;
8095 
8096 	if (hfunc == vsi->rss_hfunc)
8097 		return 0;
8098 
8099 	if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ &&
8100 	    hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
8101 		return -EOPNOTSUPP;
8102 
8103 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8104 	if (!ctx)
8105 		return -ENOMEM;
8106 
8107 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
8108 	ctx->info.q_opt_rss = vsi->info.q_opt_rss;
8109 	ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
8110 	ctx->info.q_opt_rss |=
8111 		FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc);
8112 	ctx->info.q_opt_tc = vsi->info.q_opt_tc;
8113 	ctx->info.q_opt_flags = vsi->info.q_opt_rss;
8114 
8115 	err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
8116 	if (err) {
8117 		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n",
8118 			vsi->vsi_num, err);
8119 	} else {
8120 		vsi->info.q_opt_rss = ctx->info.q_opt_rss;
8121 		vsi->rss_hfunc = hfunc;
8122 		netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n",
8123 			    hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ?
8124 			    "Symmetric " : "");
8125 	}
8126 	kfree(ctx);
8127 	if (err)
8128 		return err;
8129 
8130 	/* Fix the symmetry setting for all existing RSS configurations */
8131 	symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
8132 	return ice_set_rss_cfg_symm(hw, vsi, symm);
8133 }
8134 
8135 /**
8136  * ice_bridge_getlink - Get the hardware bridge mode
8137  * @skb: skb buff
8138  * @pid: process ID
8139  * @seq: RTNL message seq
8140  * @dev: the netdev being configured
8141  * @filter_mask: filter mask passed in
8142  * @nlflags: netlink flags passed in
8143  *
8144  * Return the bridge mode (VEB/VEPA)
8145  */
8146 static int
ice_bridge_getlink(struct sk_buff * skb,u32 pid,u32 seq,struct net_device * dev,u32 filter_mask,int nlflags)8147 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8148 		   struct net_device *dev, u32 filter_mask, int nlflags)
8149 {
8150 	struct ice_netdev_priv *np = netdev_priv(dev);
8151 	struct ice_vsi *vsi = np->vsi;
8152 	struct ice_pf *pf = vsi->back;
8153 	u16 bmode;
8154 
8155 	bmode = pf->first_sw->bridge_mode;
8156 
8157 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
8158 				       filter_mask, NULL);
8159 }
8160 
8161 /**
8162  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
8163  * @vsi: Pointer to VSI structure
8164  * @bmode: Hardware bridge mode (VEB/VEPA)
8165  *
8166  * Returns 0 on success, negative on failure
8167  */
ice_vsi_update_bridge_mode(struct ice_vsi * vsi,u16 bmode)8168 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
8169 {
8170 	struct ice_aqc_vsi_props *vsi_props;
8171 	struct ice_hw *hw = &vsi->back->hw;
8172 	struct ice_vsi_ctx *ctxt;
8173 	int ret;
8174 
8175 	vsi_props = &vsi->info;
8176 
8177 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
8178 	if (!ctxt)
8179 		return -ENOMEM;
8180 
8181 	ctxt->info = vsi->info;
8182 
8183 	if (bmode == BRIDGE_MODE_VEB)
8184 		/* change from VEPA to VEB mode */
8185 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
8186 	else
8187 		/* change from VEB to VEPA mode */
8188 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
8189 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
8190 
8191 	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
8192 	if (ret) {
8193 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
8194 			bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
8195 		goto out;
8196 	}
8197 	/* Update sw flags for book keeping */
8198 	vsi_props->sw_flags = ctxt->info.sw_flags;
8199 
8200 out:
8201 	kfree(ctxt);
8202 	return ret;
8203 }
8204 
8205 /**
8206  * ice_bridge_setlink - Set the hardware bridge mode
8207  * @dev: the netdev being configured
8208  * @nlh: RTNL message
8209  * @flags: bridge setlink flags
8210  * @extack: netlink extended ack
8211  *
8212  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
8213  * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if
8214  * not already set for all VSIs connected to this switch. And also update the
8215  * unicast switch filter rules for the corresponding switch of the netdev.
8216  */
8217 static int
ice_bridge_setlink(struct net_device * dev,struct nlmsghdr * nlh,u16 __always_unused flags,struct netlink_ext_ack __always_unused * extack)8218 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
8219 		   u16 __always_unused flags,
8220 		   struct netlink_ext_ack __always_unused *extack)
8221 {
8222 	struct ice_netdev_priv *np = netdev_priv(dev);
8223 	struct ice_pf *pf = np->vsi->back;
8224 	struct nlattr *attr, *br_spec;
8225 	struct ice_hw *hw = &pf->hw;
8226 	struct ice_sw *pf_sw;
8227 	int rem, v, err = 0;
8228 
8229 	pf_sw = pf->first_sw;
8230 	/* find the attribute in the netlink message */
8231 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8232 	if (!br_spec)
8233 		return -EINVAL;
8234 
8235 	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
8236 		__u16 mode = nla_get_u16(attr);
8237 
8238 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
8239 			return -EINVAL;
8240 		/* Continue  if bridge mode is not being flipped */
8241 		if (mode == pf_sw->bridge_mode)
8242 			continue;
8243 		/* Iterates through the PF VSI list and update the loopback
8244 		 * mode of the VSI
8245 		 */
8246 		ice_for_each_vsi(pf, v) {
8247 			if (!pf->vsi[v])
8248 				continue;
8249 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
8250 			if (err)
8251 				return err;
8252 		}
8253 
8254 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
8255 		/* Update the unicast switch filter rules for the corresponding
8256 		 * switch of the netdev
8257 		 */
8258 		err = ice_update_sw_rule_bridge_mode(hw);
8259 		if (err) {
8260 			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
8261 				   mode, err,
8262 				   ice_aq_str(hw->adminq.sq_last_status));
8263 			/* revert hw->evb_veb */
8264 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
8265 			return err;
8266 		}
8267 
8268 		pf_sw->bridge_mode = mode;
8269 	}
8270 
8271 	return 0;
8272 }
8273 
8274 /**
8275  * ice_tx_timeout - Respond to a Tx Hang
8276  * @netdev: network interface device structure
8277  * @txqueue: Tx queue
8278  */
ice_tx_timeout(struct net_device * netdev,unsigned int txqueue)8279 void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
8280 {
8281 	struct ice_netdev_priv *np = netdev_priv(netdev);
8282 	struct ice_tx_ring *tx_ring = NULL;
8283 	struct ice_vsi *vsi = np->vsi;
8284 	struct ice_pf *pf = vsi->back;
8285 	u32 i;
8286 
8287 	pf->tx_timeout_count++;
8288 
8289 	/* Check if PFC is enabled for the TC to which the queue belongs
8290 	 * to. If yes then Tx timeout is not caused by a hung queue, no
8291 	 * need to reset and rebuild
8292 	 */
8293 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
8294 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
8295 			 txqueue);
8296 		return;
8297 	}
8298 
8299 	/* now that we have an index, find the tx_ring struct */
8300 	ice_for_each_txq(vsi, i)
8301 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
8302 			if (txqueue == vsi->tx_rings[i]->q_index) {
8303 				tx_ring = vsi->tx_rings[i];
8304 				break;
8305 			}
8306 
8307 	/* Reset recovery level if enough time has elapsed after last timeout.
8308 	 * Also ensure no new reset action happens before next timeout period.
8309 	 */
8310 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
8311 		pf->tx_timeout_recovery_level = 1;
8312 	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
8313 				       netdev->watchdog_timeo)))
8314 		return;
8315 
8316 	if (tx_ring) {
8317 		struct ice_hw *hw = &pf->hw;
8318 		u32 head, intr = 0;
8319 
8320 		head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
8321 				 rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
8322 		/* Read interrupt register */
8323 		intr = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
8324 
8325 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
8326 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
8327 			    head, tx_ring->next_to_use, intr);
8328 
8329 		ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr);
8330 	}
8331 
8332 	pf->tx_timeout_last_recovery = jiffies;
8333 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
8334 		    pf->tx_timeout_recovery_level, txqueue);
8335 
8336 	switch (pf->tx_timeout_recovery_level) {
8337 	case 1:
8338 		set_bit(ICE_PFR_REQ, pf->state);
8339 		break;
8340 	case 2:
8341 		set_bit(ICE_CORER_REQ, pf->state);
8342 		break;
8343 	case 3:
8344 		set_bit(ICE_GLOBR_REQ, pf->state);
8345 		break;
8346 	default:
8347 		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
8348 		set_bit(ICE_DOWN, pf->state);
8349 		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
8350 		set_bit(ICE_SERVICE_DIS, pf->state);
8351 		break;
8352 	}
8353 
8354 	ice_service_task_schedule(pf);
8355 	pf->tx_timeout_recovery_level++;
8356 }
8357 
8358 /**
8359  * ice_setup_tc_cls_flower - flower classifier offloads
8360  * @np: net device to configure
8361  * @filter_dev: device on which filter is added
8362  * @cls_flower: offload data
8363  */
8364 static int
ice_setup_tc_cls_flower(struct ice_netdev_priv * np,struct net_device * filter_dev,struct flow_cls_offload * cls_flower)8365 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
8366 			struct net_device *filter_dev,
8367 			struct flow_cls_offload *cls_flower)
8368 {
8369 	struct ice_vsi *vsi = np->vsi;
8370 
8371 	if (cls_flower->common.chain_index)
8372 		return -EOPNOTSUPP;
8373 
8374 	switch (cls_flower->command) {
8375 	case FLOW_CLS_REPLACE:
8376 		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
8377 	case FLOW_CLS_DESTROY:
8378 		return ice_del_cls_flower(vsi, cls_flower);
8379 	default:
8380 		return -EINVAL;
8381 	}
8382 }
8383 
8384 /**
8385  * ice_setup_tc_block_cb - callback handler registered for TC block
8386  * @type: TC SETUP type
8387  * @type_data: TC flower offload data that contains user input
8388  * @cb_priv: netdev private data
8389  */
8390 static int
ice_setup_tc_block_cb(enum tc_setup_type type,void * type_data,void * cb_priv)8391 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
8392 {
8393 	struct ice_netdev_priv *np = cb_priv;
8394 
8395 	switch (type) {
8396 	case TC_SETUP_CLSFLOWER:
8397 		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
8398 					       type_data);
8399 	default:
8400 		return -EOPNOTSUPP;
8401 	}
8402 }
8403 
8404 /**
8405  * ice_validate_mqprio_qopt - Validate TCF input parameters
8406  * @vsi: Pointer to VSI
8407  * @mqprio_qopt: input parameters for mqprio queue configuration
8408  *
8409  * This function validates MQPRIO params, such as qcount (power of 2 wherever
8410  * needed), and make sure user doesn't specify qcount and BW rate limit
8411  * for TCs, which are more than "num_tc"
8412  */
8413 static int
ice_validate_mqprio_qopt(struct ice_vsi * vsi,struct tc_mqprio_qopt_offload * mqprio_qopt)8414 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
8415 			 struct tc_mqprio_qopt_offload *mqprio_qopt)
8416 {
8417 	int non_power_of_2_qcount = 0;
8418 	struct ice_pf *pf = vsi->back;
8419 	int max_rss_q_cnt = 0;
8420 	u64 sum_min_rate = 0;
8421 	struct device *dev;
8422 	int i, speed;
8423 	u8 num_tc;
8424 
8425 	if (vsi->type != ICE_VSI_PF)
8426 		return -EINVAL;
8427 
8428 	if (mqprio_qopt->qopt.offset[0] != 0 ||
8429 	    mqprio_qopt->qopt.num_tc < 1 ||
8430 	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
8431 		return -EINVAL;
8432 
8433 	dev = ice_pf_to_dev(pf);
8434 	vsi->ch_rss_size = 0;
8435 	num_tc = mqprio_qopt->qopt.num_tc;
8436 	speed = ice_get_link_speed_kbps(vsi);
8437 
8438 	for (i = 0; num_tc; i++) {
8439 		int qcount = mqprio_qopt->qopt.count[i];
8440 		u64 max_rate, min_rate, rem;
8441 
8442 		if (!qcount)
8443 			return -EINVAL;
8444 
8445 		if (is_power_of_2(qcount)) {
8446 			if (non_power_of_2_qcount &&
8447 			    qcount > non_power_of_2_qcount) {
8448 				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
8449 					qcount, non_power_of_2_qcount);
8450 				return -EINVAL;
8451 			}
8452 			if (qcount > max_rss_q_cnt)
8453 				max_rss_q_cnt = qcount;
8454 		} else {
8455 			if (non_power_of_2_qcount &&
8456 			    qcount != non_power_of_2_qcount) {
8457 				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
8458 					qcount, non_power_of_2_qcount);
8459 				return -EINVAL;
8460 			}
8461 			if (qcount < max_rss_q_cnt) {
8462 				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
8463 					qcount, max_rss_q_cnt);
8464 				return -EINVAL;
8465 			}
8466 			max_rss_q_cnt = qcount;
8467 			non_power_of_2_qcount = qcount;
8468 		}
8469 
8470 		/* TC command takes input in K/N/Gbps or K/M/Gbit etc but
8471 		 * converts the bandwidth rate limit into Bytes/s when
8472 		 * passing it down to the driver. So convert input bandwidth
8473 		 * from Bytes/s to Kbps
8474 		 */
8475 		max_rate = mqprio_qopt->max_rate[i];
8476 		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
8477 
8478 		/* min_rate is minimum guaranteed rate and it can't be zero */
8479 		min_rate = mqprio_qopt->min_rate[i];
8480 		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
8481 		sum_min_rate += min_rate;
8482 
8483 		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
8484 			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
8485 				min_rate, ICE_MIN_BW_LIMIT);
8486 			return -EINVAL;
8487 		}
8488 
8489 		if (max_rate && max_rate > speed) {
8490 			dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
8491 				i, max_rate, speed);
8492 			return -EINVAL;
8493 		}
8494 
8495 		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
8496 		if (rem) {
8497 			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
8498 				i, ICE_MIN_BW_LIMIT);
8499 			return -EINVAL;
8500 		}
8501 
8502 		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
8503 		if (rem) {
8504 			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
8505 				i, ICE_MIN_BW_LIMIT);
8506 			return -EINVAL;
8507 		}
8508 
8509 		/* min_rate can't be more than max_rate, except when max_rate
8510 		 * is zero (implies max_rate sought is max line rate). In such
8511 		 * a case min_rate can be more than max.
8512 		 */
8513 		if (max_rate && min_rate > max_rate) {
8514 			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
8515 				min_rate, max_rate);
8516 			return -EINVAL;
8517 		}
8518 
8519 		if (i >= mqprio_qopt->qopt.num_tc - 1)
8520 			break;
8521 		if (mqprio_qopt->qopt.offset[i + 1] !=
8522 		    (mqprio_qopt->qopt.offset[i] + qcount))
8523 			return -EINVAL;
8524 	}
8525 	if (vsi->num_rxq <
8526 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8527 		return -EINVAL;
8528 	if (vsi->num_txq <
8529 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8530 		return -EINVAL;
8531 
8532 	if (sum_min_rate && sum_min_rate > (u64)speed) {
8533 		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
8534 			sum_min_rate, speed);
8535 		return -EINVAL;
8536 	}
8537 
8538 	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8539 	vsi->ch_rss_size = max_rss_q_cnt;
8540 
8541 	return 0;
8542 }
8543 
8544 /**
8545  * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8546  * @pf: ptr to PF device
8547  * @vsi: ptr to VSI
8548  */
ice_add_vsi_to_fdir(struct ice_pf * pf,struct ice_vsi * vsi)8549 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8550 {
8551 	struct device *dev = ice_pf_to_dev(pf);
8552 	bool added = false;
8553 	struct ice_hw *hw;
8554 	int flow;
8555 
8556 	if (!(vsi->num_gfltr || vsi->num_bfltr))
8557 		return -EINVAL;
8558 
8559 	hw = &pf->hw;
8560 	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8561 		struct ice_fd_hw_prof *prof;
8562 		int tun, status;
8563 		u64 entry_h;
8564 
8565 		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8566 		      hw->fdir_prof[flow]->cnt))
8567 			continue;
8568 
8569 		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8570 			enum ice_flow_priority prio;
8571 
8572 			/* add this VSI to FDir profile for this flow */
8573 			prio = ICE_FLOW_PRIO_NORMAL;
8574 			prof = hw->fdir_prof[flow];
8575 			status = ice_flow_add_entry(hw, ICE_BLK_FD,
8576 						    prof->prof_id[tun],
8577 						    prof->vsi_h[0], vsi->idx,
8578 						    prio, prof->fdir_seg[tun],
8579 						    &entry_h);
8580 			if (status) {
8581 				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8582 					vsi->idx, flow);
8583 				continue;
8584 			}
8585 
8586 			prof->entry_h[prof->cnt][tun] = entry_h;
8587 		}
8588 
8589 		/* store VSI for filter replay and delete */
8590 		prof->vsi_h[prof->cnt] = vsi->idx;
8591 		prof->cnt++;
8592 
8593 		added = true;
8594 		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8595 			flow);
8596 	}
8597 
8598 	if (!added)
8599 		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8600 
8601 	return 0;
8602 }
8603 
8604 /**
8605  * ice_add_channel - add a channel by adding VSI
8606  * @pf: ptr to PF device
8607  * @sw_id: underlying HW switching element ID
8608  * @ch: ptr to channel structure
8609  *
8610  * Add a channel (VSI) using add_vsi and queue_map
8611  */
ice_add_channel(struct ice_pf * pf,u16 sw_id,struct ice_channel * ch)8612 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8613 {
8614 	struct device *dev = ice_pf_to_dev(pf);
8615 	struct ice_vsi *vsi;
8616 
8617 	if (ch->type != ICE_VSI_CHNL) {
8618 		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8619 		return -EINVAL;
8620 	}
8621 
8622 	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8623 	if (!vsi || vsi->type != ICE_VSI_CHNL) {
8624 		dev_err(dev, "create chnl VSI failure\n");
8625 		return -EINVAL;
8626 	}
8627 
8628 	ice_add_vsi_to_fdir(pf, vsi);
8629 
8630 	ch->sw_id = sw_id;
8631 	ch->vsi_num = vsi->vsi_num;
8632 	ch->info.mapping_flags = vsi->info.mapping_flags;
8633 	ch->ch_vsi = vsi;
8634 	/* set the back pointer of channel for newly created VSI */
8635 	vsi->ch = ch;
8636 
8637 	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8638 	       sizeof(vsi->info.q_mapping));
8639 	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8640 	       sizeof(vsi->info.tc_mapping));
8641 
8642 	return 0;
8643 }
8644 
8645 /**
8646  * ice_chnl_cfg_res
8647  * @vsi: the VSI being setup
8648  * @ch: ptr to channel structure
8649  *
8650  * Configure channel specific resources such as rings, vector.
8651  */
ice_chnl_cfg_res(struct ice_vsi * vsi,struct ice_channel * ch)8652 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8653 {
8654 	int i;
8655 
8656 	for (i = 0; i < ch->num_txq; i++) {
8657 		struct ice_q_vector *tx_q_vector, *rx_q_vector;
8658 		struct ice_ring_container *rc;
8659 		struct ice_tx_ring *tx_ring;
8660 		struct ice_rx_ring *rx_ring;
8661 
8662 		tx_ring = vsi->tx_rings[ch->base_q + i];
8663 		rx_ring = vsi->rx_rings[ch->base_q + i];
8664 		if (!tx_ring || !rx_ring)
8665 			continue;
8666 
8667 		/* setup ring being channel enabled */
8668 		tx_ring->ch = ch;
8669 		rx_ring->ch = ch;
8670 
8671 		/* following code block sets up vector specific attributes */
8672 		tx_q_vector = tx_ring->q_vector;
8673 		rx_q_vector = rx_ring->q_vector;
8674 		if (!tx_q_vector && !rx_q_vector)
8675 			continue;
8676 
8677 		if (tx_q_vector) {
8678 			tx_q_vector->ch = ch;
8679 			/* setup Tx and Rx ITR setting if DIM is off */
8680 			rc = &tx_q_vector->tx;
8681 			if (!ITR_IS_DYNAMIC(rc))
8682 				ice_write_itr(rc, rc->itr_setting);
8683 		}
8684 		if (rx_q_vector) {
8685 			rx_q_vector->ch = ch;
8686 			/* setup Tx and Rx ITR setting if DIM is off */
8687 			rc = &rx_q_vector->rx;
8688 			if (!ITR_IS_DYNAMIC(rc))
8689 				ice_write_itr(rc, rc->itr_setting);
8690 		}
8691 	}
8692 
8693 	/* it is safe to assume that, if channel has non-zero num_t[r]xq, then
8694 	 * GLINT_ITR register would have written to perform in-context
8695 	 * update, hence perform flush
8696 	 */
8697 	if (ch->num_txq || ch->num_rxq)
8698 		ice_flush(&vsi->back->hw);
8699 }
8700 
8701 /**
8702  * ice_cfg_chnl_all_res - configure channel resources
8703  * @vsi: pte to main_vsi
8704  * @ch: ptr to channel structure
8705  *
8706  * This function configures channel specific resources such as flow-director
8707  * counter index, and other resources such as queues, vectors, ITR settings
8708  */
8709 static void
ice_cfg_chnl_all_res(struct ice_vsi * vsi,struct ice_channel * ch)8710 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8711 {
8712 	/* configure channel (aka ADQ) resources such as queues, vectors,
8713 	 * ITR settings for channel specific vectors and anything else
8714 	 */
8715 	ice_chnl_cfg_res(vsi, ch);
8716 }
8717 
8718 /**
8719  * ice_setup_hw_channel - setup new channel
8720  * @pf: ptr to PF device
8721  * @vsi: the VSI being setup
8722  * @ch: ptr to channel structure
8723  * @sw_id: underlying HW switching element ID
8724  * @type: type of channel to be created (VMDq2/VF)
8725  *
8726  * Setup new channel (VSI) based on specified type (VMDq2/VF)
8727  * and configures Tx rings accordingly
8728  */
8729 static int
ice_setup_hw_channel(struct ice_pf * pf,struct ice_vsi * vsi,struct ice_channel * ch,u16 sw_id,u8 type)8730 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8731 		     struct ice_channel *ch, u16 sw_id, u8 type)
8732 {
8733 	struct device *dev = ice_pf_to_dev(pf);
8734 	int ret;
8735 
8736 	ch->base_q = vsi->next_base_q;
8737 	ch->type = type;
8738 
8739 	ret = ice_add_channel(pf, sw_id, ch);
8740 	if (ret) {
8741 		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8742 		return ret;
8743 	}
8744 
8745 	/* configure/setup ADQ specific resources */
8746 	ice_cfg_chnl_all_res(vsi, ch);
8747 
8748 	/* make sure to update the next_base_q so that subsequent channel's
8749 	 * (aka ADQ) VSI queue map is correct
8750 	 */
8751 	vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8752 	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8753 		ch->num_rxq);
8754 
8755 	return 0;
8756 }
8757 
8758 /**
8759  * ice_setup_channel - setup new channel using uplink element
8760  * @pf: ptr to PF device
8761  * @vsi: the VSI being setup
8762  * @ch: ptr to channel structure
8763  *
8764  * Setup new channel (VSI) based on specified type (VMDq2/VF)
8765  * and uplink switching element
8766  */
8767 static bool
ice_setup_channel(struct ice_pf * pf,struct ice_vsi * vsi,struct ice_channel * ch)8768 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8769 		  struct ice_channel *ch)
8770 {
8771 	struct device *dev = ice_pf_to_dev(pf);
8772 	u16 sw_id;
8773 	int ret;
8774 
8775 	if (vsi->type != ICE_VSI_PF) {
8776 		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8777 		return false;
8778 	}
8779 
8780 	sw_id = pf->first_sw->sw_id;
8781 
8782 	/* create channel (VSI) */
8783 	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8784 	if (ret) {
8785 		dev_err(dev, "failed to setup hw_channel\n");
8786 		return false;
8787 	}
8788 	dev_dbg(dev, "successfully created channel()\n");
8789 
8790 	return ch->ch_vsi ? true : false;
8791 }
8792 
8793 /**
8794  * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8795  * @vsi: VSI to be configured
8796  * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8797  * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8798  */
8799 static int
ice_set_bw_limit(struct ice_vsi * vsi,u64 max_tx_rate,u64 min_tx_rate)8800 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8801 {
8802 	int err;
8803 
8804 	err = ice_set_min_bw_limit(vsi, min_tx_rate);
8805 	if (err)
8806 		return err;
8807 
8808 	return ice_set_max_bw_limit(vsi, max_tx_rate);
8809 }
8810 
8811 /**
8812  * ice_create_q_channel - function to create channel
8813  * @vsi: VSI to be configured
8814  * @ch: ptr to channel (it contains channel specific params)
8815  *
8816  * This function creates channel (VSI) using num_queues specified by user,
8817  * reconfigs RSS if needed.
8818  */
ice_create_q_channel(struct ice_vsi * vsi,struct ice_channel * ch)8819 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8820 {
8821 	struct ice_pf *pf = vsi->back;
8822 	struct device *dev;
8823 
8824 	if (!ch)
8825 		return -EINVAL;
8826 
8827 	dev = ice_pf_to_dev(pf);
8828 	if (!ch->num_txq || !ch->num_rxq) {
8829 		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8830 		return -EINVAL;
8831 	}
8832 
8833 	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8834 		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8835 			vsi->cnt_q_avail, ch->num_txq);
8836 		return -EINVAL;
8837 	}
8838 
8839 	if (!ice_setup_channel(pf, vsi, ch)) {
8840 		dev_info(dev, "Failed to setup channel\n");
8841 		return -EINVAL;
8842 	}
8843 	/* configure BW rate limit */
8844 	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8845 		int ret;
8846 
8847 		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8848 				       ch->min_tx_rate);
8849 		if (ret)
8850 			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8851 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8852 		else
8853 			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8854 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8855 	}
8856 
8857 	vsi->cnt_q_avail -= ch->num_txq;
8858 
8859 	return 0;
8860 }
8861 
8862 /**
8863  * ice_rem_all_chnl_fltrs - removes all channel filters
8864  * @pf: ptr to PF, TC-flower based filter are tracked at PF level
8865  *
8866  * Remove all advanced switch filters only if they are channel specific
8867  * tc-flower based filter
8868  */
ice_rem_all_chnl_fltrs(struct ice_pf * pf)8869 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8870 {
8871 	struct ice_tc_flower_fltr *fltr;
8872 	struct hlist_node *node;
8873 
8874 	/* to remove all channel filters, iterate an ordered list of filters */
8875 	hlist_for_each_entry_safe(fltr, node,
8876 				  &pf->tc_flower_fltr_list,
8877 				  tc_flower_node) {
8878 		struct ice_rule_query_data rule;
8879 		int status;
8880 
8881 		/* for now process only channel specific filters */
8882 		if (!ice_is_chnl_fltr(fltr))
8883 			continue;
8884 
8885 		rule.rid = fltr->rid;
8886 		rule.rule_id = fltr->rule_id;
8887 		rule.vsi_handle = fltr->dest_vsi_handle;
8888 		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8889 		if (status) {
8890 			if (status == -ENOENT)
8891 				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8892 					rule.rule_id);
8893 			else
8894 				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8895 					status);
8896 		} else if (fltr->dest_vsi) {
8897 			/* update advanced switch filter count */
8898 			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8899 				u32 flags = fltr->flags;
8900 
8901 				fltr->dest_vsi->num_chnl_fltr--;
8902 				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8903 					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8904 					pf->num_dmac_chnl_fltrs--;
8905 			}
8906 		}
8907 
8908 		hlist_del(&fltr->tc_flower_node);
8909 		kfree(fltr);
8910 	}
8911 }
8912 
8913 /**
8914  * ice_remove_q_channels - Remove queue channels for the TCs
8915  * @vsi: VSI to be configured
8916  * @rem_fltr: delete advanced switch filter or not
8917  *
8918  * Remove queue channels for the TCs
8919  */
ice_remove_q_channels(struct ice_vsi * vsi,bool rem_fltr)8920 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8921 {
8922 	struct ice_channel *ch, *ch_tmp;
8923 	struct ice_pf *pf = vsi->back;
8924 	int i;
8925 
8926 	/* remove all tc-flower based filter if they are channel filters only */
8927 	if (rem_fltr)
8928 		ice_rem_all_chnl_fltrs(pf);
8929 
8930 	/* remove ntuple filters since queue configuration is being changed */
8931 	if  (vsi->netdev->features & NETIF_F_NTUPLE) {
8932 		struct ice_hw *hw = &pf->hw;
8933 
8934 		mutex_lock(&hw->fdir_fltr_lock);
8935 		ice_fdir_del_all_fltrs(vsi);
8936 		mutex_unlock(&hw->fdir_fltr_lock);
8937 	}
8938 
8939 	/* perform cleanup for channels if they exist */
8940 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8941 		struct ice_vsi *ch_vsi;
8942 
8943 		list_del(&ch->list);
8944 		ch_vsi = ch->ch_vsi;
8945 		if (!ch_vsi) {
8946 			kfree(ch);
8947 			continue;
8948 		}
8949 
8950 		/* Reset queue contexts */
8951 		for (i = 0; i < ch->num_rxq; i++) {
8952 			struct ice_tx_ring *tx_ring;
8953 			struct ice_rx_ring *rx_ring;
8954 
8955 			tx_ring = vsi->tx_rings[ch->base_q + i];
8956 			rx_ring = vsi->rx_rings[ch->base_q + i];
8957 			if (tx_ring) {
8958 				tx_ring->ch = NULL;
8959 				if (tx_ring->q_vector)
8960 					tx_ring->q_vector->ch = NULL;
8961 			}
8962 			if (rx_ring) {
8963 				rx_ring->ch = NULL;
8964 				if (rx_ring->q_vector)
8965 					rx_ring->q_vector->ch = NULL;
8966 			}
8967 		}
8968 
8969 		/* Release FD resources for the channel VSI */
8970 		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8971 
8972 		/* clear the VSI from scheduler tree */
8973 		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8974 
8975 		/* Delete VSI from FW, PF and HW VSI arrays */
8976 		ice_vsi_delete(ch->ch_vsi);
8977 
8978 		/* free the channel */
8979 		kfree(ch);
8980 	}
8981 
8982 	/* clear the channel VSI map which is stored in main VSI */
8983 	ice_for_each_chnl_tc(i)
8984 		vsi->tc_map_vsi[i] = NULL;
8985 
8986 	/* reset main VSI's all TC information */
8987 	vsi->all_enatc = 0;
8988 	vsi->all_numtc = 0;
8989 }
8990 
8991 /**
8992  * ice_rebuild_channels - rebuild channel
8993  * @pf: ptr to PF
8994  *
8995  * Recreate channel VSIs and replay filters
8996  */
ice_rebuild_channels(struct ice_pf * pf)8997 static int ice_rebuild_channels(struct ice_pf *pf)
8998 {
8999 	struct device *dev = ice_pf_to_dev(pf);
9000 	struct ice_vsi *main_vsi;
9001 	bool rem_adv_fltr = true;
9002 	struct ice_channel *ch;
9003 	struct ice_vsi *vsi;
9004 	int tc_idx = 1;
9005 	int i, err;
9006 
9007 	main_vsi = ice_get_main_vsi(pf);
9008 	if (!main_vsi)
9009 		return 0;
9010 
9011 	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
9012 	    main_vsi->old_numtc == 1)
9013 		return 0; /* nothing to be done */
9014 
9015 	/* reconfigure main VSI based on old value of TC and cached values
9016 	 * for MQPRIO opts
9017 	 */
9018 	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
9019 	if (err) {
9020 		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
9021 			main_vsi->old_ena_tc, main_vsi->vsi_num);
9022 		return err;
9023 	}
9024 
9025 	/* rebuild ADQ VSIs */
9026 	ice_for_each_vsi(pf, i) {
9027 		enum ice_vsi_type type;
9028 
9029 		vsi = pf->vsi[i];
9030 		if (!vsi || vsi->type != ICE_VSI_CHNL)
9031 			continue;
9032 
9033 		type = vsi->type;
9034 
9035 		/* rebuild ADQ VSI */
9036 		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
9037 		if (err) {
9038 			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
9039 				ice_vsi_type_str(type), vsi->idx, err);
9040 			goto cleanup;
9041 		}
9042 
9043 		/* Re-map HW VSI number, using VSI handle that has been
9044 		 * previously validated in ice_replay_vsi() call above
9045 		 */
9046 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
9047 
9048 		/* replay filters for the VSI */
9049 		err = ice_replay_vsi(&pf->hw, vsi->idx);
9050 		if (err) {
9051 			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
9052 				ice_vsi_type_str(type), err, vsi->idx);
9053 			rem_adv_fltr = false;
9054 			goto cleanup;
9055 		}
9056 		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
9057 			 ice_vsi_type_str(type), vsi->idx);
9058 
9059 		/* store ADQ VSI at correct TC index in main VSI's
9060 		 * map of TC to VSI
9061 		 */
9062 		main_vsi->tc_map_vsi[tc_idx++] = vsi;
9063 	}
9064 
9065 	/* ADQ VSI(s) has been rebuilt successfully, so setup
9066 	 * channel for main VSI's Tx and Rx rings
9067 	 */
9068 	list_for_each_entry(ch, &main_vsi->ch_list, list) {
9069 		struct ice_vsi *ch_vsi;
9070 
9071 		ch_vsi = ch->ch_vsi;
9072 		if (!ch_vsi)
9073 			continue;
9074 
9075 		/* reconfig channel resources */
9076 		ice_cfg_chnl_all_res(main_vsi, ch);
9077 
9078 		/* replay BW rate limit if it is non-zero */
9079 		if (!ch->max_tx_rate && !ch->min_tx_rate)
9080 			continue;
9081 
9082 		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
9083 				       ch->min_tx_rate);
9084 		if (err)
9085 			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
9086 				err, ch->max_tx_rate, ch->min_tx_rate,
9087 				ch_vsi->vsi_num);
9088 		else
9089 			dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
9090 				ch->max_tx_rate, ch->min_tx_rate,
9091 				ch_vsi->vsi_num);
9092 	}
9093 
9094 	/* reconfig RSS for main VSI */
9095 	if (main_vsi->ch_rss_size)
9096 		ice_vsi_cfg_rss_lut_key(main_vsi);
9097 
9098 	return 0;
9099 
9100 cleanup:
9101 	ice_remove_q_channels(main_vsi, rem_adv_fltr);
9102 	return err;
9103 }
9104 
9105 /**
9106  * ice_create_q_channels - Add queue channel for the given TCs
9107  * @vsi: VSI to be configured
9108  *
9109  * Configures queue channel mapping to the given TCs
9110  */
ice_create_q_channels(struct ice_vsi * vsi)9111 static int ice_create_q_channels(struct ice_vsi *vsi)
9112 {
9113 	struct ice_pf *pf = vsi->back;
9114 	struct ice_channel *ch;
9115 	int ret = 0, i;
9116 
9117 	ice_for_each_chnl_tc(i) {
9118 		if (!(vsi->all_enatc & BIT(i)))
9119 			continue;
9120 
9121 		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
9122 		if (!ch) {
9123 			ret = -ENOMEM;
9124 			goto err_free;
9125 		}
9126 		INIT_LIST_HEAD(&ch->list);
9127 		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
9128 		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
9129 		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
9130 		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
9131 		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
9132 
9133 		/* convert to Kbits/s */
9134 		if (ch->max_tx_rate)
9135 			ch->max_tx_rate = div_u64(ch->max_tx_rate,
9136 						  ICE_BW_KBPS_DIVISOR);
9137 		if (ch->min_tx_rate)
9138 			ch->min_tx_rate = div_u64(ch->min_tx_rate,
9139 						  ICE_BW_KBPS_DIVISOR);
9140 
9141 		ret = ice_create_q_channel(vsi, ch);
9142 		if (ret) {
9143 			dev_err(ice_pf_to_dev(pf),
9144 				"failed creating channel TC:%d\n", i);
9145 			kfree(ch);
9146 			goto err_free;
9147 		}
9148 		list_add_tail(&ch->list, &vsi->ch_list);
9149 		vsi->tc_map_vsi[i] = ch->ch_vsi;
9150 		dev_dbg(ice_pf_to_dev(pf),
9151 			"successfully created channel: VSI %pK\n", ch->ch_vsi);
9152 	}
9153 	return 0;
9154 
9155 err_free:
9156 	ice_remove_q_channels(vsi, false);
9157 
9158 	return ret;
9159 }
9160 
9161 /**
9162  * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
9163  * @netdev: net device to configure
9164  * @type_data: TC offload data
9165  */
ice_setup_tc_mqprio_qdisc(struct net_device * netdev,void * type_data)9166 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
9167 {
9168 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
9169 	struct ice_netdev_priv *np = netdev_priv(netdev);
9170 	struct ice_vsi *vsi = np->vsi;
9171 	struct ice_pf *pf = vsi->back;
9172 	u16 mode, ena_tc_qdisc = 0;
9173 	int cur_txq, cur_rxq;
9174 	u8 hw = 0, num_tcf;
9175 	struct device *dev;
9176 	int ret, i;
9177 
9178 	dev = ice_pf_to_dev(pf);
9179 	num_tcf = mqprio_qopt->qopt.num_tc;
9180 	hw = mqprio_qopt->qopt.hw;
9181 	mode = mqprio_qopt->mode;
9182 	if (!hw) {
9183 		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
9184 		vsi->ch_rss_size = 0;
9185 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
9186 		goto config_tcf;
9187 	}
9188 
9189 	/* Generate queue region map for number of TCF requested */
9190 	for (i = 0; i < num_tcf; i++)
9191 		ena_tc_qdisc |= BIT(i);
9192 
9193 	switch (mode) {
9194 	case TC_MQPRIO_MODE_CHANNEL:
9195 
9196 		if (pf->hw.port_info->is_custom_tx_enabled) {
9197 			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
9198 			return -EBUSY;
9199 		}
9200 		ice_tear_down_devlink_rate_tree(pf);
9201 
9202 		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
9203 		if (ret) {
9204 			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
9205 				   ret);
9206 			return ret;
9207 		}
9208 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
9209 		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
9210 		/* don't assume state of hw_tc_offload during driver load
9211 		 * and set the flag for TC flower filter if hw_tc_offload
9212 		 * already ON
9213 		 */
9214 		if (vsi->netdev->features & NETIF_F_HW_TC)
9215 			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
9216 		break;
9217 	default:
9218 		return -EINVAL;
9219 	}
9220 
9221 config_tcf:
9222 
9223 	/* Requesting same TCF configuration as already enabled */
9224 	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
9225 	    mode != TC_MQPRIO_MODE_CHANNEL)
9226 		return 0;
9227 
9228 	/* Pause VSI queues */
9229 	ice_dis_vsi(vsi, true);
9230 
9231 	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
9232 		ice_remove_q_channels(vsi, true);
9233 
9234 	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9235 		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
9236 				     num_online_cpus());
9237 		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
9238 				     num_online_cpus());
9239 	} else {
9240 		/* logic to rebuild VSI, same like ethtool -L */
9241 		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
9242 
9243 		for (i = 0; i < num_tcf; i++) {
9244 			if (!(ena_tc_qdisc & BIT(i)))
9245 				continue;
9246 
9247 			offset = vsi->mqprio_qopt.qopt.offset[i];
9248 			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
9249 			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
9250 		}
9251 		vsi->req_txq = offset + qcount_tx;
9252 		vsi->req_rxq = offset + qcount_rx;
9253 
9254 		/* store away original rss_size info, so that it gets reused
9255 		 * form ice_vsi_rebuild during tc-qdisc delete stage - to
9256 		 * determine, what should be the rss_sizefor main VSI
9257 		 */
9258 		vsi->orig_rss_size = vsi->rss_size;
9259 	}
9260 
9261 	/* save current values of Tx and Rx queues before calling VSI rebuild
9262 	 * for fallback option
9263 	 */
9264 	cur_txq = vsi->num_txq;
9265 	cur_rxq = vsi->num_rxq;
9266 
9267 	/* proceed with rebuild main VSI using correct number of queues */
9268 	ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
9269 	if (ret) {
9270 		/* fallback to current number of queues */
9271 		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
9272 		vsi->req_txq = cur_txq;
9273 		vsi->req_rxq = cur_rxq;
9274 		clear_bit(ICE_RESET_FAILED, pf->state);
9275 		if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
9276 			dev_err(dev, "Rebuild of main VSI failed again\n");
9277 			return ret;
9278 		}
9279 	}
9280 
9281 	vsi->all_numtc = num_tcf;
9282 	vsi->all_enatc = ena_tc_qdisc;
9283 	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
9284 	if (ret) {
9285 		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
9286 			   vsi->vsi_num);
9287 		goto exit;
9288 	}
9289 
9290 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9291 		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
9292 		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
9293 
9294 		/* set TC0 rate limit if specified */
9295 		if (max_tx_rate || min_tx_rate) {
9296 			/* convert to Kbits/s */
9297 			if (max_tx_rate)
9298 				max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
9299 			if (min_tx_rate)
9300 				min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
9301 
9302 			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
9303 			if (!ret) {
9304 				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
9305 					max_tx_rate, min_tx_rate, vsi->vsi_num);
9306 			} else {
9307 				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
9308 					max_tx_rate, min_tx_rate, vsi->vsi_num);
9309 				goto exit;
9310 			}
9311 		}
9312 		ret = ice_create_q_channels(vsi);
9313 		if (ret) {
9314 			netdev_err(netdev, "failed configuring queue channels\n");
9315 			goto exit;
9316 		} else {
9317 			netdev_dbg(netdev, "successfully configured channels\n");
9318 		}
9319 	}
9320 
9321 	if (vsi->ch_rss_size)
9322 		ice_vsi_cfg_rss_lut_key(vsi);
9323 
9324 exit:
9325 	/* if error, reset the all_numtc and all_enatc */
9326 	if (ret) {
9327 		vsi->all_numtc = 0;
9328 		vsi->all_enatc = 0;
9329 	}
9330 	/* resume VSI */
9331 	ice_ena_vsi(vsi, true);
9332 
9333 	return ret;
9334 }
9335 
9336 static LIST_HEAD(ice_block_cb_list);
9337 
9338 static int
ice_setup_tc(struct net_device * netdev,enum tc_setup_type type,void * type_data)9339 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
9340 	     void *type_data)
9341 {
9342 	struct ice_netdev_priv *np = netdev_priv(netdev);
9343 	struct ice_pf *pf = np->vsi->back;
9344 	bool locked = false;
9345 	int err;
9346 
9347 	switch (type) {
9348 	case TC_SETUP_BLOCK:
9349 		return flow_block_cb_setup_simple(type_data,
9350 						  &ice_block_cb_list,
9351 						  ice_setup_tc_block_cb,
9352 						  np, np, true);
9353 	case TC_SETUP_QDISC_MQPRIO:
9354 		if (ice_is_eswitch_mode_switchdev(pf)) {
9355 			netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
9356 			return -EOPNOTSUPP;
9357 		}
9358 
9359 		if (pf->adev) {
9360 			mutex_lock(&pf->adev_mutex);
9361 			device_lock(&pf->adev->dev);
9362 			locked = true;
9363 			if (pf->adev->dev.driver) {
9364 				netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
9365 				err = -EBUSY;
9366 				goto adev_unlock;
9367 			}
9368 		}
9369 
9370 		/* setup traffic classifier for receive side */
9371 		mutex_lock(&pf->tc_mutex);
9372 		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
9373 		mutex_unlock(&pf->tc_mutex);
9374 
9375 adev_unlock:
9376 		if (locked) {
9377 			device_unlock(&pf->adev->dev);
9378 			mutex_unlock(&pf->adev_mutex);
9379 		}
9380 		return err;
9381 	default:
9382 		return -EOPNOTSUPP;
9383 	}
9384 	return -EOPNOTSUPP;
9385 }
9386 
9387 static struct ice_indr_block_priv *
ice_indr_block_priv_lookup(struct ice_netdev_priv * np,struct net_device * netdev)9388 ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
9389 			   struct net_device *netdev)
9390 {
9391 	struct ice_indr_block_priv *cb_priv;
9392 
9393 	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
9394 		if (!cb_priv->netdev)
9395 			return NULL;
9396 		if (cb_priv->netdev == netdev)
9397 			return cb_priv;
9398 	}
9399 	return NULL;
9400 }
9401 
9402 static int
ice_indr_setup_block_cb(enum tc_setup_type type,void * type_data,void * indr_priv)9403 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
9404 			void *indr_priv)
9405 {
9406 	struct ice_indr_block_priv *priv = indr_priv;
9407 	struct ice_netdev_priv *np = priv->np;
9408 
9409 	switch (type) {
9410 	case TC_SETUP_CLSFLOWER:
9411 		return ice_setup_tc_cls_flower(np, priv->netdev,
9412 					       (struct flow_cls_offload *)
9413 					       type_data);
9414 	default:
9415 		return -EOPNOTSUPP;
9416 	}
9417 }
9418 
9419 static int
ice_indr_setup_tc_block(struct net_device * netdev,struct Qdisc * sch,struct ice_netdev_priv * np,struct flow_block_offload * f,void * data,void (* cleanup)(struct flow_block_cb * block_cb))9420 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
9421 			struct ice_netdev_priv *np,
9422 			struct flow_block_offload *f, void *data,
9423 			void (*cleanup)(struct flow_block_cb *block_cb))
9424 {
9425 	struct ice_indr_block_priv *indr_priv;
9426 	struct flow_block_cb *block_cb;
9427 
9428 	if (!ice_is_tunnel_supported(netdev) &&
9429 	    !(is_vlan_dev(netdev) &&
9430 	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
9431 		return -EOPNOTSUPP;
9432 
9433 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9434 		return -EOPNOTSUPP;
9435 
9436 	switch (f->command) {
9437 	case FLOW_BLOCK_BIND:
9438 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
9439 		if (indr_priv)
9440 			return -EEXIST;
9441 
9442 		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
9443 		if (!indr_priv)
9444 			return -ENOMEM;
9445 
9446 		indr_priv->netdev = netdev;
9447 		indr_priv->np = np;
9448 		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
9449 
9450 		block_cb =
9451 			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
9452 						 indr_priv, indr_priv,
9453 						 ice_rep_indr_tc_block_unbind,
9454 						 f, netdev, sch, data, np,
9455 						 cleanup);
9456 
9457 		if (IS_ERR(block_cb)) {
9458 			list_del(&indr_priv->list);
9459 			kfree(indr_priv);
9460 			return PTR_ERR(block_cb);
9461 		}
9462 		flow_block_cb_add(block_cb, f);
9463 		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
9464 		break;
9465 	case FLOW_BLOCK_UNBIND:
9466 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
9467 		if (!indr_priv)
9468 			return -ENOENT;
9469 
9470 		block_cb = flow_block_cb_lookup(f->block,
9471 						ice_indr_setup_block_cb,
9472 						indr_priv);
9473 		if (!block_cb)
9474 			return -ENOENT;
9475 
9476 		flow_indr_block_cb_remove(block_cb, f);
9477 
9478 		list_del(&block_cb->driver_list);
9479 		break;
9480 	default:
9481 		return -EOPNOTSUPP;
9482 	}
9483 	return 0;
9484 }
9485 
9486 static int
ice_indr_setup_tc_cb(struct net_device * netdev,struct Qdisc * sch,void * cb_priv,enum tc_setup_type type,void * type_data,void * data,void (* cleanup)(struct flow_block_cb * block_cb))9487 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
9488 		     void *cb_priv, enum tc_setup_type type, void *type_data,
9489 		     void *data,
9490 		     void (*cleanup)(struct flow_block_cb *block_cb))
9491 {
9492 	switch (type) {
9493 	case TC_SETUP_BLOCK:
9494 		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
9495 					       data, cleanup);
9496 
9497 	default:
9498 		return -EOPNOTSUPP;
9499 	}
9500 }
9501 
9502 /**
9503  * ice_open - Called when a network interface becomes active
9504  * @netdev: network interface device structure
9505  *
9506  * The open entry point is called when a network interface is made
9507  * active by the system (IFF_UP). At this point all resources needed
9508  * for transmit and receive operations are allocated, the interrupt
9509  * handler is registered with the OS, the netdev watchdog is enabled,
9510  * and the stack is notified that the interface is ready.
9511  *
9512  * Returns 0 on success, negative value on failure
9513  */
ice_open(struct net_device * netdev)9514 int ice_open(struct net_device *netdev)
9515 {
9516 	struct ice_netdev_priv *np = netdev_priv(netdev);
9517 	struct ice_pf *pf = np->vsi->back;
9518 
9519 	if (ice_is_reset_in_progress(pf->state)) {
9520 		netdev_err(netdev, "can't open net device while reset is in progress");
9521 		return -EBUSY;
9522 	}
9523 
9524 	return ice_open_internal(netdev);
9525 }
9526 
9527 /**
9528  * ice_open_internal - Called when a network interface becomes active
9529  * @netdev: network interface device structure
9530  *
9531  * Internal ice_open implementation. Should not be used directly except for ice_open and reset
9532  * handling routine
9533  *
9534  * Returns 0 on success, negative value on failure
9535  */
ice_open_internal(struct net_device * netdev)9536 int ice_open_internal(struct net_device *netdev)
9537 {
9538 	struct ice_netdev_priv *np = netdev_priv(netdev);
9539 	struct ice_vsi *vsi = np->vsi;
9540 	struct ice_pf *pf = vsi->back;
9541 	struct ice_port_info *pi;
9542 	int err;
9543 
9544 	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
9545 		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
9546 		return -EIO;
9547 	}
9548 
9549 	netif_carrier_off(netdev);
9550 
9551 	pi = vsi->port_info;
9552 	err = ice_update_link_info(pi);
9553 	if (err) {
9554 		netdev_err(netdev, "Failed to get link info, error %d\n", err);
9555 		return err;
9556 	}
9557 
9558 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
9559 
9560 	/* Set PHY if there is media, otherwise, turn off PHY */
9561 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
9562 		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9563 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
9564 			err = ice_init_phy_user_cfg(pi);
9565 			if (err) {
9566 				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
9567 					   err);
9568 				return err;
9569 			}
9570 		}
9571 
9572 		err = ice_configure_phy(vsi);
9573 		if (err) {
9574 			netdev_err(netdev, "Failed to set physical link up, error %d\n",
9575 				   err);
9576 			return err;
9577 		}
9578 	} else {
9579 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9580 		ice_set_link(vsi, false);
9581 	}
9582 
9583 	err = ice_vsi_open(vsi);
9584 	if (err)
9585 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
9586 			   vsi->vsi_num, vsi->vsw->sw_id);
9587 
9588 	/* Update existing tunnels information */
9589 	udp_tunnel_get_rx_info(netdev);
9590 
9591 	return err;
9592 }
9593 
9594 /**
9595  * ice_stop - Disables a network interface
9596  * @netdev: network interface device structure
9597  *
9598  * The stop entry point is called when an interface is de-activated by the OS,
9599  * and the netdevice enters the DOWN state. The hardware is still under the
9600  * driver's control, but the netdev interface is disabled.
9601  *
9602  * Returns success only - not allowed to fail
9603  */
ice_stop(struct net_device * netdev)9604 int ice_stop(struct net_device *netdev)
9605 {
9606 	struct ice_netdev_priv *np = netdev_priv(netdev);
9607 	struct ice_vsi *vsi = np->vsi;
9608 	struct ice_pf *pf = vsi->back;
9609 
9610 	if (ice_is_reset_in_progress(pf->state)) {
9611 		netdev_err(netdev, "can't stop net device while reset is in progress");
9612 		return -EBUSY;
9613 	}
9614 
9615 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
9616 		int link_err = ice_force_phys_link_state(vsi, false);
9617 
9618 		if (link_err) {
9619 			if (link_err == -ENOMEDIUM)
9620 				netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
9621 					    vsi->vsi_num);
9622 			else
9623 				netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
9624 					   vsi->vsi_num, link_err);
9625 
9626 			ice_vsi_close(vsi);
9627 			return -EIO;
9628 		}
9629 	}
9630 
9631 	ice_vsi_close(vsi);
9632 
9633 	return 0;
9634 }
9635 
9636 /**
9637  * ice_features_check - Validate encapsulated packet conforms to limits
9638  * @skb: skb buffer
9639  * @netdev: This port's netdev
9640  * @features: Offload features that the stack believes apply
9641  */
9642 static netdev_features_t
ice_features_check(struct sk_buff * skb,struct net_device __always_unused * netdev,netdev_features_t features)9643 ice_features_check(struct sk_buff *skb,
9644 		   struct net_device __always_unused *netdev,
9645 		   netdev_features_t features)
9646 {
9647 	bool gso = skb_is_gso(skb);
9648 	size_t len;
9649 
9650 	/* No point in doing any of this if neither checksum nor GSO are
9651 	 * being requested for this frame. We can rule out both by just
9652 	 * checking for CHECKSUM_PARTIAL
9653 	 */
9654 	if (skb->ip_summed != CHECKSUM_PARTIAL)
9655 		return features;
9656 
9657 	/* We cannot support GSO if the MSS is going to be less than
9658 	 * 64 bytes. If it is then we need to drop support for GSO.
9659 	 */
9660 	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
9661 		features &= ~NETIF_F_GSO_MASK;
9662 
9663 	len = skb_network_offset(skb);
9664 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
9665 		goto out_rm_features;
9666 
9667 	len = skb_network_header_len(skb);
9668 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9669 		goto out_rm_features;
9670 
9671 	if (skb->encapsulation) {
9672 		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
9673 		 * the case of IPIP frames, the transport header pointer is
9674 		 * after the inner header! So check to make sure that this
9675 		 * is a GRE or UDP_TUNNEL frame before doing that math.
9676 		 */
9677 		if (gso && (skb_shinfo(skb)->gso_type &
9678 			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
9679 			len = skb_inner_network_header(skb) -
9680 			      skb_transport_header(skb);
9681 			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
9682 				goto out_rm_features;
9683 		}
9684 
9685 		len = skb_inner_network_header_len(skb);
9686 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9687 			goto out_rm_features;
9688 	}
9689 
9690 	return features;
9691 out_rm_features:
9692 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9693 }
9694 
9695 static const struct net_device_ops ice_netdev_safe_mode_ops = {
9696 	.ndo_open = ice_open,
9697 	.ndo_stop = ice_stop,
9698 	.ndo_start_xmit = ice_start_xmit,
9699 	.ndo_set_mac_address = ice_set_mac_address,
9700 	.ndo_validate_addr = eth_validate_addr,
9701 	.ndo_change_mtu = ice_change_mtu,
9702 	.ndo_get_stats64 = ice_get_stats64,
9703 	.ndo_tx_timeout = ice_tx_timeout,
9704 	.ndo_bpf = ice_xdp_safe_mode,
9705 };
9706 
9707 static const struct net_device_ops ice_netdev_ops = {
9708 	.ndo_open = ice_open,
9709 	.ndo_stop = ice_stop,
9710 	.ndo_start_xmit = ice_start_xmit,
9711 	.ndo_select_queue = ice_select_queue,
9712 	.ndo_features_check = ice_features_check,
9713 	.ndo_fix_features = ice_fix_features,
9714 	.ndo_set_rx_mode = ice_set_rx_mode,
9715 	.ndo_set_mac_address = ice_set_mac_address,
9716 	.ndo_validate_addr = eth_validate_addr,
9717 	.ndo_change_mtu = ice_change_mtu,
9718 	.ndo_get_stats64 = ice_get_stats64,
9719 	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
9720 	.ndo_eth_ioctl = ice_eth_ioctl,
9721 	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
9722 	.ndo_set_vf_mac = ice_set_vf_mac,
9723 	.ndo_get_vf_config = ice_get_vf_cfg,
9724 	.ndo_set_vf_trust = ice_set_vf_trust,
9725 	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
9726 	.ndo_set_vf_link_state = ice_set_vf_link_state,
9727 	.ndo_get_vf_stats = ice_get_vf_stats,
9728 	.ndo_set_vf_rate = ice_set_vf_bw,
9729 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
9730 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
9731 	.ndo_setup_tc = ice_setup_tc,
9732 	.ndo_set_features = ice_set_features,
9733 	.ndo_bridge_getlink = ice_bridge_getlink,
9734 	.ndo_bridge_setlink = ice_bridge_setlink,
9735 	.ndo_fdb_add = ice_fdb_add,
9736 	.ndo_fdb_del = ice_fdb_del,
9737 #ifdef CONFIG_RFS_ACCEL
9738 	.ndo_rx_flow_steer = ice_rx_flow_steer,
9739 #endif
9740 	.ndo_tx_timeout = ice_tx_timeout,
9741 	.ndo_bpf = ice_xdp,
9742 	.ndo_xdp_xmit = ice_xdp_xmit,
9743 	.ndo_xsk_wakeup = ice_xsk_wakeup,
9744 };
9745