xref: /linux/drivers/net/ethernet/intel/ice/ice_main.c (revision 8fdb05de0e2db89d8f56144c60ab784812e8c3b7)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018-2023, Intel Corporation. */
3 
4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <generated/utsrelease.h>
9 #include <linux/crash_dump.h>
10 #include "ice.h"
11 #include "ice_base.h"
12 #include "ice_lib.h"
13 #include "ice_fltr.h"
14 #include "ice_dcb_lib.h"
15 #include "ice_dcb_nl.h"
16 #include "devlink/devlink.h"
17 #include "devlink/port.h"
18 #include "ice_sf_eth.h"
19 #include "ice_hwmon.h"
20 /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
21  * ice tracepoint functions. This must be done exactly once across the
22  * ice driver.
23  */
24 #define CREATE_TRACE_POINTS
25 #include "ice_trace.h"
26 #include "ice_eswitch.h"
27 #include "ice_tc_lib.h"
28 #include "ice_vsi_vlan_ops.h"
29 #include <net/xdp_sock_drv.h>
30 
31 #define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
32 static const char ice_driver_string[] = DRV_SUMMARY;
33 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
34 
35 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
36 #define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
37 #define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"
38 
39 MODULE_DESCRIPTION(DRV_SUMMARY);
40 MODULE_IMPORT_NS("LIBETH");
41 MODULE_IMPORT_NS("LIBETH_XDP");
42 MODULE_IMPORT_NS("LIBIE");
43 MODULE_IMPORT_NS("LIBIE_ADMINQ");
44 MODULE_IMPORT_NS("LIBIE_FWLOG");
45 MODULE_LICENSE("GPL v2");
46 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
47 
48 static int debug = -1;
49 module_param(debug, int, 0644);
50 #ifndef CONFIG_DYNAMIC_DEBUG
51 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
52 #else
53 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
54 #endif /* !CONFIG_DYNAMIC_DEBUG */
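/* Illustrative usage (assuming only standard module_param semantics): with
 * the 0644 permissions above, the debug level can be set at load time or at
 * runtime, e.g.
 *
 *	# modprobe ice debug=16
 *	# echo 16 > /sys/module/ice/parameters/debug
 *
 * Per the parameter description, a value with the top bit set (0x8XXXXXXX)
 * is treated as a HW debug_mask when CONFIG_DYNAMIC_DEBUG is not enabled.
 */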
55 
56 DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
57 EXPORT_SYMBOL(ice_xdp_locking_key);
58 
59 /**
60  * ice_hw_to_dev - Get device pointer from the hardware structure
61  * @hw: pointer to the device HW structure
62  *
63  * Used to access the device pointer from compilation units which can't easily
64  * include the definition of struct ice_pf without leading to circular header
65  * dependencies.
66  */
67 struct device *ice_hw_to_dev(struct ice_hw *hw)
68 {
69 	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
70 
71 	return &pf->pdev->dev;
72 }
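/* Usage sketch (illustrative): a compilation unit that only has a
 * struct ice_hw, such as a low-level AdminQ helper, can still log against
 * the underlying PCI device without pulling in the ice_pf definition:
 *
 *	dev_dbg(ice_hw_to_dev(hw), "AQ opcode 0x%04x failed\n", opcode);
 */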
73 
74 static struct workqueue_struct *ice_wq;
75 struct workqueue_struct *ice_lag_wq;
76 static const struct net_device_ops ice_netdev_safe_mode_ops;
77 static const struct net_device_ops ice_netdev_ops;
78 
79 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
80 
81 static void ice_vsi_release_all(struct ice_pf *pf);
82 
83 static int ice_rebuild_channels(struct ice_pf *pf);
84 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
85 
86 static int
87 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
88 		     void *cb_priv, enum tc_setup_type type, void *type_data,
89 		     void *data,
90 		     void (*cleanup)(struct flow_block_cb *block_cb));
91 
92 bool netif_is_ice(const struct net_device *dev)
93 {
94 	return dev && (dev->netdev_ops == &ice_netdev_ops ||
95 		       dev->netdev_ops == &ice_netdev_safe_mode_ops);
96 }
97 
98 /**
99  * ice_get_tx_pending - returns number of Tx descriptors not processed
100  * @ring: the ring of descriptors
101  */
102 static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
103 {
104 	u16 head, tail;
105 
106 	head = ring->next_to_clean;
107 	tail = ring->next_to_use;
108 
109 	if (head != tail)
110 		return (head < tail) ?
111 			tail - head : (tail + ring->count - head);
112 	return 0;
113 }
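/* Worked example (illustrative): on a ring with count = 512, if
 * next_to_clean (head) is 500 and next_to_use (tail) has wrapped to 20,
 * the pending count is tail + count - head = 20 + 512 - 500 = 32
 * descriptors still awaiting completion.
 */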
114 
115 /**
116  * ice_check_for_hang_subtask - check for and recover hung queues
117  * @pf: pointer to PF struct
118  */
119 static void ice_check_for_hang_subtask(struct ice_pf *pf)
120 {
121 	struct ice_vsi *vsi = NULL;
122 	struct ice_hw *hw;
123 	unsigned int i;
124 	int packets;
125 	u32 v;
126 
127 	ice_for_each_vsi(pf, v)
128 		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
129 			vsi = pf->vsi[v];
130 			break;
131 		}
132 
133 	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
134 		return;
135 
136 	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
137 		return;
138 
139 	hw = &vsi->back->hw;
140 
141 	ice_for_each_txq(vsi, i) {
142 		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
143 		struct ice_ring_stats *ring_stats;
144 
145 		if (!tx_ring)
146 			continue;
147 		if (ice_ring_ch_enabled(tx_ring))
148 			continue;
149 
150 		ring_stats = tx_ring->ring_stats;
151 		if (!ring_stats)
152 			continue;
153 
154 		if (tx_ring->desc) {
155 			/* If the packet counter has not changed, the queue is
156 			 * likely stalled, so force an interrupt for this
157 			 * queue.
158 			 *
159 			 * prev_pkt would be negative if there was no
160 			 * pending work.
161 			 */
162 			packets = ring_stats->stats.pkts & INT_MAX;
163 			if (ring_stats->tx_stats.prev_pkt == packets) {
164 				/* Trigger sw interrupt to revive the queue */
165 				ice_trigger_sw_intr(hw, tx_ring->q_vector);
166 				continue;
167 			}
168 
169 			/* Memory barrier between read of packet count and call
170 			 * to ice_get_tx_pending()
171 			 */
172 			smp_rmb();
173 			ring_stats->tx_stats.prev_pkt =
174 			    ice_get_tx_pending(tx_ring) ? packets : -1;
175 		}
176 	}
177 }
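/* Worked example (illustrative): suppose a queue has sent 100 packets in
 * total and then stalls with descriptors still pending. On the first pass
 * above, prev_pkt != 100, so prev_pkt is set to 100 because work is
 * pending. On the next pass the counter is still 100, prev_pkt == packets,
 * and a software interrupt is triggered to revive the queue. Had no
 * descriptors been pending, prev_pkt would have been set to -1 instead and
 * no interrupt would be forced.
 */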
178 
179 /**
180  * ice_init_mac_fltr - Set initial MAC filters
181  * @pf: board private structure
182  *
183  * Set initial set of MAC filters for PF VSI; configure filters for permanent
184  * address and broadcast address. If an error is encountered, the netdevice will be
185  * unregistered.
186  */
187 static int ice_init_mac_fltr(struct ice_pf *pf)
188 {
189 	struct ice_vsi *vsi;
190 	u8 *perm_addr;
191 
192 	vsi = ice_get_main_vsi(pf);
193 	if (!vsi)
194 		return -EINVAL;
195 
196 	perm_addr = vsi->port_info->mac.perm_addr;
197 	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
198 }
199 
200 /**
201  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
202  * @netdev: the net device on which the sync is happening
203  * @addr: MAC address to sync
204  *
205  * This is a callback function which is called by the in-kernel device sync
206  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
207  * populates the tmp_sync_list, which is later used by ice_add_mac to add the
208  * MAC filters to the hardware.
209  */
210 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
211 {
212 	struct ice_netdev_priv *np = netdev_priv(netdev);
213 	struct ice_vsi *vsi = np->vsi;
214 
215 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
216 				     ICE_FWD_TO_VSI))
217 		return -EINVAL;
218 
219 	return 0;
220 }
221 
222 /**
223  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
224  * @netdev: the net device on which the unsync is happening
225  * @addr: MAC address to unsync
226  *
227  * This is a callback function which is called by the in-kernel device unsync
228  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
229  * populates the tmp_unsync_list, which is later used by ice_remove_mac to
230  * delete the MAC filters from the hardware.
231  */
232 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
233 {
234 	struct ice_netdev_priv *np = netdev_priv(netdev);
235 	struct ice_vsi *vsi = np->vsi;
236 
237 	/* Under some circumstances, we might receive a request to delete our
238 	 * own device address from our uc list. Because we store the device
239 	 * address in the VSI's MAC filter list, we need to ignore such
240 	 * requests and not delete our device address from this list.
241 	 */
242 	if (ether_addr_equal(addr, netdev->dev_addr))
243 		return 0;
244 
245 	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
246 				     ICE_FWD_TO_VSI))
247 		return -EINVAL;
248 
249 	return 0;
250 }
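/* Neither callback above is invoked directly by the driver; both are handed
 * to the in-kernel address-sync helpers from ice_vsi_sync_fltr() below,
 * roughly as follows (see that function for the lock-protected version):
 *
 *	__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 *	__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 *
 * The helpers walk the netdev's unicast/multicast lists, calling the first
 * callback for newly added addresses and the second for removed ones.
 */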
251 
252 /**
253  * ice_vsi_fltr_changed - check if filter state changed
254  * @vsi: VSI to be checked
255  *
256  * returns true if filter state has changed, false otherwise.
257  */
258 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
259 {
260 	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
261 	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
262 }
263 
264 /**
265  * ice_set_promisc - Enable promiscuous mode for a given PF
266  * @vsi: the VSI being configured
267  * @promisc_m: mask of promiscuous config bits
268  *
269  */
270 static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
271 {
272 	int status;
273 
274 	if (vsi->type != ICE_VSI_PF)
275 		return 0;
276 
277 	if (ice_vsi_has_non_zero_vlans(vsi)) {
278 		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
279 		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
280 						       promisc_m);
281 	} else {
282 		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
283 						  promisc_m, 0);
284 	}
285 	if (status && status != -EEXIST)
286 		return status;
287 
288 	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
289 		   vsi->vsi_num, promisc_m);
290 	return 0;
291 }
292 
293 /**
294  * ice_clear_promisc - Disable promiscuous mode for a given PF
295  * @vsi: the VSI being configured
296  * @promisc_m: mask of promiscuous config bits
297  *
298  */
299 static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
300 {
301 	int status;
302 
303 	if (vsi->type != ICE_VSI_PF)
304 		return 0;
305 
306 	if (ice_vsi_has_non_zero_vlans(vsi)) {
307 		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
308 		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
309 							 promisc_m);
310 	} else {
311 		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
312 						    promisc_m, 0);
313 	}
314 
315 	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
316 		   vsi->vsi_num, promisc_m);
317 	return status;
318 }
319 
320 /**
321  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
322  * @vsi: ptr to the VSI
323  *
324  * Push any outstanding VSI filter changes through the AdminQ.
325  */
326 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
327 {
328 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
329 	struct device *dev = ice_pf_to_dev(vsi->back);
330 	struct net_device *netdev = vsi->netdev;
331 	bool promisc_forced_on = false;
332 	struct ice_pf *pf = vsi->back;
333 	struct ice_hw *hw = &pf->hw;
334 	u32 changed_flags = 0;
335 	int err;
336 
337 	if (!vsi->netdev)
338 		return -EINVAL;
339 
340 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
341 		usleep_range(1000, 2000);
342 
343 	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
344 	vsi->current_netdev_flags = vsi->netdev->flags;
345 
346 	INIT_LIST_HEAD(&vsi->tmp_sync_list);
347 	INIT_LIST_HEAD(&vsi->tmp_unsync_list);
348 
349 	if (ice_vsi_fltr_changed(vsi)) {
350 		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
351 		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
352 
353 		/* grab the netdev's addr_list_lock */
354 		netif_addr_lock_bh(netdev);
355 		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
356 			      ice_add_mac_to_unsync_list);
357 		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
358 			      ice_add_mac_to_unsync_list);
359 		/* our temp lists are populated. release lock */
360 		netif_addr_unlock_bh(netdev);
361 	}
362 
363 	/* Remove MAC addresses in the unsync list */
364 	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
365 	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
366 	if (err) {
367 		netdev_err(netdev, "Failed to delete MAC filters\n");
368 		/* if we failed because of alloc failures, just bail */
369 		if (err == -ENOMEM)
370 			goto out;
371 	}
372 
373 	/* Add MAC addresses in the sync list */
374 	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
375 	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
376 	/* If the filter was added successfully or already exists, do not enter
377 	 * the 'if' branch and report it as an error. Instead, continue
378 	 * processing the rest of the function.
379 	 */
380 	if (err && err != -EEXIST) {
381 		netdev_err(netdev, "Failed to add MAC filters\n");
382 		/* If there is no more space for new umac filters, VSI
383 		 * should go into promiscuous mode. There should be some
384 		 * space reserved for promiscuous filters.
385 		 */
386 		if (hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOSPC &&
387 		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
388 				      vsi->state)) {
389 			promisc_forced_on = true;
390 			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
391 				    vsi->vsi_num);
392 		} else {
393 			goto out;
394 		}
395 	}
396 	err = 0;
397 	/* check for changes in promiscuous modes */
398 	if (changed_flags & IFF_ALLMULTI) {
399 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
400 			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
401 			if (err) {
402 				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
403 				goto out_promisc;
404 			}
405 		} else {
406 			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
407 			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
408 			if (err) {
409 				vsi->current_netdev_flags |= IFF_ALLMULTI;
410 				goto out_promisc;
411 			}
412 		}
413 	}
414 
415 	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
416 	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
417 		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
418 		if (vsi->current_netdev_flags & IFF_PROMISC) {
419 			/* Apply Rx filter rule to get traffic from wire */
420 			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
421 				err = ice_set_dflt_vsi(vsi);
422 				if (err && err != -EEXIST) {
423 					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
424 						   err, vsi->vsi_num);
425 					vsi->current_netdev_flags &=
426 						~IFF_PROMISC;
427 					goto out_promisc;
428 				}
429 				err = 0;
430 				vlan_ops->dis_rx_filtering(vsi);
431 
432 				/* promiscuous mode implies allmulticast so
433 				 * that VSIs that are in promiscuous mode are
434 				 * subscribed to multicast packets coming to
435 				 * the port
436 				 */
437 				err = ice_set_promisc(vsi,
438 						      ICE_MCAST_PROMISC_BITS);
439 				if (err)
440 					goto out_promisc;
441 			}
442 		} else {
443 			/* Clear Rx filter to remove traffic from wire */
444 			if (ice_is_vsi_dflt_vsi(vsi)) {
445 				err = ice_clear_dflt_vsi(vsi);
446 				if (err) {
447 					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
448 						   err, vsi->vsi_num);
449 					vsi->current_netdev_flags |=
450 						IFF_PROMISC;
451 					goto out_promisc;
452 				}
453 				if (vsi->netdev->features &
454 				    NETIF_F_HW_VLAN_CTAG_FILTER)
455 					vlan_ops->ena_rx_filtering(vsi);
456 			}
457 
458 			/* disable allmulti here, but only if allmulti is not
459 			 * still enabled for the netdev
460 			 */
461 			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
462 				err = ice_clear_promisc(vsi,
463 							ICE_MCAST_PROMISC_BITS);
464 				if (err) {
465 					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
466 						   err, vsi->vsi_num);
467 				}
468 			}
469 		}
470 	}
471 	goto exit;
472 
473 out_promisc:
474 	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
475 	goto exit;
476 out:
477 	/* if something went wrong then set the changed flag so we try again */
478 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
479 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
480 exit:
481 	clear_bit(ICE_CFG_BUSY, vsi->state);
482 	return err;
483 }
484 
485 /**
486  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
487  * @pf: board private structure
488  */
489 static void ice_sync_fltr_subtask(struct ice_pf *pf)
490 {
491 	int v;
492 
493 	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
494 		return;
495 
496 	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
497 
498 	ice_for_each_vsi(pf, v)
499 		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
500 		    ice_vsi_sync_fltr(pf->vsi[v])) {
501 			/* come back and try again later */
502 			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
503 			break;
504 		}
505 }
506 
507 /**
508  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
509  * @pf: the PF
510  * @locked: is the rtnl_lock already held
511  */
512 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
513 {
514 	int node;
515 	int v;
516 
517 	ice_for_each_vsi(pf, v)
518 		if (pf->vsi[v])
519 			ice_dis_vsi(pf->vsi[v], locked);
520 
521 	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
522 		pf->pf_agg_node[node].num_vsis = 0;
523 
524 	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
525 		pf->vf_agg_node[node].num_vsis = 0;
526 }
527 
528 /**
529  * ice_prepare_for_reset - prep for reset
530  * @pf: board private structure
531  * @reset_type: reset type requested
532  *
533  * Inform or close all dependent features in prep for reset.
534  */
535 static void
536 ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
537 {
538 	struct ice_hw *hw = &pf->hw;
539 	struct ice_vsi *vsi;
540 	struct ice_vf *vf;
541 	unsigned int bkt;
542 
543 	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
544 
545 	/* already prepared for reset */
546 	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
547 		return;
548 
549 	synchronize_irq(pf->oicr_irq.virq);
550 
551 	ice_unplug_aux_dev(pf);
552 
553 	/* Notify VFs of impending reset */
554 	if (ice_check_sq_alive(hw, &hw->mailboxq))
555 		ice_vc_notify_reset(pf);
556 
557 	/* Disable VFs until reset is completed */
558 	mutex_lock(&pf->vfs.table_lock);
559 	ice_for_each_vf(pf, bkt, vf)
560 		ice_set_vf_state_dis(vf);
561 	mutex_unlock(&pf->vfs.table_lock);
562 
563 	if (ice_is_eswitch_mode_switchdev(pf)) {
564 		rtnl_lock();
565 		ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge);
566 		rtnl_unlock();
567 	}
568 
569 	/* release ADQ specific HW and SW resources */
570 	vsi = ice_get_main_vsi(pf);
571 	if (!vsi)
572 		goto skip;
573 
574 	/* to be on the safe side, reset orig_rss_size so that the normal flow
575 	 * of deciding rss_size can take precedence
576 	 */
577 	vsi->orig_rss_size = 0;
578 
579 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
580 		if (reset_type == ICE_RESET_PFR) {
581 			vsi->old_ena_tc = vsi->all_enatc;
582 			vsi->old_numtc = vsi->all_numtc;
583 		} else {
584 			ice_remove_q_channels(vsi, true);
585 
586 			/* for other reset types, channel rebuild is not
587 			 * supported, hence reset the related info
588 			 */
589 			vsi->old_ena_tc = 0;
590 			vsi->all_enatc = 0;
591 			vsi->old_numtc = 0;
592 			vsi->all_numtc = 0;
593 			vsi->req_txq = 0;
594 			vsi->req_rxq = 0;
595 			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
596 			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
597 		}
598 	}
599 
600 	if (vsi->netdev)
601 		netif_device_detach(vsi->netdev);
602 skip:
603 
604 	/* clear SW filtering DB */
605 	ice_clear_hw_tbls(hw);
606 	/* disable the VSIs and their queues that are not already DOWN */
607 	set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);
608 	ice_pf_dis_all_vsi(pf, false);
609 
610 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
611 		ice_ptp_prepare_for_reset(pf, reset_type);
612 
613 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
614 		ice_gnss_exit(pf);
615 
616 	if (hw->port_info)
617 		ice_sched_clear_port(hw->port_info);
618 
619 	ice_shutdown_all_ctrlq(hw, false);
620 
621 	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
622 }
623 
624 /**
625  * ice_do_reset - Initiate one of many types of resets
626  * @pf: board private structure
627  * @reset_type: reset type requested before this function was called.
628  */
629 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
630 {
631 	struct device *dev = ice_pf_to_dev(pf);
632 	struct ice_hw *hw = &pf->hw;
633 
634 	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
635 
636 	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
637 		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
638 		reset_type = ICE_RESET_CORER;
639 	}
640 
641 	ice_prepare_for_reset(pf, reset_type);
642 
643 	/* trigger the reset */
644 	if (ice_reset(hw, reset_type)) {
645 		dev_err(dev, "reset %d failed\n", reset_type);
646 		set_bit(ICE_RESET_FAILED, pf->state);
647 		clear_bit(ICE_RESET_OICR_RECV, pf->state);
648 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
649 		clear_bit(ICE_PFR_REQ, pf->state);
650 		clear_bit(ICE_CORER_REQ, pf->state);
651 		clear_bit(ICE_GLOBR_REQ, pf->state);
652 		wake_up(&pf->reset_wait_queue);
653 		return;
654 	}
655 
656 	/* PFR is a bit of a special case because it doesn't result in an OICR
657 	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
658 	 * associated state bits.
659 	 */
660 	if (reset_type == ICE_RESET_PFR) {
661 		pf->pfr_count++;
662 		ice_rebuild(pf, reset_type);
663 		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
664 		clear_bit(ICE_PFR_REQ, pf->state);
665 		wake_up(&pf->reset_wait_queue);
666 		ice_reset_all_vfs(pf);
667 	}
668 }
669 
670 /**
671  * ice_reset_subtask - Set up for resetting the device and driver
672  * @pf: board private structure
673  */
674 static void ice_reset_subtask(struct ice_pf *pf)
675 {
676 	enum ice_reset_req reset_type = ICE_RESET_INVAL;
677 
678 	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
679 	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
680 	 * of reset is pending and sets bits in pf->state indicating the reset
681  * type and ICE_RESET_OICR_RECV. So, if the latter bit is set, prepare
682  * for the pending reset if not already prepared (for PF software-initiated
683 	 * global resets the software should already be prepared for it as
684 	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated
685 	 * by firmware or software on other PFs, that bit is not set so prepare
686 	 * for the reset now), poll for reset done, rebuild and return.
687 	 */
688 	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
689 		/* Perform the largest reset requested */
690 		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
691 			reset_type = ICE_RESET_CORER;
692 		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
693 			reset_type = ICE_RESET_GLOBR;
694 		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
695 			reset_type = ICE_RESET_EMPR;
696 		/* return if no valid reset type requested */
697 		if (reset_type == ICE_RESET_INVAL)
698 			return;
699 		ice_prepare_for_reset(pf, reset_type);
700 
701 		/* make sure we are ready to rebuild */
702 		if (ice_check_reset(&pf->hw)) {
703 			set_bit(ICE_RESET_FAILED, pf->state);
704 		} else {
705 			/* done with reset. start rebuild */
706 			pf->hw.reset_ongoing = false;
707 			ice_rebuild(pf, reset_type);
708 			/* clear bit to resume normal operations, but
709 			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
710 			 */
711 			clear_bit(ICE_RESET_OICR_RECV, pf->state);
712 			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
713 			clear_bit(ICE_PFR_REQ, pf->state);
714 			clear_bit(ICE_CORER_REQ, pf->state);
715 			clear_bit(ICE_GLOBR_REQ, pf->state);
716 			wake_up(&pf->reset_wait_queue);
717 			ice_reset_all_vfs(pf);
718 		}
719 
720 		return;
721 	}
722 
723 	/* No pending resets to finish processing. Check for new resets */
724 	if (test_bit(ICE_PFR_REQ, pf->state)) {
725 		reset_type = ICE_RESET_PFR;
726 		if (pf->lag && pf->lag->bonded) {
727 			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
728 			reset_type = ICE_RESET_CORER;
729 		}
730 	}
731 	if (test_bit(ICE_CORER_REQ, pf->state))
732 		reset_type = ICE_RESET_CORER;
733 	if (test_bit(ICE_GLOBR_REQ, pf->state))
734 		reset_type = ICE_RESET_GLOBR;
735 	/* If no valid reset type requested just return */
736 	if (reset_type == ICE_RESET_INVAL)
737 		return;
738 
739 	/* reset if not already down or busy */
740 	if (!test_bit(ICE_DOWN, pf->state) &&
741 	    !test_bit(ICE_CFG_BUSY, pf->state)) {
742 		ice_do_reset(pf, reset_type);
743 	}
744 }
745 
746 /**
747  * ice_print_topo_conflict - print topology conflict message
748  * @vsi: the VSI whose topology status is being checked
749  */
750 static void ice_print_topo_conflict(struct ice_vsi *vsi)
751 {
752 	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
753 	case ICE_AQ_LINK_TOPO_CONFLICT:
754 	case ICE_AQ_LINK_MEDIA_CONFLICT:
755 	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
756 	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
757 	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
758 		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
759 		break;
760 	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
761 		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
762 			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
763 		else
764 			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
765 		break;
766 	default:
767 		break;
768 	}
769 }
770 
771 /**
772  * ice_print_link_msg - print link up or down message
773  * @vsi: the VSI whose link status is being queried
774  * @isup: boolean for if the link is now up or down
775  */
776 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
777 {
778 	struct ice_aqc_get_phy_caps_data *caps;
779 	const char *an_advertised;
780 	const char *fec_req;
781 	const char *speed;
782 	const char *fec;
783 	const char *fc;
784 	const char *an;
785 	int status;
786 
787 	if (!vsi)
788 		return;
789 
790 	if (vsi->current_isup == isup)
791 		return;
792 
793 	vsi->current_isup = isup;
794 
795 	if (!isup) {
796 		netdev_info(vsi->netdev, "NIC Link is Down\n");
797 		return;
798 	}
799 
800 	switch (vsi->port_info->phy.link_info.link_speed) {
801 	case ICE_AQ_LINK_SPEED_200GB:
802 		speed = "200 G";
803 		break;
804 	case ICE_AQ_LINK_SPEED_100GB:
805 		speed = "100 G";
806 		break;
807 	case ICE_AQ_LINK_SPEED_50GB:
808 		speed = "50 G";
809 		break;
810 	case ICE_AQ_LINK_SPEED_40GB:
811 		speed = "40 G";
812 		break;
813 	case ICE_AQ_LINK_SPEED_25GB:
814 		speed = "25 G";
815 		break;
816 	case ICE_AQ_LINK_SPEED_20GB:
817 		speed = "20 G";
818 		break;
819 	case ICE_AQ_LINK_SPEED_10GB:
820 		speed = "10 G";
821 		break;
822 	case ICE_AQ_LINK_SPEED_5GB:
823 		speed = "5 G";
824 		break;
825 	case ICE_AQ_LINK_SPEED_2500MB:
826 		speed = "2.5 G";
827 		break;
828 	case ICE_AQ_LINK_SPEED_1000MB:
829 		speed = "1 G";
830 		break;
831 	case ICE_AQ_LINK_SPEED_100MB:
832 		speed = "100 M";
833 		break;
834 	default:
835 		speed = "Unknown ";
836 		break;
837 	}
838 
839 	switch (vsi->port_info->fc.current_mode) {
840 	case ICE_FC_FULL:
841 		fc = "Rx/Tx";
842 		break;
843 	case ICE_FC_TX_PAUSE:
844 		fc = "Tx";
845 		break;
846 	case ICE_FC_RX_PAUSE:
847 		fc = "Rx";
848 		break;
849 	case ICE_FC_NONE:
850 		fc = "None";
851 		break;
852 	default:
853 		fc = "Unknown";
854 		break;
855 	}
856 
857 	/* Get FEC mode based on negotiated link info */
858 	switch (vsi->port_info->phy.link_info.fec_info) {
859 	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
860 	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
861 		fec = "RS-FEC";
862 		break;
863 	case ICE_AQ_LINK_25G_KR_FEC_EN:
864 		fec = "FC-FEC/BASE-R";
865 		break;
866 	default:
867 		fec = "NONE";
868 		break;
869 	}
870 
871 	/* check if autoneg completed; may be false if autoneg is not supported */
872 	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
873 		an = "True";
874 	else
875 		an = "False";
876 
877 	/* Get FEC mode requested based on PHY caps last SW configuration */
878 	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
879 	if (!caps) {
880 		fec_req = "Unknown";
881 		an_advertised = "Unknown";
882 		goto done;
883 	}
884 
885 	status = ice_aq_get_phy_caps(vsi->port_info, false,
886 				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
887 	if (status)
888 		netdev_info(vsi->netdev, "Get phy capability failed.\n");
889 
890 	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
891 
892 	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
893 	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
894 		fec_req = "RS-FEC";
895 	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
896 		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
897 		fec_req = "FC-FEC/BASE-R";
898 	else
899 		fec_req = "NONE";
900 
901 	kfree(caps);
902 
903 done:
904 	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
905 		    speed, fec_req, fec, an_advertised, an, fc);
906 	ice_print_topo_conflict(vsi);
907 }
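/* Example output (illustrative values, composed from the format string
 * above with speed = "100 G" and RS-FEC both requested and negotiated):
 *
 *	NIC Link is up 100 Gbps Full Duplex, Requested FEC: RS-FEC,
 *	Negotiated FEC: RS-FEC, Autoneg Advertised: On, Autoneg
 *	Negotiated: True, Flow Control: Rx/Tx
 *
 * netdev_info() emits this as a single line prefixed with the driver and
 * netdev names.
 */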
908 
909 /**
910  * ice_vsi_link_event - update the VSI's netdev
911  * @vsi: the VSI on which the link event occurred
912  * @link_up: whether or not the VSI needs to be set up or down
913  */
914 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
915 {
916 	if (!vsi)
917 		return;
918 
919 	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
920 		return;
921 
922 	if (vsi->type == ICE_VSI_PF) {
923 		if (link_up == netif_carrier_ok(vsi->netdev))
924 			return;
925 
926 		if (link_up) {
927 			netif_carrier_on(vsi->netdev);
928 			netif_tx_wake_all_queues(vsi->netdev);
929 		} else {
930 			netif_carrier_off(vsi->netdev);
931 			netif_tx_stop_all_queues(vsi->netdev);
932 		}
933 	}
934 }
935 
936 /**
937  * ice_set_dflt_mib - send a default config MIB to the FW
938  * @pf: private PF struct
939  *
940  * This function sends a default configuration MIB to the FW.
941  *
942  * If this function errors out at any point, the driver is still able to
943  * function. The main impact is that LFC may not operate as expected.
944  * Therefore, an error in this function is logged with a debug message, and
945  * the driver continues on with the rebuild/re-enable.
946  */
947 static void ice_set_dflt_mib(struct ice_pf *pf)
948 {
949 	struct device *dev = ice_pf_to_dev(pf);
950 	u8 mib_type, *buf, *lldpmib = NULL;
951 	u16 len, typelen, offset = 0;
952 	struct ice_lldp_org_tlv *tlv;
953 	struct ice_hw *hw = &pf->hw;
954 	u32 ouisubtype;
955 
956 	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
957 	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
958 	if (!lldpmib) {
959 		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
960 			__func__);
961 		return;
962 	}
963 
964 	/* Add ETS CFG TLV */
965 	tlv = (struct ice_lldp_org_tlv *)lldpmib;
966 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
967 		   ICE_IEEE_ETS_TLV_LEN);
968 	tlv->typelen = htons(typelen);
969 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
970 		      ICE_IEEE_SUBTYPE_ETS_CFG);
971 	tlv->ouisubtype = htonl(ouisubtype);
972 
973 	buf = tlv->tlvinfo;
974 	buf[0] = 0;
975 
976 	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
977 	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
978 	 * Octets 13 - 20 are TSA values - leave as zeros
979 	 */
980 	buf[5] = 0x64;
981 	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
982 	offset += len + 2;
983 	tlv = (struct ice_lldp_org_tlv *)
984 		((char *)tlv + sizeof(tlv->typelen) + len);
985 
986 	/* Add ETS REC TLV */
987 	buf = tlv->tlvinfo;
988 	tlv->typelen = htons(typelen);
989 
990 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
991 		      ICE_IEEE_SUBTYPE_ETS_REC);
992 	tlv->ouisubtype = htonl(ouisubtype);
993 
994 	/* First octet of buf is reserved
995 	 * Octets 1 - 4 map UP to TC - all UPs map to zero
996 	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
997 	 * Octets 13 - 20 are TSA value - leave as zeros
998 	 */
999 	buf[5] = 0x64;
1000 	offset += len + 2;
1001 	tlv = (struct ice_lldp_org_tlv *)
1002 		((char *)tlv + sizeof(tlv->typelen) + len);
1003 
1004 	/* Add PFC CFG TLV */
1005 	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
1006 		   ICE_IEEE_PFC_TLV_LEN);
1007 	tlv->typelen = htons(typelen);
1008 
1009 	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
1010 		      ICE_IEEE_SUBTYPE_PFC_CFG);
1011 	tlv->ouisubtype = htonl(ouisubtype);
1012 
1013 	/* Octet 1 left as all zeros - PFC disabled */
1014 	buf[0] = 0x08;
1015 	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
1016 	offset += len + 2;
1017 
1018 	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
1019 		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
1020 
1021 	kfree(lldpmib);
1022 }
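/* Worked example (illustrative, assuming the standard LLDP org-specific TLV
 * encoding with the type in the top 7 bits, the length in the low 9 bits,
 * ICE_TLV_TYPE_ORG == 127, and ICE_IEEE_ETS_TLV_LEN == 25): for the ETS CFG
 * TLV above, typelen = (127 << 9) | 25 = 0xFE19, so
 * FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen) recovers len = 25, and each
 * "offset += len + 2" accounts for the 2-byte TLV header plus the 25-byte
 * payload when advancing through the MIB buffer.
 */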
1023 
1024 /**
1025  * ice_check_phy_fw_load - check if PHY FW load failed
1026  * @pf: pointer to PF struct
1027  * @link_cfg_err: bitmap from the link info structure
1028  *
1029  * check if external PHY FW load failed and print an error message if it did
1030  */
1031 static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
1032 {
1033 	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
1034 		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1035 		return;
1036 	}
1037 
1038 	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
1039 		return;
1040 
1041 	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
1042 		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
1043 		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1044 	}
1045 }
1046 
1047 /**
1048  * ice_check_module_power - check the module power level
1049  * @pf: pointer to PF struct
1050  * @link_cfg_err: bitmap from the link info structure
1051  *
1052  * check module power level returned by a previous call to aq_get_link_info
1053  * and print error messages if module power level is not supported
1054  */
1055 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
1056 {
1057 	/* if module power level is supported, clear the flag */
1058 	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
1059 			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
1060 		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1061 		return;
1062 	}
1063 
1064 	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
1065 	 * above block didn't clear this bit, there's nothing to do
1066 	 */
1067 	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
1068 		return;
1069 
1070 	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
1071 		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
1072 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1073 	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
1074 		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
1075 		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1076 	}
1077 }
1078 
1079 /**
1080  * ice_check_link_cfg_err - check if link configuration failed
1081  * @pf: pointer to the PF struct
1082  * @link_cfg_err: bitmap from the link info structure
1083  *
1084  * print if any link configuration failure happens due to the value in the
1085  * link_cfg_err parameter in the link info structure
1086  */
1087 static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
1088 {
1089 	ice_check_module_power(pf, link_cfg_err);
1090 	ice_check_phy_fw_load(pf, link_cfg_err);
1091 }
1092 
1093 /**
1094  * ice_link_event - process the link event
1095  * @pf: PF that the link event is associated with
1096  * @pi: port_info for the port that the link event is associated with
1097  * @link_up: true if the physical link is up and false if it is down
1098  * @link_speed: current link speed received from the link event
1099  *
1100  * Returns 0 on success and negative on failure
1101  */
1102 static int
1103 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
1104 	       u16 link_speed)
1105 {
1106 	struct device *dev = ice_pf_to_dev(pf);
1107 	struct ice_phy_info *phy_info;
1108 	struct ice_vsi *vsi;
1109 	u16 old_link_speed;
1110 	bool old_link;
1111 	int status;
1112 
1113 	phy_info = &pi->phy;
1114 	phy_info->link_info_old = phy_info->link_info;
1115 
1116 	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
1117 	old_link_speed = phy_info->link_info_old.link_speed;
1118 
1119 	/* update the link info structures and re-enable link events,
1120 	 * don't bail on failure due to other book keeping needed
1121 	 */
1122 	status = ice_update_link_info(pi);
1123 	if (status)
1124 		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
1125 			pi->lport, status,
1126 			libie_aq_str(pi->hw->adminq.sq_last_status));
1127 
1128 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
1129 
1130 	/* Check if the link state is up after updating link info, and treat
1131 	 * this event as an UP event since the link is actually UP now.
1132 	 */
1133 	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
1134 		link_up = true;
1135 
1136 	vsi = ice_get_main_vsi(pf);
1137 	if (!vsi || !vsi->port_info)
1138 		return -EINVAL;
1139 
1140 	/* turn off PHY if media was removed */
1141 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
1142 	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
1143 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
1144 		ice_set_link(vsi, false);
1145 	}
1146 
1147 	/* if the old link up/down and speed is the same as the new */
1148 	if (link_up == old_link && link_speed == old_link_speed)
1149 		return 0;
1150 
1151 	if (!link_up && old_link)
1152 		pf->link_down_events++;
1153 
1154 	ice_ptp_link_change(pf, link_up);
1155 
1156 	if (ice_is_dcb_active(pf)) {
1157 		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1158 			ice_dcb_rebuild(pf);
1159 	} else {
1160 		if (link_up)
1161 			ice_set_dflt_mib(pf);
1162 	}
1163 	ice_vsi_link_event(vsi, link_up);
1164 	ice_print_link_msg(vsi, link_up);
1165 
1166 	ice_vc_notify_link_state(pf);
1167 
1168 	return 0;
1169 }
1170 
1171 /**
1172  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
1173  * @pf: board private structure
1174  */
1175 static void ice_watchdog_subtask(struct ice_pf *pf)
1176 {
1177 	int i;
1178 
1179 	/* if interface is down do nothing */
1180 	if (test_bit(ICE_DOWN, pf->state) ||
1181 	    test_bit(ICE_CFG_BUSY, pf->state))
1182 		return;
1183 
1184 	/* make sure we don't do these things too often */
1185 	if (time_before(jiffies,
1186 			pf->serv_tmr_prev + pf->serv_tmr_period))
1187 		return;
1188 
1189 	pf->serv_tmr_prev = jiffies;
1190 
1191 	/* Update the stats for active netdevs so the network stack
1192 	 * can look at updated numbers whenever it cares to
1193 	 */
1194 	ice_update_pf_stats(pf);
1195 	ice_for_each_vsi(pf, i)
1196 		if (pf->vsi[i] && pf->vsi[i]->netdev)
1197 			ice_update_vsi_stats(pf->vsi[i]);
1198 }
1199 
1200 /**
1201  * ice_init_link_events - enable/initialize link events
1202  * @pi: pointer to the port_info instance
1203  *
1204  * Returns -EIO on failure, 0 on success
1205  */
1206 static int ice_init_link_events(struct ice_port_info *pi)
1207 {
1208 	u16 mask;
1209 
1210 	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1211 		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
1212 		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
1213 
1214 	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1215 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1216 			pi->lport);
1217 		return -EIO;
1218 	}
1219 
1220 	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1221 		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1222 			pi->lport);
1223 		return -EIO;
1224 	}
1225 
1226 	return 0;
1227 }
1228 
1229 /**
1230  * ice_handle_link_event - handle link event via ARQ
1231  * @pf: PF that the link event is associated with
1232  * @event: event structure containing link status info
1233  */
1234 static int
1235 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1236 {
1237 	struct ice_aqc_get_link_status_data *link_data;
1238 	struct ice_port_info *port_info;
1239 	int status;
1240 
1241 	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1242 	port_info = pf->hw.port_info;
1243 	if (!port_info)
1244 		return -EINVAL;
1245 
1246 	status = ice_link_event(pf, port_info,
1247 				!!(link_data->link_info & ICE_AQ_LINK_UP),
1248 				le16_to_cpu(link_data->link_speed));
1249 	if (status)
1250 		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1251 			status);
1252 
1253 	return status;
1254 }
1255 
1256 /**
1257  * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
1258  * @pf: pointer to the PF private structure
1259  * @task: intermediate helper storage and identifier for waiting
1260  * @opcode: the opcode to wait for
1261  *
1262  * Prepares to wait for a specific AdminQ completion event on the ARQ for
1263  * a given PF. Actual wait would be done by a call to ice_aq_wait_for_event().
1264  *
1265  * The calls are separated so the caller can register for the event before
1266  * sending the command, mitigating a race between registering and FW responding.
1267  *
1268  * To obtain only the descriptor contents, pass a task->event with a null
1269  * msg_buf. If the complete data buffer is desired, allocate the
1270  * task->event.msg_buf with enough space ahead of time.
1271  */
1272 void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
1273 			   u16 opcode)
1274 {
1275 	INIT_HLIST_NODE(&task->entry);
1276 	task->opcode = opcode;
1277 	task->state = ICE_AQ_TASK_WAITING;
1278 
1279 	spin_lock_bh(&pf->aq_wait_lock);
1280 	hlist_add_head(&task->entry, &pf->aq_wait_list);
1281 	spin_unlock_bh(&pf->aq_wait_lock);
1282 }
1283 
1284 /**
1285  * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1286  * @pf: pointer to the PF private structure
1287  * @task: ptr prepared by ice_aq_prep_for_event()
1288  * @timeout: how long to wait, in jiffies
1289  *
1290  * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1291  * current thread will be put to sleep until the specified event occurs or
1292  * until the given timeout is reached.
1293  *
1294  * Returns: zero on success, or a negative error code on failure.
1295  */
1296 int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
1297 			  unsigned long timeout)
1298 {
1299 	enum ice_aq_task_state *state = &task->state;
1300 	struct device *dev = ice_pf_to_dev(pf);
1301 	unsigned long start = jiffies;
1302 	long ret;
1303 	int err;
1304 
1305 	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
1306 					       *state != ICE_AQ_TASK_WAITING,
1307 					       timeout);
1308 	switch (*state) {
1309 	case ICE_AQ_TASK_NOT_PREPARED:
1310 		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
1311 		err = -EINVAL;
1312 		break;
1313 	case ICE_AQ_TASK_WAITING:
1314 		err = ret < 0 ? ret : -ETIMEDOUT;
1315 		break;
1316 	case ICE_AQ_TASK_CANCELED:
1317 		err = ret < 0 ? ret : -ECANCELED;
1318 		break;
1319 	case ICE_AQ_TASK_COMPLETE:
1320 		err = ret < 0 ? ret : 0;
1321 		break;
1322 	default:
1323 		WARN(1, "Unexpected AdminQ wait task state %u", *state);
1324 		err = -EINVAL;
1325 		break;
1326 	}
1327 
1328 	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1329 		jiffies_to_msecs(jiffies - start),
1330 		jiffies_to_msecs(timeout),
1331 		task->opcode);
1332 
1333 	spin_lock_bh(&pf->aq_wait_lock);
1334 	hlist_del(&task->entry);
1335 	spin_unlock_bh(&pf->aq_wait_lock);
1336 
1337 	return err;
1338 }
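/* Usage sketch (illustrative; the opcode and timeout are placeholders, and
 * the AdminQ send step is elided):
 *
 *	struct ice_aq_task task = {};
 *	int err;
 *
 *	ice_aq_prep_for_event(pf, &task, ice_aqc_opc_get_link_status);
 *	err = <send the matching AdminQ command here>;
 *	if (!err)
 *		err = ice_aq_wait_for_event(pf, &task, HZ);
 *
 * Registering before sending closes the race described above, where FW
 * could respond before the caller started waiting.
 */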
1339 
1340 /**
1341  * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1342  * @pf: pointer to the PF private structure
1343  * @opcode: the opcode of the event
1344  * @event: the event to check
1345  *
1346  * Loops over the current list of pending threads waiting for an AdminQ event.
1347  * For each matching task, copy the contents of the event into the task
1348  * structure and wake up the thread.
1349  *
1350  * If multiple threads wait for the same opcode, they will all be woken up.
1351  *
1352  * Note that event->msg_buf will only be duplicated if the event has a buffer
1353  * with enough space already allocated. Otherwise, only the descriptor and
1354  * message length will be copied.
1355  *
1356  * Returns nothing; completion is signaled to waiters via task->state.
1357  */
1358 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1359 				struct ice_rq_event_info *event)
1360 {
1361 	struct ice_rq_event_info *task_ev;
1362 	struct ice_aq_task *task;
1363 	bool found = false;
1364 
1365 	spin_lock_bh(&pf->aq_wait_lock);
1366 	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1367 		if (task->state != ICE_AQ_TASK_WAITING)
1368 			continue;
1369 		if (task->opcode != opcode)
1370 			continue;
1371 
1372 		task_ev = &task->event;
1373 		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
1374 		task_ev->msg_len = event->msg_len;
1375 
1376 		/* Only copy the data buffer if a destination was set */
1377 		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
1378 			memcpy(task_ev->msg_buf, event->msg_buf,
1379 			       event->buf_len);
1380 			task_ev->buf_len = event->buf_len;
1381 		}
1382 
1383 		task->state = ICE_AQ_TASK_COMPLETE;
1384 		found = true;
1385 	}
1386 	spin_unlock_bh(&pf->aq_wait_lock);
1387 
1388 	if (found)
1389 		wake_up(&pf->aq_wait_queue);
1390 }
1391 
1392 /**
1393  * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1394  * @pf: the PF private structure
1395  *
1396  * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1397  * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1398  */
1399 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1400 {
1401 	struct ice_aq_task *task;
1402 
1403 	spin_lock_bh(&pf->aq_wait_lock);
1404 	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1405 		task->state = ICE_AQ_TASK_CANCELED;
1406 	spin_unlock_bh(&pf->aq_wait_lock);
1407 
1408 	wake_up(&pf->aq_wait_queue);
1409 }
1410 
1411 #define ICE_MBX_OVERFLOW_WATERMARK 64
1412 
1413 /**
1414  * __ice_clean_ctrlq - helper function to clean controlq rings
1415  * @pf: ptr to struct ice_pf
1416  * @q_type: specific Control queue type
1417  */
1418 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1419 {
1420 	struct device *dev = ice_pf_to_dev(pf);
1421 	struct ice_rq_event_info event;
1422 	struct ice_hw *hw = &pf->hw;
1423 	struct ice_ctl_q_info *cq;
1424 	u16 pending, i = 0;
1425 	const char *qtype;
1426 	u32 oldval, val;
1427 
1428 	/* Do not clean control queue if/when PF reset fails */
1429 	if (test_bit(ICE_RESET_FAILED, pf->state))
1430 		return 0;
1431 
1432 	switch (q_type) {
1433 	case ICE_CTL_Q_ADMIN:
1434 		cq = &hw->adminq;
1435 		qtype = "Admin";
1436 		break;
1437 	case ICE_CTL_Q_SB:
1438 		cq = &hw->sbq;
1439 		qtype = "Sideband";
1440 		break;
1441 	case ICE_CTL_Q_MAILBOX:
1442 		cq = &hw->mailboxq;
1443 		qtype = "Mailbox";
1444 		/* we are going to try to detect a malicious VF, so set the
1445 		 * state to begin detection
1446 		 */
1447 		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1448 		break;
1449 	default:
1450 		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1451 		return 0;
1452 	}
1453 
1454 	/* check for error indications - PF_xx_AxQLEN register layout for
1455 	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1456 	 */
1457 	val = rd32(hw, cq->rq.len);
1458 	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1459 		   PF_FW_ARQLEN_ARQCRIT_M)) {
1460 		oldval = val;
1461 		if (val & PF_FW_ARQLEN_ARQVFE_M)
1462 			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1463 				qtype);
1464 		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1465 			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1466 				qtype);
1467 		}
1468 		if (val & PF_FW_ARQLEN_ARQCRIT_M)
1469 			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1470 				qtype);
1471 		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1472 			 PF_FW_ARQLEN_ARQCRIT_M);
1473 		if (oldval != val)
1474 			wr32(hw, cq->rq.len, val);
1475 	}
1476 
1477 	val = rd32(hw, cq->sq.len);
1478 	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1479 		   PF_FW_ATQLEN_ATQCRIT_M)) {
1480 		oldval = val;
1481 		if (val & PF_FW_ATQLEN_ATQVFE_M)
1482 			dev_dbg(dev, "%s Send Queue VF Error detected\n",
1483 				qtype);
1484 		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1485 			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1486 				qtype);
1487 		}
1488 		if (val & PF_FW_ATQLEN_ATQCRIT_M)
1489 			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1490 				qtype);
1491 		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1492 			 PF_FW_ATQLEN_ATQCRIT_M);
1493 		if (oldval != val)
1494 			wr32(hw, cq->sq.len, val);
1495 	}
1496 
1497 	event.buf_len = cq->rq_buf_size;
1498 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1499 	if (!event.msg_buf)
1500 		return 0;
1501 
1502 	do {
1503 		struct ice_mbx_data data = {};
1504 		u16 opcode;
1505 		int ret;
1506 
1507 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1508 		if (ret == -EALREADY)
1509 			break;
1510 		if (ret) {
1511 			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
1512 				ret);
1513 			break;
1514 		}
1515 
1516 		opcode = le16_to_cpu(event.desc.opcode);
1517 
1518 		/* Notify any thread that might be waiting for this event */
1519 		ice_aq_check_events(pf, opcode, &event);
1520 
1521 		switch (opcode) {
1522 		case ice_aqc_opc_get_link_status:
1523 			if (ice_handle_link_event(pf, &event))
1524 				dev_err(dev, "Could not handle link event\n");
1525 			break;
1526 		case ice_aqc_opc_event_lan_overflow:
1527 			ice_vf_lan_overflow_event(pf, &event);
1528 			break;
1529 		case ice_mbx_opc_send_msg_to_pf:
1530 			if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) {
1531 				ice_vc_process_vf_msg(pf, &event, NULL);
1532 				ice_mbx_vf_dec_trig_e830(hw, &event);
1533 			} else {
1534 				u16 val = hw->mailboxq.num_rq_entries;
1535 
1536 				data.max_num_msgs_mbx = val;
1537 				val = ICE_MBX_OVERFLOW_WATERMARK;
1538 				data.async_watermark_val = val;
1539 				data.num_msg_proc = i;
1540 				data.num_pending_arq = pending;
1541 
1542 				ice_vc_process_vf_msg(pf, &event, &data);
1543 			}
1544 			break;
1545 		case ice_aqc_opc_fw_logs_event:
1546 			libie_get_fwlog_data(&hw->fwlog, event.msg_buf,
1547 					     le16_to_cpu(event.desc.datalen));
1548 			break;
1549 		case ice_aqc_opc_lldp_set_mib_change:
1550 			ice_dcb_process_lldp_set_mib_change(pf, &event);
1551 			break;
1552 		case ice_aqc_opc_get_health_status:
1553 			ice_process_health_status_event(pf, &event);
1554 			break;
1555 		default:
1556 			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1557 				qtype, opcode);
1558 			break;
1559 		}
1560 	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1561 
1562 	kfree(event.msg_buf);
1563 
1564 	return pending && (i == ICE_DFLT_IRQ_WORK);
1565 }
1566 
1567 /**
1568  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1569  * @hw: pointer to hardware info
1570  * @cq: control queue information
1571  *
1572  * returns true if there are pending messages in a queue, false if there aren't
1573  */
1574 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1575 {
1576 	u16 ntu;
1577 
1578 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1579 	return cq->rq.next_to_clean != ntu;
1580 }
1581 
1582 /**
1583  * ice_clean_adminq_subtask - clean the AdminQ rings
1584  * @pf: board private structure
1585  */
1586 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1587 {
1588 	struct ice_hw *hw = &pf->hw;
1589 
1590 	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1591 		return;
1592 
1593 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1594 		return;
1595 
1596 	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1597 
1598 	/* There might be a situation where new messages arrive to a control
1599 	 * queue between processing the last message and clearing the
1600 	 * EVENT_PENDING bit. So before exiting, check queue head again (using
1601 	 * ice_ctrlq_pending) and process new messages if any.
1602 	 */
1603 	if (ice_ctrlq_pending(hw, &hw->adminq))
1604 		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1605 
1606 	ice_flush(hw);
1607 }
1608 
1609 /**
1610  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1611  * @pf: board private structure
1612  */
1613 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1614 {
1615 	struct ice_hw *hw = &pf->hw;
1616 
1617 	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1618 		return;
1619 
1620 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1621 		return;
1622 
1623 	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1624 
1625 	if (ice_ctrlq_pending(hw, &hw->mailboxq))
1626 		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1627 
1628 	ice_flush(hw);
1629 }
1630 
1631 /**
1632  * ice_clean_sbq_subtask - clean the Sideband Queue rings
1633  * @pf: board private structure
1634  */
1635 static void ice_clean_sbq_subtask(struct ice_pf *pf)
1636 {
1637 	struct ice_hw *hw = &pf->hw;
1638 
1639 	/* if mac_type is not generic, sideband is not supported
1640 	 * and there's nothing to do here
1641 	 */
1642 	if (!ice_is_generic_mac(hw)) {
1643 		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1644 		return;
1645 	}
1646 
1647 	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1648 		return;
1649 
1650 	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1651 		return;
1652 
1653 	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1654 
1655 	if (ice_ctrlq_pending(hw, &hw->sbq))
1656 		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1657 
1658 	ice_flush(hw);
1659 }
1660 
1661 /**
1662  * ice_service_task_schedule - schedule the service task to wake up
1663  * @pf: board private structure
1664  *
1665  * If not already scheduled, this puts the task into the work queue.
1666  */
1667 void ice_service_task_schedule(struct ice_pf *pf)
1668 {
1669 	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1670 	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1671 	    !test_bit(ICE_NEEDS_RESTART, pf->state))
1672 		queue_work(ice_wq, &pf->serv_task);
1673 }
1674 
1675 /**
1676  * ice_service_task_complete - finish up the service task
1677  * @pf: board private structure
1678  */
1679 static void ice_service_task_complete(struct ice_pf *pf)
1680 {
1681 	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1682 
1683 	/* force memory (pf->state) to sync before next service task */
1684 	smp_mb__before_atomic();
1685 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1686 }
1687 
1688 /**
1689  * ice_service_task_stop - stop the service task and cancel pending work
1690  * @pf: board private structure
1691  *
1692  * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1693  * 1 otherwise.
1694  */
1695 static int ice_service_task_stop(struct ice_pf *pf)
1696 {
1697 	int ret;
1698 
1699 	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1700 
1701 	if (pf->serv_tmr.function)
1702 		timer_delete_sync(&pf->serv_tmr);
1703 	if (pf->serv_task.func)
1704 		cancel_work_sync(&pf->serv_task);
1705 
1706 	clear_bit(ICE_SERVICE_SCHED, pf->state);
1707 	return ret;
1708 }
1709 
1710 /**
1711  * ice_service_task_restart - restart the service task and schedule pending work
1712  * @pf: board private structure
1713  *
1714  * This function is needed for the suspend and resume flows (e.g. the WoL scenario).
1715  */
1716 static void ice_service_task_restart(struct ice_pf *pf)
1717 {
1718 	clear_bit(ICE_SERVICE_DIS, pf->state);
1719 	ice_service_task_schedule(pf);
1720 }
1721 
1722 /**
1723  * ice_service_timer - timer callback to schedule service task
1724  * @t: pointer to timer_list
1725  */
1726 static void ice_service_timer(struct timer_list *t)
1727 {
1728 	struct ice_pf *pf = timer_container_of(pf, t, serv_tmr);
1729 
1730 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1731 	ice_service_task_schedule(pf);
1732 }
1733 
1734 /**
1735  * ice_mdd_maybe_reset_vf - reset VF after MDD event
1736  * @pf: pointer to the PF structure
1737  * @vf: pointer to the VF structure
1738  * @reset_vf_tx: whether Tx MDD has occurred
1739  * @reset_vf_rx: whether Rx MDD has occurred
1740  *
1741  * Since the queue can get stuck on VF MDD events, the PF can be configured to
1742  * automatically reset the VF by enabling the private ethtool flag
1743  * mdd-auto-reset-vf.
1744  */
1745 static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf,
1746 				   bool reset_vf_tx, bool reset_vf_rx)
1747 {
1748 	struct device *dev = ice_pf_to_dev(pf);
1749 
1750 	if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
1751 		return;
1752 
1753 	/* VF MDD event counters will be cleared by reset, so print the event
1754 	 * prior to reset.
1755 	 */
1756 	if (reset_vf_tx)
1757 		ice_print_vf_tx_mdd_event(vf);
1758 
1759 	if (reset_vf_rx)
1760 		ice_print_vf_rx_mdd_event(vf);
1761 
1762 	dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n",
1763 		 pf->hw.pf_id, vf->vf_id);
1764 	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
1765 }
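
/* Usage sketch (illustrative): the auto-reset behavior checked above is
 * opt-in from user space via the ethtool private flag, e.g.:
 *
 *	ethtool --set-priv-flags enp1s0f0 mdd-auto-reset-vf on
 *
 * (the interface name is hypothetical). Without the flag, VF MDD events are
 * only counted and logged by ice_handle_mdd_event() below.
 */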
1766 
1767 /**
1768  * ice_handle_mdd_event - handle malicious driver detect event
1769  * @pf: pointer to the PF structure
1770  *
1771  * Called from service task. OICR interrupt handler indicates MDD event.
1772  * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1773  * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1774  * disable the queue, the PF can be configured to reset the VF using ethtool
1775  * private flag mdd-auto-reset-vf.
1776  */
1777 static void ice_handle_mdd_event(struct ice_pf *pf)
1778 {
1779 	struct device *dev = ice_pf_to_dev(pf);
1780 	struct ice_hw *hw = &pf->hw;
1781 	struct ice_vf *vf;
1782 	unsigned int bkt;
1783 	u32 reg;
1784 
1785 	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1786 		/* Since the VF MDD event logging is rate limited, check if
1787 		 * there are pending MDD events.
1788 		 */
1789 		ice_print_vfs_mdd_events(pf);
1790 		return;
1791 	}
1792 
1793 	/* find what triggered an MDD event */
1794 	reg = rd32(hw, GL_MDET_TX_PQM);
1795 	if (reg & GL_MDET_TX_PQM_VALID_M) {
1796 		u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
1797 		u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
1798 		u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
1799 		u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);
1800 
1801 		if (netif_msg_tx_err(pf))
1802 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1803 				 event, queue, pf_num, vf_num);
1804 		ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num,
1805 				     event, queue);
1806 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1807 	}
1808 
1809 	reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
1810 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1811 		u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg);
1812 		u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg);
1813 		u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg);
1814 		u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg);
1815 
1816 		if (netif_msg_tx_err(pf))
1817 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1818 				 event, queue, pf_num, vf_num);
1819 		ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num,
1820 				     event, queue);
1821 		wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
1822 	}
1823 
1824 	reg = rd32(hw, GL_MDET_RX);
1825 	if (reg & GL_MDET_RX_VALID_M) {
1826 		u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg);
1827 		u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg);
1828 		u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg);
1829 		u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg);
1830 
1831 		if (netif_msg_rx_err(pf))
1832 			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1833 				 event, queue, pf_num, vf_num);
1834 		ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event,
1835 				     queue);
1836 		wr32(hw, GL_MDET_RX, 0xffffffff);
1837 	}
1838 
1839 	/* check to see if this PF caused an MDD event */
1840 	reg = rd32(hw, PF_MDET_TX_PQM);
1841 	if (reg & PF_MDET_TX_PQM_VALID_M) {
1842 		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1843 		if (netif_msg_tx_err(pf))
1844 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1845 	}
1846 
1847 	reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw));
1848 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1849 		wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff);
1850 		if (netif_msg_tx_err(pf))
1851 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1852 	}
1853 
1854 	reg = rd32(hw, PF_MDET_RX);
1855 	if (reg & PF_MDET_RX_VALID_M) {
1856 		wr32(hw, PF_MDET_RX, 0xFFFF);
1857 		if (netif_msg_rx_err(pf))
1858 			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1859 	}
1860 
1861 	/* Check to see if one of the VFs caused an MDD event, and then
1862 	 * increment counters and set print pending
1863 	 */
1864 	mutex_lock(&pf->vfs.table_lock);
1865 	ice_for_each_vf(pf, bkt, vf) {
1866 		bool reset_vf_tx = false, reset_vf_rx = false;
1867 
1868 		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
1869 		if (reg & VP_MDET_TX_PQM_VALID_M) {
1870 			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
1871 			vf->mdd_tx_events.count++;
1872 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1873 			if (netif_msg_tx_err(pf))
1874 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1875 					 vf->vf_id);
1876 
1877 			reset_vf_tx = true;
1878 		}
1879 
1880 		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
1881 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1882 			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
1883 			vf->mdd_tx_events.count++;
1884 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1885 			if (netif_msg_tx_err(pf))
1886 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1887 					 vf->vf_id);
1888 
1889 			reset_vf_tx = true;
1890 		}
1891 
1892 		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
1893 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1894 			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
1895 			vf->mdd_tx_events.count++;
1896 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1897 			if (netif_msg_tx_err(pf))
1898 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1899 					 vf->vf_id);
1900 
1901 			reset_vf_tx = true;
1902 		}
1903 
1904 		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1905 		if (reg & VP_MDET_RX_VALID_M) {
1906 			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1907 			vf->mdd_rx_events.count++;
1908 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1909 			if (netif_msg_rx_err(pf))
1910 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1911 					 vf->vf_id);
1912 
1913 			reset_vf_rx = true;
1914 		}
1915 
1916 		if (reset_vf_tx || reset_vf_rx)
1917 			ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx,
1918 					       reset_vf_rx);
1919 	}
1920 	mutex_unlock(&pf->vfs.table_lock);
1921 
1922 	ice_print_vfs_mdd_events(pf);
1923 }
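
/* FIELD_GET() usage above, in miniature (the mask here is hypothetical; the
 * real GL_MDET_* masks live in the register headers):
 *
 *	#define EXAMPLE_QNUM_M	GENMASK(7, 4)
 *
 *	u16 queue = FIELD_GET(EXAMPLE_QNUM_M, 0xa5);
 *
 * yields (0xa5 & 0xf0) >> 4 == 0xa: the macro masks the register value and
 * shifts the field down to bit 0, which is why the decode blocks above need
 * no explicit shifts.
 */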
1924 
1925 /**
1926  * ice_force_phys_link_state - Force the physical link state
1927  * @vsi: VSI to force the physical link state to up/down
1928  * @link_up: true/false indicates to set the physical link to up/down
1929  *
1930  * Force the physical link state by getting the current PHY capabilities from
1931  * hardware and setting the PHY config based on the determined capabilities. If
1932  * hardware and setting the PHY config based on the determined capabilities.
1933  * If the link changes, a link event will be triggered because both the Enable
1934  * Automatic Link Update and LESM Enable bits are set when the PHY is configured.
1935  * Returns 0 on success, negative on failure
1936  */
1937 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1938 {
1939 	struct ice_aqc_get_phy_caps_data *pcaps;
1940 	struct ice_aqc_set_phy_cfg_data *cfg;
1941 	struct ice_port_info *pi;
1942 	struct device *dev;
1943 	int retcode;
1944 
1945 	if (!vsi || !vsi->port_info || !vsi->back)
1946 		return -EINVAL;
1947 	if (vsi->type != ICE_VSI_PF)
1948 		return 0;
1949 
1950 	dev = ice_pf_to_dev(vsi->back);
1951 
1952 	pi = vsi->port_info;
1953 
1954 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1955 	if (!pcaps)
1956 		return -ENOMEM;
1957 
1958 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1959 				      NULL);
1960 	if (retcode) {
1961 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1962 			vsi->vsi_num, retcode);
1963 		retcode = -EIO;
1964 		goto out;
1965 	}
1966 
1967 	/* No change in link */
1968 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1969 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1970 		goto out;
1971 
1972 	/* Use the current user PHY configuration. The current user PHY
1973 	 * configuration is initialized during probe from PHY capabilities
1974 	 * software mode, and updated on set PHY configuration.
1975 	 */
1976 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1977 	if (!cfg) {
1978 		retcode = -ENOMEM;
1979 		goto out;
1980 	}
1981 
1982 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1983 	if (link_up)
1984 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1985 	else
1986 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1987 
1988 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1989 	if (retcode) {
1990 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1991 			vsi->vsi_num, retcode);
1992 		retcode = -EIO;
1993 	}
1994 
1995 	kfree(cfg);
1996 out:
1997 	kfree(pcaps);
1998 	return retcode;
1999 }
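
/* Usage sketch (illustrative): ice_configure_phy() below uses this helper
 * with link_up = true when link-down-on-close is enabled, and a teardown
 * path might do the inverse:
 *
 *	err = ice_force_phys_link_state(vsi, false);
 *	if (err)
 *		netdev_err(vsi->netdev, "Failed to set link down, error %d\n", err);
 */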
2000 
2001 /**
2002  * ice_init_nvm_phy_type - Initialize the NVM PHY type
2003  * @pi: port info structure
2004  *
2005  * Initialize nvm_phy_type_[low|high] for link lenient mode support
2006  */
2007 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
2008 {
2009 	struct ice_aqc_get_phy_caps_data *pcaps;
2010 	struct ice_pf *pf = pi->hw->back;
2011 	int err;
2012 
2013 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2014 	if (!pcaps)
2015 		return -ENOMEM;
2016 
2017 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
2018 				  pcaps, NULL);
2019 
2020 	if (err) {
2021 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2022 		goto out;
2023 	}
2024 
2025 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
2026 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
2027 
2028 out:
2029 	kfree(pcaps);
2030 	return err;
2031 }
2032 
2033 /**
2034  * ice_init_link_dflt_override - Initialize link default override
2035  * @pi: port info structure
2036  *
2037  * Initialize link default override and PHY total port shutdown during probe
2038  */
2039 static void ice_init_link_dflt_override(struct ice_port_info *pi)
2040 {
2041 	struct ice_link_default_override_tlv *ldo;
2042 	struct ice_pf *pf = pi->hw->back;
2043 
2044 	ldo = &pf->link_dflt_override;
2045 	if (ice_get_link_default_override(ldo, pi))
2046 		return;
2047 
2048 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
2049 		return;
2050 
2051 	/* Enable Total Port Shutdown (override/replace link-down-on-close
2052 	 * ethtool private flag) for ports with Port Disable bit set.
2053 	 */
2054 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
2055 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
2056 }
2057 
2058 /**
2059  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2060  * @pi: port info structure
2061  *
2062  * If default override is enabled, initialize the user PHY cfg speed and FEC
2063  * settings using the default override mask from the NVM.
2064  *
2065  * The PHY should only be configured with the default override settings the
2066  * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2067  * is used to indicate that the user PHY cfg default override is initialized
2068  * and the PHY has not been configured with the default override settings. The
2069  * state is set here, and cleared in ice_configure_phy the first time the PHY is
2070  * configured.
2071  *
2072  * This function should be called only if the FW doesn't support default
2073  * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2074  */
2075 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2076 {
2077 	struct ice_link_default_override_tlv *ldo;
2078 	struct ice_aqc_set_phy_cfg_data *cfg;
2079 	struct ice_phy_info *phy = &pi->phy;
2080 	struct ice_pf *pf = pi->hw->back;
2081 
2082 	ldo = &pf->link_dflt_override;
2083 
2084 	/* If link default override is enabled, use it to mask NVM PHY capabilities
2085 	 * for speed and FEC default configuration.
2086 	 */
2087 	cfg = &phy->curr_user_phy_cfg;
2088 
2089 	if (ldo->phy_type_low || ldo->phy_type_high) {
2090 		cfg->phy_type_low = pf->nvm_phy_type_lo &
2091 				    cpu_to_le64(ldo->phy_type_low);
2092 		cfg->phy_type_high = pf->nvm_phy_type_hi &
2093 				     cpu_to_le64(ldo->phy_type_high);
2094 	}
2095 	cfg->link_fec_opt = ldo->fec_options;
2096 	phy->curr_user_fec_req = ICE_FEC_AUTO;
2097 
2098 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2099 }
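
/* Worked example of the masking above (hypothetical values): with
 * pf->nvm_phy_type_lo = 0x0c (two PHY types advertised by the NVM) and an
 * override TLV of ldo->phy_type_low = 0x04, the resulting cfg->phy_type_low
 * is 0x0c & 0x04 = 0x04: the override can narrow, but never extend, what
 * the NVM advertises.
 */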
2100 
2101 /**
2102  * ice_init_phy_user_cfg - Initialize the PHY user configuration
2103  * @pi: port info structure
2104  *
2105  * Initialize the current user PHY configuration, speed, FEC, and FC requested
2106  * mode to default. The PHY defaults are from get PHY capabilities topology
2107  * mode to default. The PHY defaults come from the get PHY capabilities topology
2108  * with media, so this should be called when media first becomes available. An
2109  * error is returned if called while media is not available. The PHY
2110  * initialization completed state is set here.
2111  * These configurations are used when setting PHY
2112  * These values are used when setting the PHY configuration; the user PHY
2113  * configuration is refreshed on every set PHY configuration call.
2114  * Returns 0 on success, negative on failure.
2115 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2116 {
2117 	struct ice_aqc_get_phy_caps_data *pcaps;
2118 	struct ice_phy_info *phy = &pi->phy;
2119 	struct ice_pf *pf = pi->hw->back;
2120 	int err;
2121 
2122 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2123 		return -EIO;
2124 
2125 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2126 	if (!pcaps)
2127 		return -ENOMEM;
2128 
2129 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2130 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2131 					  pcaps, NULL);
2132 	else
2133 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2134 					  pcaps, NULL);
2135 	if (err) {
2136 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2137 		goto err_out;
2138 	}
2139 
2140 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2141 
2142 	/* check if lenient mode is supported and enabled */
2143 	if (ice_fw_supports_link_override(pi->hw) &&
2144 	    !(pcaps->module_compliance_enforcement &
2145 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2146 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2147 
2148 		/* if the FW supports default PHY configuration mode, then the driver
2149 		 * does not have to apply link override settings. If not,
2150 		 * initialize user PHY configuration with link override values
2151 		 */
2152 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2153 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2154 			ice_init_phy_cfg_dflt_override(pi);
2155 			goto out;
2156 		}
2157 	}
2158 
2159 	/* if link default override is not enabled, set user flow control and
2160 	 * FEC settings based on what get_phy_caps returned
2161 	 */
2162 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2163 						      pcaps->link_fec_options);
2164 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2165 
2166 out:
2167 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2168 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2169 err_out:
2170 	kfree(pcaps);
2171 	return err;
2172 }
2173 
2174 /**
2175  * ice_configure_phy - configure PHY
2176  * @vsi: VSI of PHY
2177  *
2178  * Set the PHY configuration. If the current PHY configuration is the same as
2179  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
2180  * configure based on the PHY capabilities reported for the topology with media.
2181  */
2182 static int ice_configure_phy(struct ice_vsi *vsi)
2183 {
2184 	struct device *dev = ice_pf_to_dev(vsi->back);
2185 	struct ice_port_info *pi = vsi->port_info;
2186 	struct ice_aqc_get_phy_caps_data *pcaps;
2187 	struct ice_aqc_set_phy_cfg_data *cfg;
2188 	struct ice_phy_info *phy = &pi->phy;
2189 	struct ice_pf *pf = vsi->back;
2190 	int err;
2191 
2192 	/* Ensure we have media as we cannot configure a medialess port */
2193 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2194 		return -ENOMEDIUM;
2195 
2196 	ice_print_topo_conflict(vsi);
2197 
2198 	if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2199 	    phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2200 		return -EPERM;
2201 
2202 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2203 		return ice_force_phys_link_state(vsi, true);
2204 
2205 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2206 	if (!pcaps)
2207 		return -ENOMEM;
2208 
2209 	/* Get current PHY config */
2210 	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2211 				  NULL);
2212 	if (err) {
2213 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2214 			vsi->vsi_num, err);
2215 		goto done;
2216 	}
2217 
2218 	/* If PHY enable link is configured and configuration has not changed,
2219 	 * there's nothing to do
2220 	 */
2221 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2222 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2223 		goto done;
2224 
2225 	/* Use PHY topology as baseline for configuration */
2226 	memset(pcaps, 0, sizeof(*pcaps));
2227 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2228 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2229 					  pcaps, NULL);
2230 	else
2231 		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2232 					  pcaps, NULL);
2233 	if (err) {
2234 		dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2235 			vsi->vsi_num, err);
2236 		goto done;
2237 	}
2238 
2239 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2240 	if (!cfg) {
2241 		err = -ENOMEM;
2242 		goto done;
2243 	}
2244 
2245 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2246 
2247 	/* Speed - If default override pending, use curr_user_phy_cfg set in
2248 	 * ice_init_phy_cfg_dflt_override().
2249 	 */
2250 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2251 			       vsi->back->state)) {
2252 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2253 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2254 	} else {
2255 		u64 phy_low = 0, phy_high = 0;
2256 
2257 		ice_update_phy_type(&phy_low, &phy_high,
2258 				    pi->phy.curr_user_speed_req);
2259 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2260 		cfg->phy_type_high = pcaps->phy_type_high &
2261 				     cpu_to_le64(phy_high);
2262 	}
2263 
2264 	/* Can't provide what was requested; use PHY capabilities */
2265 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2266 		cfg->phy_type_low = pcaps->phy_type_low;
2267 		cfg->phy_type_high = pcaps->phy_type_high;
2268 	}
2269 
2270 	/* FEC */
2271 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2272 
2273 	/* Can't provide what was requested; use PHY capabilities */
2274 	if (cfg->link_fec_opt !=
2275 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2276 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2277 		cfg->link_fec_opt = pcaps->link_fec_options;
2278 	}
2279 
2280 	/* Flow Control - always supported; no need to check against
2281 	 * capabilities
2282 	 */
2283 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2284 
2285 	/* Enable link and link update */
2286 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2287 
2288 	err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2289 	if (err)
2290 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2291 			vsi->vsi_num, err);
2292 
2293 	kfree(cfg);
2294 done:
2295 	kfree(pcaps);
2296 	return err;
2297 }
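
/* The FEC fallback in ice_configure_phy() is a subset test. Worked example
 * (hypothetical option bits): if cfg->link_fec_opt requests 0x06 but the
 * PHY reports pcaps->link_fec_options = 0x02, then 0x06 != (0x06 & 0x02),
 * so the requested set is not fully supported and the configuration falls
 * back to the PHY's own FEC capabilities.
 */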
2298 
2299 /**
2300  * ice_check_media_subtask - Check for media
2301  * @pf: pointer to PF struct
2302  *
2303  * If media is available, then initialize the PHY user configuration if it has
2304  * not been initialized yet, and configure the PHY if the interface is up.
2305  */
2306 static void ice_check_media_subtask(struct ice_pf *pf)
2307 {
2308 	struct ice_port_info *pi;
2309 	struct ice_vsi *vsi;
2310 	int err;
2311 
2312 	/* No need to check for media if it's already present */
2313 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2314 		return;
2315 
2316 	vsi = ice_get_main_vsi(pf);
2317 	if (!vsi)
2318 		return;
2319 
2320 	/* Refresh link info and check if media is present */
2321 	pi = vsi->port_info;
2322 	err = ice_update_link_info(pi);
2323 	if (err)
2324 		return;
2325 
2326 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2327 
2328 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2329 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2330 			ice_init_phy_user_cfg(pi);
2331 
2332 		/* PHY settings are reset on media insertion, reconfigure
2333 		 * PHY to preserve settings.
2334 		 */
2335 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2336 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2337 			return;
2338 
2339 		err = ice_configure_phy(vsi);
2340 		if (!err)
2341 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2342 
2343 		/* A Link Status Event will be generated; the event handler
2344 		 * will complete bringing the interface up
2345 		 */
2346 	}
2347 }
2348 
2349 static void ice_service_task_recovery_mode(struct work_struct *work)
2350 {
2351 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2352 
2353 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
2354 	ice_clean_adminq_subtask(pf);
2355 
2356 	ice_service_task_complete(pf);
2357 
2358 	mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100));
2359 }
2360 
2361 /**
2362  * ice_service_task - manage and run subtasks
2363  * @work: pointer to work_struct contained by the PF struct
2364  */
2365 static void ice_service_task(struct work_struct *work)
2366 {
2367 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2368 	unsigned long start_time = jiffies;
2369 
2370 	if (pf->health_reporters.tx_hang_buf.tx_ring) {
2371 		ice_report_tx_hang(pf);
2372 		pf->health_reporters.tx_hang_buf.tx_ring = NULL;
2373 	}
2374 
2375 	ice_reset_subtask(pf);
2376 
2377 	/* bail if a reset/recovery cycle is pending or rebuild failed */
2378 	if (ice_is_reset_in_progress(pf->state) ||
2379 	    test_bit(ICE_SUSPENDED, pf->state) ||
2380 	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2381 		ice_service_task_complete(pf);
2382 		return;
2383 	}
2384 
2385 	if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2386 		struct iidc_rdma_event *event;
2387 
2388 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2389 		if (event) {
2390 			set_bit(IIDC_RDMA_EVENT_CRIT_ERR, event->type);
2391 			/* report the entire OICR value to AUX driver */
2392 			swap(event->reg, pf->oicr_err_reg);
2393 			ice_send_event_to_aux(pf, event);
2394 			kfree(event);
2395 		}
2396 	}
2397 
2398 	/* Unplug the aux dev per request; if an unplug request came in
2399 	 * while processing a plug request, this will handle it.
2400 	 */
2401 	if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2402 		ice_unplug_aux_dev(pf);
2403 
2404 	/* Plug aux device per request */
2405 	if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2406 		ice_plug_aux_dev(pf);
2407 
2408 	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2409 		struct iidc_rdma_event *event;
2410 
2411 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2412 		if (event) {
2413 			set_bit(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE, event->type);
2414 			ice_send_event_to_aux(pf, event);
2415 			kfree(event);
2416 		}
2417 	}
2418 
2419 	ice_clean_adminq_subtask(pf);
2420 	ice_check_media_subtask(pf);
2421 	ice_check_for_hang_subtask(pf);
2422 	ice_sync_fltr_subtask(pf);
2423 	ice_handle_mdd_event(pf);
2424 	ice_watchdog_subtask(pf);
2425 
2426 	if (ice_is_safe_mode(pf)) {
2427 		ice_service_task_complete(pf);
2428 		return;
2429 	}
2430 
2431 	ice_process_vflr_event(pf);
2432 	ice_clean_mailboxq_subtask(pf);
2433 	ice_clean_sbq_subtask(pf);
2434 	ice_sync_arfs_fltrs(pf);
2435 	ice_flush_fdir_ctx(pf);
2436 
2437 	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2438 	ice_service_task_complete(pf);
2439 
2440 	/* If the tasks have taken longer than one service timer period
2441 	 * or there is more work to be done, reset the service timer to
2442 	 * schedule the service task now.
2443 	 */
2444 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2445 	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2446 	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2447 	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2448 	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2449 	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2450 	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2451 		mod_timer(&pf->serv_tmr, jiffies);
2452 }
2453 
2454 /**
2455  * ice_set_ctrlq_len - helper function to set controlq length
2456  * @hw: pointer to the HW instance
2457  */
2458 static void ice_set_ctrlq_len(struct ice_hw *hw)
2459 {
2460 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2461 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2462 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2463 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2464 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2465 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2466 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2467 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2468 	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2469 	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2470 	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2471 	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2472 }
2473 
2474 /**
2475  * ice_schedule_reset - schedule a reset
2476  * @pf: board private structure
2477  * @reset: reset being requested
2478  */
2479 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2480 {
2481 	struct device *dev = ice_pf_to_dev(pf);
2482 
2483 	/* bail out if earlier reset has failed */
2484 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2485 		dev_dbg(dev, "earlier reset has failed\n");
2486 		return -EIO;
2487 	}
2488 	/* bail if reset/recovery already in progress */
2489 	if (ice_is_reset_in_progress(pf->state)) {
2490 		dev_dbg(dev, "Reset already in progress\n");
2491 		return -EBUSY;
2492 	}
2493 
2494 	switch (reset) {
2495 	case ICE_RESET_PFR:
2496 		set_bit(ICE_PFR_REQ, pf->state);
2497 		break;
2498 	case ICE_RESET_CORER:
2499 		set_bit(ICE_CORER_REQ, pf->state);
2500 		break;
2501 	case ICE_RESET_GLOBR:
2502 		set_bit(ICE_GLOBR_REQ, pf->state);
2503 		break;
2504 	default:
2505 		return -EINVAL;
2506 	}
2507 
2508 	ice_service_task_schedule(pf);
2509 	return 0;
2510 }
2511 
2512 /**
2513  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2514  * @vsi: the VSI being configured
2515  */
2516 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2517 {
2518 	struct ice_hw *hw = &vsi->back->hw;
2519 	int i;
2520 
2521 	ice_for_each_q_vector(vsi, i)
2522 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2523 
2524 	ice_flush(hw);
2525 	return 0;
2526 }
2527 
2528 /**
2529  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2530  * @vsi: the VSI being configured
2531  * @basename: name for the vector
2532  */
2533 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2534 {
2535 	int q_vectors = vsi->num_q_vectors;
2536 	struct ice_pf *pf = vsi->back;
2537 	struct device *dev;
2538 	int rx_int_idx = 0;
2539 	int tx_int_idx = 0;
2540 	int vector, err;
2541 	int irq_num;
2542 
2543 	dev = ice_pf_to_dev(pf);
2544 	for (vector = 0; vector < q_vectors; vector++) {
2545 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2546 
2547 		irq_num = q_vector->irq.virq;
2548 
2549 		if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2550 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2551 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2552 			tx_int_idx++;
2553 		} else if (q_vector->rx.rx_ring) {
2554 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2555 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2556 		} else if (q_vector->tx.tx_ring) {
2557 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2558 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2559 		} else {
2560 			/* skip this unused q_vector */
2561 			continue;
2562 		}
2563 		if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2564 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2565 					       IRQF_SHARED, q_vector->name,
2566 					       q_vector);
2567 		else
2568 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2569 					       0, q_vector->name, q_vector);
2570 		if (err) {
2571 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2572 				   err);
2573 			goto free_q_irqs;
2574 		}
2575 	}
2576 
2577 	err = ice_set_cpu_rx_rmap(vsi);
2578 	if (err) {
2579 		netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2580 			   vsi->vsi_num, ERR_PTR(err));
2581 		goto free_q_irqs;
2582 	}
2583 
2584 	vsi->irqs_ready = true;
2585 	return 0;
2586 
2587 free_q_irqs:
2588 	while (vector--) {
2589 		irq_num = vsi->q_vectors[vector]->irq.virq;
2590 		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2591 	}
2592 	return err;
2593 }
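
/* Example of the resulting vector names (hypothetical basename
 * "ice-enp1s0f0" with paired Tx/Rx rings): "ice-enp1s0f0-TxRx-0",
 * "ice-enp1s0f0-TxRx-1", ... as visible in /proc/interrupts; vectors that
 * carry only one ring type are named with "-rx-N" or "-tx-N" instead.
 */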
2594 
2595 /**
2596  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2597  * @vsi: VSI to setup Tx rings used by XDP
2598  *
2599  * Return 0 on success and negative value on error
2600  */
2601 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2602 {
2603 	struct device *dev = ice_pf_to_dev(vsi->back);
2604 	struct ice_tx_desc *tx_desc;
2605 	int i, j;
2606 
2607 	ice_for_each_xdp_txq(vsi, i) {
2608 		u16 xdp_q_idx = vsi->alloc_txq + i;
2609 		struct ice_ring_stats *ring_stats;
2610 		struct ice_tx_ring *xdp_ring;
2611 
2612 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2613 		if (!xdp_ring)
2614 			goto free_xdp_rings;
2615 
2616 		ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
2617 		if (!ring_stats) {
2618 			ice_free_tx_ring(xdp_ring);
2619 			goto free_xdp_rings;
2620 		}
2621 
2622 		xdp_ring->ring_stats = ring_stats;
2623 		xdp_ring->q_index = xdp_q_idx;
2624 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2625 		xdp_ring->vsi = vsi;
2626 		xdp_ring->netdev = NULL;
2627 		xdp_ring->dev = dev;
2628 		xdp_ring->count = vsi->num_tx_desc;
2629 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2630 		if (ice_setup_tx_ring(xdp_ring))
2631 			goto free_xdp_rings;
2632 		ice_set_ring_xdp(xdp_ring);
2633 		spin_lock_init(&xdp_ring->tx_lock);
2634 		for (j = 0; j < xdp_ring->count; j++) {
2635 			tx_desc = ICE_TX_DESC(xdp_ring, j);
2636 			tx_desc->cmd_type_offset_bsz = 0;
2637 		}
2638 	}
2639 
2640 	return 0;
2641 
2642 free_xdp_rings:
2643 	for (; i >= 0; i--) {
2644 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
2645 			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2646 			vsi->xdp_rings[i]->ring_stats = NULL;
2647 			ice_free_tx_ring(vsi->xdp_rings[i]);
2648 		}
2649 	}
2650 	return -ENOMEM;
2651 }
2652 
2653 /**
2654  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2655  * @vsi: VSI to set the bpf prog on
2656  * @prog: the bpf prog pointer
2657  */
2658 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2659 {
2660 	struct bpf_prog *old_prog;
2661 	int i;
2662 
2663 	old_prog = xchg(&vsi->xdp_prog, prog);
2664 	ice_for_each_rxq(vsi, i)
2665 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2666 
2667 	if (old_prog)
2668 		bpf_prog_put(old_prog);
2669 }
2670 
2671 static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
2672 {
2673 	struct ice_q_vector *q_vector;
2674 	struct ice_tx_ring *ring;
2675 
2676 	if (static_key_enabled(&ice_xdp_locking_key))
2677 		return vsi->xdp_rings[qid % vsi->num_xdp_txq];
2678 
2679 	q_vector = vsi->rx_rings[qid]->q_vector;
2680 	ice_for_each_tx_ring(ring, q_vector->tx)
2681 		if (ice_ring_is_xdp(ring))
2682 			return ring;
2683 
2684 	return NULL;
2685 }
2686 
2687 /**
2688  * ice_map_xdp_rings - Map XDP rings to interrupt vectors
2689  * @vsi: the VSI with XDP rings being configured
2690  *
2691  * Map XDP rings to interrupt vectors and perform the configuration steps
2692  * dependent on the mapping.
2693  */
2694 void ice_map_xdp_rings(struct ice_vsi *vsi)
2695 {
2696 	int xdp_rings_rem = vsi->num_xdp_txq;
2697 	int v_idx, q_idx;
2698 
2699 	/* follow the logic from ice_vsi_map_rings_to_vectors */
2700 	ice_for_each_q_vector(vsi, v_idx) {
2701 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2702 		int xdp_rings_per_v, q_id, q_base;
2703 
2704 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2705 					       vsi->num_q_vectors - v_idx);
2706 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2707 
2708 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2709 			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2710 
2711 			xdp_ring->q_vector = q_vector;
2712 			xdp_ring->next = q_vector->tx.tx_ring;
2713 			q_vector->tx.tx_ring = xdp_ring;
2714 		}
2715 		xdp_rings_rem -= xdp_rings_per_v;
2716 	}
2717 
2718 	ice_for_each_rxq(vsi, q_idx) {
2719 		vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
2720 								       q_idx);
2721 		ice_tx_xsk_pool(vsi, q_idx);
2722 	}
2723 }
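
/* Worked example of the distribution above (hypothetical counts): with
 * num_xdp_txq = 10 and num_q_vectors = 4, the per-vector ring counts are
 * DIV_ROUND_UP(10, 4) = 3, DIV_ROUND_UP(7, 3) = 3, DIV_ROUND_UP(4, 2) = 2
 * and DIV_ROUND_UP(2, 1) = 2, i.e. a 3/3/2/2 split with the remainder
 * spread across the first vectors.
 */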
2724 
2725 /**
2726  * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors
2727  * @vsi: the VSI with XDP rings being unmapped
2728  */
2729 static void ice_unmap_xdp_rings(struct ice_vsi *vsi)
2730 {
2731 	int v_idx;
2732 
2733 	ice_for_each_q_vector(vsi, v_idx) {
2734 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2735 		struct ice_tx_ring *ring;
2736 
2737 		ice_for_each_tx_ring(ring, q_vector->tx)
2738 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2739 				break;
2740 
2741 		/* restore the value of last node prior to XDP setup */
2742 		q_vector->tx.tx_ring = ring;
2743 	}
2744 }
2745 
2746 /**
2747  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2748  * @vsi: VSI to bring up Tx rings used by XDP
2749  * @prog: bpf program that will be assigned to VSI
2750  * @cfg_type: create from scratch or restore the existing configuration
2751  *
2752  * Return 0 on success and negative value on error
2753  */
2754 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
2755 			  enum ice_xdp_cfg cfg_type)
2756 {
2757 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2758 	struct ice_pf *pf = vsi->back;
2759 	struct ice_qs_cfg xdp_qs_cfg = {
2760 		.qs_mutex = &pf->avail_q_mutex,
2761 		.pf_map = pf->avail_txqs,
2762 		.pf_map_size = pf->max_pf_txqs,
2763 		.q_count = vsi->num_xdp_txq,
2764 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2765 		.vsi_map = vsi->txq_map,
2766 		.vsi_map_offset = vsi->alloc_txq,
2767 		.mapping_mode = ICE_VSI_MAP_CONTIG
2768 	};
2769 	struct device *dev;
2770 	int status, i;
2771 
2772 	dev = ice_pf_to_dev(pf);
2773 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2774 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2775 	if (!vsi->xdp_rings)
2776 		return -ENOMEM;
2777 
2778 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2779 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2780 		goto err_map_xdp;
2781 
2782 	if (static_key_enabled(&ice_xdp_locking_key))
2783 		netdev_warn(vsi->netdev,
2784 			    "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2785 
2786 	if (ice_xdp_alloc_setup_rings(vsi))
2787 		goto clear_xdp_rings;
2788 
2789 	/* omit the scheduler update if in reset path; XDP queues will be
2790 	 * taken into account at the end of ice_vsi_rebuild, where
2791 	 * ice_cfg_vsi_lan is being called
2792 	 */
2793 	if (cfg_type == ICE_XDP_CFG_PART)
2794 		return 0;
2795 
2796 	ice_map_xdp_rings(vsi);
2797 
2798 	/* tell the Tx scheduler that right now we have
2799 	 * additional queues
2800 	 */
2801 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2802 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2803 
2804 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2805 				 max_txqs);
2806 	if (status) {
2807 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2808 			status);
2809 		goto unmap_xdp_rings;
2810 	}
2811 
2812 	/* assign the prog only when it's not already present on VSI;
2813 	 * this path is reached by both the ethtool -L and ndo_bpf flows;
2814 	 * VSI rebuild that happens under ethtool -L can expose us to
2815 	 * the bpf_prog refcount issues as we would be swapping same
2816 	 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2817 	 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2818 	 * this is not harmful as dev_xdp_install bumps the refcount
2819 	 * before calling the op exposed by the driver;
2820 	 */
2821 	if (!ice_is_xdp_ena_vsi(vsi))
2822 		ice_vsi_assign_bpf_prog(vsi, prog);
2823 
2824 	return 0;
2825 unmap_xdp_rings:
2826 	ice_unmap_xdp_rings(vsi);
2827 clear_xdp_rings:
2828 	ice_for_each_xdp_txq(vsi, i)
2829 		if (vsi->xdp_rings[i]) {
2830 			kfree_rcu(vsi->xdp_rings[i], rcu);
2831 			vsi->xdp_rings[i] = NULL;
2832 		}
2833 
2834 err_map_xdp:
2835 	mutex_lock(&pf->avail_q_mutex);
2836 	ice_for_each_xdp_txq(vsi, i) {
2837 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2838 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2839 	}
2840 	mutex_unlock(&pf->avail_q_mutex);
2841 
2842 	devm_kfree(dev, vsi->xdp_rings);
2843 	vsi->xdp_rings = NULL;
2844 
2845 	return -ENOMEM;
2846 }
2847 
2848 /**
2849  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2850  * @vsi: VSI to remove XDP rings
2851  * @cfg_type: disable XDP permanently or allow it to be restored later
2852  *
2853  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2854  * resources
2855  */
2856 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
2857 {
2858 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2859 	struct ice_pf *pf = vsi->back;
2860 	int i;
2861 
2862 	/* q_vectors are freed in reset path so there's no point in detaching
2863 	 * rings
2864 	 */
2865 	if (cfg_type == ICE_XDP_CFG_PART)
2866 		goto free_qmap;
2867 
2868 	ice_unmap_xdp_rings(vsi);
2869 
2870 free_qmap:
2871 	mutex_lock(&pf->avail_q_mutex);
2872 	ice_for_each_xdp_txq(vsi, i) {
2873 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2874 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2875 	}
2876 	mutex_unlock(&pf->avail_q_mutex);
2877 
2878 	ice_for_each_xdp_txq(vsi, i)
2879 		if (vsi->xdp_rings[i]) {
2880 			if (vsi->xdp_rings[i]->desc) {
2881 				synchronize_rcu();
2882 				ice_free_tx_ring(vsi->xdp_rings[i]);
2883 			}
2884 			kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
2885 			vsi->xdp_rings[i]->ring_stats = NULL;
2886 			kfree_rcu(vsi->xdp_rings[i], rcu);
2887 			vsi->xdp_rings[i] = NULL;
2888 		}
2889 
2890 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2891 	vsi->xdp_rings = NULL;
2892 
2893 	if (static_key_enabled(&ice_xdp_locking_key))
2894 		static_branch_dec(&ice_xdp_locking_key);
2895 
2896 	if (cfg_type == ICE_XDP_CFG_PART)
2897 		return 0;
2898 
2899 	ice_vsi_assign_bpf_prog(vsi, NULL);
2900 
2901 	/* notify Tx scheduler that we destroyed XDP queues and bring
2902 	 * back the old number of child nodes
2903 	 */
2904 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2905 		max_txqs[i] = vsi->num_txq;
2906 
2907 	/* change number of XDP Tx queues to 0 */
2908 	vsi->num_xdp_txq = 0;
2909 
2910 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2911 			       max_txqs);
2912 }
2913 
2914 /**
2915  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2916  * @vsi: VSI to schedule napi on
2917  */
2918 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2919 {
2920 	int i;
2921 
2922 	ice_for_each_rxq(vsi, i) {
2923 		struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2924 
2925 		if (READ_ONCE(rx_ring->xsk_pool))
2926 			napi_schedule(&rx_ring->q_vector->napi);
2927 	}
2928 }
2929 
2930 /**
2931  * ice_vsi_determine_xdp_res - figure out how many Tx queues XDP can have
2932  * @vsi: VSI to determine the count of XDP Tx queues for
2933  *
2934  * Returns 0 if the available Tx queue count is at least half the CPU count,
2935  * -ENOMEM otherwise.
2936  */
2937 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2938 {
2939 	u16 avail = ice_get_avail_txq_count(vsi->back);
2940 	u16 cpus = num_possible_cpus();
2941 
2942 	if (avail < cpus / 2)
2943 		return -ENOMEM;
2944 
2945 	if (vsi->type == ICE_VSI_SF)
2946 		avail = vsi->alloc_txq;
2947 
2948 	vsi->num_xdp_txq = min_t(u16, avail, cpus);
2949 
2950 	if (vsi->num_xdp_txq < cpus)
2951 		static_branch_inc(&ice_xdp_locking_key);
2952 
2953 	return 0;
2954 }
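
/* Worked example (hypothetical counts, PF VSI): with 16 possible CPUs and
 * 12 Tx queues still available, 12 >= 16 / 2 so the check passes and
 * num_xdp_txq becomes min(12, 16) = 12; because 12 < 16, the
 * ice_xdp_locking_key static branch is enabled and CPUs must share XDP
 * rings under xdp_ring->tx_lock.
 */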
2955 
2956 /**
2957  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2958  * @vsi: Pointer to VSI structure
2959  */
2960 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
2961 {
2962 	return ICE_RXBUF_3072;
2963 }
2964 
2965 /**
2966  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2967  * @vsi: VSI to setup XDP for
2968  * @prog: XDP program
2969  * @extack: netlink extended ack
2970  */
2971 static int
2972 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2973 		   struct netlink_ext_ack *extack)
2974 {
2975 	unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2976 	int ret = 0, xdp_ring_err = 0;
2977 	bool if_running;
2978 
2979 	if (prog && !prog->aux->xdp_has_frags) {
2980 		if (frame_size > ice_max_xdp_frame_size(vsi)) {
2981 			NL_SET_ERR_MSG_MOD(extack,
2982 					   "MTU is too large for linear frames and XDP prog does not support frags");
2983 			return -EOPNOTSUPP;
2984 		}
2985 	}
2986 
2987 	/* hot swap progs and avoid toggling link */
2988 	if (ice_is_xdp_ena_vsi(vsi) == !!prog ||
2989 	    test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) {
2990 		ice_vsi_assign_bpf_prog(vsi, prog);
2991 		return 0;
2992 	}
2993 
2994 	if_running = netif_running(vsi->netdev) &&
2995 		     !test_and_set_bit(ICE_VSI_DOWN, vsi->state);
2996 
2997 	/* need to stop netdev while setting up the program for Rx rings */
2998 	if (if_running) {
2999 		ret = ice_down(vsi);
3000 		if (ret) {
3001 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
3002 			return ret;
3003 		}
3004 	}
3005 
3006 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
3007 		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
3008 		if (xdp_ring_err) {
3009 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
3010 			goto resume_if;
3011 		} else {
3012 			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
3013 							     ICE_XDP_CFG_FULL);
3014 			if (xdp_ring_err) {
3015 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
3016 				goto resume_if;
3017 			}
3018 		}
3019 		xdp_features_set_redirect_target(vsi->netdev, true);
3020 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
3021 		xdp_features_clear_redirect_target(vsi->netdev);
3022 		xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
3023 		if (xdp_ring_err)
3024 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
3025 	}
3026 
3027 resume_if:
3028 	if (if_running)
3029 		ret = ice_up(vsi);
3030 
3031 	if (!ret && prog)
3032 		ice_vsi_rx_napi_schedule(vsi);
3033 
3034 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
3035 }
3036 
3037 /**
3038  * ice_xdp_safe_mode - XDP handler for safe mode
3039  * @dev: netdevice
3040  * @xdp: XDP command
3041  */
3042 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
3043 			     struct netdev_bpf *xdp)
3044 {
3045 	NL_SET_ERR_MSG_MOD(xdp->extack,
3046 			   "Please provide working DDP firmware package in order to use XDP\n"
3047 			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
3048 	return -EOPNOTSUPP;
3049 }
3050 
3051 /**
3052  * ice_xdp - implements XDP handler
3053  * @dev: netdevice
3054  * @xdp: XDP command
3055  */
3056 int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3057 {
3058 	struct ice_netdev_priv *np = netdev_priv(dev);
3059 	struct ice_vsi *vsi = np->vsi;
3060 	int ret;
3061 
3062 	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) {
3063 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI");
3064 		return -EINVAL;
3065 	}
3066 
3067 	mutex_lock(&vsi->xdp_state_lock);
3068 
3069 	switch (xdp->command) {
3070 	case XDP_SETUP_PROG:
3071 		ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
3072 		break;
3073 	case XDP_SETUP_XSK_POOL:
3074 		ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);
3075 		break;
3076 	default:
3077 		ret = -EINVAL;
3078 	}
3079 
3080 	mutex_unlock(&vsi->xdp_state_lock);
3081 	return ret;
3082 }
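
/* Usage sketch (illustrative): this is the driver's ndo_bpf callback, so it
 * is reached through the core XDP plumbing rather than called directly,
 * e.g. attaching a program from user space:
 *
 *	ip link set dev enp1s0f0 xdp obj prog.o sec xdp
 *
 * (interface and object names are hypothetical) lands in
 * dev_xdp_install() -> ice_xdp() with xdp->command == XDP_SETUP_PROG.
 */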
3083 
3084 /**
3085  * ice_ena_misc_vector - enable the non-queue interrupts
3086  * @pf: board private structure
3087  */
3088 static void ice_ena_misc_vector(struct ice_pf *pf)
3089 {
3090 	struct ice_hw *hw = &pf->hw;
3091 	u32 pf_intr_start_offset;
3092 	u32 val;
3093 
3094 	/* Disable anti-spoof detection interrupt to prevent spurious event
3095 	 * interrupts during a function reset. Anti-spoof functionality is
3096 	 * still supported.
3097 	 */
3098 	val = rd32(hw, GL_MDCK_TX_TDPU);
3099 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3100 	wr32(hw, GL_MDCK_TX_TDPU, val);
3101 
3102 	/* clear things first */
3103 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
3104 	rd32(hw, PFINT_OICR);		/* read to clear */
3105 
3106 	val = (PFINT_OICR_ECC_ERR_M |
3107 	       PFINT_OICR_MAL_DETECT_M |
3108 	       PFINT_OICR_GRST_M |
3109 	       PFINT_OICR_PCI_EXCEPTION_M |
3110 	       PFINT_OICR_VFLR_M |
3111 	       PFINT_OICR_HMC_ERR_M |
3112 	       PFINT_OICR_PE_PUSH_M |
3113 	       PFINT_OICR_PE_CRITERR_M);
3114 
3115 	wr32(hw, PFINT_OICR_ENA, val);
3116 
3117 	/* SW_ITR_IDX = 0, but don't change INTENA */
3118 	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
3119 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3120 
3121 	if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3122 		return;
3123 	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3124 	wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3125 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3126 }
3127 
3128 /**
3129  * ice_ll_ts_intr - ll_ts interrupt handler
3130  * @irq: interrupt number
3131  * @data: pointer to a q_vector
3132  */
3133 static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
3134 {
3135 	struct ice_pf *pf = data;
3136 	u32 pf_intr_start_offset;
3137 	struct ice_ptp_tx *tx;
3138 	unsigned long flags;
3139 	struct ice_hw *hw;
3140 	u32 val;
3141 	u8 idx;
3142 
3143 	hw = &pf->hw;
3144 	tx = &pf->ptp.port.tx;
3145 	spin_lock_irqsave(&tx->lock, flags);
3146 	if (tx->init) {
3147 		ice_ptp_complete_tx_single_tstamp(tx);
3148 
3149 		idx = find_next_bit_wrap(tx->in_use, tx->len,
3150 					 tx->last_ll_ts_idx_read + 1);
3151 		if (idx != tx->len)
3152 			ice_ptp_req_tx_single_tstamp(tx, idx);
3153 	}
3154 	spin_unlock_irqrestore(&tx->lock, flags);
3155 
3156 	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
3157 	      (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
3158 	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3159 	wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
3160 	     val);
3161 
3162 	return IRQ_HANDLED;
3163 }
3164 
3165 /**
3166  * ice_misc_intr - misc interrupt handler
3167  * @irq: interrupt number
3168  * @data: pointer to a q_vector
3169  */
3170 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3171 {
3172 	struct ice_pf *pf = (struct ice_pf *)data;
3173 	irqreturn_t ret = IRQ_HANDLED;
3174 	struct ice_hw *hw = &pf->hw;
3175 	struct device *dev;
3176 	u32 oicr, ena_mask;
3177 
3178 	dev = ice_pf_to_dev(pf);
3179 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3180 	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3181 	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3182 
3183 	oicr = rd32(hw, PFINT_OICR);
3184 	ena_mask = rd32(hw, PFINT_OICR_ENA);
3185 
3186 	if (oicr & PFINT_OICR_SWINT_M) {
3187 		ena_mask &= ~PFINT_OICR_SWINT_M;
3188 		pf->sw_int_count++;
3189 	}
3190 
3191 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
3192 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3193 		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3194 	}
3195 	if (oicr & PFINT_OICR_VFLR_M) {
3196 		/* disable any further VFLR event notifications */
3197 		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3198 			u32 reg = rd32(hw, PFINT_OICR_ENA);
3199 
3200 			reg &= ~PFINT_OICR_VFLR_M;
3201 			wr32(hw, PFINT_OICR_ENA, reg);
3202 		} else {
3203 			ena_mask &= ~PFINT_OICR_VFLR_M;
3204 			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3205 		}
3206 	}
3207 
3208 	if (oicr & PFINT_OICR_GRST_M) {
3209 		u32 reset;
3210 
3211 		/* we have a reset warning */
3212 		ena_mask &= ~PFINT_OICR_GRST_M;
3213 		reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M,
3214 				  rd32(hw, GLGEN_RSTAT));
3215 
3216 		if (reset == ICE_RESET_CORER)
3217 			pf->corer_count++;
3218 		else if (reset == ICE_RESET_GLOBR)
3219 			pf->globr_count++;
3220 		else if (reset == ICE_RESET_EMPR)
3221 			pf->empr_count++;
3222 		else
3223 			dev_dbg(dev, "Invalid reset type %d\n", reset);
3224 
3225 		/* If a reset cycle isn't already in progress, we set a bit in
3226 		 * pf->state so that the service task can start a reset/rebuild.
3227 		 */
3228 		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3229 			if (reset == ICE_RESET_CORER)
3230 				set_bit(ICE_CORER_RECV, pf->state);
3231 			else if (reset == ICE_RESET_GLOBR)
3232 				set_bit(ICE_GLOBR_RECV, pf->state);
3233 			else
3234 				set_bit(ICE_EMPR_RECV, pf->state);
3235 
3236 			/* There are a couple of different bits at play here.
3237 			 * hw->reset_ongoing indicates whether the hardware is
3238 			 * in reset. This is set to true when a reset interrupt
3239 			 * is received and set back to false after the driver
3240 			 * has determined that the hardware is out of reset.
3241 			 *
3242 			 * ICE_RESET_OICR_RECV in pf->state indicates
3243 			 * that a post-reset rebuild is required before the
3244 			 * driver is operational again. This is set above.
3245 			 *
3246 			 * As this is the start of the reset/rebuild cycle, set
3247 			 * both to indicate that.
3248 			 */
3249 			hw->reset_ongoing = true;
3250 		}
3251 	}
3252 
3253 	if (oicr & PFINT_OICR_TSYN_TX_M) {
3254 		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3255 
3256 		ret = ice_ptp_ts_irq(pf);
3257 	}
3258 
3259 	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3260 		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3261 		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3262 
3263 		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3264 
3265 		if (ice_pf_src_tmr_owned(pf)) {
3266 			/* Save EVENTs from GLTSYN register */
3267 			pf->ptp.ext_ts_irq |= gltsyn_stat &
3268 					      (GLTSYN_STAT_EVENT0_M |
3269 					       GLTSYN_STAT_EVENT1_M |
3270 					       GLTSYN_STAT_EVENT2_M);
3271 
3272 			ice_ptp_extts_event(pf);
3273 		}
3274 	}
3275 
3276 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3277 	if (oicr & ICE_AUX_CRIT_ERR) {
3278 		pf->oicr_err_reg |= oicr;
3279 		set_bit(ICE_AUX_ERR_PENDING, pf->state);
3280 		ena_mask &= ~ICE_AUX_CRIT_ERR;
3281 	}
3282 
3283 	/* Report any remaining unexpected interrupts */
3284 	oicr &= ena_mask;
3285 	if (oicr) {
3286 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3287 		/* If a critical error is pending there is no choice but to
3288 		 * reset the device.
3289 		 */
3290 		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3291 			    PFINT_OICR_ECC_ERR_M)) {
3292 			set_bit(ICE_PFR_REQ, pf->state);
3293 		}
3294 	}
3295 	ice_service_task_schedule(pf);
3296 	if (ret == IRQ_HANDLED)
3297 		ice_irq_dynamic_ena(hw, NULL, NULL);
3298 
3299 	return ret;
3300 }
3301 
3302 /**
3303  * ice_misc_intr_thread_fn - misc interrupt thread function
3304  * @irq: interrupt number
3305  * @data: pointer to a q_vector
3306  */
3307 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3308 {
3309 	struct ice_pf *pf = data;
3310 	struct ice_hw *hw;
3311 
3312 	hw = &pf->hw;
3313 
3314 	if (ice_is_reset_in_progress(pf->state))
3315 		goto skip_irq;
3316 
3317 	if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread))
3318 		ice_ptp_process_ts(pf);
3319 
3320 skip_irq:
3321 	ice_irq_dynamic_ena(hw, NULL, NULL);
3322 	ice_flush(hw);
3323 
3324 	if (ice_ptp_tx_tstamps_pending(pf)) {
3325 		/* If any new Tx timestamps happened while in interrupt,
3326 		 * re-arm the interrupt to trigger it again.
3327 		 */
3328 		wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
3329 		ice_flush(hw);
3330 	}
3331 
3332 	return IRQ_HANDLED;
3333 }
3334 
3335 /**
3336  * ice_dis_ctrlq_interrupts - disable control queue interrupts
3337  * @hw: pointer to HW structure
3338  */
3339 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3340 {
3341 	/* disable Admin queue Interrupt causes */
3342 	wr32(hw, PFINT_FW_CTL,
3343 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3344 
3345 	/* disable Mailbox queue Interrupt causes */
3346 	wr32(hw, PFINT_MBX_CTL,
3347 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3348 
3349 	wr32(hw, PFINT_SB_CTL,
3350 	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3351 
3352 	/* disable Control queue Interrupt causes */
3353 	wr32(hw, PFINT_OICR_CTL,
3354 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3355 
3356 	ice_flush(hw);
3357 }
3358 
3359 /**
3360  * ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup
3361  * @pf: board private structure
3362  */
ice_free_irq_msix_ll_ts(struct ice_pf * pf)3363 static void ice_free_irq_msix_ll_ts(struct ice_pf *pf)
3364 {
3365 	int irq_num = pf->ll_ts_irq.virq;
3366 
3367 	synchronize_irq(irq_num);
3368 	devm_free_irq(ice_pf_to_dev(pf), irq_num, pf);
3369 
3370 	ice_free_irq(pf, pf->ll_ts_irq);
3371 }
3372 
3373 /**
3374  * ice_free_irq_msix_misc - Unroll misc vector setup
3375  * @pf: board private structure
3376  */
ice_free_irq_msix_misc(struct ice_pf * pf)3377 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3378 {
3379 	int misc_irq_num = pf->oicr_irq.virq;
3380 	struct ice_hw *hw = &pf->hw;
3381 
3382 	ice_dis_ctrlq_interrupts(hw);
3383 
3384 	/* disable OICR interrupt */
3385 	wr32(hw, PFINT_OICR_ENA, 0);
3386 	ice_flush(hw);
3387 
3388 	synchronize_irq(misc_irq_num);
3389 	devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
3390 
3391 	ice_free_irq(pf, pf->oicr_irq);
3392 	if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3393 		ice_free_irq_msix_ll_ts(pf);
3394 }
3395 
3396 /**
3397  * ice_ena_ctrlq_interrupts - enable control queue interrupts
3398  * @hw: pointer to HW structure
3399  * @reg_idx: HW vector index to associate the control queue interrupts with
3400  */
ice_ena_ctrlq_interrupts(struct ice_hw * hw,u16 reg_idx)3401 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3402 {
3403 	u32 val;
3404 
3405 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3406 	       PFINT_OICR_CTL_CAUSE_ENA_M);
3407 	wr32(hw, PFINT_OICR_CTL, val);
3408 
3409 	/* enable Admin queue Interrupt causes */
3410 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3411 	       PFINT_FW_CTL_CAUSE_ENA_M);
3412 	wr32(hw, PFINT_FW_CTL, val);
3413 
3414 	/* enable Mailbox queue Interrupt causes */
3415 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3416 	       PFINT_MBX_CTL_CAUSE_ENA_M);
3417 	wr32(hw, PFINT_MBX_CTL, val);
3418 
3419 	if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) {
3420 		/* enable Sideband queue Interrupt causes */
3421 		val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3422 		       PFINT_SB_CTL_CAUSE_ENA_M);
3423 		wr32(hw, PFINT_SB_CTL, val);
3424 	}
3425 
3426 	ice_flush(hw);
3427 }
3428 
3429 /**
3430  * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3431  * @pf: board private structure
3432  *
3433  * This sets up the handler for MSIX 0, which is used to manage the
3434  * non-queue interrupts, e.g. AdminQ and errors. This is not used
3435  * when in MSI or Legacy interrupt mode.
3436  */
ice_req_irq_msix_misc(struct ice_pf * pf)3437 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3438 {
3439 	struct device *dev = ice_pf_to_dev(pf);
3440 	struct ice_hw *hw = &pf->hw;
3441 	u32 pf_intr_start_offset;
3442 	struct msi_map irq;
3443 	int err = 0;
3444 
3445 	if (!pf->int_name[0])
3446 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3447 			 dev_driver_string(dev), dev_name(dev));
3448 
3449 	if (!pf->int_name_ll_ts[0])
3450 		snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1,
3451 			 "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev));
3452 	/* Do not request IRQ but do enable OICR interrupt since settings are
3453 	 * lost during reset. Note that this function is called only during
3454 	 * the rebuild path and not while reset is in progress.
3455 	 */
3456 	if (ice_is_reset_in_progress(pf->state))
3457 		goto skip_req_irq;
3458 
3459 	/* reserve one vector in irq_tracker for misc interrupts */
3460 	irq = ice_alloc_irq(pf, false);
3461 	if (irq.index < 0)
3462 		return irq.index;
3463 
3464 	pf->oicr_irq = irq;
3465 	err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
3466 					ice_misc_intr_thread_fn, 0,
3467 					pf->int_name, pf);
3468 	if (err) {
3469 		dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3470 			pf->int_name, err);
3471 		ice_free_irq(pf, pf->oicr_irq);
3472 		return err;
3473 	}
3474 
3475 	/* reserve one vector in irq_tracker for ll_ts interrupt */
3476 	if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3477 		goto skip_req_irq;
3478 
3479 	irq = ice_alloc_irq(pf, false);
3480 	if (irq.index < 0)
3481 		return irq.index;
3482 
3483 	pf->ll_ts_irq = irq;
3484 	err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0,
3485 			       pf->int_name_ll_ts, pf);
3486 	if (err) {
3487 		dev_err(dev, "devm_request_irq for %s failed: %d\n",
3488 			pf->int_name_ll_ts, err);
3489 		ice_free_irq(pf, pf->ll_ts_irq);
3490 		return err;
3491 	}
3492 
3493 skip_req_irq:
3494 	ice_ena_misc_vector(pf);
3495 
3496 	ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
3497 	/* This enables LL TS interrupt */
3498 	pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
3499 	if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
3500 		wr32(hw, PFINT_SB_CTL,
3501 		     ((pf->ll_ts_irq.index + pf_intr_start_offset) &
3502 		      PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M);
3503 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3504 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3505 
3506 	ice_flush(hw);
3507 	ice_irq_dynamic_ena(hw, NULL, NULL);
3508 
3509 	return 0;
3510 }
3511 
3512 /**
3513  * ice_set_ops - set netdev and ethtool ops for the given netdev
3514  * @vsi: the VSI associated with the new netdev
3515  */
ice_set_ops(struct ice_vsi * vsi)3516 static void ice_set_ops(struct ice_vsi *vsi)
3517 {
3518 	struct net_device *netdev = vsi->netdev;
3519 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3520 
3521 	if (ice_is_safe_mode(pf)) {
3522 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3523 		ice_set_ethtool_safe_mode_ops(netdev);
3524 		return;
3525 	}
3526 
3527 	netdev->netdev_ops = &ice_netdev_ops;
3528 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3529 	netdev->xdp_metadata_ops = &ice_xdp_md_ops;
3530 	ice_set_ethtool_ops(netdev);
3531 
3532 	if (vsi->type != ICE_VSI_PF)
3533 		return;
3534 
3535 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
3536 			       NETDEV_XDP_ACT_XSK_ZEROCOPY |
3537 			       NETDEV_XDP_ACT_RX_SG;
3538 	netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
3539 }
3540 
3541 /**
3542  * ice_set_netdev_features - set features for the given netdev
3543  * @netdev: netdev instance
3544  */
ice_set_netdev_features(struct net_device * netdev)3545 void ice_set_netdev_features(struct net_device *netdev)
3546 {
3547 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3548 	bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3549 	netdev_features_t csumo_features;
3550 	netdev_features_t vlano_features;
3551 	netdev_features_t dflt_features;
3552 	netdev_features_t tso_features;
3553 
3554 	if (ice_is_safe_mode(pf)) {
3555 		/* safe mode */
3556 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3557 		netdev->hw_features = netdev->features;
3558 		return;
3559 	}
3560 
3561 	dflt_features = NETIF_F_SG	|
3562 			NETIF_F_HIGHDMA	|
3563 			NETIF_F_NTUPLE	|
3564 			NETIF_F_RXHASH;
3565 
3566 	csumo_features = NETIF_F_RXCSUM	  |
3567 			 NETIF_F_IP_CSUM  |
3568 			 NETIF_F_SCTP_CRC |
3569 			 NETIF_F_IPV6_CSUM;
3570 
3571 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3572 			 NETIF_F_HW_VLAN_CTAG_TX     |
3573 			 NETIF_F_HW_VLAN_CTAG_RX;
3574 
3575 	/* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3576 	if (is_dvm_ena)
3577 		vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3578 
3579 	tso_features = NETIF_F_TSO			|
3580 		       NETIF_F_TSO_ECN			|
3581 		       NETIF_F_TSO6			|
3582 		       NETIF_F_GSO_GRE			|
3583 		       NETIF_F_GSO_UDP_TUNNEL		|
3584 		       NETIF_F_GSO_GRE_CSUM		|
3585 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3586 		       NETIF_F_GSO_PARTIAL		|
3587 		       NETIF_F_GSO_IPXIP4		|
3588 		       NETIF_F_GSO_IPXIP6		|
3589 		       NETIF_F_GSO_UDP_L4;
3590 
3591 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3592 					NETIF_F_GSO_GRE_CSUM;
3593 	/* set features that user can change */
3594 	netdev->hw_features = dflt_features | csumo_features |
3595 			      vlano_features | tso_features;
3596 
3597 	/* add support for HW_CSUM on packets with MPLS header */
3598 	netdev->mpls_features =  NETIF_F_HW_CSUM |
3599 				 NETIF_F_TSO     |
3600 				 NETIF_F_TSO6;
3601 
3602 	/* enable features */
3603 	netdev->features |= netdev->hw_features;
3604 
3605 	netdev->hw_features |= NETIF_F_HW_TC;
3606 	netdev->hw_features |= NETIF_F_LOOPBACK;
3607 
3608 	/* encap and VLAN devices inherit default, csumo and tso features */
3609 	netdev->hw_enc_features |= dflt_features | csumo_features |
3610 				   tso_features;
3611 	netdev->vlan_features |= dflt_features | csumo_features |
3612 				 tso_features;
3613 
3614 	/* advertise support but don't enable by default since only one type of
3615 	 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3616 	 * type is turned on, the other must be turned off. This is enforced by
3617 	 * the ice_fix_features() ndo callback.
3618 	 */
3619 	if (is_dvm_ena)
3620 		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3621 			NETIF_F_HW_VLAN_STAG_TX;
3622 
3623 	/* Leave CRC / FCS stripping enabled by default, but allow the value to
3624 	 * be changed at runtime
3625 	 */
3626 	netdev->hw_features |= NETIF_F_RXFCS;
3627 
3628 	/* Allow core to manage IRQs affinity */
3629 	netif_set_affinity_auto(netdev);
3630 
3631 	/* Mutual exclusivity for TSO and GCS is enforced by the set features
3632 	 * ndo callback.
3633 	 */
3634 	if (ice_is_feature_supported(pf, ICE_F_GCS))
3635 		netdev->hw_features |= NETIF_F_HW_CSUM;
3636 
3637 	netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
3638 }
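
/* Illustrative example (hedged; assumes the standard netdev feature
 * strings "rx-vlan-stag-hw-parse" and "rx-vlan-hw-parse"): with DVM
 * enabled, turning on STAG Rx offload makes ice_fix_features() turn
 * CTAG Rx offload off, and vice versa:
 *
 *	# ethtool -K eth0 rx-vlan-stag-hw-parse on
 *	(the driver then clears rx-vlan-hw-parse, i.e. CTAG Rx offload)
 */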
3639 
3640 /**
3641  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3642  * @lut: Lookup table
3643  * @rss_table_size: Lookup table size
3644  * @rss_size: Range of queue number for hashing
3645  */
ice_fill_rss_lut(u8 * lut,u16 rss_table_size,u16 rss_size)3646 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3647 {
3648 	u16 i;
3649 
3650 	for (i = 0; i < rss_table_size; i++)
3651 		lut[i] = i % rss_size;
3652 }
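
/* Usage sketch (illustrative sizes only): spreading an 8-entry LUT
 * across 3 queues yields a simple round-robin assignment:
 *
 *	u8 lut[8];
 *
 *	ice_fill_rss_lut(lut, ARRAY_SIZE(lut), 3);
 *	// lut[] == { 0, 1, 2, 0, 1, 2, 0, 1 }
 */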
3653 
3654 /**
3655  * ice_pf_vsi_setup - Set up a PF VSI
3656  * @pf: board private structure
3657  * @pi: pointer to the port_info instance
3658  *
3659  * Returns pointer to the successfully allocated VSI software struct
3660  * on success, otherwise returns NULL on failure.
3661  */
3662 static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf * pf,struct ice_port_info * pi)3663 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3664 {
3665 	struct ice_vsi_cfg_params params = {};
3666 
3667 	params.type = ICE_VSI_PF;
3668 	params.port_info = pi;
3669 	params.flags = ICE_VSI_FLAG_INIT;
3670 
3671 	return ice_vsi_setup(pf, &params);
3672 }
3673 
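/**
 * ice_chnl_vsi_setup - Set up a channel VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @ch: pointer to the channel structure
 *
 * Returns pointer to the successfully allocated VSI software struct
 * on success, otherwise returns NULL on failure.
 */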
3674 static struct ice_vsi *
ice_chnl_vsi_setup(struct ice_pf * pf,struct ice_port_info * pi,struct ice_channel * ch)3675 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3676 		   struct ice_channel *ch)
3677 {
3678 	struct ice_vsi_cfg_params params = {};
3679 
3680 	params.type = ICE_VSI_CHNL;
3681 	params.port_info = pi;
3682 	params.ch = ch;
3683 	params.flags = ICE_VSI_FLAG_INIT;
3684 
3685 	return ice_vsi_setup(pf, &params);
3686 }
3687 
3688 /**
3689  * ice_ctrl_vsi_setup - Set up a control VSI
3690  * @pf: board private structure
3691  * @pi: pointer to the port_info instance
3692  *
3693  * Returns pointer to the successfully allocated VSI software struct
3694  * on success, otherwise returns NULL on failure.
3695  */
3696 static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf * pf,struct ice_port_info * pi)3697 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3698 {
3699 	struct ice_vsi_cfg_params params = {};
3700 
3701 	params.type = ICE_VSI_CTRL;
3702 	params.port_info = pi;
3703 	params.flags = ICE_VSI_FLAG_INIT;
3704 
3705 	return ice_vsi_setup(pf, &params);
3706 }
3707 
3708 /**
3709  * ice_lb_vsi_setup - Set up a loopback VSI
3710  * @pf: board private structure
3711  * @pi: pointer to the port_info instance
3712  *
3713  * Returns pointer to the successfully allocated VSI software struct
3714  * on success, otherwise returns NULL on failure.
3715  */
3716 struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf * pf,struct ice_port_info * pi)3717 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3718 {
3719 	struct ice_vsi_cfg_params params = {};
3720 
3721 	params.type = ICE_VSI_LB;
3722 	params.port_info = pi;
3723 	params.flags = ICE_VSI_FLAG_INIT;
3724 
3725 	return ice_vsi_setup(pf, &params);
3726 }
3727 
3728 /**
3729  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3730  * @netdev: network interface to be adjusted
3731  * @proto: VLAN TPID
3732  * @vid: VLAN ID to be added
3733  *
3734  * net_device_ops implementation for adding VLAN IDs
3735  */
ice_vlan_rx_add_vid(struct net_device * netdev,__be16 proto,u16 vid)3736 int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3737 {
3738 	struct ice_netdev_priv *np = netdev_priv(netdev);
3739 	struct ice_vsi_vlan_ops *vlan_ops;
3740 	struct ice_vsi *vsi = np->vsi;
3741 	struct ice_vlan vlan;
3742 	int ret;
3743 
3744 	/* VLAN 0 is added by default during load/reset */
3745 	if (!vid)
3746 		return 0;
3747 
3748 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3749 		usleep_range(1000, 2000);
3750 
3751 	/* Add multicast promisc rule for the VLAN ID to be added if
3752 	 * all-multicast is currently enabled.
3753 	 */
3754 	if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3755 		ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3756 					       ICE_MCAST_VLAN_PROMISC_BITS,
3757 					       vid);
3758 		if (ret)
3759 			goto finish;
3760 	}
3761 
3762 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3763 
3764 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3765 	 * packets aren't pruned by the device's internal switch on Rx
3766 	 */
3767 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3768 	ret = vlan_ops->add_vlan(vsi, &vlan);
3769 	if (ret)
3770 		goto finish;
3771 
3772 	/* If all-multicast is currently enabled and this VLAN ID is the only
3773 	 * one besides VLAN-0, we have to update the look-up type of the multicast
3774 	 * promisc rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3775 	 */
3776 	if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3777 	    ice_vsi_num_non_zero_vlans(vsi) == 1) {
3778 		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3779 					   ICE_MCAST_PROMISC_BITS, 0);
3780 		ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3781 					 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3782 	}
3783 
3784 finish:
3785 	clear_bit(ICE_CFG_BUSY, vsi->state);
3786 
3787 	return ret;
3788 }
3789 
3790 /**
3791  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3792  * @netdev: network interface to be adjusted
3793  * @proto: VLAN TPID
3794  * @vid: VLAN ID to be removed
3795  *
3796  * net_device_ops implementation for removing VLAN IDs
3797  */
ice_vlan_rx_kill_vid(struct net_device * netdev,__be16 proto,u16 vid)3798 int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3799 {
3800 	struct ice_netdev_priv *np = netdev_priv(netdev);
3801 	struct ice_vsi_vlan_ops *vlan_ops;
3802 	struct ice_vsi *vsi = np->vsi;
3803 	struct ice_vlan vlan;
3804 	int ret;
3805 
3806 	/* don't allow removal of VLAN 0 */
3807 	if (!vid)
3808 		return 0;
3809 
3810 	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3811 		usleep_range(1000, 2000);
3812 
3813 	ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3814 				    ICE_MCAST_VLAN_PROMISC_BITS, vid);
3815 	if (ret) {
3816 		netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3817 			   vsi->vsi_num);
3818 		vsi->current_netdev_flags |= IFF_ALLMULTI;
3819 	}
3820 
3821 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3822 
3823 	/* Make sure VLAN delete is successful before updating VLAN
3824 	 * information
3825 	 */
3826 	vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3827 	ret = vlan_ops->del_vlan(vsi, &vlan);
3828 	if (ret)
3829 		goto finish;
3830 
3831 	/* Remove multicast promisc rule for the removed VLAN ID if
3832 	 * all-multicast is enabled.
3833 	 */
3834 	if (vsi->current_netdev_flags & IFF_ALLMULTI)
3835 		ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3836 					   ICE_MCAST_VLAN_PROMISC_BITS, vid);
3837 
3838 	if (!ice_vsi_has_non_zero_vlans(vsi)) {
3839 		/* Update look-up type of multicast promisc rule for VLAN 0
3840 		 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3841 		 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3842 		 */
3843 		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3844 			ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3845 						   ICE_MCAST_VLAN_PROMISC_BITS,
3846 						   0);
3847 			ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3848 						 ICE_MCAST_PROMISC_BITS, 0);
3849 		}
3850 	}
3851 
3852 finish:
3853 	clear_bit(ICE_CFG_BUSY, vsi->state);
3854 
3855 	return ret;
3856 }
3857 
3858 /**
3859  * ice_rep_indr_tc_block_unbind
3860  * @cb_priv: indirection block private data
3861  */
ice_rep_indr_tc_block_unbind(void * cb_priv)3862 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3863 {
3864 	struct ice_indr_block_priv *indr_priv = cb_priv;
3865 
3866 	list_del(&indr_priv->list);
3867 	kfree(indr_priv);
3868 }
3869 
3870 /**
3871  * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3872  * @vsi: VSI struct which has the netdev
3873  */
ice_tc_indir_block_unregister(struct ice_vsi * vsi)3874 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3875 {
3876 	struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3877 
3878 	flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3879 				 ice_rep_indr_tc_block_unbind);
3880 }
3881 
3882 /**
3883  * ice_tc_indir_block_register - Register TC indirect block notifications
3884  * @vsi: VSI struct which has the netdev
3885  *
3886  * Returns 0 on success, negative value on failure
3887  */
ice_tc_indir_block_register(struct ice_vsi * vsi)3888 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3889 {
3890 	struct ice_netdev_priv *np;
3891 
3892 	if (!vsi || !vsi->netdev)
3893 		return -EINVAL;
3894 
3895 	np = netdev_priv(vsi->netdev);
3896 
3897 	INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3898 	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3899 }
3900 
3901 /**
3902  * ice_get_avail_q_count - Get count of available (unused) queues
3903  * @pf_qmap: bitmap to get queue use count from
3904  * @lock: pointer to a mutex that protects access to pf_qmap
3905  * @size: size of the bitmap
3906  */
3907 static u16
ice_get_avail_q_count(unsigned long * pf_qmap,struct mutex * lock,u16 size)3908 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3909 {
3910 	unsigned long bit;
3911 	u16 count = 0;
3912 
3913 	mutex_lock(lock);
3914 	for_each_clear_bit(bit, pf_qmap, size)
3915 		count++;
3916 	mutex_unlock(lock);
3917 
3918 	return count;
3919 }
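
/* Equivalent formulation (sketch): since the available queues are the
 * clear bits of the map, the loop above could also be written in terms
 * of bitmap_weight(), which counts the set bits:
 *
 *	count = size - bitmap_weight(pf_qmap, size);
 */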
3920 
3921 /**
3922  * ice_get_avail_txq_count - Get count of available Tx queues
3923  * @pf: pointer to an ice_pf instance
3924  */
ice_get_avail_txq_count(struct ice_pf * pf)3925 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3926 {
3927 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3928 				     pf->max_pf_txqs);
3929 }
3930 
3931 /**
3932  * ice_get_avail_rxq_count - Get count of available Rx queues
3933  * @pf: pointer to an ice_pf instance
3934  */
ice_get_avail_rxq_count(struct ice_pf * pf)3935 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3936 {
3937 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3938 				     pf->max_pf_rxqs);
3939 }
3940 
3941 /**
3942  * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3943  * @pf: board private structure to initialize
3944  */
ice_deinit_pf(struct ice_pf * pf)3945 void ice_deinit_pf(struct ice_pf *pf)
3946 {
3947 	/* note that we also unroll on ice_init_pf() failure here */
3948 
3949 	mutex_destroy(&pf->lag_mutex);
3950 	mutex_destroy(&pf->adev_mutex);
3951 	mutex_destroy(&pf->sw_mutex);
3952 	mutex_destroy(&pf->tc_mutex);
3953 	mutex_destroy(&pf->avail_q_mutex);
3954 	mutex_destroy(&pf->vfs.table_lock);
3955 
3956 	if (pf->avail_txqs) {
3957 		bitmap_free(pf->avail_txqs);
3958 		pf->avail_txqs = NULL;
3959 	}
3960 
3961 	if (pf->avail_rxqs) {
3962 		bitmap_free(pf->avail_rxqs);
3963 		pf->avail_rxqs = NULL;
3964 	}
3965 
3966 	if (pf->txtime_txqs) {
3967 		bitmap_free(pf->txtime_txqs);
3968 		pf->txtime_txqs = NULL;
3969 	}
3970 
3971 	if (pf->ptp.clock)
3972 		ptp_clock_unregister(pf->ptp.clock);
3973 
3974 	if (!xa_empty(&pf->irq_tracker.entries))
3975 		ice_free_irq_msix_misc(pf);
3976 
3977 	xa_destroy(&pf->dyn_ports);
3978 	xa_destroy(&pf->sf_nums);
3979 }
3980 
3981 /**
3982  * ice_set_pf_caps - set PFs capability flags
3983  * @pf: pointer to the PF instance
3984  */
ice_set_pf_caps(struct ice_pf * pf)3985 static void ice_set_pf_caps(struct ice_pf *pf)
3986 {
3987 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3988 
3989 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3990 	if (func_caps->common_cap.rdma)
3991 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3992 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3993 	if (func_caps->common_cap.dcb)
3994 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3995 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3996 	if (func_caps->common_cap.sr_iov_1_1) {
3997 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3998 		pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3999 					      ICE_MAX_SRIOV_VFS);
4000 	}
4001 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
4002 	if (func_caps->common_cap.rss_table_size)
4003 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
4004 
4005 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
4006 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
4007 		u16 unused;
4008 
4009 		/* ctrl_vsi_idx will be set to a valid value when flow director
4010 		 * is setup by ice_init_fdir
4011 		 */
4012 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4013 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
4014 		/* force guaranteed filter pool for PF */
4015 		ice_alloc_fd_guar_item(&pf->hw, &unused,
4016 				       func_caps->fd_fltr_guar);
4017 		/* force shared filter pool for PF */
4018 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
4019 				       func_caps->fd_fltr_best_effort);
4020 	}
4021 
4022 	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
4023 	if (func_caps->common_cap.ieee_1588)
4024 		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
4025 
4026 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
4027 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
4028 }
4029 
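
/**
 * ice_start_service_task - set up the PF service task and its timer
 * @pf: board private structure
 */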
ice_start_service_task(struct ice_pf * pf)4030 void ice_start_service_task(struct ice_pf *pf)
4031 {
4032 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
4033 	pf->serv_tmr_period = HZ;
4034 	INIT_WORK(&pf->serv_task, ice_service_task);
4035 	clear_bit(ICE_SERVICE_SCHED, pf->state);
4036 }
4037 
4038 /**
4039  * ice_init_pf - Initialize general software structures (struct ice_pf)
4040  * @pf: board private structure to initialize
4041  * Return: 0 on success, negative errno otherwise.
4042  */
ice_init_pf(struct ice_pf * pf)4043 int ice_init_pf(struct ice_pf *pf)
4044 {
4045 	struct udp_tunnel_nic_info *udp_tunnel_nic = &pf->hw.udp_tunnel_nic;
4046 	struct device *dev = ice_pf_to_dev(pf);
4047 	struct ice_hw *hw = &pf->hw;
4048 	int err = -ENOMEM;
4049 
4050 	mutex_init(&pf->sw_mutex);
4051 	mutex_init(&pf->tc_mutex);
4052 	mutex_init(&pf->adev_mutex);
4053 	mutex_init(&pf->lag_mutex);
4054 
4055 	INIT_HLIST_HEAD(&pf->aq_wait_list);
4056 	spin_lock_init(&pf->aq_wait_lock);
4057 	init_waitqueue_head(&pf->aq_wait_queue);
4058 
4059 	init_waitqueue_head(&pf->reset_wait_queue);
4060 
4061 	mutex_init(&pf->avail_q_mutex);
4062 
4063 	mutex_init(&pf->vfs.table_lock);
4064 	hash_init(pf->vfs.table);
4065 	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
4066 		wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH,
4067 		     ICE_MBX_OVERFLOW_WATERMARK);
4068 	else
4069 		ice_mbx_init_snapshot(&pf->hw);
4070 
4071 	xa_init(&pf->dyn_ports);
4072 	xa_init(&pf->sf_nums);
4073 
4074 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
4075 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
4076 	pf->txtime_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
4077 	if (!pf->avail_txqs || !pf->avail_rxqs || !pf->txtime_txqs)
4078 		goto undo_init;
4079 
4080 	udp_tunnel_nic->set_port = ice_udp_tunnel_set_port;
4081 	udp_tunnel_nic->unset_port = ice_udp_tunnel_unset_port;
4082 	udp_tunnel_nic->shared = &hw->udp_tunnel_shared;
4083 	udp_tunnel_nic->tables[0].n_entries = hw->tnl.valid_count[TNL_VXLAN];
4084 	udp_tunnel_nic->tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
4085 	udp_tunnel_nic->tables[1].n_entries = hw->tnl.valid_count[TNL_GENEVE];
4086 	udp_tunnel_nic->tables[1].tunnel_types = UDP_TUNNEL_TYPE_GENEVE;
4087 
4088 	/* In case of MSIX we are going to set up the misc vector right here
4089 	 * to handle admin queue events etc. In case of legacy and MSI,
4090 	 * the misc functionality and queue processing are combined in
4091 	 * the same vector, which gets set up at open.
4092 	 */
4093 	err = ice_req_irq_msix_misc(pf);
4094 	if (err) {
4095 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4096 		goto undo_init;
4097 	}
4098 
4099 	return 0;
4100 undo_init:
4101 	/* deinit handles half-initialized pf just fine */
4102 	ice_deinit_pf(pf);
4103 	return err;
4104 }
4105 
4106 /**
4107  * ice_is_wol_supported - check if WoL is supported
4108  * @hw: pointer to hardware info
4109  *
4110  * Check if WoL is supported based on the HW configuration.
4111  * Returns true if the NVM supports and enables WoL for this port, false otherwise.
4112  */
ice_is_wol_supported(struct ice_hw * hw)4113 bool ice_is_wol_supported(struct ice_hw *hw)
4114 {
4115 	u16 wol_ctrl;
4116 
4117 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
4118 	 * word) indicates WoL is not supported on the corresponding PF ID.
4119 	 */
4120 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
4121 		return false;
4122 
4123 	return !(BIT(hw->port_info->lport) & wol_ctrl);
4124 }
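
/* Example (illustrative value): if the NVM WoL control word reads
 * 0x0004, only bit 2 is set, so ice_is_wol_supported() returns false
 * for lport 2 and true for every other port of the device.
 */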
4125 
4126 /**
4127  * ice_vsi_recfg_qs - Change the number of queues on a VSI
4128  * @vsi: VSI being changed
4129  * @new_rx: new number of Rx queues
4130  * @new_tx: new number of Tx queues
4131  * @locked: is adev device_lock held
4132  *
4133  * Only change the number of queues if new_tx or new_rx is non-zero.
4134  *
4135  * Returns 0 on success.
4136  */
ice_vsi_recfg_qs(struct ice_vsi * vsi,int new_rx,int new_tx,bool locked)4137 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
4138 {
4139 	struct ice_pf *pf = vsi->back;
4140 	int i, err = 0, timeout = 50;
4141 
4142 	if (!new_rx && !new_tx)
4143 		return -EINVAL;
4144 
4145 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
4146 		timeout--;
4147 		if (!timeout)
4148 			return -EBUSY;
4149 		usleep_range(1000, 2000);
4150 	}
4151 
4152 	if (new_tx)
4153 		vsi->req_txq = (u16)new_tx;
4154 	if (new_rx)
4155 		vsi->req_rxq = (u16)new_rx;
4156 
4157 	/* set for the next time the netdev is started */
4158 	if (!netif_running(vsi->netdev)) {
4159 		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4160 		if (err)
4161 			goto rebuild_err;
4162 		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
4163 		goto done;
4164 	}
4165 
4166 	ice_vsi_close(vsi);
4167 	err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
4168 	if (err)
4169 		goto rebuild_err;
4170 
4171 	ice_for_each_traffic_class(i) {
4172 		if (vsi->tc_cfg.ena_tc & BIT(i))
4173 			netdev_set_tc_queue(vsi->netdev,
4174 					    vsi->tc_cfg.tc_info[i].netdev_tc,
4175 					    vsi->tc_cfg.tc_info[i].qcount_tx,
4176 					    vsi->tc_cfg.tc_info[i].qoffset);
4177 	}
4178 	ice_pf_dcb_recfg(pf, locked);
4179 	ice_vsi_open(vsi);
4180 	goto done;
4181 
4182 rebuild_err:
4183 	dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n",
4184 		err);
4185 done:
4186 	clear_bit(ICE_CFG_BUSY, pf->state);
4187 	return err;
4188 }
4189 
4190 /**
4191  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4192  * @pf: PF to configure
4193  *
4194  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4195  * VSI can still Tx/Rx VLAN tagged packets.
4196  */
ice_set_safe_mode_vlan_cfg(struct ice_pf * pf)4197 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4198 {
4199 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4200 	struct ice_vsi_ctx *ctxt;
4201 	struct ice_hw *hw;
4202 	int status;
4203 
4204 	if (!vsi)
4205 		return;
4206 
4207 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4208 	if (!ctxt)
4209 		return;
4210 
4211 	hw = &pf->hw;
4212 	ctxt->info = vsi->info;
4213 
4214 	ctxt->info.valid_sections =
4215 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4216 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
4217 			    ICE_AQ_VSI_PROP_SW_VALID);
4218 
4219 	/* disable VLAN anti-spoof */
4220 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4221 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4222 
4223 	/* disable VLAN pruning and keep all other settings */
4224 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4225 
4226 	/* allow all VLANs on Tx and don't strip on Rx */
4227 	ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4228 		ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4229 
4230 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4231 	if (status) {
4232 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4233 			status, libie_aq_str(hw->adminq.sq_last_status));
4234 	} else {
4235 		vsi->info.sec_flags = ctxt->info.sec_flags;
4236 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4237 		vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4238 	}
4239 
4240 	kfree(ctxt);
4241 }
4242 
4243 /**
4244  * ice_log_pkg_init - log result of DDP package load
4245  * @hw: pointer to hardware info
4246  * @state: state of package load
4247  */
ice_log_pkg_init(struct ice_hw * hw,enum ice_ddp_state state)4248 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4249 {
4250 	struct ice_pf *pf = hw->back;
4251 	struct device *dev;
4252 
4253 	dev = ice_pf_to_dev(pf);
4254 
4255 	switch (state) {
4256 	case ICE_DDP_PKG_SUCCESS:
4257 		dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4258 			 hw->active_pkg_name,
4259 			 hw->active_pkg_ver.major,
4260 			 hw->active_pkg_ver.minor,
4261 			 hw->active_pkg_ver.update,
4262 			 hw->active_pkg_ver.draft);
4263 		break;
4264 	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4265 		dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4266 			 hw->active_pkg_name,
4267 			 hw->active_pkg_ver.major,
4268 			 hw->active_pkg_ver.minor,
4269 			 hw->active_pkg_ver.update,
4270 			 hw->active_pkg_ver.draft);
4271 		break;
4272 	case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4273 		dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
4274 			hw->active_pkg_name,
4275 			hw->active_pkg_ver.major,
4276 			hw->active_pkg_ver.minor,
4277 			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4278 		break;
4279 	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4280 		dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4281 			 hw->active_pkg_name,
4282 			 hw->active_pkg_ver.major,
4283 			 hw->active_pkg_ver.minor,
4284 			 hw->active_pkg_ver.update,
4285 			 hw->active_pkg_ver.draft,
4286 			 hw->pkg_name,
4287 			 hw->pkg_ver.major,
4288 			 hw->pkg_ver.minor,
4289 			 hw->pkg_ver.update,
4290 			 hw->pkg_ver.draft);
4291 		break;
4292 	case ICE_DDP_PKG_FW_MISMATCH:
4293 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
4294 		break;
4295 	case ICE_DDP_PKG_INVALID_FILE:
4296 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4297 		break;
4298 	case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4299 		dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
4300 		break;
4301 	case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4302 		dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
4303 			ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4304 		break;
4305 	case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4306 		dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
4307 		break;
4308 	case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4309 		dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
4310 		break;
4311 	case ICE_DDP_PKG_LOAD_ERROR:
4312 		dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
4313 		/* poll for reset to complete */
4314 		if (ice_check_reset(hw))
4315 			dev_err(dev, "Error resetting device. Please reload the driver\n");
4316 		break;
4317 	case ICE_DDP_PKG_ERR:
4318 	default:
4319 		dev_err(dev, "An unknown error occurred when loading the DDP package.  Entering Safe Mode.\n");
4320 		break;
4321 	}
4322 }
4323 
4324 /**
4325  * ice_load_pkg - load/reload the DDP Package file
4326  * @firmware: firmware structure when firmware requested or NULL for reload
4327  * @pf: pointer to the PF instance
4328  *
4329  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4330  * initialize HW tables.
4331  */
4332 static void
ice_load_pkg(const struct firmware * firmware,struct ice_pf * pf)4333 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4334 {
4335 	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4336 	struct device *dev = ice_pf_to_dev(pf);
4337 	struct ice_hw *hw = &pf->hw;
4338 
4339 	/* Load DDP Package */
4340 	if (firmware && !hw->pkg_copy) {
4341 		state = ice_copy_and_init_pkg(hw, firmware->data,
4342 					      firmware->size);
4343 		ice_log_pkg_init(hw, state);
4344 	} else if (!firmware && hw->pkg_copy) {
4345 		/* Reload package during rebuild after CORER/GLOBR reset */
4346 		state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4347 		ice_log_pkg_init(hw, state);
4348 	} else {
4349 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4350 	}
4351 
4352 	if (!ice_is_init_pkg_successful(state)) {
4353 		/* Safe Mode */
4354 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4355 		return;
4356 	}
4357 
4358 	/* A successful package download is the precondition for advanced
4359 	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4360 	 */
4361 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4362 }
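
/* Summary of the cases handled above (sketch):
 *
 *	firmware && !hw->pkg_copy  ->  first load during probe
 *	!firmware && hw->pkg_copy  ->  reload after CORER/GLOBR reset
 *	anything else              ->  no package available, Safe Mode
 */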
4363 
4364 /**
4365  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4366  * @pf: pointer to the PF structure
4367  *
4368  * There is no error returned here because the driver should be able to handle
4369  * 128 Byte cache lines, so we only print a warning in case issues are seen,
4370  * specifically with Tx.
4371  */
ice_verify_cacheline_size(struct ice_pf * pf)4372 static void ice_verify_cacheline_size(struct ice_pf *pf)
4373 {
4374 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4375 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4376 			 ICE_CACHE_LINE_BYTES);
4377 }
4378 
4379 /**
4380  * ice_send_version - update firmware with driver version
4381  * @pf: PF struct
4382  *
4383  * Returns 0 on success, else error code
4384  */
ice_send_version(struct ice_pf * pf)4385 static int ice_send_version(struct ice_pf *pf)
4386 {
4387 	struct ice_driver_ver dv;
4388 
4389 	dv.major_ver = 0xff;
4390 	dv.minor_ver = 0xff;
4391 	dv.build_ver = 0xff;
4392 	dv.subbuild_ver = 0;
4393 	strscpy((char *)dv.driver_string, UTS_RELEASE,
4394 		sizeof(dv.driver_string));
4395 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4396 }
4397 
4398 /**
4399  * ice_init_fdir - Initialize flow director VSI and configuration
4400  * @pf: pointer to the PF instance
4401  *
4402  * returns 0 on success, negative on error
4403  */
ice_init_fdir(struct ice_pf * pf)4404 static int ice_init_fdir(struct ice_pf *pf)
4405 {
4406 	struct device *dev = ice_pf_to_dev(pf);
4407 	struct ice_vsi *ctrl_vsi;
4408 	int err;
4409 
4410 	/* Side Band Flow Director needs to have a control VSI.
4411 	 * Allocate it and store it in the PF.
4412 	 */
4413 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4414 	if (!ctrl_vsi) {
4415 		dev_dbg(dev, "could not create control VSI\n");
4416 		return -ENOMEM;
4417 	}
4418 
4419 	err = ice_vsi_open_ctrl(ctrl_vsi);
4420 	if (err) {
4421 		dev_dbg(dev, "could not open control VSI\n");
4422 		goto err_vsi_open;
4423 	}
4424 
4425 	mutex_init(&pf->hw.fdir_fltr_lock);
4426 
4427 	err = ice_fdir_create_dflt_rules(pf);
4428 	if (err)
4429 		goto err_fdir_rule;
4430 
4431 	return 0;
4432 
4433 err_fdir_rule:
4434 	ice_fdir_release_flows(&pf->hw);
4435 	ice_vsi_close(ctrl_vsi);
4436 err_vsi_open:
4437 	ice_vsi_release(ctrl_vsi);
4438 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4439 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4440 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4441 	}
4442 	return err;
4443 }
4444 
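
/**
 * ice_deinit_fdir - release flow director resources
 * @pf: pointer to the PF instance
 */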
ice_deinit_fdir(struct ice_pf * pf)4445 static void ice_deinit_fdir(struct ice_pf *pf)
4446 {
4447 	struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
4448 
4449 	if (!vsi)
4450 		return;
4451 
4452 	ice_vsi_manage_fdir(vsi, false);
4453 	ice_vsi_release(vsi);
4454 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4455 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4456 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4457 	}
4458 
4459 	mutex_destroy(&pf->hw.fdir_fltr_lock);
4460 }
4461 
4462 /**
4463  * ice_get_opt_fw_name - return optional firmware file name or NULL
4464  * @pf: pointer to the PF instance
4465  */
ice_get_opt_fw_name(struct ice_pf * pf)4466 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4467 {
4468 	/* Optional firmware name is the same as the default, with an additional
4469 	 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
4470 	 */
4471 	struct pci_dev *pdev = pf->pdev;
4472 	char *opt_fw_filename;
4473 	u64 dsn;
4474 
4475 	/* Determine the name of the optional file using the DSN (two
4476 	 * dwords following the start of the DSN Capability).
4477 	 */
4478 	dsn = pci_get_dsn(pdev);
4479 	if (!dsn)
4480 		return NULL;
4481 
4482 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4483 	if (!opt_fw_filename)
4484 		return NULL;
4485 
4486 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4487 		 ICE_DDP_PKG_PATH, dsn);
4488 
4489 	return opt_fw_filename;
4490 }
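
/* Example (hypothetical serial number): a device whose DSN is
 * 0x1234567890abcdef would look for
 *
 *	intel/ice/ddp/ice-1234567890abcdef.pkg
 *
 * in the firmware search paths before falling back to the default
 * package file.
 */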
4491 
4492 /**
4493  * ice_request_fw - Device initialization routine
4494  * @pf: pointer to the PF instance
4495  * @firmware: double pointer to firmware struct
4496  *
4497  * Return: zero when successful, negative values otherwise.
4498  */
ice_request_fw(struct ice_pf * pf,const struct firmware ** firmware)4499 static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware)
4500 {
4501 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4502 	struct device *dev = ice_pf_to_dev(pf);
4503 	int err = 0;
4504 
4505 	/* An optional device-specific DDP (if present) overrides the default
4506 	 * DDP package file. The kernel logs a debug message if the file doesn't
4507 	 * exist, and warning messages for other errors.
4508 	 */
4509 	if (opt_fw_filename) {
4510 		err = firmware_request_nowarn(firmware, opt_fw_filename, dev);
4511 		kfree(opt_fw_filename);
4512 		if (!err)
4513 			return err;
4514 	}
4515 	err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev);
4516 	if (err)
4517 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4518 
4519 	return err;
4520 }
4521 
4522 /**
4523  * ice_init_tx_topology - performs Tx topology initialization
4524  * @hw: pointer to the hardware structure
4525  * @firmware: pointer to firmware structure
4526  *
4527  * Return: zero when init was successful, negative values otherwise.
4528  */
4529 static int
ice_init_tx_topology(struct ice_hw * hw,const struct firmware * firmware)4530 ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
4531 {
4532 	u8 num_tx_sched_layers = hw->num_tx_sched_layers;
4533 	struct ice_pf *pf = hw->back;
4534 	struct device *dev;
4535 	int err;
4536 
4537 	dev = ice_pf_to_dev(pf);
4538 	err = ice_cfg_tx_topo(hw, firmware->data, firmware->size);
4539 	if (!err) {
4540 		if (hw->num_tx_sched_layers > num_tx_sched_layers)
4541 			dev_info(dev, "Tx scheduling layers switching feature disabled\n");
4542 		else
4543 			dev_info(dev, "Tx scheduling layers switching feature enabled\n");
4544 		return 0;
4545 	} else if (err == -ENODEV) {
4546 		/* If we failed to re-initialize the device, we can no longer
4547 		 * continue loading.
4548 		 */
4549 		dev_warn(dev, "Failed to initialize hardware after applying Tx scheduling configuration.\n");
4550 		return err;
4551 	} else if (err == -EIO) {
4552 		dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
4553 		return 0;
4554 	} else if (err == -EEXIST) {
4555 		return 0;
4556 	}
4557 
4558 	/* Do not treat this as a fatal error. */
4559 	dev_info(dev, "Failed to apply Tx scheduling configuration, err %pe\n",
4560 		 ERR_PTR(err));
4561 	return 0;
4562 }
4563 
4564 /**
4565  * ice_init_supported_rxdids - Initialize supported Rx descriptor IDs
4566  * @hw: pointer to the hardware structure
4567  * @pf: pointer to pf structure
4568  *
4569  * The pf->supported_rxdids bitmap is used to indicate to VFs which descriptor
4570  * formats the PF hardware supports. The exact list of supported RXDIDs
4571  * depends on the loaded DDP package. The IDs can be determined by reading the
4572  * GLFLXP_RXDID_FLAGS register after the DDP package is loaded.
4573  *
4574  * Note that the legacy 32-byte RXDID 1 is always supported but is not listed
4575  * in the DDP package. The 16-byte legacy descriptor is never supported by
4576  * VFs.
4577  */
ice_init_supported_rxdids(struct ice_hw * hw,struct ice_pf * pf)4578 static void ice_init_supported_rxdids(struct ice_hw *hw, struct ice_pf *pf)
4579 {
4580 	pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1);
4581 
4582 	for (int i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
4583 		u32 regval;
4584 
4585 		regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
4586 		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
4587 			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
4588 			pf->supported_rxdids |= BIT(i);
4589 	}
4590 }
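
/* Usage sketch (hypothetical helper, not part of the driver): a caller
 * answering a VF capability query could test a descriptor ID against
 * the bitmap initialized above:
 *
 *	static bool ice_rxdid_supported(struct ice_pf *pf, u32 rxdid)
 *	{
 *		return rxdid < BITS_PER_TYPE(pf->supported_rxdids) &&
 *		       (pf->supported_rxdids & BIT(rxdid));
 *	}
 */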
4591 
4592 /**
4593  * ice_init_ddp_config - DDP related configuration
4594  * @hw: pointer to the hardware structure
4595  * @pf: pointer to pf structure
4596  *
4597  * This function loads the DDP file from disk, then initializes the Tx
4598  * topology. At the end, the DDP package is loaded onto the card.
4599  *
4600  * Return: zero when init was successful, negative values otherwise.
4601  */
ice_init_ddp_config(struct ice_hw * hw,struct ice_pf * pf)4602 static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
4603 {
4604 	struct device *dev = ice_pf_to_dev(pf);
4605 	const struct firmware *firmware = NULL;
4606 	int err;
4607 
4608 	err = ice_request_fw(pf, &firmware);
4609 	if (err) {
4610 		dev_err(dev, "Failed to request FW: %d\n", err);
4611 		return err;
4612 	}
4613 
4614 	err = ice_init_tx_topology(hw, firmware);
4615 	if (err) {
4616 		dev_err(dev, "Failed to initialize Tx topology: %d\n",
4617 			err);
4618 		release_firmware(firmware);
4619 		return err;
4620 	}
4621 
4622 	/* Download firmware to device */
4623 	ice_load_pkg(firmware, pf);
4624 	release_firmware(firmware);
4625 
4626 	/* Initialize the supported Rx descriptor IDs after loading DDP */
4627 	ice_init_supported_rxdids(hw, pf);
4628 
4629 	return 0;
4630 }
4631 
4632 /**
4633  * ice_print_wake_reason - show the wake up cause in the log
4634  * @pf: pointer to the PF struct
4635  */
ice_print_wake_reason(struct ice_pf * pf)4636 static void ice_print_wake_reason(struct ice_pf *pf)
4637 {
4638 	u32 wus = pf->wakeup_reason;
4639 	const char *wake_str;
4640 
4641 	/* if no wake event, nothing to print */
4642 	if (!wus)
4643 		return;
4644 
4645 	if (wus & PFPM_WUS_LNKC_M)
4646 		wake_str = "Link\n";
4647 	else if (wus & PFPM_WUS_MAG_M)
4648 		wake_str = "Magic Packet\n";
4649 	else if (wus & PFPM_WUS_MNG_M)
4650 		wake_str = "Management\n";
4651 	else if (wus & PFPM_WUS_FW_RST_WK_M)
4652 		wake_str = "Firmware Reset\n";
4653 	else
4654 		wake_str = "Unknown\n";
4655 
4656 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4657 }
4658 
4659 /**
4660  * ice_register_netdev - register netdev
4661  * @vsi: pointer to the VSI struct
4662  */
ice_register_netdev(struct ice_vsi * vsi)4663 static int ice_register_netdev(struct ice_vsi *vsi)
4664 {
4665 	int err;
4666 
4667 	if (!vsi || !vsi->netdev)
4668 		return -EIO;
4669 
4670 	err = register_netdev(vsi->netdev);
4671 	if (err)
4672 		return err;
4673 
4674 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4675 	netif_carrier_off(vsi->netdev);
4676 	netif_tx_stop_all_queues(vsi->netdev);
4677 
4678 	return 0;
4679 }
4680 
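
/**
 * ice_unregister_netdev - unregister the netdev of a VSI
 * @vsi: pointer to the VSI struct
 */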
ice_unregister_netdev(struct ice_vsi * vsi)4681 static void ice_unregister_netdev(struct ice_vsi *vsi)
4682 {
4683 	if (!vsi || !vsi->netdev)
4684 		return;
4685 
4686 	unregister_netdev(vsi->netdev);
4687 	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4688 }
4689 
4690 /**
4691  * ice_cfg_netdev - Allocate and configure a netdev
4692  * @vsi: the VSI associated with the new netdev
4693  *
4694  * Returns 0 on success, negative value on failure
4695  */
ice_cfg_netdev(struct ice_vsi * vsi)4696 static int ice_cfg_netdev(struct ice_vsi *vsi)
4697 {
4698 	struct ice_netdev_priv *np;
4699 	struct net_device *netdev;
4700 	u8 mac_addr[ETH_ALEN];
4701 
4702 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
4703 				    vsi->alloc_rxq);
4704 	if (!netdev)
4705 		return -ENOMEM;
4706 
4707 	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4708 	vsi->netdev = netdev;
4709 	np = netdev_priv(netdev);
4710 	np->vsi = vsi;
4711 
4712 	ice_set_netdev_features(netdev);
4713 	ice_set_ops(vsi);
4714 
4715 	if (vsi->type == ICE_VSI_PF) {
4716 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
4717 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4718 		eth_hw_addr_set(netdev, mac_addr);
4719 	}
4720 
4721 	netdev->priv_flags |= IFF_UNICAST_FLT;
4722 
4723 	/* Setup netdev TC information */
4724 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
4725 
4726 	netdev->max_mtu = ICE_MAX_MTU;
4727 
4728 	return 0;
4729 }
4730 
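
/**
 * ice_decfg_netdev - free the netdev allocated by ice_cfg_netdev
 * @vsi: the VSI whose netdev is being freed
 */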
ice_decfg_netdev(struct ice_vsi * vsi)4731 static void ice_decfg_netdev(struct ice_vsi *vsi)
4732 {
4733 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4734 	free_netdev(vsi->netdev);
4735 	vsi->netdev = NULL;
4736 }
4737 
ice_init_dev_hw(struct ice_pf * pf)4738 void ice_init_dev_hw(struct ice_pf *pf)
4739 {
4740 	struct ice_hw *hw = &pf->hw;
4741 	int err;
4742 
4743 	ice_init_feature_support(pf);
4744 
4745 	err = ice_init_ddp_config(hw, pf);
4746 
4747 	/* if ice_init_ddp_config fails, the ICE_FLAG_ADV_FEATURES bit won't be
4748 	 * set in pf->flags, which will cause ice_is_safe_mode to return
4749 	 * true
4750 	 */
4751 	if (err || ice_is_safe_mode(pf)) {
4752 		/* we already got function/device capabilities but these don't
4753 		 * reflect what the driver needs to do in safe mode. Instead of
4754 		 * adding conditional logic everywhere to ignore these
4755 		 * device/function capabilities, override them.
4756 		 */
4757 		ice_set_safe_mode_caps(hw);
4758 	}
4759 }
4760 
ice_init_dev(struct ice_pf * pf)4761 int ice_init_dev(struct ice_pf *pf)
4762 {
4763 	struct device *dev = ice_pf_to_dev(pf);
4764 	int err;
4765 
4766 	ice_set_pf_caps(pf);
4767 	err = ice_init_interrupt_scheme(pf);
4768 	if (err) {
4769 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4770 		return -EIO;
4771 	}
4772 
4773 	ice_start_service_task(pf);
4774 
4775 	return 0;
4776 }
4777 
ice_deinit_dev(struct ice_pf * pf)4778 void ice_deinit_dev(struct ice_pf *pf)
4779 {
4780 	ice_service_task_stop(pf);
4781 
4782 	/* Service task is already stopped, so call reset directly. */
4783 	ice_reset(&pf->hw, ICE_RESET_PFR);
4784 	pci_wait_for_pending_transaction(pf->pdev);
4785 	ice_clear_interrupt_scheme(pf);
4786 }
4787 
ice_init_features(struct ice_pf * pf)4788 static void ice_init_features(struct ice_pf *pf)
4789 {
4790 	struct device *dev = ice_pf_to_dev(pf);
4791 
4792 	if (ice_is_safe_mode(pf))
4793 		return;
4794 
4795 	/* initialize DDP driven features */
4796 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4797 		ice_ptp_init(pf);
4798 
4799 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4800 		ice_gnss_init(pf);
4801 
4802 	if (ice_is_feature_supported(pf, ICE_F_CGU) ||
4803 	    ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
4804 		ice_dpll_init(pf);
4805 
4806 	/* Note: Flow director init failure is non-fatal to load */
4807 	if (ice_init_fdir(pf))
4808 		dev_err(dev, "could not initialize flow director\n");
4809 
4810 	/* Note: DCB init failure is non-fatal to load */
4811 	if (ice_init_pf_dcb(pf, false)) {
4812 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4813 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4814 	} else {
4815 		ice_cfg_lldp_mib_change(&pf->hw, true);
4816 	}
4817 
4818 	if (ice_init_lag(pf))
4819 		dev_warn(dev, "Failed to init link aggregation support\n");
4820 
4821 	ice_hwmon_init(pf);
4822 }
4823 
ice_deinit_features(struct ice_pf * pf)4824 static void ice_deinit_features(struct ice_pf *pf)
4825 {
4826 	if (ice_is_safe_mode(pf))
4827 		return;
4828 
4829 	ice_deinit_lag(pf);
4830 	if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
4831 		ice_cfg_lldp_mib_change(&pf->hw, false);
4832 	ice_deinit_fdir(pf);
4833 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
4834 		ice_gnss_exit(pf);
4835 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4836 		ice_ptp_release(pf);
4837 	if (test_bit(ICE_FLAG_DPLL, pf->flags))
4838 		ice_dpll_deinit(pf);
4839 	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
4840 		xa_destroy(&pf->eswitch.reprs);
4841 	ice_hwmon_exit(pf);
4842 }
4843 
ice_init_wakeup(struct ice_pf * pf)4844 static void ice_init_wakeup(struct ice_pf *pf)
4845 {
4846 	/* Save wakeup reason register for later use */
4847 	pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
4848 
4849 	/* check for a power management event */
4850 	ice_print_wake_reason(pf);
4851 
4852 	/* clear wake status, all bits */
4853 	wr32(&pf->hw, PFPM_WUS, U32_MAX);
4854 
4855 	/* Disable WoL at init, wait for user to enable */
4856 	device_set_wakeup_enable(ice_pf_to_dev(pf), false);
4857 }
4858 
ice_init_link(struct ice_pf * pf)4859 static int ice_init_link(struct ice_pf *pf)
4860 {
4861 	struct device *dev = ice_pf_to_dev(pf);
4862 	int err;
4863 
4864 	err = ice_init_link_events(pf->hw.port_info);
4865 	if (err) {
4866 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4867 		return err;
4868 	}
4869 
4870 	/* not a fatal error if this fails */
4871 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4872 	if (err)
4873 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4874 
4875 	/* not a fatal error if this fails */
4876 	err = ice_update_link_info(pf->hw.port_info);
4877 	if (err)
4878 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4879 
4880 	ice_init_link_dflt_override(pf->hw.port_info);
4881 
4882 	ice_check_link_cfg_err(pf,
4883 			       pf->hw.port_info->phy.link_info.link_cfg_err);
4884 
4885 	/* if media available, initialize PHY settings */
4886 	if (pf->hw.port_info->phy.link_info.link_info &
4887 	    ICE_AQ_MEDIA_AVAILABLE) {
4888 		/* not a fatal error if this fails */
4889 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4890 		if (err)
4891 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4892 
4893 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4894 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4895 
4896 			if (vsi)
4897 				ice_configure_phy(vsi);
4898 		}
4899 	} else {
4900 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4901 	}
4902 
4903 	return err;
4904 }
4905 
ice_init_pf_sw(struct ice_pf * pf)4906 static int ice_init_pf_sw(struct ice_pf *pf)
4907 {
4908 	bool dvm = ice_is_dvm_ena(&pf->hw);
4909 	struct ice_vsi *vsi;
4910 	int err;
4911 
4912 	/* create switch struct for the switch element created by FW on boot */
4913 	pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
4914 	if (!pf->first_sw)
4915 		return -ENOMEM;
4916 
4917 	if (pf->hw.evb_veb)
4918 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4919 	else
4920 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4921 
4922 	pf->first_sw->pf = pf;
4923 
4924 	/* record the sw_id available for later use */
4925 	pf->first_sw->sw_id = pf->hw.port_info->sw_id;
4926 
4927 	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
4928 	if (err)
4929 		goto err_aq_set_port_params;
4930 
4931 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
4932 	if (!vsi) {
4933 		err = -ENOMEM;
4934 		goto err_pf_vsi_setup;
4935 	}
4936 
4937 	return 0;
4938 
4939 err_pf_vsi_setup:
4940 err_aq_set_port_params:
4941 	kfree(pf->first_sw);
4942 	return err;
4943 }
4944 
ice_deinit_pf_sw(struct ice_pf * pf)4945 static void ice_deinit_pf_sw(struct ice_pf *pf)
4946 {
4947 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
4948 
4949 	if (!vsi)
4950 		return;
4951 
4952 	ice_vsi_release(vsi);
4953 	kfree(pf->first_sw);
4954 }
4955 
ice_alloc_vsis(struct ice_pf * pf)4956 static int ice_alloc_vsis(struct ice_pf *pf)
4957 {
4958 	struct device *dev = ice_pf_to_dev(pf);
4959 
4960 	pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
4961 	if (!pf->num_alloc_vsi)
4962 		return -EIO;
4963 
4964 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4965 		dev_warn(dev,
4966 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4967 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4968 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4969 	}
4970 
4971 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4972 			       GFP_KERNEL);
4973 	if (!pf->vsi)
4974 		return -ENOMEM;
4975 
4976 	pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
4977 				     sizeof(*pf->vsi_stats), GFP_KERNEL);
4978 	if (!pf->vsi_stats) {
4979 		devm_kfree(dev, pf->vsi);
4980 		return -ENOMEM;
4981 	}
4982 
4983 	return 0;
4984 }
4985 
ice_dealloc_vsis(struct ice_pf * pf)4986 static void ice_dealloc_vsis(struct ice_pf *pf)
4987 {
4988 	devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
4989 	pf->vsi_stats = NULL;
4990 
4991 	pf->num_alloc_vsi = 0;
4992 	devm_kfree(ice_pf_to_dev(pf), pf->vsi);
4993 	pf->vsi = NULL;
4994 }
4995 
ice_init_devlink(struct ice_pf * pf)4996 static int ice_init_devlink(struct ice_pf *pf)
4997 {
4998 	int err;
4999 
5000 	err = ice_devlink_register_params(pf);
5001 	if (err)
5002 		return err;
5003 
5004 	ice_devlink_init_regions(pf);
5005 	ice_devlink_register(pf);
5006 	ice_health_init(pf);
5007 
5008 	return 0;
5009 }
5010 
ice_deinit_devlink(struct ice_pf * pf)5011 static void ice_deinit_devlink(struct ice_pf *pf)
5012 {
5013 	ice_health_deinit(pf);
5014 	ice_devlink_unregister(pf);
5015 	ice_devlink_destroy_regions(pf);
5016 	ice_devlink_unregister_params(pf);
5017 }
5018 
ice_init(struct ice_pf * pf)5019 static int ice_init(struct ice_pf *pf)
5020 {
5021 	struct device *dev = ice_pf_to_dev(pf);
5022 	int err;
5023 
5024 	err = ice_init_pf(pf);
5025 	if (err) {
5026 		dev_err(dev, "ice_init_pf failed: %d\n", err);
5027 		return err;
5028 	}
5029 
5030 	if (pf->hw.mac_type == ICE_MAC_E830) {
5031 		err = pci_enable_ptm(pf->pdev, NULL);
5032 		if (err)
5033 			dev_dbg(dev, "PCIe PTM not supported by PCIe bus/controller\n");
5034 	}
5035 
5036 	err = ice_alloc_vsis(pf);
5037 	if (err)
5038 		goto unroll_pf_init;
5039 
5040 	err = ice_init_pf_sw(pf);
5041 	if (err)
5042 		goto err_init_pf_sw;
5043 
5044 	ice_init_wakeup(pf);
5045 
5046 	err = ice_init_link(pf);
5047 	if (err)
5048 		goto err_init_link;
5049 
5050 	err = ice_send_version(pf);
5051 	if (err)
5052 		goto err_init_link;
5053 
5054 	ice_verify_cacheline_size(pf);
5055 
5056 	if (ice_is_safe_mode(pf))
5057 		ice_set_safe_mode_vlan_cfg(pf);
5058 	else
5059 		/* print PCI link speed and width */
5060 		pcie_print_link_status(pf->pdev);
5061 
5062 	/* ready to go, so clear down state bit */
5063 	clear_bit(ICE_DOWN, pf->state);
5064 	clear_bit(ICE_SERVICE_DIS, pf->state);
5065 
5066 	/* since everything is good, start the service timer */
5067 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5068 
5069 	return 0;
5070 
5071 err_init_link:
5072 	ice_deinit_pf_sw(pf);
5073 err_init_pf_sw:
5074 	ice_dealloc_vsis(pf);
5075 unroll_pf_init:
5076 	ice_deinit_pf(pf);
5077 	return err;
5078 }
5079 
5080 static void ice_deinit(struct ice_pf *pf)
5081 {
5082 	set_bit(ICE_SERVICE_DIS, pf->state);
5083 	set_bit(ICE_DOWN, pf->state);
5084 
5085 	ice_deinit_pf_sw(pf);
5086 	ice_dealloc_vsis(pf);
5087 	ice_deinit_pf(pf);
5088 }
5089 
5090 /**
5091  * ice_load - load PF by initializing HW and starting the VSI
5092  * @pf: pointer to the pf instance
5093  *
5094  * This function has to be called under devl_lock.
5095  */
5096 int ice_load(struct ice_pf *pf)
5097 {
5098 	struct ice_vsi *vsi;
5099 	int err;
5100 
5101 	devl_assert_locked(priv_to_devlink(pf));
5102 
5103 	vsi = ice_get_main_vsi(pf);
5104 
5105 	/* init channel list */
5106 	INIT_LIST_HEAD(&vsi->ch_list);
5107 
5108 	err = ice_cfg_netdev(vsi);
5109 	if (err)
5110 		return err;
5111 
5112 	/* Setup DCB netlink interface */
5113 	ice_dcbnl_setup(vsi);
5114 
5115 	err = ice_init_mac_fltr(pf);
5116 	if (err)
5117 		goto err_init_mac_fltr;
5118 
5119 	err = ice_devlink_create_pf_port(pf);
5120 	if (err)
5121 		goto err_devlink_create_pf_port;
5122 
5123 	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
5124 
5125 	err = ice_register_netdev(vsi);
5126 	if (err)
5127 		goto err_register_netdev;
5128 
5129 	err = ice_tc_indir_block_register(vsi);
5130 	if (err)
5131 		goto err_tc_indir_block_register;
5132 
5133 	ice_napi_add(vsi);
5134 
5135 	ice_init_features(pf);
5136 
5137 	err = ice_init_rdma(pf);
5138 	if (err)
5139 		goto err_init_rdma;
5140 
5141 	ice_service_task_restart(pf);
5142 
5143 	clear_bit(ICE_DOWN, pf->state);
5144 
5145 	return 0;
5146 
5147 err_init_rdma:
5148 	ice_deinit_features(pf);
5149 	ice_tc_indir_block_unregister(vsi);
5150 err_tc_indir_block_register:
5151 	ice_unregister_netdev(vsi);
5152 err_register_netdev:
5153 	ice_devlink_destroy_pf_port(pf);
5154 err_devlink_create_pf_port:
5155 err_init_mac_fltr:
5156 	ice_decfg_netdev(vsi);
5157 	return err;
5158 }
5159 
5160 /**
5161  * ice_unload - unload PF by stopping the VSI and deinitializing HW
5162  * @pf: pointer to the pf instance
5163  *
5164  * This function has to be called under devl_lock.
5165  */
5166 void ice_unload(struct ice_pf *pf)
5167 {
5168 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
5169 
5170 	devl_assert_locked(priv_to_devlink(pf));
5171 
5172 	ice_deinit_rdma(pf);
5173 	ice_deinit_features(pf);
5174 	ice_tc_indir_block_unregister(vsi);
5175 	ice_unregister_netdev(vsi);
5176 	ice_devlink_destroy_pf_port(pf);
5177 	ice_decfg_netdev(vsi);
5178 }
5179 
5180 static int ice_probe_recovery_mode(struct ice_pf *pf)
5181 {
5182 	struct device *dev = ice_pf_to_dev(pf);
5183 	int err;
5184 
5185 	dev_err(dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode\n");
5186 
5187 	INIT_HLIST_HEAD(&pf->aq_wait_list);
5188 	spin_lock_init(&pf->aq_wait_lock);
5189 	init_waitqueue_head(&pf->aq_wait_queue);
5190 
5191 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
5192 	pf->serv_tmr_period = HZ;
5193 	INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode);
5194 	clear_bit(ICE_SERVICE_SCHED, pf->state);
5195 	err = ice_create_all_ctrlq(&pf->hw);
5196 	if (err)
5197 		return err;
5198 
5199 	scoped_guard(devl, priv_to_devlink(pf)) {
5200 		err = ice_init_devlink(pf);
5201 		if (err)
5202 			return err;
5203 	}
5204 
5205 	ice_service_task_restart(pf);
5206 
5207 	return 0;
5208 }
5209 
5210 /**
5211  * ice_probe - Device initialization routine
5212  * @pdev: PCI device information struct
5213  * @ent: entry in ice_pci_tbl
5214  *
5215  * Returns 0 on success, negative on failure
5216  */
5217 static int
5218 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
5219 {
5220 	struct device *dev = &pdev->dev;
5221 	bool need_dev_deinit = false;
5222 	struct ice_adapter *adapter;
5223 	struct ice_pf *pf;
5224 	struct ice_hw *hw;
5225 	int err;
5226 
5227 	if (pdev->is_virtfn) {
5228 		dev_err(dev, "can't probe a virtual function\n");
5229 		return -EINVAL;
5230 	}
5231 
5232 	/* when under a kdump kernel, initiate a reset before enabling the
5233 	 * device in order to clear out any pending DMA transactions. These
5234 	 * transactions can cause some systems to machine check when doing
5235 	 * the pcim_enable_device() below.
5236 	 */
5237 	if (is_kdump_kernel()) {
5238 		pci_save_state(pdev);
5239 		pci_clear_master(pdev);
5240 		err = pcie_flr(pdev);
5241 		if (err)
5242 			return err;
5243 		pci_restore_state(pdev);
5244 	}
5245 
5246 	/* this driver uses devres, see
5247 	 * Documentation/driver-api/driver-model/devres.rst
5248 	 */
5249 	err = pcim_enable_device(pdev);
5250 	if (err)
5251 		return err;
5252 
5253 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
5254 	if (err) {
5255 		dev_err(dev, "BAR0 I/O map error %d\n", err);
5256 		return err;
5257 	}
5258 
5259 	pf = ice_allocate_pf(dev);
5260 	if (!pf)
5261 		return -ENOMEM;
5262 
5263 	/* initialize Auxiliary index to invalid value */
5264 	pf->aux_idx = -1;
5265 
5266 	/* set up for high or low DMA */
5267 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
5268 	if (err) {
5269 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
5270 		return err;
5271 	}
5272 
5273 	pci_set_master(pdev);
5274 	pf->pdev = pdev;
5275 	pci_set_drvdata(pdev, pf);
5276 	set_bit(ICE_DOWN, pf->state);
5277 	/* Disable service task until DOWN bit is cleared */
5278 	set_bit(ICE_SERVICE_DIS, pf->state);
5279 
5280 	hw = &pf->hw;
5281 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
5282 	pci_save_state(pdev);
5283 
5284 	hw->back = pf;
5285 	hw->port_info = NULL;
5286 	hw->vendor_id = pdev->vendor;
5287 	hw->device_id = pdev->device;
5288 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
5289 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
5290 	hw->subsystem_device_id = pdev->subsystem_device;
5291 	hw->bus.device = PCI_SLOT(pdev->devfn);
5292 	hw->bus.func = PCI_FUNC(pdev->devfn);
5293 	ice_set_ctrlq_len(hw);
5294 
5295 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
5296 
5297 #ifndef CONFIG_DYNAMIC_DEBUG
5298 	if (debug < -1)
5299 		hw->debug_mask = debug;
5300 #endif
5301 
5302 	if (ice_is_recovery_mode(hw))
5303 		return ice_probe_recovery_mode(pf);
5304 
5305 	err = ice_init_hw(hw);
5306 	if (err) {
5307 		dev_err(dev, "ice_init_hw failed: %d\n", err);
5308 		return err;
5309 	}
5310 
5311 	adapter = ice_adapter_get(pdev);
5312 	if (IS_ERR(adapter)) {
5313 		err = PTR_ERR(adapter);
5314 		goto unroll_hw_init;
5315 	}
5316 	pf->adapter = adapter;
5317 
5318 	err = ice_init_dev(pf);
5319 	if (err)
5320 		goto unroll_adapter;
5321 
5322 	err = ice_init(pf);
5323 	if (err)
5324 		goto unroll_dev_init;
5325 
5326 	devl_lock(priv_to_devlink(pf));
5327 	err = ice_load(pf);
5328 	if (err)
5329 		goto unroll_init;
5330 
5331 	err = ice_init_devlink(pf);
5332 	if (err)
5333 		goto unroll_load;
5334 	devl_unlock(priv_to_devlink(pf));
5335 
5336 	return 0;
5337 
5338 unroll_load:
5339 	ice_unload(pf);
5340 unroll_init:
5341 	devl_unlock(priv_to_devlink(pf));
5342 	ice_deinit(pf);
5343 unroll_dev_init:
5344 	need_dev_deinit = true;
5345 unroll_adapter:
5346 	ice_adapter_put(pdev);
5347 unroll_hw_init:
5348 	ice_deinit_hw(hw);
5349 	if (need_dev_deinit)
5350 		ice_deinit_dev(pf);
5351 	return err;
5352 }
5353 
5354 /**
5355  * ice_set_wake - enable or disable Wake on LAN
5356  * @pf: pointer to the PF struct
5357  *
5358  * Simple helper for WoL control
5359  */
5360 static void ice_set_wake(struct ice_pf *pf)
5361 {
5362 	struct ice_hw *hw = &pf->hw;
5363 	bool wol = pf->wol_ena;
5364 
5365 	/* clear wake state, otherwise new wake events won't fire */
5366 	wr32(hw, PFPM_WUS, U32_MAX);
5367 
5368 	/* enable / disable APM wake up, no RMW needed */
5369 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5370 
5371 	/* set magic packet filter enabled */
5372 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5373 }
5374 
5375 /**
5376  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5377  * @pf: pointer to the PF struct
5378  *
5379  * Issue firmware command to enable multicast magic wake, making
5380  * sure that any locally administered address (LAA) is used for
5381  * wake, and that PF reset doesn't undo the LAA.
5382  */
5383 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5384 {
5385 	struct device *dev = ice_pf_to_dev(pf);
5386 	struct ice_hw *hw = &pf->hw;
5387 	u8 mac_addr[ETH_ALEN];
5388 	struct ice_vsi *vsi;
5389 	int status;
5390 	u8 flags;
5391 
5392 	if (!pf->wol_ena)
5393 		return;
5394 
5395 	vsi = ice_get_main_vsi(pf);
5396 	if (!vsi)
5397 		return;
5398 
5399 	/* Get current MAC address in case it's an LAA */
5400 	if (vsi->netdev)
5401 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5402 	else
5403 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5404 
5405 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5406 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5407 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5408 
5409 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5410 	if (status)
5411 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5412 			status, libie_aq_str(hw->adminq.sq_last_status));
5413 }
5414 
5415 /**
5416  * ice_remove - Device removal routine
5417  * @pdev: PCI device information struct
5418  */
5419 static void ice_remove(struct pci_dev *pdev)
5420 {
5421 	struct ice_pf *pf = pci_get_drvdata(pdev);
5422 	int i;
5423 
5424 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5425 		if (!ice_is_reset_in_progress(pf->state))
5426 			break;
5427 		msleep(100);
5428 	}
5429 
5430 	if (ice_is_recovery_mode(&pf->hw)) {
5431 		ice_service_task_stop(pf);
5432 		scoped_guard(devl, priv_to_devlink(pf)) {
5433 			ice_deinit_devlink(pf);
5434 		}
5435 		return;
5436 	}
5437 
5438 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5439 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5440 		ice_free_vfs(pf);
5441 	}
5442 
5443 	if (!ice_is_safe_mode(pf))
5444 		ice_remove_arfs(pf);
5445 
5446 	devl_lock(priv_to_devlink(pf));
5447 	ice_dealloc_all_dynamic_ports(pf);
5448 	ice_deinit_devlink(pf);
5449 
5450 	ice_unload(pf);
5451 	devl_unlock(priv_to_devlink(pf));
5452 
5453 	ice_deinit(pf);
5454 	ice_vsi_release_all(pf);
5455 
5456 	ice_setup_mc_magic_wake(pf);
5457 	ice_set_wake(pf);
5458 
5459 	ice_adapter_put(pdev);
5460 	ice_deinit_hw(&pf->hw);
5461 
5462 	ice_deinit_dev(pf);
5463 	ice_aq_cancel_waiting_tasks(pf);
5464 	set_bit(ICE_DOWN, pf->state);
5465 }
5466 
5467 /**
5468  * ice_shutdown - PCI callback for shutting down device
5469  * @pdev: PCI device information struct
5470  */
5471 static void ice_shutdown(struct pci_dev *pdev)
5472 {
5473 	struct ice_pf *pf = pci_get_drvdata(pdev);
5474 
5475 	ice_remove(pdev);
5476 
5477 	if (system_state == SYSTEM_POWER_OFF) {
5478 		pci_wake_from_d3(pdev, pf->wol_ena);
5479 		pci_set_power_state(pdev, PCI_D3hot);
5480 	}
5481 }
5482 
5483 /**
5484  * ice_prepare_for_shutdown - prep for PCI shutdown
5485  * @pf: board private structure
5486  *
5487  * Inform or close all dependent features in prep for PCI device shutdown
5488  */
5489 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5490 {
5491 	struct ice_hw *hw = &pf->hw;
5492 	u32 v;
5493 
5494 	/* Notify VFs of impending reset */
5495 	if (ice_check_sq_alive(hw, &hw->mailboxq))
5496 		ice_vc_notify_reset(pf);
5497 
5498 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5499 
5500 	/* disable the VSIs and their queues that are not already DOWN */
5501 	ice_pf_dis_all_vsi(pf, false);
5502 
5503 	ice_for_each_vsi(pf, v)
5504 		if (pf->vsi[v])
5505 			pf->vsi[v]->vsi_num = 0;
5506 
5507 	ice_shutdown_all_ctrlq(hw, true);
5508 }
5509 
5510 /**
5511  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5512  * @pf: board private structure to reinitialize
5513  *
5514  * This routine reinitializes the interrupt scheme that was cleared during
5515  * the power management suspend callback.
5516  *
5517  * This should be called during resume routine to re-allocate the q_vectors
5518  * and reacquire interrupts.
5519  */
5520 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5521 {
5522 	struct device *dev = ice_pf_to_dev(pf);
5523 	int ret, v;
5524 
5525 	/* Since we clear MSIX flag during suspend, we need to
5526 	 * set it back during resume...
5527 	 */
5528 
5529 	ret = ice_init_interrupt_scheme(pf);
5530 	if (ret) {
5531 		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5532 		return ret;
5533 	}
5534 
5535 	/* Remap vectors and rings after successfully re-initializing interrupts */
5536 	ice_for_each_vsi(pf, v) {
5537 		if (!pf->vsi[v])
5538 			continue;
5539 
5540 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5541 		if (ret)
5542 			goto err_reinit;
5543 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5544 		rtnl_lock();
5545 		ice_vsi_set_napi_queues(pf->vsi[v]);
5546 		rtnl_unlock();
5547 	}
5548 
5549 	ret = ice_req_irq_msix_misc(pf);
5550 	if (ret) {
5551 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5552 			ret);
5553 		goto err_reinit;
5554 	}
5555 
5556 	return 0;
5557 
5558 err_reinit:
5559 	while (v--)
5560 		if (pf->vsi[v]) {
5561 			rtnl_lock();
5562 			ice_vsi_clear_napi_queues(pf->vsi[v]);
5563 			rtnl_unlock();
5564 			ice_vsi_free_q_vectors(pf->vsi[v]);
5565 		}
5566 
5567 	return ret;
5568 }
5569 
5570 /**
5571  * ice_suspend - PM suspend callback
5572  * @dev: generic device information structure
5573  *
5574  * Power Management callback to quiesce the device and prepare
5575  * for D3 transition.
5576  */
5577 static int ice_suspend(struct device *dev)
5578 {
5579 	struct pci_dev *pdev = to_pci_dev(dev);
5580 	struct ice_pf *pf;
5581 	int disabled, v;
5582 
5583 	pf = pci_get_drvdata(pdev);
5584 
5585 	if (!ice_pf_state_is_nominal(pf)) {
5586 		dev_err(dev, "Device is not ready, no need to suspend it\n");
5587 		return -EBUSY;
5588 	}
5589 
5590 	/* Stop watchdog tasks until resume completion.
5591 	 * Even though it is most likely that the service task is
5592 	 * disabled if the device is suspended or down, the service task's
5593 	 * state is controlled by a different state bit, and we should
5594 	 * store and honor whatever state that bit is in at this point.
5595 	 */
5596 	disabled = ice_service_task_stop(pf);
5597 
5598 	ice_deinit_rdma(pf);
5599 
5600 	/* Already suspended? Then there is nothing to do */
5601 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5602 		if (!disabled)
5603 			ice_service_task_restart(pf);
5604 		return 0;
5605 	}
5606 
5607 	if (test_bit(ICE_DOWN, pf->state) ||
5608 	    ice_is_reset_in_progress(pf->state)) {
5609 		dev_err(dev, "can't suspend device in reset or already down\n");
5610 		if (!disabled)
5611 			ice_service_task_restart(pf);
5612 		return 0;
5613 	}
5614 
5615 	ice_setup_mc_magic_wake(pf);
5616 
5617 	ice_prepare_for_shutdown(pf);
5618 
5619 	ice_set_wake(pf);
5620 
5621 	/* Free vectors, clear the interrupt scheme and release IRQs
5622 	 * for proper hibernation, especially with large number of CPUs.
5623 	 * Otherwise hibernation might fail when mapping all the vectors back
5624 	 * to CPU0.
5625 	 */
5626 	ice_free_irq_msix_misc(pf);
5627 	ice_for_each_vsi(pf, v) {
5628 		if (!pf->vsi[v])
5629 			continue;
5630 		rtnl_lock();
5631 		ice_vsi_clear_napi_queues(pf->vsi[v]);
5632 		rtnl_unlock();
5633 		ice_vsi_free_q_vectors(pf->vsi[v]);
5634 	}
5635 	ice_clear_interrupt_scheme(pf);
5636 
5637 	pci_save_state(pdev);
5638 	pci_wake_from_d3(pdev, pf->wol_ena);
5639 	pci_set_power_state(pdev, PCI_D3hot);
5640 	return 0;
5641 }
5642 
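/* Illustrative note (not part of the driver source): ice_suspend() and
 * ice_resume() are wired up through ice_pm_ops (defined further below with
 * DEFINE_SIMPLE_DEV_PM_OPS), so a system-wide sleep such as
 *
 *	echo mem > /sys/power/state
 *
 * ends up quiescing the PF, releasing its IRQ vectors and programming the
 * WoL registers before the device enters D3hot.
 */
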
5643 /**
5644  * ice_resume - PM callback for waking up from D3
5645  * @dev: generic device information structure
5646  */
5647 static int ice_resume(struct device *dev)
5648 {
5649 	struct pci_dev *pdev = to_pci_dev(dev);
5650 	enum ice_reset_req reset_type;
5651 	struct ice_pf *pf;
5652 	struct ice_hw *hw;
5653 	int ret;
5654 
5655 	pci_set_power_state(pdev, PCI_D0);
5656 	pci_restore_state(pdev);
5657 
5658 	if (!pci_device_is_present(pdev))
5659 		return -ENODEV;
5660 
5661 	ret = pci_enable_device_mem(pdev);
5662 	if (ret) {
5663 		dev_err(dev, "Cannot enable device after suspend\n");
5664 		return ret;
5665 	}
5666 
5667 	pf = pci_get_drvdata(pdev);
5668 	hw = &pf->hw;
5669 
5670 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
5671 	ice_print_wake_reason(pf);
5672 
5673 	/* We cleared the interrupt scheme when we suspended, so we need to
5674 	 * restore it now to resume device functionality.
5675 	 */
5676 	ret = ice_reinit_interrupt_scheme(pf);
5677 	if (ret)
5678 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5679 
5680 	ret = ice_init_rdma(pf);
5681 	if (ret)
5682 		dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
5683 			ret);
5684 
5685 	clear_bit(ICE_DOWN, pf->state);
5686 	/* Now perform PF reset and rebuild */
5687 	reset_type = ICE_RESET_PFR;
5688 	/* re-enable service task for reset, but allow reset to schedule it */
5689 	clear_bit(ICE_SERVICE_DIS, pf->state);
5690 
5691 	if (ice_schedule_reset(pf, reset_type))
5692 		dev_err(dev, "Reset during resume failed.\n");
5693 
5694 	clear_bit(ICE_SUSPENDED, pf->state);
5695 	ice_service_task_restart(pf);
5696 
5697 	/* Restart the service task */
5698 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5699 
5700 	return 0;
5701 }
5702 
5703 /**
5704  * ice_pci_err_detected - warning that PCI error has been detected
5705  * @pdev: PCI device information struct
5706  * @err: the type of PCI error
5707  *
5708  * Called to warn that something happened on the PCI bus and the error handling
5709  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
5710  */
5711 static pci_ers_result_t
5712 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5713 {
5714 	struct ice_pf *pf = pci_get_drvdata(pdev);
5715 
5716 	if (!pf) {
5717 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5718 			__func__, err);
5719 		return PCI_ERS_RESULT_DISCONNECT;
5720 	}
5721 
5722 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5723 		ice_service_task_stop(pf);
5724 
5725 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5726 			set_bit(ICE_PFR_REQ, pf->state);
5727 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5728 		}
5729 	}
5730 
5731 	return PCI_ERS_RESULT_NEED_RESET;
5732 }
5733 
5734 /**
5735  * ice_pci_err_slot_reset - a PCI slot reset has just happened
5736  * @pdev: PCI device information struct
5737  *
5738  * Called to determine if the driver can recover from the PCI slot reset by
5739  * using a register read to determine if the device is recoverable.
5740  */
5741 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5742 {
5743 	struct ice_pf *pf = pci_get_drvdata(pdev);
5744 	pci_ers_result_t result;
5745 	int err;
5746 	u32 reg;
5747 
5748 	err = pci_enable_device_mem(pdev);
5749 	if (err) {
5750 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5751 			err);
5752 		result = PCI_ERS_RESULT_DISCONNECT;
5753 	} else {
5754 		pci_set_master(pdev);
5755 		pci_restore_state(pdev);
5756 		pci_wake_from_d3(pdev, false);
5757 
5758 		/* Check for life */
5759 		reg = rd32(&pf->hw, GLGEN_RTRIG);
5760 		if (!reg)
5761 			result = PCI_ERS_RESULT_RECOVERED;
5762 		else
5763 			result = PCI_ERS_RESULT_DISCONNECT;
5764 	}
5765 
5766 	return result;
5767 }
5768 
5769 /**
5770  * ice_pci_err_resume - restart operations after PCI error recovery
5771  * @pdev: PCI device information struct
5772  *
5773  * Called to allow the driver to bring things back up after PCI error and/or
5774  * reset recovery have finished
5775  */
5776 static void ice_pci_err_resume(struct pci_dev *pdev)
5777 {
5778 	struct ice_pf *pf = pci_get_drvdata(pdev);
5779 
5780 	if (!pf) {
5781 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5782 			__func__);
5783 		return;
5784 	}
5785 
5786 	if (test_bit(ICE_SUSPENDED, pf->state)) {
5787 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5788 			__func__);
5789 		return;
5790 	}
5791 
5792 	ice_restore_all_vfs_msi_state(pf);
5793 
5794 	ice_do_reset(pf, ICE_RESET_PFR);
5795 	ice_service_task_restart(pf);
5796 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5797 }
5798 
5799 /**
5800  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5801  * @pdev: PCI device information struct
5802  */
5803 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5804 {
5805 	struct ice_pf *pf = pci_get_drvdata(pdev);
5806 
5807 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5808 		ice_service_task_stop(pf);
5809 
5810 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5811 			set_bit(ICE_PFR_REQ, pf->state);
5812 			ice_prepare_for_reset(pf, ICE_RESET_PFR);
5813 		}
5814 	}
5815 }
5816 
5817 /**
5818  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5819  * @pdev: PCI device information struct
5820  */
5821 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5822 {
5823 	ice_pci_err_resume(pdev);
5824 }
5825 
5826 /* ice_pci_tbl - PCI Device ID Table
5827  *
5828  * Wildcard entries (PCI_ANY_ID) should come last
5829  * Last entry must be all 0s
5830  *
5831  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5832  *   Class, Class Mask, private data (not used) }
5833  */
5834 static const struct pci_device_id ice_pci_tbl[] = {
5835 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) },
5836 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) },
5837 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) },
5838 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) },
5839 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) },
5840 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) },
5841 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) },
5842 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) },
5843 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) },
5844 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) },
5845 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) },
5846 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) },
5847 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) },
5848 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) },
5849 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) },
5850 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) },
5851 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) },
5852 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) },
5853 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) },
5854 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) },
5855 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) },
5856 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) },
5857 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) },
5858 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
5859 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
5860 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
5861 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), },
5862 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), },
5863 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), },
5864 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), },
5865 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) },
5866 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) },
5867 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) },
5868 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) },
5869 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), },
5870 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), },
5871 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), },
5872 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), },
5873 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), },
5874 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), },
5875 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_BACKPLANE), },
5876 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_QSFP56), },
5877 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_SFP), },
5878 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_BACKPLANE), },
5879 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_QSFP), },
5880 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_SFP), },
5881 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_BACKPLANE), },
5882 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_QSFP), },
5883 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_SFP), },
5884 	/* required last entry */
5885 	{}
5886 };
5887 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5888 
5889 static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5890 
5891 static const struct pci_error_handlers ice_pci_err_handler = {
5892 	.error_detected = ice_pci_err_detected,
5893 	.slot_reset = ice_pci_err_slot_reset,
5894 	.reset_prepare = ice_pci_err_reset_prepare,
5895 	.reset_done = ice_pci_err_reset_done,
5896 	.resume = ice_pci_err_resume
5897 };
5898 
5899 static struct pci_driver ice_driver = {
5900 	.name = KBUILD_MODNAME,
5901 	.id_table = ice_pci_tbl,
5902 	.probe = ice_probe,
5903 	.remove = ice_remove,
5904 	.driver.pm = pm_sleep_ptr(&ice_pm_ops),
5905 	.shutdown = ice_shutdown,
5906 	.sriov_configure = ice_sriov_configure,
5907 	.sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
5908 	.sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count,
5909 	.err_handler = &ice_pci_err_handler
5910 };
5911 
5912 /**
5913  * ice_module_init - Driver registration routine
5914  *
5915  * ice_module_init is the first routine called when the driver is
5916  * loaded. All it does is register with the PCI subsystem.
5917  */
5918 static int __init ice_module_init(void)
5919 {
5920 	int status = -ENOMEM;
5921 
5922 	pr_info("%s\n", ice_driver_string);
5923 	pr_info("%s\n", ice_copyright);
5924 
5925 	ice_adv_lnk_speed_maps_init();
5926 
5927 	ice_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, KBUILD_MODNAME);
5928 	if (!ice_wq) {
5929 		pr_err("Failed to create workqueue\n");
5930 		return status;
5931 	}
5932 
5933 	ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
5934 	if (!ice_lag_wq) {
5935 		pr_err("Failed to create LAG workqueue\n");
5936 		goto err_dest_wq;
5937 	}
5938 
5939 	ice_debugfs_init();
5940 
5941 	status = pci_register_driver(&ice_driver);
5942 	if (status) {
5943 		pr_err("failed to register PCI driver, err %d\n", status);
5944 		goto err_dest_lag_wq;
5945 	}
5946 
5947 	status = ice_sf_driver_register();
5948 	if (status) {
5949 		pr_err("Failed to register SF driver, err %d\n", status);
5950 		goto err_sf_driver;
5951 	}
5952 
5953 	return 0;
5954 
5955 err_sf_driver:
5956 	pci_unregister_driver(&ice_driver);
5957 err_dest_lag_wq:
5958 	destroy_workqueue(ice_lag_wq);
5959 	ice_debugfs_exit();
5960 err_dest_wq:
5961 	destroy_workqueue(ice_wq);
5962 	return status;
5963 }
5964 module_init(ice_module_init);
5965 
5966 /**
5967  * ice_module_exit - Driver exit cleanup routine
5968  *
5969  * ice_module_exit is called just before the driver is removed
5970  * from memory.
5971  */
5972 static void __exit ice_module_exit(void)
5973 {
5974 	ice_sf_driver_unregister();
5975 	pci_unregister_driver(&ice_driver);
5976 	ice_debugfs_exit();
5977 	destroy_workqueue(ice_wq);
5978 	destroy_workqueue(ice_lag_wq);
5979 	pr_info("module unloaded\n");
5980 }
5981 module_exit(ice_module_exit);
5982 
5983 /**
5984  * ice_set_mac_address - NDO callback to set MAC address
5985  * @netdev: network interface device structure
5986  * @pi: pointer to an address structure
5987  *
5988  * Returns 0 on success, negative on failure
5989  */
5990 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5991 {
5992 	struct ice_netdev_priv *np = netdev_priv(netdev);
5993 	struct ice_vsi *vsi = np->vsi;
5994 	struct ice_pf *pf = vsi->back;
5995 	struct ice_hw *hw = &pf->hw;
5996 	struct sockaddr *addr = pi;
5997 	u8 old_mac[ETH_ALEN];
5998 	u8 flags = 0;
5999 	u8 *mac;
6000 	int err;
6001 
6002 	mac = (u8 *)addr->sa_data;
6003 
6004 	if (!is_valid_ether_addr(mac))
6005 		return -EADDRNOTAVAIL;
6006 
6007 	if (test_bit(ICE_DOWN, pf->state) ||
6008 	    ice_is_reset_in_progress(pf->state)) {
6009 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
6010 			   mac);
6011 		return -EBUSY;
6012 	}
6013 
6014 	if (ice_chnl_dmac_fltr_cnt(pf)) {
6015 		netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
6016 			   mac);
6017 		return -EAGAIN;
6018 	}
6019 
6020 	netif_addr_lock_bh(netdev);
6021 	ether_addr_copy(old_mac, netdev->dev_addr);
6022 	/* change the netdev's MAC address */
6023 	eth_hw_addr_set(netdev, mac);
6024 	netif_addr_unlock_bh(netdev);
6025 
6026 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
6027 	err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
6028 	if (err && err != -ENOENT) {
6029 		err = -EADDRNOTAVAIL;
6030 		goto err_update_filters;
6031 	}
6032 
6033 	/* Add filter for new MAC. If filter exists, return success */
6034 	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
6035 	if (err == -EEXIST) {
6036 		/* Although this MAC filter is already present in hardware it's
6037 		 * possible in some cases (e.g. bonding) that dev_addr was
6038 		 * modified outside of the driver and needs to be restored back
6039 		 * to this value.
6040 		 */
6041 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
6042 
6043 		return 0;
6044 	} else if (err) {
6045 		/* error if the new filter addition failed */
6046 		err = -EADDRNOTAVAIL;
6047 	}
6048 
6049 err_update_filters:
6050 	if (err) {
6051 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
6052 			   mac);
6053 		netif_addr_lock_bh(netdev);
6054 		eth_hw_addr_set(netdev, old_mac);
6055 		netif_addr_unlock_bh(netdev);
6056 		return err;
6057 	}
6058 
6059 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
6060 		   netdev->dev_addr);
6061 
6062 	/* write new MAC address to the firmware */
6063 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
6064 	err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
6065 	if (err) {
6066 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
6067 			   mac, err);
6068 	}
6069 	return 0;
6070 }
6071 
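/* Illustrative note (not part of the driver source): ice_set_mac_address()
 * is the driver's ndo_set_mac_address hook, so a typical way to reach it
 * from userspace is
 *
 *	ip link set dev <ifname> address 00:11:22:33:44:55
 *
 * The address must be a valid unicast MAC; the call fails with -EBUSY
 * while the device is down or a reset is in progress, as checked above.
 */
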
6072 /**
6073  * ice_set_rx_mode - NDO callback to set the netdev filters
6074  * @netdev: network interface device structure
6075  */
6076 static void ice_set_rx_mode(struct net_device *netdev)
6077 {
6078 	struct ice_netdev_priv *np = netdev_priv(netdev);
6079 	struct ice_vsi *vsi = np->vsi;
6080 
6081 	if (!vsi || ice_is_switchdev_running(vsi->back))
6082 		return;
6083 
6084 	/* Set the flags to synchronize filters.
6085 	 * ndo_set_rx_mode may be triggered even without a change in netdev
6086 	 * flags.
6087 	 */
6088 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
6089 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
6090 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
6091 
6092 	/* schedule our worker thread which will take care of
6093 	 * applying the new filter changes
6094 	 */
6095 	ice_service_task_schedule(vsi->back);
6096 }
6097 
6098 /**
6099  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
6100  * @netdev: network interface device structure
6101  * @queue_index: Queue ID
6102  * @maxrate: maximum bandwidth in Mbps
6103  */
6104 static int
6105 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
6106 {
6107 	struct ice_netdev_priv *np = netdev_priv(netdev);
6108 	struct ice_vsi *vsi = np->vsi;
6109 	u16 q_handle;
6110 	int status;
6111 	u8 tc;
6112 
6113 	/* Validate maxrate requested is within permitted range */
6114 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
6115 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
6116 			   maxrate, queue_index);
6117 		return -EINVAL;
6118 	}
6119 
6120 	q_handle = vsi->tx_rings[queue_index]->q_handle;
6121 	tc = ice_dcb_get_tc(vsi, queue_index);
6122 
6123 	vsi = ice_locate_vsi_using_queue(vsi, queue_index);
6124 	if (!vsi) {
6125 		netdev_err(netdev, "Invalid VSI for given queue %d\n",
6126 			   queue_index);
6127 		return -EINVAL;
6128 	}
6129 
6130 	/* Set BW back to default when the user sets maxrate to 0 */
6131 	if (!maxrate)
6132 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
6133 					       q_handle, ICE_MAX_BW);
6134 	else
6135 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
6136 					  q_handle, ICE_MAX_BW, maxrate * 1000);
6137 	if (status)
6138 		netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
6139 			   status);
6140 
6141 	return status;
6142 }
6143 
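/* Illustrative note (not part of the driver source): ice_set_tx_maxrate()
 * is the ndo_set_tx_maxrate hook, which the networking core exposes via a
 * per-queue sysfs attribute, e.g.
 *
 *	echo 5000 > /sys/class/net/<ifname>/queues/tx-0/tx_maxrate
 *
 * caps Tx queue 0 at 5000 Mbps; writing 0 restores the default (unlimited)
 * bandwidth via the ice_cfg_q_bw_dflt_lmt() path above.
 */
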
6144 /**
6145  * ice_fdb_add - add an entry to the hardware database
6146  * @ndm: the input from the stack
6147  * @tb: pointer to array of nladdr (unused)
6148  * @dev: the net device pointer
6149  * @addr: the MAC address entry being added
6150  * @vid: VLAN ID
6151  * @flags: instructions from stack about fdb operation
6152  * @notified: whether notification was emitted
6153  * @extack: netlink extended ack
6154  */
6155 static int
6156 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
6157 	    struct net_device *dev, const unsigned char *addr, u16 vid,
6158 	    u16 flags, bool *notified,
6159 	    struct netlink_ext_ack __always_unused *extack)
6160 {
6161 	int err;
6162 
6163 	if (vid) {
6164 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
6165 		return -EINVAL;
6166 	}
6167 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
6168 		netdev_err(dev, "FDB only supports static addresses\n");
6169 		return -EINVAL;
6170 	}
6171 
6172 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
6173 		err = dev_uc_add_excl(dev, addr);
6174 	else if (is_multicast_ether_addr(addr))
6175 		err = dev_mc_add_excl(dev, addr);
6176 	else
6177 		err = -EINVAL;
6178 
6179 	/* Only return duplicate errors if NLM_F_EXCL is set */
6180 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
6181 		err = 0;
6182 
6183 	return err;
6184 }
6185 
6186 /**
6187  * ice_fdb_del - delete an entry from the hardware database
6188  * @ndm: the input from the stack
6189  * @tb: pointer to array of nladdr (unused)
6190  * @dev: the net device pointer
6191  * @addr: the MAC address entry being removed
6192  * @vid: VLAN ID
6193  * @notified: whether notification was emitted
6194  * @extack: netlink extended ack
6195  */
6196 static int
6197 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
6198 	    struct net_device *dev, const unsigned char *addr,
6199 	    __always_unused u16 vid, bool *notified,
6200 	    struct netlink_ext_ack *extack)
6201 {
6202 	int err;
6203 
6204 	if (ndm->ndm_state & NUD_PERMANENT) {
6205 		netdev_err(dev, "FDB only supports static addresses\n");
6206 		return -EINVAL;
6207 	}
6208 
6209 	if (is_unicast_ether_addr(addr))
6210 		err = dev_uc_del(dev, addr);
6211 	else if (is_multicast_ether_addr(addr))
6212 		err = dev_mc_del(dev, addr);
6213 	else
6214 		err = -EINVAL;
6215 
6216 	return err;
6217 }
6218 
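/* Illustrative note (not part of the driver source): ice_fdb_add() and
 * ice_fdb_del() back the ndo_fdb_add/ndo_fdb_del hooks and can be
 * exercised with the bridge tool, e.g.
 *
 *	bridge fdb add 01:00:5e:00:00:42 dev <ifname> self
 *	bridge fdb del 01:00:5e:00:00:42 dev <ifname> self
 *
 * subject to the restrictions above: no VLAN-qualified entries, and only
 * static (permanent) unicast or multicast addresses.
 */
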
6219 #define NETIF_VLAN_OFFLOAD_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
6220 					 NETIF_F_HW_VLAN_CTAG_TX | \
6221 					 NETIF_F_HW_VLAN_STAG_RX | \
6222 					 NETIF_F_HW_VLAN_STAG_TX)
6223 
6224 #define NETIF_VLAN_STRIPPING_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
6225 					 NETIF_F_HW_VLAN_STAG_RX)
6226 
6227 #define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | \
6228 					 NETIF_F_HW_VLAN_STAG_FILTER)
6229 
6230 /**
6231  * ice_fix_features - fix the netdev features flags based on device limitations
6232  * @netdev: ptr to the netdev that flags are being fixed on
6233  * @features: features that need to be checked and possibly fixed
6234  *
6235  * Make sure any fixups are made to features in this callback. This enables the
6236  * driver to not have to check unsupported configurations throughout the driver
6237  * because that's the responsibility of this callback.
6238  *
6239  * Single VLAN Mode (SVM) Supported Features:
6240  *	NETIF_F_HW_VLAN_CTAG_FILTER
6241  *	NETIF_F_HW_VLAN_CTAG_RX
6242  *	NETIF_F_HW_VLAN_CTAG_TX
6243  *
6244  * Double VLAN Mode (DVM) Supported Features:
6245  *	NETIF_F_HW_VLAN_CTAG_FILTER
6246  *	NETIF_F_HW_VLAN_CTAG_RX
6247  *	NETIF_F_HW_VLAN_CTAG_TX
6248  *
6249  *	NETIF_F_HW_VLAN_STAG_FILTER
6250  *	NETIF_F_HW_VLAN_STAG_RX
6251  *	NETIF_F_HW_VLAN_STAG_TX
6252  *
6253  * Features that need fixing:
6254  *	Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
6255  *	These are mutually exclusive as the VSI context cannot support multiple
6256  *	VLAN ethertypes simultaneously for stripping and/or insertion. If this
6257  *	is not done, then default to clearing the requested STAG offload
6258  *	settings.
6259  *
6260  *	All supported filtering has to be enabled or disabled together. For
6261  *	example, in DVM, CTAG and STAG filtering have to be enabled and disabled
6262  *	together. If this is not done, then default to VLAN filtering disabled.
6263  *	These are mutually exclusive as there is currently no way to
6264  *	enable/disable VLAN filtering based on VLAN ethertype when using VLAN
6265  *	prune rules.
6266  */
6267 static netdev_features_t
6268 ice_fix_features(struct net_device *netdev, netdev_features_t features)
6269 {
6270 	struct ice_netdev_priv *np = netdev_priv(netdev);
6271 	netdev_features_t req_vlan_fltr, cur_vlan_fltr;
6272 	bool cur_ctag, cur_stag, req_ctag, req_stag;
6273 
6274 	cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
6275 	cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6276 	cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6277 
6278 	req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
6279 	req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
6280 	req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
6281 
6282 	if (req_vlan_fltr != cur_vlan_fltr) {
6283 		if (ice_is_dvm_ena(&np->vsi->back->hw)) {
6284 			if (req_ctag && req_stag) {
6285 				features |= NETIF_VLAN_FILTERING_FEATURES;
6286 			} else if (!req_ctag && !req_stag) {
6287 				features &= ~NETIF_VLAN_FILTERING_FEATURES;
6288 			} else if ((!cur_ctag && req_ctag && !cur_stag) ||
6289 				   (!cur_stag && req_stag && !cur_ctag)) {
6290 				features |= NETIF_VLAN_FILTERING_FEATURES;
6291 				netdev_warn(netdev,  "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
6292 			} else if ((cur_ctag && !req_ctag && cur_stag) ||
6293 				   (cur_stag && !req_stag && cur_ctag)) {
6294 				features &= ~NETIF_VLAN_FILTERING_FEATURES;
6295 				netdev_warn(netdev,  "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
6296 			}
6297 		} else {
6298 			if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
6299 				netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
6300 
6301 			if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
6302 				features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6303 		}
6304 	}
6305 
6306 	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
6307 	    (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
6308 		netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
6309 		features &= ~(NETIF_F_HW_VLAN_STAG_RX |
6310 			      NETIF_F_HW_VLAN_STAG_TX);
6311 	}
6312 
6313 	if (!(netdev->features & NETIF_F_RXFCS) &&
6314 	    (features & NETIF_F_RXFCS) &&
6315 	    (features & NETIF_VLAN_STRIPPING_FEATURES) &&
6316 	    !ice_vsi_has_non_zero_vlans(np->vsi)) {
6317 		netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
6318 		features &= ~NETIF_VLAN_STRIPPING_FEATURES;
6319 	}
6320 
6321 	return features;
6322 }
6323 
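/* Illustrative example of the fixup above (not part of the driver source):
 * in DVM, requesting only one of the two filtering features is adjusted so
 * that both stay in sync, e.g. with both filters currently off,
 *
 *	ethtool -K <ifname> rx-vlan-filter on
 *
 * results in CTAG *and* STAG filtering being enabled, along with the
 * "must be either both on or both off" warning above.
 */
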
6324 /**
6325  * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
6326  * @vsi: PF's VSI
6327  * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order
6328  *
6329  * Store current stripped VLAN proto in ring packet context,
6330  * so it can be accessed more efficiently by packet processing code.
6331  */
6332 static void
6333 ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype)
6334 {
6335 	u16 i;
6336 
6337 	ice_for_each_alloc_rxq(vsi, i)
6338 		vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype;
6339 }
6340 
6341 /**
6342  * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6343  * @vsi: PF's VSI
6344  * @features: features used to determine VLAN offload settings
6345  *
6346  * First, determine the vlan_ethertype based on the VLAN offload bits in
6347  * features. Then determine if stripping and insertion should be enabled or
6348  * disabled. Finally enable or disable VLAN stripping and insertion.
6349  */
6350 static int
6351 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
6352 {
6353 	bool enable_stripping = true, enable_insertion = true;
6354 	struct ice_vsi_vlan_ops *vlan_ops;
6355 	int strip_err = 0, insert_err = 0;
6356 	u16 vlan_ethertype = 0;
6357 
6358 	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6359 
6360 	if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
6361 		vlan_ethertype = ETH_P_8021AD;
6362 	else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
6363 		vlan_ethertype = ETH_P_8021Q;
6364 
6365 	if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
6366 		enable_stripping = false;
6367 	if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
6368 		enable_insertion = false;
6369 
6370 	if (enable_stripping)
6371 		strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
6372 	else
6373 		strip_err = vlan_ops->dis_stripping(vsi);
6374 
6375 	if (enable_insertion)
6376 		insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
6377 	else
6378 		insert_err = vlan_ops->dis_insertion(vsi);
6379 
6380 	if (strip_err || insert_err)
6381 		return -EIO;
6382 
6383 	ice_set_rx_rings_vlan_proto(vsi, enable_stripping ?
6384 				    htons(vlan_ethertype) : 0);
6385 
6386 	return 0;
6387 }
6388 
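/* Illustrative summary of the ethertype selection above (not part of the
 * driver source):
 *
 *	requested offload bits			vlan_ethertype
 *	any STAG Rx/Tx bit set			0x88A8 (802.1ad)
 *	else any CTAG Rx/Tx bit set		0x8100 (802.1Q)
 *	else					0 (offloads disabled)
 */
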
6389 /**
6390  * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6391  * @vsi: PF's VSI
6392  * @features: features used to determine VLAN filtering settings
6393  *
6394  * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
6395  * features.
6396  */
6397 static int
6398 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
6399 {
6400 	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
6401 	int err = 0;
6402 
6403 	/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
6404 	 * if either bit is set. In switchdev mode Rx filtering should never be
6405 	 * enabled.
6406 	 */
6407 	if ((features &
6408 	     (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) &&
6409 	     !ice_is_eswitch_mode_switchdev(vsi->back))
6410 		err = vlan_ops->ena_rx_filtering(vsi);
6411 	else
6412 		err = vlan_ops->dis_rx_filtering(vsi);
6413 
6414 	return err;
6415 }
6416 
6417 /**
6418  * ice_set_vlan_features - set VLAN settings based on suggested feature set
6419  * @netdev: ptr to the netdev being adjusted
6420  * @features: the feature set that the stack is suggesting
6421  *
6422  * Only update VLAN settings if the requested_vlan_features are different than
6423  * the current_vlan_features.
6424  */
6425 static int
6426 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6427 {
6428 	netdev_features_t current_vlan_features, requested_vlan_features;
6429 	struct ice_netdev_priv *np = netdev_priv(netdev);
6430 	struct ice_vsi *vsi = np->vsi;
6431 	int err;
6432 
6433 	current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6434 	requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6435 	if (current_vlan_features ^ requested_vlan_features) {
6436 		if ((features & NETIF_F_RXFCS) &&
6437 		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6438 			dev_err(ice_pf_to_dev(vsi->back),
6439 				"To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6440 			return -EIO;
6441 		}
6442 
6443 		err = ice_set_vlan_offload_features(vsi, features);
6444 		if (err)
6445 			return err;
6446 	}
6447 
6448 	current_vlan_features = netdev->features &
6449 		NETIF_VLAN_FILTERING_FEATURES;
6450 	requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6451 	if (current_vlan_features ^ requested_vlan_features) {
6452 		err = ice_set_vlan_filtering_features(vsi, features);
6453 		if (err)
6454 			return err;
6455 	}
6456 
6457 	return 0;
6458 }
6459 
6460 /**
6461  * ice_set_loopback - turn on/off loopback mode on underlying PF
6462  * @vsi: ptr to VSI
6463  * @ena: flag to indicate the on/off setting
6464  */
6465 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6466 {
6467 	bool if_running = netif_running(vsi->netdev);
6468 	int ret;
6469 
6470 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6471 		ret = ice_down(vsi);
6472 		if (ret) {
6473 			netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6474 			return ret;
6475 		}
6476 	}
6477 	ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6478 	if (ret)
6479 		netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6480 	if (if_running)
6481 		ret = ice_up(vsi);
6482 
6483 	return ret;
6484 }
6485 
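/* Illustrative note (not part of the driver source): NETIF_F_LOOPBACK
 * corresponds to the "loopback" ethtool feature string, so MAC loopback
 * can be toggled with
 *
 *	ethtool -K <ifname> loopback on
 *
 * which reaches ice_set_loopback() through ice_set_features() below; the
 * interface is briefly taken down and brought back up if it was running.
 */
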
6486 /**
6487  * ice_set_features - set the netdev feature flags
6488  * @netdev: ptr to the netdev being adjusted
6489  * @features: the feature set that the stack is suggesting
6490  */
6491 static int
6492 ice_set_features(struct net_device *netdev, netdev_features_t features)
6493 {
6494 	netdev_features_t changed = netdev->features ^ features;
6495 	struct ice_netdev_priv *np = netdev_priv(netdev);
6496 	struct ice_vsi *vsi = np->vsi;
6497 	struct ice_pf *pf = vsi->back;
6498 	int ret = 0;
6499 
6500 	/* Don't set any netdev advanced features with device in Safe Mode */
6501 	if (ice_is_safe_mode(pf)) {
6502 		dev_err(ice_pf_to_dev(pf),
6503 			"Device is in Safe Mode - not enabling advanced netdev features\n");
6504 		return ret;
6505 	}
6506 
6507 	/* Do not change setting during reset */
6508 	if (ice_is_reset_in_progress(pf->state)) {
6509 		dev_err(ice_pf_to_dev(pf),
6510 			"Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6511 		return -EBUSY;
6512 	}
6513 
6514 	/* Multiple features can be changed in one call so keep features in
6515 	 * separate if/else statements to guarantee each feature is checked
6516 	 */
6517 	if (changed & NETIF_F_RXHASH)
6518 		ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6519 
6520 	ret = ice_set_vlan_features(netdev, features);
6521 	if (ret)
6522 		return ret;
6523 
6524 	/* Turn on receive of FCS aka CRC, and after setting this
6525 	 * flag the packet data will have the 4 byte CRC appended
6526 	 */
6527 	if (changed & NETIF_F_RXFCS) {
6528 		if ((features & NETIF_F_RXFCS) &&
6529 		    (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6530 			dev_err(ice_pf_to_dev(vsi->back),
6531 				"To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6532 			return -EIO;
6533 		}
6534 
6535 		ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6536 		ret = ice_down_up(vsi);
6537 		if (ret)
6538 			return ret;
6539 	}
6540 
6541 	if (changed & NETIF_F_NTUPLE) {
6542 		bool ena = !!(features & NETIF_F_NTUPLE);
6543 
6544 		ice_vsi_manage_fdir(vsi, ena);
6545 		ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6546 	}
6547 
6548 	/* don't turn off hw_tc_offload when ADQ is already enabled */
6549 	if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6550 		dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6551 		return -EACCES;
6552 	}
6553 
6554 	if (changed & NETIF_F_HW_TC) {
6555 		bool ena = !!(features & NETIF_F_HW_TC);
6556 
6557 		assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena);
6558 	}
6559 
6560 	if (changed & NETIF_F_LOOPBACK)
6561 		ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6562 
6563 	/* Due to E830 hardware limitations, TSO (NETIF_F_ALL_TSO) with GCS
6564 	 * (NETIF_F_HW_CSUM) is not supported.
6565 	 */
6566 	if (ice_is_feature_supported(pf, ICE_F_GCS) &&
6567 	    ((features & NETIF_F_HW_CSUM) && (features & NETIF_F_ALL_TSO))) {
6568 		if (netdev->features & NETIF_F_HW_CSUM)
6569 			dev_err(ice_pf_to_dev(pf), "To enable TSO, you must first disable HW checksum.\n");
6570 		else
6571 			dev_err(ice_pf_to_dev(pf), "To enable HW checksum, you must first disable TSO.\n");
6572 		return -EIO;
6573 	}
6574 
6575 	return ret;
6576 }
6577 
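/* Illustrative note (not part of the driver source): the FCS/VLAN-stripping
 * ordering enforced above means the two features must be toggled in
 * sequence, e.g.
 *
 *	ethtool -K <ifname> rxvlan off	# disable VLAN stripping first
 *	ethtool -K <ifname> rx-fcs on	# then keep the CRC on received frames
 *
 * where "rxvlan" and "rx-fcs" are ethtool's names for
 * NETIF_F_HW_VLAN_CTAG_RX and NETIF_F_RXFCS respectively.
 */
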
6578 /**
6579  * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6580  * @vsi: VSI to setup VLAN properties for
6581  */
6582 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6583 {
6584 	int err;
6585 
6586 	err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6587 	if (err)
6588 		return err;
6589 
6590 	err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6591 	if (err)
6592 		return err;
6593 
6594 	return ice_vsi_add_vlan_zero(vsi);
6595 }
6596 
6597 /**
6598  * ice_vsi_cfg_lan - Setup the VSI LAN related config
6599  * @vsi: the VSI being configured
6600  *
6601  * Return 0 on success and negative value on error
6602  */
6603 int ice_vsi_cfg_lan(struct ice_vsi *vsi)
6604 {
6605 	int err;
6606 
6607 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6608 		ice_set_rx_mode(vsi->netdev);
6609 
6610 		err = ice_vsi_vlan_setup(vsi);
6611 		if (err)
6612 			return err;
6613 	}
6614 	ice_vsi_cfg_dcb_rings(vsi);
6615 
6616 	err = ice_vsi_cfg_lan_txqs(vsi);
6617 	if (!err && ice_is_xdp_ena_vsi(vsi))
6618 		err = ice_vsi_cfg_xdp_txqs(vsi);
6619 	if (!err)
6620 		err = ice_vsi_cfg_rxqs(vsi);
6621 
6622 	return err;
6623 }
6624 
6625 /* THEORY OF MODERATION:
6626  * The ice driver hardware works differently than the hardware that DIMLIB was
6627  * originally made for. ice hardware doesn't have packet count limits that
6628  * can trigger an interrupt, but it *does* have interrupt rate limit support,
6629  * which is hard-coded to a limit of 250,000 ints/second.
6630  * If not using dynamic moderation, the INTRL value can be modified
6631  * by ethtool rx-usecs-high.
6632  */
6633 struct ice_dim {
6634 	/* the throttle rate for interrupts, basically the worst-case delay before
6635 	 * an initial interrupt fires; the value is stored in microseconds.
6636 	 */
6637 	u16 itr;
6638 };
6639 
6640 /* Make a different profile for Rx that doesn't allow quite so aggressive
6641  * moderation at the high end (it maxes out at 126us or about 8k interrupts a
6642  * second).
6643  */
6644 static const struct ice_dim rx_profile[] = {
6645 	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6646 	{8},    /* 125,000 ints/s */
6647 	{16},   /*  62,500 ints/s */
6648 	{62},   /*  16,129 ints/s */
6649 	{126}   /*   7,936 ints/s */
6650 };
6651 
6652 /* The transmit profile, which has the same sorts of values
6653  * as the previous struct
6654  */
6655 static const struct ice_dim tx_profile[] = {
6656 	{2},    /* 500,000 ints/s, capped at 250K by INTRL */
6657 	{8},    /* 125,000 ints/s */
6658 	{40},   /*  25,000 ints/s */
6659 	{128},  /*   7,812 ints/s */
6660 	{256}   /*   3,906 ints/s */
6661 };
6662 
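/* Worked example for the profiles above (not part of the driver source):
 * an ITR entry is a delay in microseconds, so the steady-state interrupt
 * rate is roughly 1,000,000 / itr, e.g.
 *
 *	itr =  8 us -> ~125,000 ints/s
 *	itr = 62 us ->  ~16,129 ints/s
 *	itr =  2 us ->  500,000 ints/s, cut to 250,000 by the INTRL cap
 */
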
6663 static void ice_tx_dim_work(struct work_struct *work)
6664 {
6665 	struct ice_ring_container *rc;
6666 	struct dim *dim;
6667 	u16 itr;
6668 
6669 	dim = container_of(work, struct dim, work);
6670 	rc = dim->priv;
6671 
6672 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6673 
6674 	/* look up the values in our local table */
6675 	itr = tx_profile[dim->profile_ix].itr;
6676 
6677 	ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6678 	ice_write_itr(rc, itr);
6679 
6680 	dim->state = DIM_START_MEASURE;
6681 }
6682 
6683 static void ice_rx_dim_work(struct work_struct *work)
6684 {
6685 	struct ice_ring_container *rc;
6686 	struct dim *dim;
6687 	u16 itr;
6688 
6689 	dim = container_of(work, struct dim, work);
6690 	rc = dim->priv;
6691 
6692 	WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6693 
6694 	/* look up the values in our local table */
6695 	itr = rx_profile[dim->profile_ix].itr;
6696 
6697 	ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6698 	ice_write_itr(rc, itr);
6699 
6700 	dim->state = DIM_START_MEASURE;
6701 }
6702 
6703 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6704 
6705 /**
6706  * ice_init_moderation - set up interrupt moderation
6707  * @q_vector: the vector containing rings to be configured
6708  *
6709  * Set up interrupt moderation registers, with the intent to do the right thing
6710  * when called from reset or from probe, whether or not dynamic moderation
6711  * is enabled. Take special care to write all the registers in both the
6712  * dynamic and non-dynamic moderation cases in order to make sure hardware
6713  * is in a known state.
6714  */
6715 static void ice_init_moderation(struct ice_q_vector *q_vector)
6716 {
6717 	struct ice_ring_container *rc;
6718 	bool tx_dynamic, rx_dynamic;
6719 
6720 	rc = &q_vector->tx;
6721 	INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6722 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6723 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6724 	rc->dim.priv = rc;
6725 	tx_dynamic = ITR_IS_DYNAMIC(rc);
6726 
6727 	/* set the initial TX ITR to match the above */
6728 	ice_write_itr(rc, tx_dynamic ?
6729 		      tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6730 
6731 	rc = &q_vector->rx;
6732 	INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6733 	rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6734 	rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6735 	rc->dim.priv = rc;
6736 	rx_dynamic = ITR_IS_DYNAMIC(rc);
6737 
6738 	/* set the initial RX ITR to match the above */
6739 	ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6740 				       rc->itr_setting);
6741 
6742 	ice_set_q_vector_intrl(q_vector);
6743 }
6744 
6745 /**
6746  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6747  * @vsi: the VSI being configured
6748  */
6749 static void ice_napi_enable_all(struct ice_vsi *vsi)
6750 {
6751 	int q_idx;
6752 
6753 	if (!vsi->netdev)
6754 		return;
6755 
6756 	ice_for_each_q_vector(vsi, q_idx) {
6757 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6758 
6759 		ice_init_moderation(q_vector);
6760 
6761 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6762 			napi_enable(&q_vector->napi);
6763 	}
6764 }
6765 
6766 /**
6767  * ice_up_complete - Finish the last steps of bringing up a connection
6768  * @vsi: The VSI being configured
6769  *
6770  * Return 0 on success and negative value on error
6771  */
6772 static int ice_up_complete(struct ice_vsi *vsi)
6773 {
6774 	struct ice_pf *pf = vsi->back;
6775 	int err;
6776 
6777 	ice_vsi_cfg_msix(vsi);
6778 
6779 	/* Enable only Rx rings; Tx rings were enabled by the FW when the
6780 	 * Tx queue group list was configured and the context bits were
6781 	 * programmed using ice_vsi_cfg_txqs
6782 	 */
6783 	err = ice_vsi_start_all_rx_rings(vsi);
6784 	if (err)
6785 		return err;
6786 
6787 	clear_bit(ICE_VSI_DOWN, vsi->state);
6788 	ice_napi_enable_all(vsi);
6789 	ice_vsi_ena_irq(vsi);
6790 
6791 	if (vsi->port_info &&
6792 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6793 	    ((vsi->netdev && (vsi->type == ICE_VSI_PF ||
6794 			      vsi->type == ICE_VSI_SF)))) {
6795 		ice_print_link_msg(vsi, true);
6796 		netif_tx_start_all_queues(vsi->netdev);
6797 		netif_carrier_on(vsi->netdev);
6798 		ice_ptp_link_change(pf, true);
6799 	}
6800 
6801 	/* Perform an initial read of the statistics registers now to
6802 	 * set the baseline so counters are ready when interface is up
6803 	 */
6804 	ice_update_eth_stats(vsi);
6805 
6806 	if (vsi->type == ICE_VSI_PF)
6807 		ice_service_task_schedule(pf);
6808 
6809 	return 0;
6810 }
6811 
6812 /**
6813  * ice_up - Bring the connection back up after being down
6814  * @vsi: VSI being configured
6815  */
6816 int ice_up(struct ice_vsi *vsi)
6817 {
6818 	int err;
6819 
6820 	err = ice_vsi_cfg_lan(vsi);
6821 	if (!err)
6822 		err = ice_up_complete(vsi);
6823 
6824 	return err;
6825 }
6826 
6827 /**
6828  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6829  * @syncp: pointer to u64_stats_sync
6830  * @stats: stats that pkts and bytes count will be taken from
6831  * @pkts: packets stats counter
6832  * @bytes: bytes stats counter
6833  *
6834  * This function fetches stats from the ring, taking into account the atomic
6835  * operations that need to be performed to read u64 values on 32-bit machines.
6836  */
6837 void
6838 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6839 			     struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6840 {
6841 	unsigned int start;
6842 
6843 	do {
6844 		start = u64_stats_fetch_begin(syncp);
6845 		*pkts = stats.pkts;
6846 		*bytes = stats.bytes;
6847 	} while (u64_stats_fetch_retry(syncp, start));
6848 }
6849 
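/* Editor's sketch (not part of the driver): the writer side of the same
 * u64_stats_sync protocol. The ring hot path publishes pkts/bytes inside an
 * update section so the fetch/retry loop above always sees a consistent pair
 * on 32-bit machines. Names are illustrative.
 */
static inline void example_publish_ring_stats(struct u64_stats_sync *syncp,
					      struct ice_q_stats *stats,
					      unsigned int pkts,
					      unsigned int bytes)
{
	u64_stats_update_begin(syncp);
	stats->pkts += pkts;
	stats->bytes += bytes;
	u64_stats_update_end(syncp);
}
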
6850 /**
6851  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6852  * @vsi: the VSI to be updated
6853  * @vsi_stats: the stats struct to be updated
6854  * @rings: rings to work on
6855  * @count: number of rings
6856  */
6857 static void
6858 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6859 			     struct rtnl_link_stats64 *vsi_stats,
6860 			     struct ice_tx_ring **rings, u16 count)
6861 {
6862 	u16 i;
6863 
6864 	for (i = 0; i < count; i++) {
6865 		struct ice_tx_ring *ring;
6866 		u64 pkts = 0, bytes = 0;
6867 
6868 		ring = READ_ONCE(rings[i]);
6869 		if (!ring || !ring->ring_stats)
6870 			continue;
6871 		ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
6872 					     ring->ring_stats->stats, &pkts,
6873 					     &bytes);
6874 		vsi_stats->tx_packets += pkts;
6875 		vsi_stats->tx_bytes += bytes;
6876 		vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
6877 		vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
6878 		vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
6879 	}
6880 }
6881 
6882 /**
6883  * ice_update_vsi_ring_stats - Update VSI stats counters
6884  * @vsi: the VSI to be updated
6885  */
6886 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6887 {
6888 	struct rtnl_link_stats64 *net_stats, *stats_prev;
6889 	struct rtnl_link_stats64 *vsi_stats;
6890 	struct ice_pf *pf = vsi->back;
6891 	u64 pkts, bytes;
6892 	int i;
6893 
6894 	vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6895 	if (!vsi_stats)
6896 		return;
6897 
6898 	/* reset non-netdev (extended) stats */
6899 	vsi->tx_restart = 0;
6900 	vsi->tx_busy = 0;
6901 	vsi->tx_linearize = 0;
6902 	vsi->rx_buf_failed = 0;
6903 	vsi->rx_page_failed = 0;
6904 
6905 	rcu_read_lock();
6906 
6907 	/* update Tx rings counters */
6908 	ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6909 				     vsi->num_txq);
6910 
6911 	/* update Rx rings counters */
6912 	ice_for_each_rxq(vsi, i) {
6913 		struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6914 		struct ice_ring_stats *ring_stats;
6915 
6916 		ring_stats = ring->ring_stats;
6917 		ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
6918 					     ring_stats->stats, &pkts,
6919 					     &bytes);
6920 		vsi_stats->rx_packets += pkts;
6921 		vsi_stats->rx_bytes += bytes;
6922 		vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
6923 		vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
6924 	}
6925 
6926 	/* update XDP Tx rings counters */
6927 	if (ice_is_xdp_ena_vsi(vsi))
6928 		ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6929 					     vsi->num_xdp_txq);
6930 
6931 	rcu_read_unlock();
6932 
6933 	net_stats = &vsi->net_stats;
6934 	stats_prev = &vsi->net_stats_prev;
6935 
6936 	/* Update netdev counters, but keep in mind that values could start at
6937 	 * a random value after a PF reset. Since we increase the reported stat
6938 	 * by the Cur - Prev difference, we need to be sure that Prev is valid;
6939 	 * if it's not, skip this round.
6940 	 */
6941 	if (likely(pf->stat_prev_loaded)) {
6942 		net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
6943 		net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
6944 		net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
6945 		net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
6946 	}
6947 
6948 	stats_prev->tx_packets = vsi_stats->tx_packets;
6949 	stats_prev->tx_bytes = vsi_stats->tx_bytes;
6950 	stats_prev->rx_packets = vsi_stats->rx_packets;
6951 	stats_prev->rx_bytes = vsi_stats->rx_bytes;
6952 
6953 	kfree(vsi_stats);
6954 }
6955 
6956 /**
6957  * ice_update_vsi_stats - Update VSI stats counters
6958  * @vsi: the VSI to be updated
6959  */
6960 void ice_update_vsi_stats(struct ice_vsi *vsi)
6961 {
6962 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6963 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
6964 	struct ice_pf *pf = vsi->back;
6965 
6966 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6967 	    test_bit(ICE_CFG_BUSY, pf->state))
6968 		return;
6969 
6970 	/* get stats as recorded by Tx/Rx rings */
6971 	ice_update_vsi_ring_stats(vsi);
6972 
6973 	/* get VSI stats as recorded by the hardware */
6974 	ice_update_eth_stats(vsi);
6975 
6976 	cur_ns->tx_errors = cur_es->tx_errors;
6977 	cur_ns->rx_dropped = cur_es->rx_discards;
6978 	cur_ns->tx_dropped = cur_es->tx_discards;
6979 	cur_ns->multicast = cur_es->rx_multicast;
6980 
6981 	/* update some more netdev stats if this is main VSI */
6982 	if (vsi->type == ICE_VSI_PF) {
6983 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
6984 		cur_ns->rx_errors = pf->stats.crc_errors +
6985 				    pf->stats.illegal_bytes +
6986 				    pf->stats.rx_undersize +
6987 				    pf->stats.rx_jabber +
6988 				    pf->stats.rx_fragments +
6989 				    pf->stats.rx_oversize;
6990 		/* record drops from the port level */
6991 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6992 	}
6993 }
6994 
6995 /**
6996  * ice_update_pf_stats - Update PF port stats counters
6997  * @pf: PF whose stats needs to be updated
6998  */
6999 void ice_update_pf_stats(struct ice_pf *pf)
7000 {
7001 	struct ice_hw_port_stats *prev_ps, *cur_ps;
7002 	struct ice_hw *hw = &pf->hw;
7003 	u16 fd_ctr_base;
7004 	u8 port;
7005 
7006 	port = hw->port_info->lport;
7007 	prev_ps = &pf->stats_prev;
7008 	cur_ps = &pf->stats;
7009 
7010 	if (ice_is_reset_in_progress(pf->state))
7011 		pf->stat_prev_loaded = false;
7012 
7013 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
7014 			  &prev_ps->eth.rx_bytes,
7015 			  &cur_ps->eth.rx_bytes);
7016 
7017 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
7018 			  &prev_ps->eth.rx_unicast,
7019 			  &cur_ps->eth.rx_unicast);
7020 
7021 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
7022 			  &prev_ps->eth.rx_multicast,
7023 			  &cur_ps->eth.rx_multicast);
7024 
7025 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
7026 			  &prev_ps->eth.rx_broadcast,
7027 			  &cur_ps->eth.rx_broadcast);
7028 
7029 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
7030 			  &prev_ps->eth.rx_discards,
7031 			  &cur_ps->eth.rx_discards);
7032 
7033 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
7034 			  &prev_ps->eth.tx_bytes,
7035 			  &cur_ps->eth.tx_bytes);
7036 
7037 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
7038 			  &prev_ps->eth.tx_unicast,
7039 			  &cur_ps->eth.tx_unicast);
7040 
7041 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
7042 			  &prev_ps->eth.tx_multicast,
7043 			  &cur_ps->eth.tx_multicast);
7044 
7045 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
7046 			  &prev_ps->eth.tx_broadcast,
7047 			  &cur_ps->eth.tx_broadcast);
7048 
7049 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
7050 			  &prev_ps->tx_dropped_link_down,
7051 			  &cur_ps->tx_dropped_link_down);
7052 
7053 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
7054 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
7055 
7056 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
7057 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
7058 
7059 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
7060 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
7061 
7062 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
7063 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
7064 
7065 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
7066 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
7067 
7068 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
7069 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
7070 
7071 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
7072 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
7073 
7074 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
7075 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
7076 
7077 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
7078 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
7079 
7080 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
7081 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
7082 
7083 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
7084 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
7085 
7086 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
7087 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
7088 
7089 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
7090 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
7091 
7092 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
7093 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
7094 
7095 	fd_ctr_base = hw->fd_ctr_base;
7096 
7097 	ice_stat_update40(hw,
7098 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
7099 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
7100 			  &cur_ps->fd_sb_match);
7101 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
7102 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
7103 
7104 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
7105 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
7106 
7107 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
7108 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
7109 
7110 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
7111 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
7112 
7113 	ice_update_dcb_stats(pf);
7114 
7115 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
7116 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
7117 
7118 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
7119 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
7120 
7121 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
7122 			  &prev_ps->mac_local_faults,
7123 			  &cur_ps->mac_local_faults);
7124 
7125 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
7126 			  &prev_ps->mac_remote_faults,
7127 			  &cur_ps->mac_remote_faults);
7128 
7129 	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
7130 			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
7131 
7132 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
7133 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
7134 
7135 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
7136 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
7137 
7138 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
7139 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
7140 
7141 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
7142 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
7143 
7144 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
7145 
7146 	pf->stat_prev_loaded = true;
7147 }
7148 
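/* Editor's sketch (not part of the driver): what a wrap-safe update of one
 * of the 40-bit hardware counters above amounts to. On the first read after
 * a reset the value is only recorded as the new baseline; afterwards the
 * masked difference handles counter rollover. This is an assumption about
 * what ice_stat_update40() does internally, shown for illustration.
 */
static inline void example_stat_update40(u64 new_data, bool prev_loaded,
					 u64 *prev_stat, u64 *cur_stat)
{
	new_data &= GENMASK_ULL(39, 0);		/* counter is 40 bits wide */

	if (!prev_loaded)
		*prev_stat = new_data;		/* establish the baseline */

	/* masked subtraction is correct even across a 40-bit wraparound */
	*cur_stat += (new_data - *prev_stat) & GENMASK_ULL(39, 0);
	*prev_stat = new_data;
}
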
7149 /**
7150  * ice_get_stats64 - get statistics for network device structure
7151  * @netdev: network interface device structure
7152  * @stats: main device statistics structure
7153  */
7154 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
7155 {
7156 	struct ice_netdev_priv *np = netdev_priv(netdev);
7157 	struct rtnl_link_stats64 *vsi_stats;
7158 	struct ice_vsi *vsi = np->vsi;
7159 
7160 	vsi_stats = &vsi->net_stats;
7161 
7162 	if (!vsi->num_txq || !vsi->num_rxq)
7163 		return;
7164 
7165 	/* netdev packet/byte stats come from the ring counters and are
7166 	 * obtained by summing them up (done by ice_update_vsi_ring_stats).
7167 	 * But only call the update routine and read the registers if the VSI
7168 	 * is not down.
7169 	 */
7170 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
7171 		ice_update_vsi_ring_stats(vsi);
7172 	stats->tx_packets = vsi_stats->tx_packets;
7173 	stats->tx_bytes = vsi_stats->tx_bytes;
7174 	stats->rx_packets = vsi_stats->rx_packets;
7175 	stats->rx_bytes = vsi_stats->rx_bytes;
7176 
7177 	/* The rest of the stats can be read from the hardware but instead we
7178 	 * just return values that the watchdog task has already obtained from
7179 	 * the hardware.
7180 	 */
7181 	stats->multicast = vsi_stats->multicast;
7182 	stats->tx_errors = vsi_stats->tx_errors;
7183 	stats->tx_dropped = vsi_stats->tx_dropped;
7184 	stats->rx_errors = vsi_stats->rx_errors;
7185 	stats->rx_dropped = vsi_stats->rx_dropped;
7186 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
7187 	stats->rx_length_errors = vsi_stats->rx_length_errors;
7188 }
7189 
7190 /**
7191  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
7192  * @vsi: VSI having NAPI disabled
7193  */
7194 static void ice_napi_disable_all(struct ice_vsi *vsi)
7195 {
7196 	int q_idx;
7197 
7198 	if (!vsi->netdev)
7199 		return;
7200 
7201 	ice_for_each_q_vector(vsi, q_idx) {
7202 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
7203 
7204 		if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
7205 			napi_disable(&q_vector->napi);
7206 
7207 		cancel_work_sync(&q_vector->tx.dim.work);
7208 		cancel_work_sync(&q_vector->rx.dim.work);
7209 	}
7210 }
7211 
7212 /**
7213  * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
7214  * @vsi: the VSI being un-configured
7215  */
7216 static void ice_vsi_dis_irq(struct ice_vsi *vsi)
7217 {
7218 	struct ice_pf *pf = vsi->back;
7219 	struct ice_hw *hw = &pf->hw;
7220 	u32 val;
7221 	int i;
7222 
7223 	/* disable interrupt causation from each Rx queue; Tx queues are
7224 	 * handled in ice_vsi_stop_tx_ring()
7225 	 */
7226 	if (vsi->rx_rings) {
7227 		ice_for_each_rxq(vsi, i) {
7228 			if (vsi->rx_rings[i]) {
7229 				u16 reg;
7230 
7231 				reg = vsi->rx_rings[i]->reg_idx;
7232 				val = rd32(hw, QINT_RQCTL(reg));
7233 				val &= ~QINT_RQCTL_CAUSE_ENA_M;
7234 				wr32(hw, QINT_RQCTL(reg), val);
7235 			}
7236 		}
7237 	}
7238 
7239 	/* disable each interrupt */
7240 	ice_for_each_q_vector(vsi, i) {
7241 		if (!vsi->q_vectors[i])
7242 			continue;
7243 		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
7244 	}
7245 
7246 	ice_flush(hw);
7247 
7248 	/* don't call synchronize_irq() for VF's from the host */
7249 	if (vsi->type == ICE_VSI_VF)
7250 		return;
7251 
7252 	ice_for_each_q_vector(vsi, i)
7253 		synchronize_irq(vsi->q_vectors[i]->irq.virq);
7254 }
7255 
7256 /**
7257  * ice_down - Shutdown the connection
7258  * @vsi: The VSI being stopped
7259  *
7260  * Caller of this function is expected to set the vsi->state ICE_DOWN bit
7261  */
7262 int ice_down(struct ice_vsi *vsi)
7263 {
7264 	int i, tx_err, rx_err, vlan_err = 0;
7265 
7266 	WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
7267 
7268 	if (vsi->netdev) {
7269 		vlan_err = ice_vsi_del_vlan_zero(vsi);
7270 		ice_ptp_link_change(vsi->back, false);
7271 		netif_carrier_off(vsi->netdev);
7272 		netif_tx_disable(vsi->netdev);
7273 	}
7274 
7275 	ice_vsi_dis_irq(vsi);
7276 
7277 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
7278 	if (tx_err)
7279 		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
7280 			   vsi->vsi_num, tx_err);
7281 	if (!tx_err && vsi->xdp_rings) {
7282 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
7283 		if (tx_err)
7284 			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
7285 				   vsi->vsi_num, tx_err);
7286 	}
7287 
7288 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
7289 	if (rx_err)
7290 		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
7291 			   vsi->vsi_num, rx_err);
7292 
7293 	ice_napi_disable_all(vsi);
7294 
7295 	ice_for_each_txq(vsi, i)
7296 		ice_clean_tx_ring(vsi->tx_rings[i]);
7297 
7298 	if (vsi->xdp_rings)
7299 		ice_for_each_xdp_txq(vsi, i)
7300 			ice_clean_tx_ring(vsi->xdp_rings[i]);
7301 
7302 	ice_for_each_rxq(vsi, i)
7303 		ice_clean_rx_ring(vsi->rx_rings[i]);
7304 
7305 	if (tx_err || rx_err || vlan_err) {
7306 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
7307 			   vsi->vsi_num, vsi->vsw->sw_id);
7308 		return -EIO;
7309 	}
7310 
7311 	return 0;
7312 }
7313 
7314 /**
7315  * ice_down_up - shutdown the VSI connection and bring it up
7316  * @vsi: the VSI to be reconnected
7317  */
7318 int ice_down_up(struct ice_vsi *vsi)
7319 {
7320 	int ret;
7321 
7322 	/* if DOWN already set, nothing to do */
7323 	if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
7324 		return 0;
7325 
7326 	ret = ice_down(vsi);
7327 	if (ret)
7328 		return ret;
7329 
7330 	ret = ice_up(vsi);
7331 	if (ret) {
7332 		netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
7333 		return ret;
7334 	}
7335 
7336 	return 0;
7337 }
7338 
7339 /**
7340  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
7341  * @vsi: VSI having resources allocated
7342  *
7343  * Return 0 on success, negative on failure
7344  */
7345 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
7346 {
7347 	int i, err = 0;
7348 
7349 	if (!vsi->num_txq) {
7350 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
7351 			vsi->vsi_num);
7352 		return -EINVAL;
7353 	}
7354 
7355 	ice_for_each_txq(vsi, i) {
7356 		struct ice_tx_ring *ring = vsi->tx_rings[i];
7357 
7358 		if (!ring)
7359 			return -EINVAL;
7360 
7361 		if (vsi->netdev)
7362 			ring->netdev = vsi->netdev;
7363 		err = ice_setup_tx_ring(ring);
7364 		if (err)
7365 			break;
7366 	}
7367 
7368 	return err;
7369 }
7370 
7371 /**
7372  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
7373  * @vsi: VSI having resources allocated
7374  *
7375  * Return 0 on success, negative on failure
7376  */
7377 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
7378 {
7379 	int i, err = 0;
7380 
7381 	if (!vsi->num_rxq) {
7382 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
7383 			vsi->vsi_num);
7384 		return -EINVAL;
7385 	}
7386 
7387 	ice_for_each_rxq(vsi, i) {
7388 		struct ice_rx_ring *ring = vsi->rx_rings[i];
7389 
7390 		if (!ring)
7391 			return -EINVAL;
7392 
7393 		if (vsi->netdev)
7394 			ring->netdev = vsi->netdev;
7395 		err = ice_setup_rx_ring(ring);
7396 		if (err)
7397 			break;
7398 	}
7399 
7400 	return err;
7401 }
7402 
7403 /**
7404  * ice_vsi_open_ctrl - open control VSI for use
7405  * @vsi: the VSI to open
7406  *
7407  * Initialization of the Control VSI
7408  *
7409  * Returns 0 on success, negative value on error
7410  */
7411 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
7412 {
7413 	char int_name[ICE_INT_NAME_STR_LEN];
7414 	struct ice_pf *pf = vsi->back;
7415 	struct device *dev;
7416 	int err;
7417 
7418 	dev = ice_pf_to_dev(pf);
7419 	/* allocate descriptors */
7420 	err = ice_vsi_setup_tx_rings(vsi);
7421 	if (err)
7422 		goto err_setup_tx;
7423 
7424 	err = ice_vsi_setup_rx_rings(vsi);
7425 	if (err)
7426 		goto err_setup_rx;
7427 
7428 	err = ice_vsi_cfg_lan(vsi);
7429 	if (err)
7430 		goto err_setup_rx;
7431 
7432 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
7433 		 dev_driver_string(dev), dev_name(dev));
7434 	err = ice_vsi_req_irq_msix(vsi, int_name);
7435 	if (err)
7436 		goto err_setup_rx;
7437 
7438 	ice_vsi_cfg_msix(vsi);
7439 
7440 	err = ice_vsi_start_all_rx_rings(vsi);
7441 	if (err)
7442 		goto err_up_complete;
7443 
7444 	clear_bit(ICE_VSI_DOWN, vsi->state);
7445 	ice_vsi_ena_irq(vsi);
7446 
7447 	return 0;
7448 
7449 err_up_complete:
7450 	ice_down(vsi);
7451 err_setup_rx:
7452 	ice_vsi_free_rx_rings(vsi);
7453 err_setup_tx:
7454 	ice_vsi_free_tx_rings(vsi);
7455 
7456 	return err;
7457 }
7458 
7459 /**
7460  * ice_vsi_open - Called when a network interface is made active
7461  * @vsi: the VSI to open
7462  *
7463  * Initialization of the VSI
7464  *
7465  * Returns 0 on success, negative value on error
7466  */
7467 int ice_vsi_open(struct ice_vsi *vsi)
7468 {
7469 	char int_name[ICE_INT_NAME_STR_LEN];
7470 	struct ice_pf *pf = vsi->back;
7471 	int err;
7472 
7473 	/* allocate descriptors */
7474 	err = ice_vsi_setup_tx_rings(vsi);
7475 	if (err)
7476 		goto err_setup_tx;
7477 
7478 	err = ice_vsi_setup_rx_rings(vsi);
7479 	if (err)
7480 		goto err_setup_rx;
7481 
7482 	err = ice_vsi_cfg_lan(vsi);
7483 	if (err)
7484 		goto err_setup_rx;
7485 
7486 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7487 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7488 	err = ice_vsi_req_irq_msix(vsi, int_name);
7489 	if (err)
7490 		goto err_setup_rx;
7491 
7492 	if (bitmap_empty(pf->txtime_txqs, pf->max_pf_txqs))
7493 		ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7494 
7495 	if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) {
7496 		/* Notify the stack of the actual queue counts. */
7497 		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7498 		if (err)
7499 			goto err_set_qs;
7500 
7501 		err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7502 		if (err)
7503 			goto err_set_qs;
7504 
7505 		ice_vsi_set_napi_queues(vsi);
7506 	}
7507 
7508 	err = ice_up_complete(vsi);
7509 	if (err)
7510 		goto err_up_complete;
7511 
7512 	return 0;
7513 
7514 err_up_complete:
7515 	ice_down(vsi);
7516 err_set_qs:
7517 	ice_vsi_free_irq(vsi);
7518 err_setup_rx:
7519 	ice_vsi_free_rx_rings(vsi);
7520 err_setup_tx:
7521 	ice_vsi_free_tx_rings(vsi);
7522 
7523 	return err;
7524 }
7525 
7526 /**
7527  * ice_vsi_release_all - Delete all VSIs
7528  * @pf: PF from which all VSIs are being removed
7529  */
7530 static void ice_vsi_release_all(struct ice_pf *pf)
7531 {
7532 	int err, i;
7533 
7534 	if (!pf->vsi)
7535 		return;
7536 
7537 	ice_for_each_vsi(pf, i) {
7538 		if (!pf->vsi[i])
7539 			continue;
7540 
7541 		if (pf->vsi[i]->type == ICE_VSI_CHNL)
7542 			continue;
7543 
7544 		err = ice_vsi_release(pf->vsi[i]);
7545 		if (err)
7546 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7547 				i, err, pf->vsi[i]->vsi_num);
7548 	}
7549 }
7550 
7551 /**
7552  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7553  * @pf: pointer to the PF instance
7554  * @type: VSI type to rebuild
7555  *
7556  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7557  */
7558 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7559 {
7560 	struct device *dev = ice_pf_to_dev(pf);
7561 	int i, err;
7562 
7563 	ice_for_each_vsi(pf, i) {
7564 		struct ice_vsi *vsi = pf->vsi[i];
7565 
7566 		if (!vsi || vsi->type != type)
7567 			continue;
7568 
7569 		/* rebuild the VSI */
7570 		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
7571 		if (err) {
7572 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7573 				err, vsi->idx, ice_vsi_type_str(type));
7574 			return err;
7575 		}
7576 
7577 		/* replay filters for the VSI */
7578 		err = ice_replay_vsi(&pf->hw, vsi->idx);
7579 		if (err) {
7580 			dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7581 				err, vsi->idx, ice_vsi_type_str(type));
7582 			return err;
7583 		}
7584 
7585 		/* Re-map HW VSI number, using VSI handle that has been
7586 		 * previously validated in ice_replay_vsi() call above
7587 		 */
7588 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7589 
7590 		/* enable the VSI */
7591 		err = ice_ena_vsi(vsi, false);
7592 		if (err) {
7593 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7594 				err, vsi->idx, ice_vsi_type_str(type));
7595 			return err;
7596 		}
7597 
7598 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7599 			 ice_vsi_type_str(type));
7600 	}
7601 
7602 	return 0;
7603 }
7604 
7605 /**
7606  * ice_update_pf_netdev_link - Update PF netdev link status
7607  * @pf: pointer to the PF instance
7608  */
7609 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7610 {
7611 	bool link_up;
7612 	int i;
7613 
7614 	ice_for_each_vsi(pf, i) {
7615 		struct ice_vsi *vsi = pf->vsi[i];
7616 
7617 		if (!vsi || vsi->type != ICE_VSI_PF)
7618 			return;
7619 
7620 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7621 		if (link_up) {
7622 			netif_carrier_on(pf->vsi[i]->netdev);
7623 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7624 		} else {
7625 			netif_carrier_off(pf->vsi[i]->netdev);
7626 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7627 		}
7628 	}
7629 }
7630 
7631 /**
7632  * ice_rebuild - rebuild after reset
7633  * @pf: PF to rebuild
7634  * @reset_type: type of reset
7635  *
7636  * Do not rebuild VF VSIs in this flow because that is already handled via
7637  * ice_reset_all_vfs(). The requirements for resetting a VF after a
7638  * PFR/CORER/GLOBR/etc. are different from the normal flow, and we don't want
7639  * to reset/rebuild the VF VSIs twice.
7640  */
7641 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7642 {
7643 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
7644 	struct device *dev = ice_pf_to_dev(pf);
7645 	struct ice_hw *hw = &pf->hw;
7646 	bool dvm;
7647 	int err;
7648 
7649 	if (test_bit(ICE_DOWN, pf->state))
7650 		goto clear_recovery;
7651 
7652 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7653 
7654 #define ICE_EMP_RESET_SLEEP_MS 5000
7655 	if (reset_type == ICE_RESET_EMPR) {
7656 		/* If an EMP reset has occurred, any previously pending flash
7657 		 * update will have completed. We no longer know whether or
7658 		 * not the NVM update EMP reset is restricted.
7659 		 */
7660 		pf->fw_emp_reset_disabled = false;
7661 
7662 		msleep(ICE_EMP_RESET_SLEEP_MS);
7663 	}
7664 
7665 	err = ice_init_all_ctrlq(hw);
7666 	if (err) {
7667 		dev_err(dev, "control queues init failed %d\n", err);
7668 		goto err_init_ctrlq;
7669 	}
7670 
7671 	/* if DDP was previously loaded successfully */
7672 	if (!ice_is_safe_mode(pf)) {
7673 		/* reload the SW DB of filter tables */
7674 		if (reset_type == ICE_RESET_PFR)
7675 			ice_fill_blk_tbls(hw);
7676 		else
7677 			/* Reload DDP Package after CORER/GLOBR reset */
7678 			ice_load_pkg(NULL, pf);
7679 	}
7680 
7681 	err = ice_clear_pf_cfg(hw);
7682 	if (err) {
7683 		dev_err(dev, "clear PF configuration failed %d\n", err);
7684 		goto err_init_ctrlq;
7685 	}
7686 
7687 	ice_clear_pxe_mode(hw);
7688 
7689 	err = ice_init_nvm(hw);
7690 	if (err) {
7691 		dev_err(dev, "ice_init_nvm failed %d\n", err);
7692 		goto err_init_ctrlq;
7693 	}
7694 
7695 	err = ice_get_caps(hw);
7696 	if (err) {
7697 		dev_err(dev, "ice_get_caps failed %d\n", err);
7698 		goto err_init_ctrlq;
7699 	}
7700 
7701 	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7702 	if (err) {
7703 		dev_err(dev, "set_mac_cfg failed %d\n", err);
7704 		goto err_init_ctrlq;
7705 	}
7706 
7707 	dvm = ice_is_dvm_ena(hw);
7708 
7709 	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7710 	if (err)
7711 		goto err_init_ctrlq;
7712 
7713 	err = ice_sched_init_port(hw->port_info);
7714 	if (err)
7715 		goto err_sched_init_port;
7716 
7717 	/* start misc vector */
7718 	err = ice_req_irq_msix_misc(pf);
7719 	if (err) {
7720 		dev_err(dev, "misc vector setup failed: %d\n", err);
7721 		goto err_sched_init_port;
7722 	}
7723 
7724 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7725 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7726 		if (!rd32(hw, PFQF_FD_SIZE)) {
7727 			u16 unused, guar, b_effort;
7728 
7729 			guar = hw->func_caps.fd_fltr_guar;
7730 			b_effort = hw->func_caps.fd_fltr_best_effort;
7731 
7732 			/* force guaranteed filter pool for PF */
7733 			ice_alloc_fd_guar_item(hw, &unused, guar);
7734 			/* force shared filter pool for PF */
7735 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7736 		}
7737 	}
7738 
7739 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7740 		ice_dcb_rebuild(pf);
7741 
7742 	/* If the PF previously had PTP enabled, PTP init needs to happen before
7743 	 * the VSI rebuild; otherwise the PTP link status events will
7744 	 * fail.
7745 	 */
7746 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7747 		ice_ptp_rebuild(pf, reset_type);
7748 
7749 	if (ice_is_feature_supported(pf, ICE_F_GNSS))
7750 		ice_gnss_init(pf);
7751 
7752 	/* rebuild PF VSI */
7753 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7754 	if (err) {
7755 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7756 		goto err_vsi_rebuild;
7757 	}
7758 
7759 	if (reset_type == ICE_RESET_PFR) {
7760 		err = ice_rebuild_channels(pf);
7761 		if (err) {
7762 			dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7763 				err);
7764 			goto err_vsi_rebuild;
7765 		}
7766 	}
7767 
7768 	/* If Flow Director is active */
7769 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7770 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7771 		if (err) {
7772 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
7773 			goto err_vsi_rebuild;
7774 		}
7775 
7776 		/* replay HW Flow Director recipes */
7777 		if (hw->fdir_prof)
7778 			ice_fdir_replay_flows(hw);
7779 
7780 		/* replay Flow Director filters */
7781 		ice_fdir_replay_fltrs(pf);
7782 
7783 		ice_rebuild_arfs(pf);
7784 	}
7785 
7786 	if (vsi && vsi->netdev)
7787 		netif_device_attach(vsi->netdev);
7788 
7789 	ice_update_pf_netdev_link(pf);
7790 
7791 	/* tell the firmware we are up */
7792 	err = ice_send_version(pf);
7793 	if (err) {
7794 		dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7795 			err);
7796 		goto err_vsi_rebuild;
7797 	}
7798 
7799 	ice_replay_post(hw);
7800 
7801 	/* if we get here, reset flow is successful */
7802 	clear_bit(ICE_RESET_FAILED, pf->state);
7803 
7804 	ice_health_clear(pf);
7805 
7806 	ice_plug_aux_dev(pf);
7807 	if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
7808 		ice_lag_rebuild(pf);
7809 
7810 	/* Restore timestamp mode settings after VSI rebuild */
7811 	ice_ptp_restore_timestamp_mode(pf);
7812 
7813 	/* Start PTP periodic work after VSI is fully rebuilt */
7814 	ice_ptp_queue_work(pf);
7815 	return;
7816 
7817 err_vsi_rebuild:
7818 err_sched_init_port:
7819 	ice_sched_cleanup_all(hw);
7820 err_init_ctrlq:
7821 	ice_shutdown_all_ctrlq(hw, false);
7822 	set_bit(ICE_RESET_FAILED, pf->state);
7823 clear_recovery:
7824 	/* set this bit in PF state to control service task scheduling */
7825 	set_bit(ICE_NEEDS_RESTART, pf->state);
7826 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
7827 }
7828 
7829 /**
7830  * ice_change_mtu - NDO callback to change the MTU
7831  * @netdev: network interface device structure
7832  * @new_mtu: new value for maximum frame size
7833  *
7834  * Returns 0 on success, negative on failure
7835  */
7836 int ice_change_mtu(struct net_device *netdev, int new_mtu)
7837 {
7838 	struct ice_netdev_priv *np = netdev_priv(netdev);
7839 	struct ice_vsi *vsi = np->vsi;
7840 	struct ice_pf *pf = vsi->back;
7841 	struct bpf_prog *prog;
7842 	u8 count = 0;
7843 	int err = 0;
7844 
7845 	if (new_mtu == (int)netdev->mtu) {
7846 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7847 		return 0;
7848 	}
7849 
7850 	prog = vsi->xdp_prog;
7851 	if (prog && !prog->aux->xdp_has_frags) {
7852 		int frame_size = ice_max_xdp_frame_size(vsi);
7853 
7854 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7855 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
7856 				   frame_size - ICE_ETH_PKT_HDR_PAD);
7857 			return -EINVAL;
7858 		}
7859 	}
7860 
7861 	/* if a reset is in progress, wait for some time for it to complete */
7862 	do {
7863 		if (ice_is_reset_in_progress(pf->state)) {
7864 			count++;
7865 			usleep_range(1000, 2000);
7866 		} else {
7867 			break;
7868 		}
7869 
7870 	} while (count < 100);
7871 
7872 	if (count == 100) {
7873 		netdev_err(netdev, "can't change MTU. Device is busy\n");
7874 		return -EBUSY;
7875 	}
7876 
7877 	WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu);
7878 	err = ice_down_up(vsi);
7879 	if (err)
7880 		return err;
7881 
7882 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7883 	set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7884 
7885 	return err;
7886 }
7887 
7888 /**
7889  * ice_set_rss_lut - Set RSS LUT
7890  * @vsi: Pointer to VSI structure
7891  * @lut: Lookup table
7892  * @lut_size: Lookup table size
7893  *
7894  * Returns 0 on success, negative on failure
7895  */
7896 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7897 {
7898 	struct ice_aq_get_set_rss_lut_params params = {};
7899 	struct ice_hw *hw = &vsi->back->hw;
7900 	int status;
7901 
7902 	if (!lut)
7903 		return -EINVAL;
7904 
7905 	params.vsi_handle = vsi->idx;
7906 	params.lut_size = lut_size;
7907 	params.lut_type = vsi->rss_lut_type;
7908 	params.lut = lut;
7909 
7910 	status = ice_aq_set_rss_lut(hw, &params);
7911 	if (status)
7912 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7913 			status, libie_aq_str(hw->adminq.sq_last_status));
7914 
7915 	return status;
7916 }
7917 
7918 /**
7919  * ice_set_rss_key - Set RSS key
7920  * @vsi: Pointer to the VSI structure
7921  * @seed: RSS hash seed
7922  *
7923  * Returns 0 on success, negative on failure
7924  */
7925 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7926 {
7927 	struct ice_hw *hw = &vsi->back->hw;
7928 	int status;
7929 
7930 	if (!seed)
7931 		return -EINVAL;
7932 
7933 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7934 	if (status)
7935 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7936 			status, libie_aq_str(hw->adminq.sq_last_status));
7937 
7938 	return status;
7939 }
7940 
7941 /**
7942  * ice_get_rss_lut - Get RSS LUT
7943  * @vsi: Pointer to VSI structure
7944  * @lut: Buffer to store the lookup table entries
7945  * @lut_size: Size of buffer to store the lookup table entries
7946  *
7947  * Returns 0 on success, negative on failure
7948  */
7949 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7950 {
7951 	struct ice_aq_get_set_rss_lut_params params = {};
7952 	struct ice_hw *hw = &vsi->back->hw;
7953 	int status;
7954 
7955 	if (!lut)
7956 		return -EINVAL;
7957 
7958 	params.vsi_handle = vsi->idx;
7959 	params.lut_size = lut_size;
7960 	params.lut_type = vsi->rss_lut_type;
7961 	params.lut = lut;
7962 
7963 	status = ice_aq_get_rss_lut(hw, &params);
7964 	if (status)
7965 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7966 			status, libie_aq_str(hw->adminq.sq_last_status));
7967 
7968 	return status;
7969 }
7970 
7971 /**
7972  * ice_get_rss_key - Get RSS key
7973  * @vsi: Pointer to VSI structure
7974  * @seed: Buffer to store the key in
7975  *
7976  * Returns 0 on success, negative on failure
7977  */
7978 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7979 {
7980 	struct ice_hw *hw = &vsi->back->hw;
7981 	int status;
7982 
7983 	if (!seed)
7984 		return -EINVAL;
7985 
7986 	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7987 	if (status)
7988 		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7989 			status, libie_aq_str(hw->adminq.sq_last_status));
7990 
7991 	return status;
7992 }
7993 
7994 /**
7995  * ice_get_rss - Get RSS LUT and/or key
7996  * @vsi: Pointer to VSI structure
7997  * @seed: Buffer to store the key in
7998  * @lut: Buffer to store the lookup table entries
7999  * @lut_size: Size of buffer to store the lookup table entries
8000  *
8001  * Return: 0 on success, negative on failure
8002  */
8003 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8004 {
8005 	int err;
8006 
8007 	if (seed) {
8008 		err = ice_get_rss_key(vsi, seed);
8009 		if (err)
8010 			return err;
8011 	}
8012 
8013 	if (lut) {
8014 		err = ice_get_rss_lut(vsi, lut, lut_size);
8015 		if (err)
8016 			return err;
8017 	}
8018 
8019 	return 0;
8020 }
8021 
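/* Editor's sketch (not part of the driver): callers of ice_set_rss_lut()
 * typically spread the lookup-table entries round-robin across the active Rx
 * queues before handing the buffer down. A minimal illustration with a
 * hypothetical helper name:
 */
static inline void example_fill_rss_lut(u8 *lut, u16 lut_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < lut_size; i++)
		lut[i] = i % rss_size;	/* queue index for hash bucket i */
}
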
8022 /**
8023  * ice_set_rss_hfunc - Set RSS HASH function
8024  * @vsi: Pointer to VSI structure
8025  * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*)
8026  *
8027  * Returns 0 on success, negative on failure
8028  */
8029 int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc)
8030 {
8031 	struct ice_hw *hw = &vsi->back->hw;
8032 	struct ice_vsi_ctx *ctx;
8033 	bool symm;
8034 	int err;
8035 
8036 	if (hfunc == vsi->rss_hfunc)
8037 		return 0;
8038 
8039 	if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ &&
8040 	    hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
8041 		return -EOPNOTSUPP;
8042 
8043 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
8044 	if (!ctx)
8045 		return -ENOMEM;
8046 
8047 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
8048 	ctx->info.q_opt_rss = vsi->info.q_opt_rss;
8049 	ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
8050 	ctx->info.q_opt_rss |=
8051 		FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc);
8052 	ctx->info.q_opt_tc = vsi->info.q_opt_tc;
8053 	ctx->info.q_opt_flags = vsi->info.q_opt_flags;
8054 
8055 	err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
8056 	if (err) {
8057 		dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n",
8058 			vsi->vsi_num, err);
8059 	} else {
8060 		vsi->info.q_opt_rss = ctx->info.q_opt_rss;
8061 		vsi->rss_hfunc = hfunc;
8062 		netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n",
8063 			    hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ?
8064 			    "Symmetric " : "");
8065 	}
8066 	kfree(ctx);
8067 	if (err)
8068 		return err;
8069 
8070 	/* Fix the symmetry setting for all existing RSS configurations */
8071 	symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
8072 	return ice_set_rss_cfg_symm(hw, vsi, symm);
8073 }
8074 
8075 /**
8076  * ice_bridge_getlink - Get the hardware bridge mode
8077  * @skb: skb buff
8078  * @pid: process ID
8079  * @seq: RTNL message seq
8080  * @dev: the netdev being configured
8081  * @filter_mask: filter mask passed in
8082  * @nlflags: netlink flags passed in
8083  *
8084  * Return the bridge mode (VEB/VEPA)
8085  */
8086 static int
8087 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8088 		   struct net_device *dev, u32 filter_mask, int nlflags)
8089 {
8090 	struct ice_pf *pf = ice_netdev_to_pf(dev);
8091 	u16 bmode;
8092 
8093 	bmode = pf->first_sw->bridge_mode;
8094 
8095 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
8096 				       filter_mask, NULL);
8097 }
8098 
8099 /**
8100  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
8101  * @vsi: Pointer to VSI structure
8102  * @bmode: Hardware bridge mode (VEB/VEPA)
8103  *
8104  * Returns 0 on success, negative on failure
8105  */
8106 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
8107 {
8108 	struct ice_aqc_vsi_props *vsi_props;
8109 	struct ice_hw *hw = &vsi->back->hw;
8110 	struct ice_vsi_ctx *ctxt;
8111 	int ret;
8112 
8113 	vsi_props = &vsi->info;
8114 
8115 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
8116 	if (!ctxt)
8117 		return -ENOMEM;
8118 
8119 	ctxt->info = vsi->info;
8120 
8121 	if (bmode == BRIDGE_MODE_VEB)
8122 		/* change from VEPA to VEB mode */
8123 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
8124 	else
8125 		/* change from VEB to VEPA mode */
8126 		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
8127 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
8128 
8129 	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
8130 	if (ret) {
8131 		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
8132 			bmode, ret, libie_aq_str(hw->adminq.sq_last_status));
8133 		goto out;
8134 	}
8135 	/* Update sw flags for book keeping */
8136 	vsi_props->sw_flags = ctxt->info.sw_flags;
8137 
8138 out:
8139 	kfree(ctxt);
8140 	return ret;
8141 }
8142 
8143 /**
8144  * ice_bridge_setlink - Set the hardware bridge mode
8145  * @dev: the netdev being configured
8146  * @nlh: RTNL message
8147  * @flags: bridge setlink flags
8148  * @extack: netlink extended ack
8149  *
8150  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
8151  * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if
8152  * not already set for all VSIs connected to this switch. And also update the
8153  * unicast switch filter rules for the corresponding switch of the netdev.
8154  */
8155 static int
8156 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
8157 		   u16 __always_unused flags,
8158 		   struct netlink_ext_ack __always_unused *extack)
8159 {
8160 	struct ice_pf *pf = ice_netdev_to_pf(dev);
8161 	struct nlattr *attr, *br_spec;
8162 	struct ice_hw *hw = &pf->hw;
8163 	struct ice_sw *pf_sw;
8164 	int rem, v, err = 0;
8165 
8166 	pf_sw = pf->first_sw;
8167 	/* find the attribute in the netlink message */
8168 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8169 	if (!br_spec)
8170 		return -EINVAL;
8171 
8172 	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
8173 		__u16 mode = nla_get_u16(attr);
8174 
8175 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
8176 			return -EINVAL;
8177 		/* Continue  if bridge mode is not being flipped */
8178 		if (mode == pf_sw->bridge_mode)
8179 			continue;
8180 		/* Iterates through the PF VSI list and update the loopback
8181 		 * mode of the VSI
8182 		 */
8183 		ice_for_each_vsi(pf, v) {
8184 			if (!pf->vsi[v])
8185 				continue;
8186 			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
8187 			if (err)
8188 				return err;
8189 		}
8190 
8191 		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
8192 		/* Update the unicast switch filter rules for the corresponding
8193 		 * switch of the netdev
8194 		 */
8195 		err = ice_update_sw_rule_bridge_mode(hw);
8196 		if (err) {
8197 			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
8198 				   mode, err,
8199 				   libie_aq_str(hw->adminq.sq_last_status));
8200 			/* revert hw->evb_veb */
8201 			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
8202 			return err;
8203 		}
8204 
8205 		pf_sw->bridge_mode = mode;
8206 	}
8207 
8208 	return 0;
8209 }
8210 
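/* Editor's note (illustration, not part of the driver): the setlink path
 * above is normally exercised from user space via iproute2, e.g.:
 *
 *	bridge link set dev <ifname> hwmode veb
 *	bridge link set dev <ifname> hwmode vepa
 *
 * while ice_bridge_getlink() reports the currently active mode back.
 */
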
8211 /**
8212  * ice_tx_timeout - Respond to a Tx Hang
8213  * @netdev: network interface device structure
8214  * @txqueue: Tx queue
8215  */
8216 void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
8217 {
8218 	struct ice_netdev_priv *np = netdev_priv(netdev);
8219 	struct ice_tx_ring *tx_ring = NULL;
8220 	struct ice_vsi *vsi = np->vsi;
8221 	struct ice_pf *pf = vsi->back;
8222 	u32 i;
8223 
8224 	pf->tx_timeout_count++;
8225 
8226 	/* Check if PFC is enabled for the TC to which the queue belongs.
8227 	 * If so, the Tx timeout is not caused by a hung queue and there is
8228 	 * no need to reset and rebuild.
8229 	 */
8230 	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
8231 		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
8232 			 txqueue);
8233 		return;
8234 	}
8235 
8236 	/* now that we have an index, find the tx_ring struct */
8237 	ice_for_each_txq(vsi, i)
8238 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
8239 			if (txqueue == vsi->tx_rings[i]->q_index) {
8240 				tx_ring = vsi->tx_rings[i];
8241 				break;
8242 			}
8243 
8244 	/* Reset recovery level if enough time has elapsed after last timeout.
8245 	 * Also ensure no new reset action happens before next timeout period.
8246 	 */
8247 	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
8248 		pf->tx_timeout_recovery_level = 1;
8249 	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
8250 				       netdev->watchdog_timeo)))
8251 		return;
8252 
8253 	if (tx_ring) {
8254 		struct ice_hw *hw = &pf->hw;
8255 		u32 head, intr = 0;
8256 
8257 		head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
8258 				 rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
8259 		/* Read interrupt register */
8260 		intr = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
8261 
8262 		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
8263 			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
8264 			    head, tx_ring->next_to_use, intr);
8265 
8266 		ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr);
8267 	}
8268 
8269 	pf->tx_timeout_last_recovery = jiffies;
8270 	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
8271 		    pf->tx_timeout_recovery_level, txqueue);
8272 
8273 	switch (pf->tx_timeout_recovery_level) {
8274 	case 1:
8275 		set_bit(ICE_PFR_REQ, pf->state);
8276 		break;
8277 	case 2:
8278 		set_bit(ICE_CORER_REQ, pf->state);
8279 		break;
8280 	case 3:
8281 		set_bit(ICE_GLOBR_REQ, pf->state);
8282 		break;
8283 	default:
8284 		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
8285 		set_bit(ICE_DOWN, pf->state);
8286 		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
8287 		set_bit(ICE_SERVICE_DIS, pf->state);
8288 		break;
8289 	}
8290 
8291 	ice_service_task_schedule(pf);
8292 	pf->tx_timeout_recovery_level++;
8293 }
8294 
8295 /**
8296  * ice_setup_tc_cls_flower - flower classifier offloads
8297  * @np: net device to configure
8298  * @filter_dev: device on which filter is added
8299  * @cls_flower: offload data
8300  * @ingress: if the rule is added to an ingress block
8301  *
8302  * Return: 0 if the flower was successfully added or deleted,
8303  *	   negative error code otherwise.
8304  */
8305 static int
8306 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
8307 			struct net_device *filter_dev,
8308 			struct flow_cls_offload *cls_flower,
8309 			bool ingress)
8310 {
8311 	struct ice_vsi *vsi = np->vsi;
8312 
8313 	if (cls_flower->common.chain_index)
8314 		return -EOPNOTSUPP;
8315 
8316 	switch (cls_flower->command) {
8317 	case FLOW_CLS_REPLACE:
8318 		return ice_add_cls_flower(filter_dev, vsi, cls_flower, ingress);
8319 	case FLOW_CLS_DESTROY:
8320 		return ice_del_cls_flower(vsi, cls_flower);
8321 	default:
8322 		return -EINVAL;
8323 	}
8324 }
8325 
8326 /**
8327  * ice_setup_tc_block_cb_ingress - callback handler for ingress TC block
8328  * @type: TC SETUP type
8329  * @type_data: TC flower offload data that contains user input
8330  * @cb_priv: netdev private data
8331  *
8332  * Return: 0 if the setup was successful, negative error code otherwise.
8333  */
8334 static int
8335 ice_setup_tc_block_cb_ingress(enum tc_setup_type type, void *type_data,
8336 			      void *cb_priv)
8337 {
8338 	struct ice_netdev_priv *np = cb_priv;
8339 
8340 	switch (type) {
8341 	case TC_SETUP_CLSFLOWER:
8342 		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
8343 					       type_data, true);
8344 	default:
8345 		return -EOPNOTSUPP;
8346 	}
8347 }
8348 
8349 /**
8350  * ice_setup_tc_block_cb_egress - callback handler for egress TC block
8351  * @type: TC SETUP type
8352  * @type_data: TC flower offload data that contains user input
8353  * @cb_priv: netdev private data
8354  *
8355  * Return: 0 if the setup was successful, negative error code otherwise.
8356  */
8357 static int
8358 ice_setup_tc_block_cb_egress(enum tc_setup_type type, void *type_data,
8359 			     void *cb_priv)
8360 {
8361 	struct ice_netdev_priv *np = cb_priv;
8362 
8363 	switch (type) {
8364 	case TC_SETUP_CLSFLOWER:
8365 		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
8366 					       type_data, false);
8367 	default:
8368 		return -EOPNOTSUPP;
8369 	}
8370 }
8371 
8372 /**
8373  * ice_validate_mqprio_qopt - Validate TCF input parameters
8374  * @vsi: Pointer to VSI
8375  * @mqprio_qopt: input parameters for mqprio queue configuration
8376  *
8377  * This function validates MQPRIO params, such as qcount (power of 2 wherever
8378  * needed), and makes sure the user doesn't specify a qcount or BW rate limit
8379  * for more TCs than "num_tc".
8380  */
8381 static int
8382 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
8383 			 struct tc_mqprio_qopt_offload *mqprio_qopt)
8384 {
8385 	int non_power_of_2_qcount = 0;
8386 	struct ice_pf *pf = vsi->back;
8387 	int max_rss_q_cnt = 0;
8388 	u64 sum_min_rate = 0;
8389 	struct device *dev;
8390 	int i, speed;
8391 	u8 num_tc;
8392 
8393 	if (vsi->type != ICE_VSI_PF)
8394 		return -EINVAL;
8395 
8396 	if (mqprio_qopt->qopt.offset[0] != 0 ||
8397 	    mqprio_qopt->qopt.num_tc < 1 ||
8398 	    mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
8399 		return -EINVAL;
8400 
8401 	dev = ice_pf_to_dev(pf);
8402 	vsi->ch_rss_size = 0;
8403 	num_tc = mqprio_qopt->qopt.num_tc;
8404 	speed = ice_get_link_speed_kbps(vsi);
8405 
8406 	for (i = 0; num_tc; i++) {
8407 		int qcount = mqprio_qopt->qopt.count[i];
8408 		u64 max_rate, min_rate, rem;
8409 
8410 		if (!qcount)
8411 			return -EINVAL;
8412 
8413 		if (is_power_of_2(qcount)) {
8414 			if (non_power_of_2_qcount &&
8415 			    qcount > non_power_of_2_qcount) {
8416 				dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
8417 					qcount, non_power_of_2_qcount);
8418 				return -EINVAL;
8419 			}
8420 			if (qcount > max_rss_q_cnt)
8421 				max_rss_q_cnt = qcount;
8422 		} else {
8423 			if (non_power_of_2_qcount &&
8424 			    qcount != non_power_of_2_qcount) {
8425 				dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
8426 					qcount, non_power_of_2_qcount);
8427 				return -EINVAL;
8428 			}
8429 			if (qcount < max_rss_q_cnt) {
8430 				dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
8431 					qcount, max_rss_q_cnt);
8432 				return -EINVAL;
8433 			}
8434 			max_rss_q_cnt = qcount;
8435 			non_power_of_2_qcount = qcount;
8436 		}
8437 
8438 		/* The TC command takes input in K/M/Gbps or K/M/Gbit etc. but
8439 		 * converts the bandwidth rate limit into Bytes/s when
8440 		 * passing it down to the driver. So convert the input bandwidth
8441 		 * from Bytes/s to Kbps.
8442 		 */
8443 		max_rate = mqprio_qopt->max_rate[i];
8444 		max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
8445 
8446 		/* min_rate is minimum guaranteed rate and it can't be zero */
8447 		min_rate = mqprio_qopt->min_rate[i];
8448 		min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
8449 		sum_min_rate += min_rate;
8450 
8451 		if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
8452 			dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
8453 				min_rate, ICE_MIN_BW_LIMIT);
8454 			return -EINVAL;
8455 		}
8456 
8457 		if (max_rate && max_rate > speed) {
8458 			dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
8459 				i, max_rate, speed);
8460 			return -EINVAL;
8461 		}
8462 
8463 		iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
8464 		if (rem) {
8465 			dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
8466 				i, ICE_MIN_BW_LIMIT);
8467 			return -EINVAL;
8468 		}
8469 
8470 		iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
8471 		if (rem) {
8472 			dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
8473 				i, ICE_MIN_BW_LIMIT);
8474 			return -EINVAL;
8475 		}
8476 
8477 		/* min_rate can't be more than max_rate, except when max_rate
8478 		 * is zero (implies max_rate sought is max line rate). In such
8479 		 * a case min_rate can be more than max.
8480 		 */
8481 		if (max_rate && min_rate > max_rate) {
8482 			dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
8483 				min_rate, max_rate);
8484 			return -EINVAL;
8485 		}
8486 
8487 		if (i >= mqprio_qopt->qopt.num_tc - 1)
8488 			break;
8489 		if (mqprio_qopt->qopt.offset[i + 1] !=
8490 		    (mqprio_qopt->qopt.offset[i] + qcount))
8491 			return -EINVAL;
8492 	}
8493 	if (vsi->num_rxq <
8494 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8495 		return -EINVAL;
8496 	if (vsi->num_txq <
8497 	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
8498 		return -EINVAL;
8499 
8500 	if (sum_min_rate && sum_min_rate > (u64)speed) {
8501 		dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
8502 			sum_min_rate, speed);
8503 		return -EINVAL;
8504 	}
8505 
8506 	/* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
8507 	vsi->ch_rss_size = max_rss_q_cnt;
8508 
8509 	return 0;
8510 }
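/* Illustration of the qcount rules enforced above (interface name and queue
 * counts are hypothetical): at most one TC may use a non-power-of-2 queue
 * count, and that count must be at least as large as every power-of-2 count.
 *
 *   queues 4@0 4@4 6@8  -> valid   (6 is the single non-power-of-2 count)
 *   queues 4@0 4@4 3@8  -> invalid (3 is less than the power-of-2 count 4)
 *   queues 8@0 6@8      -> invalid (power-of-2 count 8 exceeds 6)
 *
 * A channel-mode request that passes these checks might look like:
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 3 map 0 0 1 1 2 2 2 2 \
 *      queues 4@0 4@4 6@8 hw 1 mode channel
 */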
8511 
8512 /**
8513  * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8514  * @pf: ptr to PF device
8515  * @vsi: ptr to VSI
8516  */
8517 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8518 {
8519 	struct device *dev = ice_pf_to_dev(pf);
8520 	bool added = false;
8521 	struct ice_hw *hw;
8522 	int flow;
8523 
8524 	if (!(vsi->num_gfltr || vsi->num_bfltr))
8525 		return -EINVAL;
8526 
8527 	hw = &pf->hw;
8528 	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8529 		struct ice_fd_hw_prof *prof;
8530 		int tun, status;
8531 		u64 entry_h;
8532 
8533 		if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8534 		      hw->fdir_prof[flow]->cnt))
8535 			continue;
8536 
8537 		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8538 			enum ice_flow_priority prio;
8539 
8540 			/* add this VSI to FDir profile for this flow */
8541 			prio = ICE_FLOW_PRIO_NORMAL;
8542 			prof = hw->fdir_prof[flow];
8543 			status = ice_flow_add_entry(hw, ICE_BLK_FD,
8544 						    prof->prof_id[tun],
8545 						    prof->vsi_h[0], vsi->idx,
8546 						    prio, prof->fdir_seg[tun],
8547 						    &entry_h);
8548 			if (status) {
8549 				dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8550 					vsi->idx, flow);
8551 				continue;
8552 			}
8553 
8554 			prof->entry_h[prof->cnt][tun] = entry_h;
8555 		}
8556 
8557 		/* store VSI for filter replay and delete */
8558 		prof->vsi_h[prof->cnt] = vsi->idx;
8559 		prof->cnt++;
8560 
8561 		added = true;
8562 		dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8563 			flow);
8564 	}
8565 
8566 	if (!added)
8567 		dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8568 
8569 	return 0;
8570 }
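/* For context: adding the channel VSI to each existing FD profile above is
 * what lets sideband (ntuple) filters keep working across ADQ queues; a
 * hypothetical steering rule (interface, port and queue are illustrative):
 *
 *   ethtool -N eth0 flow-type tcp4 dst-port 5001 action 4
 */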
8571 
8572 /**
8573  * ice_add_channel - add a channel by adding VSI
8574  * @pf: ptr to PF device
8575  * @sw_id: underlying HW switching element ID
8576  * @ch: ptr to channel structure
8577  *
8578  * Add a channel (VSI) using add_vsi and queue_map
8579  */
8580 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8581 {
8582 	struct device *dev = ice_pf_to_dev(pf);
8583 	struct ice_vsi *vsi;
8584 
8585 	if (ch->type != ICE_VSI_CHNL) {
8586 		dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8587 		return -EINVAL;
8588 	}
8589 
8590 	vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8591 	if (!vsi || vsi->type != ICE_VSI_CHNL) {
8592 		dev_err(dev, "create chnl VSI failure\n");
8593 		return -EINVAL;
8594 	}
8595 
8596 	ice_add_vsi_to_fdir(pf, vsi);
8597 
8598 	ch->sw_id = sw_id;
8599 	ch->vsi_num = vsi->vsi_num;
8600 	ch->info.mapping_flags = vsi->info.mapping_flags;
8601 	ch->ch_vsi = vsi;
8602 	/* set the back pointer of channel for newly created VSI */
8603 	vsi->ch = ch;
8604 
8605 	memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8606 	       sizeof(vsi->info.q_mapping));
8607 	memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8608 	       sizeof(vsi->info.tc_mapping));
8609 
8610 	return 0;
8611 }
8612 
8613 /**
8614  * ice_chnl_cfg_res - configure channel specific resources
8615  * @vsi: the VSI being set up
8616  * @ch: ptr to channel structure
8617  *
8618  * Configure channel specific resources such as rings and vectors.
8619  */
8620 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8621 {
8622 	int i;
8623 
8624 	for (i = 0; i < ch->num_txq; i++) {
8625 		struct ice_q_vector *tx_q_vector, *rx_q_vector;
8626 		struct ice_ring_container *rc;
8627 		struct ice_tx_ring *tx_ring;
8628 		struct ice_rx_ring *rx_ring;
8629 
8630 		tx_ring = vsi->tx_rings[ch->base_q + i];
8631 		rx_ring = vsi->rx_rings[ch->base_q + i];
8632 		if (!tx_ring || !rx_ring)
8633 			continue;
8634 
8635 		/* set up ring as channel enabled */
8636 		tx_ring->ch = ch;
8637 		rx_ring->ch = ch;
8638 
8639 		/* following code block sets up vector specific attributes */
8640 		tx_q_vector = tx_ring->q_vector;
8641 		rx_q_vector = rx_ring->q_vector;
8642 		if (!tx_q_vector && !rx_q_vector)
8643 			continue;
8644 
8645 		if (tx_q_vector) {
8646 			tx_q_vector->ch = ch;
8647 			/* set up Tx ITR setting if DIM is off */
8648 			rc = &tx_q_vector->tx;
8649 			if (!ITR_IS_DYNAMIC(rc))
8650 				ice_write_itr(rc, rc->itr_setting);
8651 		}
8652 		if (rx_q_vector) {
8653 			rx_q_vector->ch = ch;
8654 			/* set up Rx ITR setting if DIM is off */
8655 			rc = &rx_q_vector->rx;
8656 			if (!ITR_IS_DYNAMIC(rc))
8657 				ice_write_itr(rc, rc->itr_setting);
8658 		}
8659 	}
8660 
8661 	/* It is safe to assume that, if the channel has a non-zero num_txq or
8662 	 * num_rxq, then the GLINT_ITR register would have been written to
8663 	 * perform an in-context update, hence perform a flush.
8664 	 */
8665 	if (ch->num_txq || ch->num_rxq)
8666 		ice_flush(&vsi->back->hw);
8667 }
8668 
8669 /**
8670  * ice_cfg_chnl_all_res - configure channel resources
8671  * @vsi: ptr to main VSI
8672  * @ch: ptr to channel structure
8673  *
8674  * This function configures channel specific resources such as flow-director
8675  * counter index, and other resources such as queues, vectors, ITR settings
8676  */
8677 static void
8678 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8679 {
8680 	/* configure channel (aka ADQ) resources such as queues, vectors,
8681 	 * ITR settings for channel specific vectors and anything else
8682 	 */
8683 	ice_chnl_cfg_res(vsi, ch);
8684 }
8685 
8686 /**
8687  * ice_setup_hw_channel - setup new channel
8688  * @pf: ptr to PF device
8689  * @vsi: the VSI being setup
8690  * @ch: ptr to channel structure
8691  * @sw_id: underlying HW switching element ID
8692  * @type: type of channel to be created (VMDq2/VF)
8693  *
8694  * Set up a new channel (VSI) of the specified type (VMDq2/VF)
8695  * and configure the Tx rings accordingly.
8696  */
8697 static int
8698 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8699 		     struct ice_channel *ch, u16 sw_id, u8 type)
8700 {
8701 	struct device *dev = ice_pf_to_dev(pf);
8702 	int ret;
8703 
8704 	ch->base_q = vsi->next_base_q;
8705 	ch->type = type;
8706 
8707 	ret = ice_add_channel(pf, sw_id, ch);
8708 	if (ret) {
8709 		dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8710 		return ret;
8711 	}
8712 
8713 	/* configure/setup ADQ specific resources */
8714 	ice_cfg_chnl_all_res(vsi, ch);
8715 
8716 	/* make sure to update next_base_q so that subsequent channels'
8717 	 * (aka ADQ) VSI queue maps are correct
8718 	 */
8719 	vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8720 	dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8721 		ch->num_rxq);
8722 
8723 	return 0;
8724 }
8725 
8726 /**
8727  * ice_setup_channel - setup new channel using uplink element
8728  * @pf: ptr to PF device
8729  * @vsi: the VSI being setup
8730  * @ch: ptr to channel structure
8731  *
8732  * Setup new channel (VSI) based on specified type (VMDq2/VF)
8733  * and uplink switching element
8734  */
8735 static bool
8736 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8737 		  struct ice_channel *ch)
8738 {
8739 	struct device *dev = ice_pf_to_dev(pf);
8740 	u16 sw_id;
8741 	int ret;
8742 
8743 	if (vsi->type != ICE_VSI_PF) {
8744 		dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8745 		return false;
8746 	}
8747 
8748 	sw_id = pf->first_sw->sw_id;
8749 
8750 	/* create channel (VSI) */
8751 	ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8752 	if (ret) {
8753 		dev_err(dev, "failed to setup hw_channel\n");
8754 		return false;
8755 	}
8756 	dev_dbg(dev, "successfully created channel\n");
8757 
8758 	return !!ch->ch_vsi;
8759 }
8760 
8761 /**
8762  * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8763  * @vsi: VSI to be configured
8764  * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8765  * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8766  */
8767 static int
8768 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8769 {
8770 	int err;
8771 
8772 	err = ice_set_min_bw_limit(vsi, min_tx_rate);
8773 	if (err)
8774 		return err;
8775 
8776 	return ice_set_max_bw_limit(vsi, max_tx_rate);
8777 }
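/* A minimal usage sketch (variables and rates are illustrative, both rates
 * in Kbps): guarantee 50 Mbps and cap at 200 Mbps on a channel VSI; a zero
 * rate requests no limit for that bound.
 *
 *	err = ice_set_bw_limit(ch->ch_vsi, 200000, 50000);
 *	if (err)
 *		dev_err(dev, "failed to set BW limits\n");
 */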
8778 
8779 /**
8780  * ice_create_q_channel - function to create channel
8781  * @vsi: VSI to be configured
8782  * @ch: ptr to channel (it contains channel specific params)
8783  *
8784  * This function creates a channel (VSI) using the num_queues specified by
8785  * the user and reconfigures RSS if needed.
8786  */
8787 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8788 {
8789 	struct ice_pf *pf = vsi->back;
8790 	struct device *dev;
8791 
8792 	if (!ch)
8793 		return -EINVAL;
8794 
8795 	dev = ice_pf_to_dev(pf);
8796 	if (!ch->num_txq || !ch->num_rxq) {
8797 		dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8798 		return -EINVAL;
8799 	}
8800 
8801 	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8802 		dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8803 			vsi->cnt_q_avail, ch->num_txq);
8804 		return -EINVAL;
8805 	}
8806 
8807 	if (!ice_setup_channel(pf, vsi, ch)) {
8808 		dev_info(dev, "Failed to setup channel\n");
8809 		return -EINVAL;
8810 	}
8811 	/* configure BW rate limit */
8812 	if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8813 		int ret;
8814 
8815 		ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8816 				       ch->min_tx_rate);
8817 		if (ret)
8818 			dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8819 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8820 		else
8821 			dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8822 				ch->max_tx_rate, ch->ch_vsi->vsi_num);
8823 	}
8824 
8825 	vsi->cnt_q_avail -= ch->num_txq;
8826 
8827 	return 0;
8828 }
8829 
8830 /**
8831  * ice_rem_all_chnl_fltrs - removes all channel filters
8832  * @pf: ptr to PF, TC-flower based filters are tracked at the PF level
8833  *
8834  * Remove all advanced switch filters, but only those that are channel
8835  * specific tc-flower based filters
8836  */
8837 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8838 {
8839 	struct ice_tc_flower_fltr *fltr;
8840 	struct hlist_node *node;
8841 
8842 	/* to remove all channel filters, iterate over the ordered list of filters */
8843 	hlist_for_each_entry_safe(fltr, node,
8844 				  &pf->tc_flower_fltr_list,
8845 				  tc_flower_node) {
8846 		struct ice_rule_query_data rule;
8847 		int status;
8848 
8849 		/* for now process only channel specific filters */
8850 		if (!ice_is_chnl_fltr(fltr))
8851 			continue;
8852 
8853 		rule.rid = fltr->rid;
8854 		rule.rule_id = fltr->rule_id;
8855 		rule.vsi_handle = fltr->dest_vsi_handle;
8856 		status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8857 		if (status) {
8858 			if (status == -ENOENT)
8859 				dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8860 					rule.rule_id);
8861 			else
8862 				dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8863 					status);
8864 		} else if (fltr->dest_vsi) {
8865 			/* update advanced switch filter count */
8866 			if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8867 				u32 flags = fltr->flags;
8868 
8869 				fltr->dest_vsi->num_chnl_fltr--;
8870 				if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8871 					     ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8872 					pf->num_dmac_chnl_fltrs--;
8873 			}
8874 		}
8875 
8876 		hlist_del(&fltr->tc_flower_node);
8877 		kfree(fltr);
8878 	}
8879 }
8880 
8881 /**
8882  * ice_remove_q_channels - Remove queue channels for the TCs
8883  * @vsi: VSI to be configured
8884  * @rem_fltr: delete advanced switch filter or not
8885  *
8886  * Remove queue channels for the TCs
8887  */
8888 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8889 {
8890 	struct ice_channel *ch, *ch_tmp;
8891 	struct ice_pf *pf = vsi->back;
8892 	int i;
8893 
8894 	/* remove all tc-flower based filters, but only if they are channel filters */
8895 	if (rem_fltr)
8896 		ice_rem_all_chnl_fltrs(pf);
8897 
8898 	/* remove ntuple filters since queue configuration is being changed */
8899 	if (vsi->netdev->features & NETIF_F_NTUPLE) {
8900 		struct ice_hw *hw = &pf->hw;
8901 
8902 		mutex_lock(&hw->fdir_fltr_lock);
8903 		ice_fdir_del_all_fltrs(vsi);
8904 		mutex_unlock(&hw->fdir_fltr_lock);
8905 	}
8906 
8907 	/* perform cleanup for channels if they exist */
8908 	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8909 		struct ice_vsi *ch_vsi;
8910 
8911 		list_del(&ch->list);
8912 		ch_vsi = ch->ch_vsi;
8913 		if (!ch_vsi) {
8914 			kfree(ch);
8915 			continue;
8916 		}
8917 
8918 		/* Reset queue contexts */
8919 		for (i = 0; i < ch->num_rxq; i++) {
8920 			struct ice_tx_ring *tx_ring;
8921 			struct ice_rx_ring *rx_ring;
8922 
8923 			tx_ring = vsi->tx_rings[ch->base_q + i];
8924 			rx_ring = vsi->rx_rings[ch->base_q + i];
8925 			if (tx_ring) {
8926 				tx_ring->ch = NULL;
8927 				if (tx_ring->q_vector)
8928 					tx_ring->q_vector->ch = NULL;
8929 			}
8930 			if (rx_ring) {
8931 				rx_ring->ch = NULL;
8932 				if (rx_ring->q_vector)
8933 					rx_ring->q_vector->ch = NULL;
8934 			}
8935 		}
8936 
8937 		/* Release FD resources for the channel VSI */
8938 		ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8939 
8940 		/* clear the VSI from scheduler tree */
8941 		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8942 
8943 		/* Delete VSI from FW, PF and HW VSI arrays */
8944 		ice_vsi_delete(ch->ch_vsi);
8945 
8946 		/* free the channel */
8947 		kfree(ch);
8948 	}
8949 
8950 	/* clear the channel VSI map which is stored in main VSI */
8951 	ice_for_each_chnl_tc(i)
8952 		vsi->tc_map_vsi[i] = NULL;
8953 
8954 	/* reset main VSI's all TC information */
8955 	vsi->all_enatc = 0;
8956 	vsi->all_numtc = 0;
8957 }
8958 
8959 /**
8960  * ice_rebuild_channels - rebuild channels
8961  * @pf: ptr to PF
8962  *
8963  * Recreate channel VSIs and replay filters
8964  */
8965 static int ice_rebuild_channels(struct ice_pf *pf)
8966 {
8967 	struct device *dev = ice_pf_to_dev(pf);
8968 	struct ice_vsi *main_vsi;
8969 	bool rem_adv_fltr = true;
8970 	struct ice_channel *ch;
8971 	struct ice_vsi *vsi;
8972 	int tc_idx = 1;
8973 	int i, err;
8974 
8975 	main_vsi = ice_get_main_vsi(pf);
8976 	if (!main_vsi)
8977 		return 0;
8978 
8979 	if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8980 	    main_vsi->old_numtc == 1)
8981 		return 0; /* nothing to be done */
8982 
8983 	/* reconfigure main VSI based on old value of TC and cached values
8984 	 * for MQPRIO opts
8985 	 */
8986 	err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8987 	if (err) {
8988 		dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8989 			main_vsi->old_ena_tc, main_vsi->vsi_num);
8990 		return err;
8991 	}
8992 
8993 	/* rebuild ADQ VSIs */
8994 	ice_for_each_vsi(pf, i) {
8995 		enum ice_vsi_type type;
8996 
8997 		vsi = pf->vsi[i];
8998 		if (!vsi || vsi->type != ICE_VSI_CHNL)
8999 			continue;
9000 
9001 		type = vsi->type;
9002 
9003 		/* rebuild ADQ VSI */
9004 		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
9005 		if (err) {
9006 			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
9007 				ice_vsi_type_str(type), vsi->idx, err);
9008 			goto cleanup;
9009 		}
9010 
9011 		/* Re-map HW VSI number, using VSI handle that gets
9012 		 * validated in the ice_replay_vsi() call below
9013 		 */
9014 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
9015 
9016 		/* replay filters for the VSI */
9017 		err = ice_replay_vsi(&pf->hw, vsi->idx);
9018 		if (err) {
9019 			dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
9020 				ice_vsi_type_str(type), err, vsi->idx);
9021 			rem_adv_fltr = false;
9022 			goto cleanup;
9023 		}
9024 		dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
9025 			 ice_vsi_type_str(type), vsi->idx);
9026 
9027 		/* store ADQ VSI at correct TC index in main VSI's
9028 		 * map of TC to VSI
9029 		 */
9030 		main_vsi->tc_map_vsi[tc_idx++] = vsi;
9031 	}
9032 
9033 	/* ADQ VSI(s) have been rebuilt successfully, so set up the
9034 	 * channels for the main VSI's Tx and Rx rings
9035 	 */
9036 	list_for_each_entry(ch, &main_vsi->ch_list, list) {
9037 		struct ice_vsi *ch_vsi;
9038 
9039 		ch_vsi = ch->ch_vsi;
9040 		if (!ch_vsi)
9041 			continue;
9042 
9043 		/* reconfig channel resources */
9044 		ice_cfg_chnl_all_res(main_vsi, ch);
9045 
9046 		/* replay BW rate limit if it is non-zero */
9047 		if (!ch->max_tx_rate && !ch->min_tx_rate)
9048 			continue;
9049 
9050 		err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
9051 				       ch->min_tx_rate);
9052 		if (err)
9053 			dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
9054 				err, ch->max_tx_rate, ch->min_tx_rate,
9055 				ch_vsi->vsi_num);
9056 		else
9057 			dev_dbg(dev, "successfully rebuilt BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
9058 				ch->max_tx_rate, ch->min_tx_rate,
9059 				ch_vsi->vsi_num);
9060 	}
9061 
9062 	/* reconfig RSS for main VSI */
9063 	if (main_vsi->ch_rss_size)
9064 		ice_vsi_cfg_rss_lut_key(main_vsi);
9065 
9066 	return 0;
9067 
9068 cleanup:
9069 	ice_remove_q_channels(main_vsi, rem_adv_fltr);
9070 	return err;
9071 }
9072 
9073 /**
9074  * ice_create_q_channels - Add queue channel for the given TCs
9075  * @vsi: VSI to be configured
9076  *
9077  * Configures queue channel mapping to the given TCs
9078  */
9079 static int ice_create_q_channels(struct ice_vsi *vsi)
9080 {
9081 	struct ice_pf *pf = vsi->back;
9082 	struct ice_channel *ch;
9083 	int ret = 0, i;
9084 
9085 	ice_for_each_chnl_tc(i) {
9086 		if (!(vsi->all_enatc & BIT(i)))
9087 			continue;
9088 
9089 		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
9090 		if (!ch) {
9091 			ret = -ENOMEM;
9092 			goto err_free;
9093 		}
9094 		INIT_LIST_HEAD(&ch->list);
9095 		ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
9096 		ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
9097 		ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
9098 		ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
9099 		ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
9100 
9101 		/* convert to Kbits/s */
9102 		if (ch->max_tx_rate)
9103 			ch->max_tx_rate = div_u64(ch->max_tx_rate,
9104 						  ICE_BW_KBPS_DIVISOR);
9105 		if (ch->min_tx_rate)
9106 			ch->min_tx_rate = div_u64(ch->min_tx_rate,
9107 						  ICE_BW_KBPS_DIVISOR);
9108 
9109 		ret = ice_create_q_channel(vsi, ch);
9110 		if (ret) {
9111 			dev_err(ice_pf_to_dev(pf),
9112 				"failed creating channel TC:%d\n", i);
9113 			kfree(ch);
9114 			goto err_free;
9115 		}
9116 		list_add_tail(&ch->list, &vsi->ch_list);
9117 		vsi->tc_map_vsi[i] = ch->ch_vsi;
9118 		dev_dbg(ice_pf_to_dev(pf),
9119 			"successfully created channel: VSI %p\n", ch->ch_vsi);
9120 	}
9121 	return 0;
9122 
9123 err_free:
9124 	ice_remove_q_channels(vsi, false);
9125 
9126 	return ret;
9127 }
9128 
9129 /**
9130  * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
9131  * @netdev: net device to configure
9132  * @type_data: TC offload data
9133  */
9134 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
9135 {
9136 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
9137 	struct ice_netdev_priv *np = netdev_priv(netdev);
9138 	struct ice_vsi *vsi = np->vsi;
9139 	struct ice_pf *pf = vsi->back;
9140 	u16 mode, ena_tc_qdisc = 0;
9141 	int cur_txq, cur_rxq;
9142 	u8 hw = 0, num_tcf;
9143 	struct device *dev;
9144 	int ret, i;
9145 
9146 	dev = ice_pf_to_dev(pf);
9147 	num_tcf = mqprio_qopt->qopt.num_tc;
9148 	hw = mqprio_qopt->qopt.hw;
9149 	mode = mqprio_qopt->mode;
9150 	if (!hw) {
9151 		clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
9152 		vsi->ch_rss_size = 0;
9153 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
9154 		goto config_tcf;
9155 	}
9156 
9157 	/* Generate queue region map for number of TCF requested */
9158 	for (i = 0; i < num_tcf; i++)
9159 		ena_tc_qdisc |= BIT(i);
9160 
9161 	switch (mode) {
9162 	case TC_MQPRIO_MODE_CHANNEL:
9163 
9164 		if (pf->hw.port_info->is_custom_tx_enabled) {
9165 			dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
9166 			return -EBUSY;
9167 		}
9168 		ice_tear_down_devlink_rate_tree(pf);
9169 
9170 		ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
9171 		if (ret) {
9172 			netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
9173 				   ret);
9174 			return ret;
9175 		}
9176 		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
9177 		set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
9178 		/* don't assume state of hw_tc_offload during driver load
9179 		/* don't assume the state of hw_tc_offload during driver load;
9180 		 * set the flag for TC flower filters if hw_tc_offload is
9181 		 * already ON
9182 		 */
9183 			set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
9184 		break;
9185 	default:
9186 		return -EINVAL;
9187 	}
9188 
9189 config_tcf:
9190 
9191 	/* Requesting same TCF configuration as already enabled */
9192 	if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
9193 	    mode != TC_MQPRIO_MODE_CHANNEL)
9194 		return 0;
9195 
9196 	/* Pause VSI queues */
9197 	ice_dis_vsi(vsi, true);
9198 
9199 	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
9200 		ice_remove_q_channels(vsi, true);
9201 
9202 	if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9203 		vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
9204 				     num_online_cpus());
9205 		vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
9206 				     num_online_cpus());
9207 	} else {
9208 		/* logic to rebuild VSI, same as for ethtool -L */
9209 		u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
9210 
9211 		for (i = 0; i < num_tcf; i++) {
9212 			if (!(ena_tc_qdisc & BIT(i)))
9213 				continue;
9214 
9215 			offset = vsi->mqprio_qopt.qopt.offset[i];
9216 			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
9217 			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
9218 		}
9219 		vsi->req_txq = offset + qcount_tx;
9220 		vsi->req_rxq = offset + qcount_rx;
9221 
9222 		/* store away original rss_size info, so that it gets reused
9223 		 * from ice_vsi_rebuild during the tc-qdisc delete stage - to
9224 		 * determine what the rss_size for the main VSI should be
9225 		 */
9226 		vsi->orig_rss_size = vsi->rss_size;
9227 	}
9228 
9229 	/* save current values of Tx and Rx queues before calling VSI rebuild
9230 	 * for fallback option
9231 	 */
9232 	cur_txq = vsi->num_txq;
9233 	cur_rxq = vsi->num_rxq;
9234 
9235 	/* proceed with rebuilding the main VSI using the correct number of queues */
9236 	ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
9237 	if (ret) {
9238 		/* fallback to current number of queues */
9239 		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
9240 		vsi->req_txq = cur_txq;
9241 		vsi->req_rxq = cur_rxq;
9242 		clear_bit(ICE_RESET_FAILED, pf->state);
9243 		if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
9244 			dev_err(dev, "Rebuild of main VSI failed again\n");
9245 			return ret;
9246 		}
9247 	}
9248 
9249 	vsi->all_numtc = num_tcf;
9250 	vsi->all_enatc = ena_tc_qdisc;
9251 	ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
9252 	if (ret) {
9253 		netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
9254 			   vsi->vsi_num);
9255 		goto exit;
9256 	}
9257 
9258 	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
9259 		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
9260 		u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
9261 
9262 		/* set TC0 rate limit if specified */
9263 		if (max_tx_rate || min_tx_rate) {
9264 			/* convert to Kbits/s */
9265 			if (max_tx_rate)
9266 				max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
9267 			if (min_tx_rate)
9268 				min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
9269 
9270 			ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
9271 			if (!ret) {
9272 				dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
9273 					max_tx_rate, min_tx_rate, vsi->vsi_num);
9274 			} else {
9275 				dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
9276 					max_tx_rate, min_tx_rate, vsi->vsi_num);
9277 				goto exit;
9278 			}
9279 		}
9280 		ret = ice_create_q_channels(vsi);
9281 		if (ret) {
9282 			netdev_err(netdev, "failed configuring queue channels\n");
9283 			goto exit;
9284 		} else {
9285 			netdev_dbg(netdev, "successfully configured channels\n");
9286 		}
9287 	}
9288 
9289 	if (vsi->ch_rss_size)
9290 		ice_vsi_cfg_rss_lut_key(vsi);
9291 
9292 exit:
9293 	/* on error, reset all_numtc and all_enatc */
9294 	if (ret) {
9295 		vsi->all_numtc = 0;
9296 		vsi->all_enatc = 0;
9297 	}
9298 	/* resume VSI */
9299 	ice_ena_vsi(vsi, true);
9300 
9301 	return ret;
9302 }
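/* End-to-end illustration (interface and rates are hypothetical): a
 * channel-mode mqprio request with per-TC shapers, as handled above, could
 * be issued as:
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *      queues 4@0 4@4 hw 1 mode channel shaper bw_rlimit \
 *      min_rate 1Gbit 2Gbit max_rate 2Gbit 4Gbit
 *
 * Deleting the qdisc arrives here with hw == 0, which tears down the
 * channels and rebuilds the main VSI with default queue counts.
 */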
9303 
9304 /**
9305  * ice_cfg_txtime - configure Tx Time for the Tx ring
9306  * @tx_ring: pointer to the Tx ring structure
9307  *
9308  * Return: 0 on success, negative value on failure.
9309  */
9310 static int ice_cfg_txtime(struct ice_tx_ring *tx_ring)
9311 {
9312 	int err, timeout = 50;
9313 	struct ice_vsi *vsi;
9314 	struct device *dev;
9315 	struct ice_pf *pf;
9316 	u32 queue;
9317 
9318 	if (!tx_ring)
9319 		return -EINVAL;
9320 
9321 	vsi = tx_ring->vsi;
9322 	pf = vsi->back;
9323 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
9324 		timeout--;
9325 		if (!timeout)
9326 			return -EBUSY;
9327 		usleep_range(1000, 2000);
9328 	}
9329 
9330 	queue = tx_ring->q_index;
9331 	dev = ice_pf_to_dev(pf);
9332 
9333 	/* Ignore return value, and always attempt to enable queue. */
9334 	ice_qp_dis(vsi, queue);
9335 
9336 	err = ice_qp_ena(vsi, queue);
9337 	if (err)
9338 		dev_err(dev, "Failed to enable Tx queue %d for TxTime configuration\n",
9339 			queue);
9340 
9341 	clear_bit(ICE_CFG_BUSY, pf->state);
9342 	return err;
9343 }
9344 
9345 /**
9346  * ice_offload_txtime - set earliest TxTime first
9347  * @netdev: network interface device structure
9348  * @qopt_off: etf queue option offload from the skb to set
9349  *
9350  * Return: 0 on success, negative value on failure.
9351  */
9352 static int ice_offload_txtime(struct net_device *netdev,
9353 			      void *qopt_off)
9354 {
9355 	struct ice_netdev_priv *np = netdev_priv(netdev);
9356 	struct ice_pf *pf = np->vsi->back;
9357 	struct tc_etf_qopt_offload *qopt;
9358 	struct ice_vsi *vsi = np->vsi;
9359 	struct ice_tx_ring *tx_ring;
9360 	int ret = 0;
9361 
9362 	if (!ice_is_feature_supported(pf, ICE_F_TXTIME))
9363 		return -EOPNOTSUPP;
9364 
9365 	qopt = qopt_off;
9366 	if (!qopt_off || qopt->queue < 0 || qopt->queue >= vsi->num_txq)
9367 		return -EINVAL;
9368 
9369 	if (qopt->enable)
9370 		set_bit(qopt->queue, pf->txtime_txqs);
9371 	else
9372 		clear_bit(qopt->queue, pf->txtime_txqs);
9373 
9374 	if (netif_running(vsi->netdev)) {
9375 		tx_ring = vsi->tx_rings[qopt->queue];
9376 		ret = ice_cfg_txtime(tx_ring);
9377 		if (ret)
9378 			goto err;
9379 	}
9380 
9381 	netdev_info(netdev, "%s TxTime on queue: %i\n",
9382 		    str_enable_disable(qopt->enable), qopt->queue);
9383 	return 0;
9384 
9385 err:
9386 	netdev_err(netdev, "Failed to %s TxTime on queue: %i\n",
9387 		   str_enable_disable(qopt->enable), qopt->queue);
9388 
9389 	if (qopt->enable)
9390 		clear_bit(qopt->queue, pf->txtime_txqs);
9391 	return ret;
9392 }
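/* Illustration (device, queue and delta are hypothetical): TxTime offload is
 * requested per Tx queue through the ETF qdisc with the offload flag, e.g.:
 *
 *   tc qdisc replace dev eth0 parent 100:1 etf clockid CLOCK_TAI \
 *      delta 500000 offload
 *
 * which reaches this handler as TC_SETUP_QDISC_ETF with qopt->enable set.
 */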
9393 
9394 static LIST_HEAD(ice_block_cb_list);
9395 
9396 static int
9397 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
9398 	     void *type_data)
9399 {
9400 	struct ice_netdev_priv *np = netdev_priv(netdev);
9401 	enum flow_block_binder_type binder_type;
9402 	struct iidc_rdma_core_dev_info *cdev;
9403 	struct ice_pf *pf = np->vsi->back;
9404 	flow_setup_cb_t *flower_handler;
9405 	bool locked = false;
9406 	int err;
9407 
9408 	switch (type) {
9409 	case TC_SETUP_BLOCK:
9410 		binder_type =
9411 			((struct flow_block_offload *)type_data)->binder_type;
9412 
9413 		switch (binder_type) {
9414 		case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
9415 			flower_handler = ice_setup_tc_block_cb_ingress;
9416 			break;
9417 		case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
9418 			flower_handler = ice_setup_tc_block_cb_egress;
9419 			break;
9420 		default:
9421 			return -EOPNOTSUPP;
9422 		}
9423 
9424 		return flow_block_cb_setup_simple(type_data,
9425 						  &ice_block_cb_list,
9426 						  flower_handler,
9427 						  np, np, false);
9428 	case TC_SETUP_QDISC_MQPRIO:
9429 		if (ice_is_eswitch_mode_switchdev(pf)) {
9430 			netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
9431 			return -EOPNOTSUPP;
9432 		}
9433 
9434 		cdev = pf->cdev_info;
9435 		if (cdev && cdev->adev) {
9436 			mutex_lock(&pf->adev_mutex);
9437 			device_lock(&cdev->adev->dev);
9438 			locked = true;
9439 			if (cdev->adev->dev.driver) {
9440 				netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
9441 				err = -EBUSY;
9442 				goto adev_unlock;
9443 			}
9444 		}
9445 
9446 		/* setup traffic classifier for receive side */
9447 		mutex_lock(&pf->tc_mutex);
9448 		err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
9449 		mutex_unlock(&pf->tc_mutex);
9450 
9451 adev_unlock:
9452 		if (locked) {
9453 			device_unlock(&cdev->adev->dev);
9454 			mutex_unlock(&pf->adev_mutex);
9455 		}
9456 		return err;
9457 	case TC_SETUP_QDISC_ETF:
9458 		return ice_offload_txtime(netdev, type_data);
9459 	default:
9460 		return -EOPNOTSUPP;
9461 	}
9462 	return -EOPNOTSUPP;
9463 }
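/* Illustration (addresses and interface are hypothetical): TC_SETUP_BLOCK
 * above is the path flower filters take into the driver, e.g. steering a
 * flow to ADQ TC 1:
 *
 *   tc filter add dev eth0 protocol ip ingress prio 1 flower \
 *      dst_ip 192.168.1.1 ip_proto tcp dst_port 5001 skip_sw hw_tc 1
 */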
9464 
9465 static struct ice_indr_block_priv *
9466 ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
9467 			   struct net_device *netdev)
9468 {
9469 	struct ice_indr_block_priv *cb_priv;
9470 
9471 	list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
9472 		if (!cb_priv->netdev)
9473 			return NULL;
9474 		if (cb_priv->netdev == netdev)
9475 			return cb_priv;
9476 	}
9477 	return NULL;
9478 }
9479 
9480 static int
9481 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
9482 			void *indr_priv)
9483 {
9484 	struct ice_indr_block_priv *priv = indr_priv;
9485 	struct ice_netdev_priv *np = priv->np;
9486 
9487 	switch (type) {
9488 	case TC_SETUP_CLSFLOWER:
9489 		return ice_setup_tc_cls_flower(np, priv->netdev,
9490 					       (struct flow_cls_offload *)
9491 					       type_data, false);
9492 	default:
9493 		return -EOPNOTSUPP;
9494 	}
9495 }
9496 
9497 static int
9498 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
9499 			struct ice_netdev_priv *np,
9500 			struct flow_block_offload *f, void *data,
9501 			void (*cleanup)(struct flow_block_cb *block_cb))
9502 {
9503 	struct ice_indr_block_priv *indr_priv;
9504 	struct flow_block_cb *block_cb;
9505 
9506 	if (!ice_is_tunnel_supported(netdev) &&
9507 	    !(is_vlan_dev(netdev) &&
9508 	      vlan_dev_real_dev(netdev) == np->vsi->netdev))
9509 		return -EOPNOTSUPP;
9510 
9511 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9512 		return -EOPNOTSUPP;
9513 
9514 	switch (f->command) {
9515 	case FLOW_BLOCK_BIND:
9516 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
9517 		if (indr_priv)
9518 			return -EEXIST;
9519 
9520 		indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
9521 		if (!indr_priv)
9522 			return -ENOMEM;
9523 
9524 		indr_priv->netdev = netdev;
9525 		indr_priv->np = np;
9526 		list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
9527 
9528 		block_cb =
9529 			flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
9530 						 indr_priv, indr_priv,
9531 						 ice_rep_indr_tc_block_unbind,
9532 						 f, netdev, sch, data, np,
9533 						 cleanup);
9534 
9535 		if (IS_ERR(block_cb)) {
9536 			list_del(&indr_priv->list);
9537 			kfree(indr_priv);
9538 			return PTR_ERR(block_cb);
9539 		}
9540 		flow_block_cb_add(block_cb, f);
9541 		list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
9542 		break;
9543 	case FLOW_BLOCK_UNBIND:
9544 		indr_priv = ice_indr_block_priv_lookup(np, netdev);
9545 		if (!indr_priv)
9546 			return -ENOENT;
9547 
9548 		block_cb = flow_block_cb_lookup(f->block,
9549 						ice_indr_setup_block_cb,
9550 						indr_priv);
9551 		if (!block_cb)
9552 			return -ENOENT;
9553 
9554 		flow_indr_block_cb_remove(block_cb, f);
9555 
9556 		list_del(&block_cb->driver_list);
9557 		break;
9558 	default:
9559 		return -EOPNOTSUPP;
9560 	}
9561 	return 0;
9562 }
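/* The indirect block path above is what lets filters installed on tunnel
 * netdevs (or VLAN uppers of this PF) be offloaded; a hypothetical example:
 *
 *   tc filter add dev vxlan0 protocol ip ingress flower \
 *      enc_key_id 100 enc_dst_port 4789 action drop
 */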
9563 
9564 static int
9565 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
9566 		     void *cb_priv, enum tc_setup_type type, void *type_data,
9567 		     void *data,
9568 		     void (*cleanup)(struct flow_block_cb *block_cb))
9569 {
9570 	switch (type) {
9571 	case TC_SETUP_BLOCK:
9572 		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
9573 					       data, cleanup);
9574 
9575 	default:
9576 		return -EOPNOTSUPP;
9577 	}
9578 }
9579 
9580 /**
9581  * ice_open - Called when a network interface becomes active
9582  * @netdev: network interface device structure
9583  *
9584  * The open entry point is called when a network interface is made
9585  * active by the system (IFF_UP). At this point all resources needed
9586  * for transmit and receive operations are allocated, the interrupt
9587  * handler is registered with the OS, the netdev watchdog is enabled,
9588  * and the stack is notified that the interface is ready.
9589  *
9590  * Returns 0 on success, negative value on failure
9591  */
9592 int ice_open(struct net_device *netdev)
9593 {
9594 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
9595 
9596 	if (ice_is_reset_in_progress(pf->state)) {
9597 		netdev_err(netdev, "can't open net device while reset is in progress\n");
9598 		return -EBUSY;
9599 	}
9600 
9601 	return ice_open_internal(netdev);
9602 }
9603 
9604 /**
9605  * ice_open_internal - Called when a network interface becomes active
9606  * @netdev: network interface device structure
9607  *
9608  * Internal ice_open implementation. Should not be used directly except by
9609  * ice_open and the reset handling routine.
9610  *
9611  * Returns 0 on success, negative value on failure
9612  */
9613 int ice_open_internal(struct net_device *netdev)
9614 {
9615 	struct ice_netdev_priv *np = netdev_priv(netdev);
9616 	struct ice_vsi *vsi = np->vsi;
9617 	struct ice_pf *pf = vsi->back;
9618 	struct ice_port_info *pi;
9619 	int err;
9620 
9621 	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
9622 		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
9623 		return -EIO;
9624 	}
9625 
9626 	netif_carrier_off(netdev);
9627 
9628 	pi = vsi->port_info;
9629 	err = ice_update_link_info(pi);
9630 	if (err) {
9631 		netdev_err(netdev, "Failed to get link info, error %d\n", err);
9632 		return err;
9633 	}
9634 
9635 	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
9636 
9637 	/* Set PHY if there is media, otherwise, turn off PHY */
9638 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
9639 		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9640 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
9641 			err = ice_init_phy_user_cfg(pi);
9642 			if (err) {
9643 				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
9644 					   err);
9645 				return err;
9646 			}
9647 		}
9648 
9649 		err = ice_configure_phy(vsi);
9650 		if (err) {
9651 			netdev_err(netdev, "Failed to set physical link up, error %d\n",
9652 				   err);
9653 			return err;
9654 		}
9655 	} else {
9656 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
9657 		ice_set_link(vsi, false);
9658 	}
9659 
9660 	err = ice_vsi_open(vsi);
9661 	if (err)
9662 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
9663 			   vsi->vsi_num, vsi->vsw->sw_id);
9664 
9665 	return err;
9666 }
9667 
9668 /**
9669  * ice_stop - Disables a network interface
9670  * @netdev: network interface device structure
9671  *
9672  * The stop entry point is called when an interface is de-activated by the OS,
9673  * and the netdevice enters the DOWN state. The hardware is still under the
9674  * driver's control, but the netdev interface is disabled.
9675  *
9676  * Returns success only - not allowed to fail
9677  */
9678 int ice_stop(struct net_device *netdev)
9679 {
9680 	struct ice_netdev_priv *np = netdev_priv(netdev);
9681 	struct ice_vsi *vsi = np->vsi;
9682 	struct ice_pf *pf = vsi->back;
9683 
9684 	if (ice_is_reset_in_progress(pf->state)) {
9685 		netdev_err(netdev, "can't stop net device while reset is in progress\n");
9686 		return -EBUSY;
9687 	}
9688 
9689 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
9690 		int link_err = ice_force_phys_link_state(vsi, false);
9691 
9692 		if (link_err) {
9693 			if (link_err == -ENOMEDIUM)
9694 				netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
9695 					    vsi->vsi_num);
9696 			else
9697 				netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
9698 					   vsi->vsi_num, link_err);
9699 
9700 			ice_vsi_close(vsi);
9701 			return -EIO;
9702 		}
9703 	}
9704 
9705 	ice_vsi_close(vsi);
9706 
9707 	return 0;
9708 }
9709 
9710 /**
9711  * ice_features_check - Validate encapsulated packet conforms to limits
9712  * @skb: skb buffer
9713  * @netdev: This port's netdev
9714  * @features: Offload features that the stack believes apply
9715  */
9716 static netdev_features_t
9717 ice_features_check(struct sk_buff *skb,
9718 		   struct net_device __always_unused *netdev,
9719 		   netdev_features_t features)
9720 {
9721 	bool gso = skb_is_gso(skb);
9722 	size_t len;
9723 
9724 	/* No point in doing any of this if neither checksum nor GSO are
9725 	 * being requested for this frame. We can rule out both by just
9726 	 * checking for CHECKSUM_PARTIAL
9727 	 */
9728 	if (skb->ip_summed != CHECKSUM_PARTIAL)
9729 		return features;
9730 
9731 	/* We cannot support GSO if the MSS is going to be less than
9732 	 * 64 bytes. If it is then we need to drop support for GSO.
9733 	 */
9734 	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
9735 		features &= ~NETIF_F_GSO_MASK;
9736 
9737 	len = skb_network_offset(skb);
9738 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
9739 		goto out_rm_features;
9740 
9741 	len = skb_network_header_len(skb);
9742 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9743 		goto out_rm_features;
9744 
9745 	if (skb->encapsulation) {
9746 		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
9747 		 * the case of IPIP frames, the transport header pointer is
9748 		 * after the inner header! So check to make sure that this
9749 		 * is a GRE or UDP_TUNNEL frame before doing that math.
9750 		 */
9751 		if (gso && (skb_shinfo(skb)->gso_type &
9752 			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
9753 			len = skb_inner_network_header(skb) -
9754 			      skb_transport_header(skb);
9755 			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
9756 				goto out_rm_features;
9757 		}
9758 
9759 		len = skb_inner_network_header_len(skb);
9760 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
9761 			goto out_rm_features;
9762 	}
9763 
9764 	return features;
9765 out_rm_features:
9766 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9767 }
9768 
9769 static const struct net_device_ops ice_netdev_safe_mode_ops = {
9770 	.ndo_open = ice_open,
9771 	.ndo_stop = ice_stop,
9772 	.ndo_start_xmit = ice_start_xmit,
9773 	.ndo_set_mac_address = ice_set_mac_address,
9774 	.ndo_validate_addr = eth_validate_addr,
9775 	.ndo_change_mtu = ice_change_mtu,
9776 	.ndo_get_stats64 = ice_get_stats64,
9777 	.ndo_tx_timeout = ice_tx_timeout,
9778 	.ndo_bpf = ice_xdp_safe_mode,
9779 };
9780 
9781 static const struct net_device_ops ice_netdev_ops = {
9782 	.ndo_open = ice_open,
9783 	.ndo_stop = ice_stop,
9784 	.ndo_start_xmit = ice_start_xmit,
9785 	.ndo_select_queue = ice_select_queue,
9786 	.ndo_features_check = ice_features_check,
9787 	.ndo_fix_features = ice_fix_features,
9788 	.ndo_set_rx_mode = ice_set_rx_mode,
9789 	.ndo_set_mac_address = ice_set_mac_address,
9790 	.ndo_validate_addr = eth_validate_addr,
9791 	.ndo_change_mtu = ice_change_mtu,
9792 	.ndo_get_stats64 = ice_get_stats64,
9793 	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
9794 	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
9795 	.ndo_set_vf_mac = ice_set_vf_mac,
9796 	.ndo_get_vf_config = ice_get_vf_cfg,
9797 	.ndo_set_vf_trust = ice_set_vf_trust,
9798 	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
9799 	.ndo_set_vf_link_state = ice_set_vf_link_state,
9800 	.ndo_get_vf_stats = ice_get_vf_stats,
9801 	.ndo_set_vf_rate = ice_set_vf_bw,
9802 	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
9803 	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
9804 	.ndo_setup_tc = ice_setup_tc,
9805 	.ndo_set_features = ice_set_features,
9806 	.ndo_bridge_getlink = ice_bridge_getlink,
9807 	.ndo_bridge_setlink = ice_bridge_setlink,
9808 	.ndo_fdb_add = ice_fdb_add,
9809 	.ndo_fdb_del = ice_fdb_del,
9810 #ifdef CONFIG_RFS_ACCEL
9811 	.ndo_rx_flow_steer = ice_rx_flow_steer,
9812 #endif
9813 	.ndo_tx_timeout = ice_tx_timeout,
9814 	.ndo_bpf = ice_xdp,
9815 	.ndo_xdp_xmit = ice_xdp_xmit,
9816 	.ndo_xsk_wakeup = ice_xsk_wakeup,
9817 	.ndo_hwtstamp_get = ice_ptp_hwtstamp_get,
9818 	.ndo_hwtstamp_set = ice_ptp_hwtstamp_set,
9819 };
9820