// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* ethtool support for ice */

#include "ice.h"
#include "ice_ethtool.h"
#include "ice_flow.h"
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include <net/dcbnl.h>
#include <net/libeth/rx.h>

struct ice_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define ICE_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

#define ICE_VSI_STAT(_name, _stat) \
	ICE_STAT(struct ice_vsi, _name, _stat)
#define ICE_PF_STAT(_name, _stat) \
	ICE_STAT(struct ice_pf, _name, _stat)

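/**
 * ice_q_stats_len - count per-queue stat entries reported for a netdev
 * @netdev: network interface device structure
 *
 * Return: number of per-queue statistics, two entries (packets and bytes)
 * for each allocated Tx and Rx queue.
 */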
static int ice_q_stats_len(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	/* One packet count and one byte count per queue */
	return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) * 2);
}

#define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats)

#define ICE_PFC_STATS_LEN ( \
		(sizeof_field(struct ice_pf, stats.priority_xoff_rx) + \
		 sizeof_field(struct ice_pf, stats.priority_xon_rx) + \
		 sizeof_field(struct ice_pf, stats.priority_xoff_tx) + \
		 sizeof_field(struct ice_pf, stats.priority_xon_tx)) \
		 / sizeof(u64))
#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \
			      ICE_VSI_STATS_LEN + ice_q_stats_len(n))

static const struct ice_stats ice_gstrings_vsi_stats[] = {
	ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
	ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
	ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
	ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
	ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
	ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
	ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
	ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
	ICE_VSI_STAT("rx_dropped", eth_stats.rx_discards),
	ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
	ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
	ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
	ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
	ICE_VSI_STAT("tx_linearize", tx_linearize),
	ICE_VSI_STAT("tx_busy", tx_busy),
	ICE_VSI_STAT("tx_restart", tx_restart),
};

enum ice_ethtool_test_id {
	ICE_ETH_TEST_REG = 0,
	ICE_ETH_TEST_EEPROM,
	ICE_ETH_TEST_INTR,
	ICE_ETH_TEST_LOOP,
	ICE_ETH_TEST_LINK,
};

static const char ice_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)",
	"EEPROM test (offline)",
	"Interrupt test (offline)",
	"Loopback test (offline)",
	"Link test (on/offline)",
};

#define ICE_TEST_LEN (sizeof(ice_gstrings_test) / ETH_GSTRING_LEN)

/* These PF_STATs might look like duplicates of some NETDEV_STATs,
 * but they aren't. This device is capable of supporting multiple
 * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
 * netdevs whereas the PF_STATs are for the physical function that's
 * hosting these netdevs.
 *
 * The PF_STATs are appended to the netdev stats only when ethtool -S
 * is queried on the base PF netdev.
 */
static const struct ice_stats ice_gstrings_pf_stats[] = {
	ICE_PF_STAT("rx_bytes.nic", stats.eth.rx_bytes),
	ICE_PF_STAT("tx_bytes.nic", stats.eth.tx_bytes),
	ICE_PF_STAT("rx_unicast.nic", stats.eth.rx_unicast),
	ICE_PF_STAT("tx_unicast.nic", stats.eth.tx_unicast),
	ICE_PF_STAT("rx_multicast.nic", stats.eth.rx_multicast),
	ICE_PF_STAT("tx_multicast.nic", stats.eth.tx_multicast),
	ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast),
	ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast),
	ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors),
	ICE_PF_STAT("tx_timeout.nic", tx_timeout_count),
	ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64),
	ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64),
	ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127),
	ICE_PF_STAT("tx_size_127.nic", stats.tx_size_127),
	ICE_PF_STAT("rx_size_255.nic", stats.rx_size_255),
	ICE_PF_STAT("tx_size_255.nic", stats.tx_size_255),
	ICE_PF_STAT("rx_size_511.nic", stats.rx_size_511),
	ICE_PF_STAT("tx_size_511.nic", stats.tx_size_511),
	ICE_PF_STAT("rx_size_1023.nic", stats.rx_size_1023),
	ICE_PF_STAT("tx_size_1023.nic", stats.tx_size_1023),
	ICE_PF_STAT("rx_size_1522.nic", stats.rx_size_1522),
	ICE_PF_STAT("tx_size_1522.nic", stats.tx_size_1522),
	ICE_PF_STAT("rx_size_big.nic", stats.rx_size_big),
	ICE_PF_STAT("tx_size_big.nic", stats.tx_size_big),
	ICE_PF_STAT("link_xon_rx.nic", stats.link_xon_rx),
	ICE_PF_STAT("link_xon_tx.nic", stats.link_xon_tx),
	ICE_PF_STAT("link_xoff_rx.nic", stats.link_xoff_rx),
	ICE_PF_STAT("link_xoff_tx.nic", stats.link_xoff_tx),
	ICE_PF_STAT("tx_dropped_link_down.nic", stats.tx_dropped_link_down),
	ICE_PF_STAT("rx_undersize.nic", stats.rx_undersize),
	ICE_PF_STAT("rx_fragments.nic", stats.rx_fragments),
	ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
	ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
	ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
	ICE_PF_STAT("rx_eipe_error.nic", hw_rx_eipe_error),
	ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
	ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
	ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
	ICE_PF_STAT("mac_local_faults.nic", stats.mac_local_faults),
	ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
	ICE_PF_STAT("fdir_sb_match.nic", stats.fd_sb_match),
	ICE_PF_STAT("fdir_sb_status.nic", stats.fd_sb_status),
	ICE_PF_STAT("tx_hwtstamp_skipped", ptp.tx_hwtstamp_skipped),
	ICE_PF_STAT("tx_hwtstamp_timeouts", ptp.tx_hwtstamp_timeouts),
	ICE_PF_STAT("tx_hwtstamp_flushed", ptp.tx_hwtstamp_flushed),
	ICE_PF_STAT("tx_hwtstamp_discarded", ptp.tx_hwtstamp_discarded),
	ICE_PF_STAT("late_cached_phc_updates", ptp.late_cached_phc_updates),
};

static const u32 ice_regs_dump_list[] = {
	PFGEN_STATE,
	PRTGEN_STATUS,
	QRX_CTRL(0),
	QINT_TQCTL(0),
	QINT_RQCTL(0),
	PFINT_OICR_ENA,
	QRX_ITR(0),
#define GLDCB_TLPM_PCI_DM 0x000A0180
	GLDCB_TLPM_PCI_DM,
#define GLDCB_TLPM_TC2PFC 0x000A0194
	GLDCB_TLPM_TC2PFC,
#define TCDCB_TLPM_WAIT_DM(_i) (0x000A0080 + ((_i) * 4))
	TCDCB_TLPM_WAIT_DM(0),
	TCDCB_TLPM_WAIT_DM(1),
	TCDCB_TLPM_WAIT_DM(2),
	TCDCB_TLPM_WAIT_DM(3),
	TCDCB_TLPM_WAIT_DM(4),
	TCDCB_TLPM_WAIT_DM(5),
	TCDCB_TLPM_WAIT_DM(6),
	TCDCB_TLPM_WAIT_DM(7),
	TCDCB_TLPM_WAIT_DM(8),
	TCDCB_TLPM_WAIT_DM(9),
	TCDCB_TLPM_WAIT_DM(10),
	TCDCB_TLPM_WAIT_DM(11),
	TCDCB_TLPM_WAIT_DM(12),
	TCDCB_TLPM_WAIT_DM(13),
	TCDCB_TLPM_WAIT_DM(14),
	TCDCB_TLPM_WAIT_DM(15),
	TCDCB_TLPM_WAIT_DM(16),
	TCDCB_TLPM_WAIT_DM(17),
	TCDCB_TLPM_WAIT_DM(18),
	TCDCB_TLPM_WAIT_DM(19),
	TCDCB_TLPM_WAIT_DM(20),
	TCDCB_TLPM_WAIT_DM(21),
	TCDCB_TLPM_WAIT_DM(22),
	TCDCB_TLPM_WAIT_DM(23),
	TCDCB_TLPM_WAIT_DM(24),
	TCDCB_TLPM_WAIT_DM(25),
	TCDCB_TLPM_WAIT_DM(26),
	TCDCB_TLPM_WAIT_DM(27),
	TCDCB_TLPM_WAIT_DM(28),
	TCDCB_TLPM_WAIT_DM(29),
	TCDCB_TLPM_WAIT_DM(30),
	TCDCB_TLPM_WAIT_DM(31),
#define GLPCI_WATMK_CLNT_PIPEMON 0x000BFD90
	GLPCI_WATMK_CLNT_PIPEMON,
#define GLPCI_CUR_CLNT_COMMON 0x000BFD84
	GLPCI_CUR_CLNT_COMMON,
#define GLPCI_CUR_CLNT_PIPEMON 0x000BFD88
	GLPCI_CUR_CLNT_PIPEMON,
#define GLPCI_PCIERR 0x0009DEB0
	GLPCI_PCIERR,
#define GLPSM_DEBUG_CTL_STATUS 0x000B0600
	GLPSM_DEBUG_CTL_STATUS,
#define GLPSM0_DEBUG_FIFO_OVERFLOW_DETECT 0x000B0680
	GLPSM0_DEBUG_FIFO_OVERFLOW_DETECT,
#define GLPSM0_DEBUG_FIFO_UNDERFLOW_DETECT 0x000B0684
	GLPSM0_DEBUG_FIFO_UNDERFLOW_DETECT,
#define GLPSM0_DEBUG_DT_OUT_OF_WINDOW 0x000B0688
	GLPSM0_DEBUG_DT_OUT_OF_WINDOW,
#define GLPSM0_DEBUG_INTF_HW_ERROR_DETECT 0x000B069C
	GLPSM0_DEBUG_INTF_HW_ERROR_DETECT,
#define GLPSM0_DEBUG_MISC_HW_ERROR_DETECT 0x000B06A0
	GLPSM0_DEBUG_MISC_HW_ERROR_DETECT,
#define GLPSM1_DEBUG_FIFO_OVERFLOW_DETECT 0x000B0E80
	GLPSM1_DEBUG_FIFO_OVERFLOW_DETECT,
#define GLPSM1_DEBUG_FIFO_UNDERFLOW_DETECT 0x000B0E84
	GLPSM1_DEBUG_FIFO_UNDERFLOW_DETECT,
#define GLPSM1_DEBUG_SRL_FIFO_OVERFLOW_DETECT 0x000B0E88
	GLPSM1_DEBUG_SRL_FIFO_OVERFLOW_DETECT,
#define GLPSM1_DEBUG_SRL_FIFO_UNDERFLOW_DETECT 0x000B0E8C
	GLPSM1_DEBUG_SRL_FIFO_UNDERFLOW_DETECT,
#define GLPSM1_DEBUG_MISC_HW_ERROR_DETECT 0x000B0E90
	GLPSM1_DEBUG_MISC_HW_ERROR_DETECT,
#define GLPSM2_DEBUG_FIFO_OVERFLOW_DETECT 0x000B1680
	GLPSM2_DEBUG_FIFO_OVERFLOW_DETECT,
#define GLPSM2_DEBUG_FIFO_UNDERFLOW_DETECT 0x000B1684
	GLPSM2_DEBUG_FIFO_UNDERFLOW_DETECT,
#define GLPSM2_DEBUG_MISC_HW_ERROR_DETECT 0x000B1688
	GLPSM2_DEBUG_MISC_HW_ERROR_DETECT,
#define GLTDPU_TCLAN_COMP_BOB(_i) (0x00049ADC + ((_i) * 4))
	GLTDPU_TCLAN_COMP_BOB(1),
	GLTDPU_TCLAN_COMP_BOB(2),
	GLTDPU_TCLAN_COMP_BOB(3),
	GLTDPU_TCLAN_COMP_BOB(4),
	GLTDPU_TCLAN_COMP_BOB(5),
	GLTDPU_TCLAN_COMP_BOB(6),
	GLTDPU_TCLAN_COMP_BOB(7),
	GLTDPU_TCLAN_COMP_BOB(8),
#define GLTDPU_TCB_CMD_BOB(_i) (0x0004975C + ((_i) * 4))
	GLTDPU_TCB_CMD_BOB(1),
	GLTDPU_TCB_CMD_BOB(2),
	GLTDPU_TCB_CMD_BOB(3),
	GLTDPU_TCB_CMD_BOB(4),
	GLTDPU_TCB_CMD_BOB(5),
	GLTDPU_TCB_CMD_BOB(6),
	GLTDPU_TCB_CMD_BOB(7),
	GLTDPU_TCB_CMD_BOB(8),
#define GLTDPU_PSM_UPDATE_BOB(_i) (0x00049B5C + ((_i) * 4))
	GLTDPU_PSM_UPDATE_BOB(1),
	GLTDPU_PSM_UPDATE_BOB(2),
	GLTDPU_PSM_UPDATE_BOB(3),
	GLTDPU_PSM_UPDATE_BOB(4),
	GLTDPU_PSM_UPDATE_BOB(5),
	GLTDPU_PSM_UPDATE_BOB(6),
	GLTDPU_PSM_UPDATE_BOB(7),
	GLTDPU_PSM_UPDATE_BOB(8),
#define GLTCB_CMD_IN_BOB(_i) (0x000AE288 + ((_i) * 4))
	GLTCB_CMD_IN_BOB(1),
	GLTCB_CMD_IN_BOB(2),
	GLTCB_CMD_IN_BOB(3),
	GLTCB_CMD_IN_BOB(4),
	GLTCB_CMD_IN_BOB(5),
	GLTCB_CMD_IN_BOB(6),
	GLTCB_CMD_IN_BOB(7),
	GLTCB_CMD_IN_BOB(8),
#define GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(_i) (0x000FC148 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(8),
#define GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(_i) (0x000FC248 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(8),
#define GLLAN_TCLAN_CACHE_CTL_BOB_CTL(_i) (0x000FC1C8 + ((_i) * 4))
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(1),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(2),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(3),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(4),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(5),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(6),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(7),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(8),
#define GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(_i) (0x000FC188 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(8),
#define GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(_i) (0x000FC288 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(8),
#define PRTDCB_TCUPM_REG_CM(_i) (0x000BC360 + ((_i) * 4))
	PRTDCB_TCUPM_REG_CM(0),
	PRTDCB_TCUPM_REG_CM(1),
	PRTDCB_TCUPM_REG_CM(2),
	PRTDCB_TCUPM_REG_CM(3),
#define PRTDCB_TCUPM_REG_DM(_i) (0x000BC3A0 + ((_i) * 4))
	PRTDCB_TCUPM_REG_DM(0),
	PRTDCB_TCUPM_REG_DM(1),
	PRTDCB_TCUPM_REG_DM(2),
	PRTDCB_TCUPM_REG_DM(3),
#define PRTDCB_TLPM_REG_DM(_i) (0x000A0000 + ((_i) * 4))
	PRTDCB_TLPM_REG_DM(0),
	PRTDCB_TLPM_REG_DM(1),
	PRTDCB_TLPM_REG_DM(2),
	PRTDCB_TLPM_REG_DM(3),
};

struct ice_priv_flag {
	char name[ETH_GSTRING_LEN];
	u32 bitno; /* bit position in pf->flags */
};

#define ICE_PRIV_FLAG(_name, _bitno) { \
	.name = _name, \
	.bitno = _bitno, \
}

static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
	ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
	ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
	ICE_PRIV_FLAG("vf-true-promisc-support",
		      ICE_FLAG_VF_TRUE_PROMISC_ENA),
	ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
	ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING),
};

#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)

static const u32 ice_adv_lnk_speed_100[] __initconst = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

static const u32 ice_adv_lnk_speed_1000[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

static const u32 ice_adv_lnk_speed_2500[] __initconst = {
	ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

static const u32 ice_adv_lnk_speed_5000[] __initconst = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

static const u32 ice_adv_lnk_speed_10000[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
};

static const u32 ice_adv_lnk_speed_25000[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
};

static const u32 ice_adv_lnk_speed_40000[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
};

static const u32 ice_adv_lnk_speed_50000[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 ice_adv_lnk_speed_100000[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
};

static const u32 ice_adv_lnk_speed_200000[] __initconst = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

static struct ethtool_forced_speed_map ice_adv_lnk_speed_maps[] __ro_after_init = {
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 1000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 2500),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 5000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 10000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 25000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 40000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 50000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 200000),
};

void __init ice_adv_lnk_speed_maps_init(void)
{
	ethtool_forced_speed_maps_init(ice_adv_lnk_speed_maps,
				       ARRAY_SIZE(ice_adv_lnk_speed_maps));
}

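/**
 * __ice_get_drvinfo - fill driver information for a given VSI
 * @netdev: network interface device structure
 * @drvinfo: ethtool driver info buffer to fill
 * @vsi: VSI whose PF, NVM and OROM versions are reported
 */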
static void
__ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo,
		  struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_orom_info *orom;
	struct ice_nvm_info *nvm;

	nvm = &hw->flash.nvm;
	orom = &hw->flash.orom;

	strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));

	/* Display NVM version (from which the firmware version can be
	 * determined) which contains more pertinent information.
	 */
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
		 nvm->eetrack, orom->major, orom->build, orom->patch);

	strscpy(drvinfo->bus_info, pci_name(pf->pdev),
		sizeof(drvinfo->bus_info));
}

static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	__ice_get_drvinfo(netdev, drvinfo, np->vsi);
	drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}

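/**
 * ice_get_regs_len - report the size of the register dump buffer
 * @netdev: network interface device structure (unused)
 *
 * Return: size in bytes of the base register list plus the extended
 * serdes equalization dump area.
 */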
static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
	return (sizeof(ice_regs_dump_list) +
		sizeof(struct ice_regdump_to_ethtool));
}

/**
 * ice_ethtool_get_maxspeed - Get the max speed for given lport
 * @hw: pointer to the HW struct
 * @lport: logical port for which max speed is requested
 * @max_speed: return max speed for input lport
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_ethtool_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed)
{
	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = {};
	bool active_valid = false, pending_valid = true;
	u8 option_count = ICE_AQC_PORT_OPT_MAX;
	u8 active_idx = 0, pending_idx = 0;
	int status;

	status = ice_aq_get_port_options(hw, options, &option_count, lport,
					 true, &active_idx, &active_valid,
					 &pending_idx, &pending_valid);
	if (status)
		return -EIO;
	if (!active_valid)
		return -EINVAL;

	*max_speed = options[active_idx].max_lane_speed & ICE_AQC_PORT_OPT_MAX_LANE_M;
	return 0;
}

/**
 * ice_is_serdes_muxed - returns whether serdes is muxed in hardware
 * @hw: pointer to the HW struct
 *
 * Return: true when serdes is muxed, false when serdes is not muxed.
 */
static bool ice_is_serdes_muxed(struct ice_hw *hw)
{
	u32 reg_value = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);

	return FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, reg_value);
}

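/**
 * ice_map_port_topology_for_sfp - map logical port to PCS quad/port and serdes
 * @port_topology: buffer to hold the mapping
 * @lport: logical port for which the mapping is requested
 * @is_muxed: true when the serdes lanes are muxed in hardware
 *
 * Mapping used for SFP+/SFP28 cages.
 *
 * Return: 0 on success, -EINVAL for an unknown logical port.
 */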
static int ice_map_port_topology_for_sfp(struct ice_port_topology *port_topology,
					 u8 lport, bool is_muxed)
{
	switch (lport) {
	case 0:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 0;
		port_topology->primary_serdes_lane = 0;
		break;
	case 1:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 0;
		if (is_muxed)
			port_topology->primary_serdes_lane = 2;
		else
			port_topology->primary_serdes_lane = 4;
		break;
	case 2:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 1;
		port_topology->primary_serdes_lane = 1;
		break;
	case 3:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 1;
		if (is_muxed)
			port_topology->primary_serdes_lane = 3;
		else
			port_topology->primary_serdes_lane = 5;
		break;
	case 4:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 2;
		break;
	case 5:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 6;
		break;
	case 6:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 3;
		break;
	case 7:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 7;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

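/**
 * ice_map_port_topology_for_qsfp - map logical port to PCS quad/port and serdes
 * @port_topology: buffer to hold the mapping
 * @lport: logical port for which the mapping is requested
 * @is_muxed: true when the serdes lanes are muxed in hardware
 *
 * Mapping used for QSFP/QSFP28 cages.
 *
 * Return: 0 on success, -EINVAL for an unknown logical port.
 */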
static int ice_map_port_topology_for_qsfp(struct ice_port_topology *port_topology,
					  u8 lport, bool is_muxed)
{
	switch (lport) {
	case 0:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 0;
		port_topology->primary_serdes_lane = 0;
		break;
	case 1:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 0;
		if (is_muxed)
			port_topology->primary_serdes_lane = 2;
		else
			port_topology->primary_serdes_lane = 4;
		break;
	case 2:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 1;
		port_topology->primary_serdes_lane = 1;
		break;
	case 3:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 1;
		if (is_muxed)
			port_topology->primary_serdes_lane = 3;
		else
			port_topology->primary_serdes_lane = 5;
		break;
	case 4:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 2;
		break;
	case 5:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 6;
		break;
	case 6:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 3;
		break;
	case 7:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 7;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_get_port_topology - returns physical topology like pcsquad, pcsport,
 *                         serdes number
 * @hw: pointer to the HW struct
 * @lport: logical port for which physical info requested
 * @port_topology: buffer to hold port topology
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
				 struct ice_port_topology *port_topology)
{
	struct ice_aqc_get_link_topo cmd = {};
	u16 node_handle = 0;
	u8 cage_type = 0;
	bool is_muxed;
	int err;
	u8 ctx;

	ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
	ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
	cmd.addr.topo_params.node_type_ctx = ctx;

	err = ice_aq_get_netlist_node(hw, &cmd, &cage_type, &node_handle);
	if (err)
		return -EINVAL;

	is_muxed = ice_is_serdes_muxed(hw);

	if (cage_type == 0x11 || /* SFP+ */
	    cage_type == 0x12) { /* SFP28 */
		port_topology->serdes_lane_count = 1;
		err = ice_map_port_topology_for_sfp(port_topology, lport, is_muxed);
		if (err)
			return err;
	} else if (cage_type == 0x13 || /* QSFP */
		   cage_type == 0x14) { /* QSFP28 */
		u8 max_speed = 0;

		err = ice_ethtool_get_maxspeed(hw, lport, &max_speed);
		if (err)
			return err;

		if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
			port_topology->serdes_lane_count = 4;
		else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G ||
			 max_speed == ICE_AQC_PORT_OPT_MAX_LANE_40G)
			port_topology->serdes_lane_count = 2;
		else
			port_topology->serdes_lane_count = 1;

		err = ice_map_port_topology_for_qsfp(port_topology, lport, is_muxed);
		if (err)
			return err;
	} else {
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_get_tx_rx_equa - read serdes Tx/Rx equalization parameters
 * @hw: pointer to the HW struct
 * @serdes_num: serdes number to query
 * @ptr: structure that receives all equalization parameters for the serdes
 *
 * Return: 0 on success, negative error code on failure.
 */
static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
			      struct ice_serdes_equalization_to_ethtool *ptr)
{
	static const int tx = ICE_AQC_OP_CODE_TX_EQU;
	static const int rx = ICE_AQC_OP_CODE_RX_EQU;
	struct {
		int data_in;
		int opcode;
		int *out;
	} aq_params[] = {
		{ ICE_AQC_TX_EQU_PRE1, tx, &ptr->tx_equ_pre1 },
		{ ICE_AQC_TX_EQU_PRE3, tx, &ptr->tx_equ_pre3 },
		{ ICE_AQC_TX_EQU_ATTEN, tx, &ptr->tx_equ_atten },
		{ ICE_AQC_TX_EQU_POST1, tx, &ptr->tx_equ_post1 },
		{ ICE_AQC_TX_EQU_PRE2, tx, &ptr->tx_equ_pre2 },
		{ ICE_AQC_RX_EQU_PRE2, rx, &ptr->rx_equ_pre2 },
		{ ICE_AQC_RX_EQU_PRE1, rx, &ptr->rx_equ_pre1 },
		{ ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 },
		{ ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf },
		{ ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf },
		{ ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf },
		{ ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf },
		{ ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc },
		{ ICE_AQC_RX_EQU_CTLE_BW, rx, &ptr->rx_equ_ctle_bw },
		{ ICE_AQC_RX_EQU_DFE_GAIN, rx, &ptr->rx_equ_dfe_gain },
		{ ICE_AQC_RX_EQU_DFE_GAIN2, rx, &ptr->rx_equ_dfe_gain_2 },
		{ ICE_AQC_RX_EQU_DFE_2, rx, &ptr->rx_equ_dfe_2 },
		{ ICE_AQC_RX_EQU_DFE_3, rx, &ptr->rx_equ_dfe_3 },
		{ ICE_AQC_RX_EQU_DFE_4, rx, &ptr->rx_equ_dfe_4 },
		{ ICE_AQC_RX_EQU_DFE_5, rx, &ptr->rx_equ_dfe_5 },
		{ ICE_AQC_RX_EQU_DFE_6, rx, &ptr->rx_equ_dfe_6 },
		{ ICE_AQC_RX_EQU_DFE_7, rx, &ptr->rx_equ_dfe_7 },
		{ ICE_AQC_RX_EQU_DFE_8, rx, &ptr->rx_equ_dfe_8 },
		{ ICE_AQC_RX_EQU_DFE_9, rx, &ptr->rx_equ_dfe_9 },
		{ ICE_AQC_RX_EQU_DFE_10, rx, &ptr->rx_equ_dfe_10 },
		{ ICE_AQC_RX_EQU_DFE_11, rx, &ptr->rx_equ_dfe_11 },
		{ ICE_AQC_RX_EQU_DFE_12, rx, &ptr->rx_equ_dfe_12 },
	};
	int err;

	for (int i = 0; i < ARRAY_SIZE(aq_params); i++) {
		err = ice_aq_get_phy_equalization(hw, aq_params[i].data_in,
						  aq_params[i].opcode,
						  serdes_num, aq_params[i].out);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_get_extended_regs - dump serdes equalization parameters per
 *                         pcsquad, pcsport
 * @netdev: pointer to net device structure
 * @p: output buffer to fill requested register dump
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_get_extended_regs(struct net_device *netdev, void *p)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_regdump_to_ethtool *ice_prv_regs_buf;
	struct ice_port_topology port_topology = {};
	struct ice_port_info *pi;
	struct ice_pf *pf;
	struct ice_hw *hw;
	unsigned int i;
	int err;

	pf = np->vsi->back;
	hw = &pf->hw;
	pi = np->vsi->port_info;

	/* Serdes parameters are not supported if not the PF VSI */
	if (np->vsi->type != ICE_VSI_PF || !pi)
		return -EINVAL;

	err = ice_get_port_topology(hw, pi->lport, &port_topology);
	if (err)
		return -EINVAL;
	if (port_topology.serdes_lane_count > 4)
		return -EINVAL;

	ice_prv_regs_buf = p;

	/* Get serdes equalization parameter for available serdes */
	for (i = 0; i < port_topology.serdes_lane_count; i++) {
		u8 serdes_num = 0;

		serdes_num = port_topology.primary_serdes_lane + i;
		err = ice_get_tx_rx_equa(hw, serdes_num,
					 &ice_prv_regs_buf->equalization[i]);
		if (err)
			return -EINVAL;
	}

	return 0;
}

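/**
 * ice_get_regs - dump the base register list and extended serdes data
 * @netdev: network interface device structure
 * @regs: ethtool register dump request
 * @p: output buffer to fill with register values
 */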
static void
ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_hw *hw = &pf->hw;
	u32 *regs_buf = (u32 *)p;
	unsigned int i;

	regs->version = 2;

	for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
		regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);

	ice_get_extended_regs(netdev, (void *)&regs_buf[i]);
}

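/**
 * ice_get_msglevel - report the current driver message level
 * @netdev: network interface device structure
 *
 * Return: the msg_enable bitmap of the PF.
 */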
static u32 ice_get_msglevel(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (pf->hw.debug_mask)
		netdev_info(netdev, "hw debug_mask: 0x%llX\n",
			    pf->hw.debug_mask);
#endif /* !CONFIG_DYNAMIC_DEBUG */

	return pf->msg_enable;
}

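/**
 * ice_set_msglevel - set the driver message level
 * @netdev: network interface device structure
 * @data: message level bitmap; when ICE_DBG_USER is set and dynamic debug
 * is not available, the value is applied to the HW debug mask instead
 */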
static void ice_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (ICE_DBG_USER & data)
		pf->hw.debug_mask = data;
	else
		pf->msg_enable = data;
#else
	pf->msg_enable = data;
#endif /* !CONFIG_DYNAMIC_DEBUG */
}

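/**
 * ice_get_link_ext_stats - report extended link statistics
 * @netdev: network interface device structure
 * @stats: buffer to hold the link-down event count
 */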
static void ice_get_link_ext_stats(struct net_device *netdev,
				   struct ethtool_link_ext_stats *stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	stats->link_down_events = pf->link_down_events;
}

static int ice_get_eeprom_len(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	return (int)pf->hw.flash.flash_size;
}

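/**
 * ice_get_eeprom - read a region of the NVM flash contents
 * @netdev: network interface device structure
 * @eeprom: ethtool EEPROM request describing offset and length
 * @bytes: buffer that receives the flash data
 *
 * Return: 0 on success, negative error code on failure.
 */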
static int
ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
	       u8 *bytes)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int ret;
	u8 *buf;

	dev = ice_pf_to_dev(pf);

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
	netdev_dbg(netdev, "GEEPROM cmd 0x%08x, offset 0x%08x, len 0x%08x\n",
		   eeprom->cmd, eeprom->offset, eeprom->len);

	buf = kzalloc(eeprom->len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret) {
		dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %s\n",
			ret, libie_aq_str(hw->adminq.sq_last_status));
		goto out;
	}

	ret = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
				false);
	if (ret) {
		dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %s\n",
			ret, libie_aq_str(hw->adminq.sq_last_status));
		goto release;
	}

	memcpy(bytes, buf, eeprom->len);
release:
	ice_release_nvm(hw);
out:
	kfree(buf);
	return ret;
}

/**
 * ice_active_vfs - check if there are any active VFs
 * @pf: board private structure
 *
 * Returns true if an active VF is found, otherwise returns false
 */
static bool ice_active_vfs(struct ice_pf *pf)
{
	bool active = false;
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
			active = true;
			break;
		}
	}
	rcu_read_unlock();

	return active;
}

/**
 * ice_link_test - perform a link test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_link_test(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	bool link_up = false;
	int status;

	netdev_info(netdev, "link test\n");
	status = ice_get_link_status(np->vsi->port_info, &link_up);
	if (status) {
		netdev_err(netdev, "link query error, status = %d\n",
			   status);
		return 1;
	}

	if (!link_up)
		return 2;

	return 0;
}

/**
 * ice_eeprom_test - perform an EEPROM test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_eeprom_test(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	netdev_info(netdev, "EEPROM test\n");
	return !!(ice_nvm_validate_checksum(&pf->hw));
}

/**
 * ice_reg_pattern_test
 * @hw: pointer to the HW struct
 * @reg: reg to be tested
 * @mask: bits to be touched
 */
static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
{
	struct ice_pf *pf = (struct ice_pf *)hw->back;
	struct device *dev = ice_pf_to_dev(pf);
	static const u32 patterns[] = {
		0x5A5A5A5A, 0xA5A5A5A5,
		0x00000000, 0xFFFFFFFF
	};
	u32 val, orig_val;
	unsigned int i;

	orig_val = rd32(hw, reg);
	for (i = 0; i < ARRAY_SIZE(patterns); ++i) {
		u32 pattern = patterns[i] & mask;

		wr32(hw, reg, pattern);
		val = rd32(hw, reg);
		if (val == pattern)
			continue;
		dev_err(dev, "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n"
			, __func__, reg, pattern, val);
		return 1;
	}

	wr32(hw, reg, orig_val);
	val = rd32(hw, reg);
	if (val != orig_val) {
		dev_err(dev, "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n"
			, __func__, reg, orig_val, val);
		return 1;
	}

	return 0;
}

/**
 * ice_reg_test - perform a register test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_reg_test(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_hw *hw = np->vsi->port_info->hw;
	u32 int_elements = hw->func_caps.common_cap.num_msix_vectors ?
		hw->func_caps.common_cap.num_msix_vectors - 1 : 1;
	struct ice_diag_reg_test_info {
		u32 address;
		u32 mask;
		u32 elem_num;
		u32 elem_size;
	} ice_reg_list[] = {
		{GLINT_ITR(0, 0), 0x00000fff, int_elements,
			GLINT_ITR(0, 1) - GLINT_ITR(0, 0)},
		{GLINT_ITR(1, 0), 0x00000fff, int_elements,
			GLINT_ITR(1, 1) - GLINT_ITR(1, 0)},
		{GLINT_ITR(2, 0), 0x00000fff, int_elements,
			GLINT_ITR(2, 1) - GLINT_ITR(2, 0)},
		{GLINT_CTL, 0xffff0001, 1, 0}
	};
	unsigned int i;

	netdev_dbg(netdev, "Register test\n");
	for (i = 0; i < ARRAY_SIZE(ice_reg_list); ++i) {
		u32 j;

		for (j = 0; j < ice_reg_list[i].elem_num; ++j) {
			u32 mask = ice_reg_list[i].mask;
			u32 reg = ice_reg_list[i].address +
				  (j * ice_reg_list[i].elem_size);

			/* bail on failure (non-zero return) */
			if (ice_reg_pattern_test(hw, reg, mask))
				return 1;
		}
	}

	return 0;
}

/**
 * ice_lbtest_prepare_rings - configure Tx/Rx test rings
 * @vsi: pointer to the VSI structure
 *
 * Function configures rings of a VSI for loopback test without
 * enabling interrupts or informing the kernel about new queues.
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
{
	int status;

	status = ice_vsi_setup_tx_rings(vsi);
	if (status)
		goto err_setup_tx_ring;

	status = ice_vsi_setup_rx_rings(vsi);
	if (status)
		goto err_setup_rx_ring;

	status = ice_vsi_cfg_lan(vsi);
	if (status)
		goto err_setup_rx_ring;

	status = ice_vsi_start_all_rx_rings(vsi);
	if (status)
		goto err_start_rx_ring;

	return 0;

err_start_rx_ring:
	ice_vsi_free_rx_rings(vsi);
err_setup_rx_ring:
	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
err_setup_tx_ring:
	ice_vsi_free_tx_rings(vsi);

	return status;
}

/**
 * ice_lbtest_disable_rings - disable Tx/Rx test rings after loopback test
 * @vsi: pointer to the VSI structure
 *
 * Function stops and frees VSI rings after a loopback test.
 * Returns 0 on success, negative on failure.
 */
static int ice_lbtest_disable_rings(struct ice_vsi *vsi)
{
	int status;

	status = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
	if (status)
		netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
			   vsi->vsi_num, status);

	status = ice_vsi_stop_all_rx_rings(vsi);
	if (status)
		netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
			   vsi->vsi_num, status);

	ice_vsi_free_tx_rings(vsi);
	ice_vsi_free_rx_rings(vsi);

	return status;
}

/**
 * ice_lbtest_create_frame - create test packet
 * @pf: pointer to the PF structure
 * @ret_data: allocated frame buffer
 * @size: size of the packet data
 *
 * Function allocates a frame with a test pattern on specific offsets.
 * Returns 0 on success, non-zero on failure.
 */
static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
{
	u8 *data;

	if (!pf)
		return -EINVAL;

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Since the ethernet test frame should always be at least
	 * 64 bytes long, fill some octets in the payload with test data.
	 */
	memset(data, 0xFF, size);
	data[32] = 0xDE;
	data[42] = 0xAD;
	data[44] = 0xBE;
	data[46] = 0xEF;

	*ret_data = data;

	return 0;
}

/**
 * ice_lbtest_check_frame - verify received loopback frame
 * @frame: pointer to the raw packet data
 *
 * Function verifies received test frame with a pattern.
 * Returns true if frame matches the pattern, false otherwise.
 */
static bool ice_lbtest_check_frame(u8 *frame)
{
	/* Validate bytes of a frame under offsets chosen earlier */
	if (frame[32] == 0xDE &&
	    frame[42] == 0xAD &&
	    frame[44] == 0xBE &&
	    frame[46] == 0xEF &&
	    frame[48] == 0xFF)
		return true;

	return false;
}

/**
 * ice_diag_send - send test frames to the test ring
 * @tx_ring: pointer to the transmit ring
 * @data: pointer to the raw packet data
 * @size: size of the packet to send
 *
 * Function sends loopback packets on a test Tx ring.
 */
static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size)
{
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;
	u64 td_cmd;

	tx_desc = ICE_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_buf = &tx_ring->tx_buf[tx_ring->next_to_use];

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		return -EINVAL;

	tx_desc->buf_addr = cpu_to_le64(dma);

	/* These flags are required for a descriptor to be pushed out */
	td_cmd = (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
	tx_desc->cmd_type_offset_bsz =
		cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			    (td_cmd << ICE_TXD_QW1_CMD_S) |
			    ((u64)0 << ICE_TXD_QW1_OFFSET_S) |
			    ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			    ((u64)0 << ICE_TXD_QW1_L2TAG1_S));

	tx_buf->next_to_watch = tx_desc;

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	tx_ring->next_to_use++;
	if (tx_ring->next_to_use >= tx_ring->count)
		tx_ring->next_to_use = 0;

	writel_relaxed(tx_ring->next_to_use, tx_ring->tail);

	/* Wait until the packets get transmitted to the receive queue. */
	usleep_range(1000, 2000);
	dma_unmap_single(tx_ring->dev, dma, size, DMA_TO_DEVICE);

	return 0;
}

#define ICE_LB_FRAME_SIZE 64
/**
 * ice_lbtest_receive_frames - receive and verify test frames
 * @rx_ring: pointer to the receive ring
 *
 * Function receives loopback packets and verifies their correctness.
 * Returns number of received valid frames.
 */
static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
{
	struct libeth_fqe *rx_buf;
	int valid_frames, i;
	struct page *page;
	u8 *received_buf;

	valid_frames = 0;

	for (i = 0; i < rx_ring->count; i++) {
		union ice_32b_rx_flex_desc *rx_desc;

		rx_desc = ICE_RX_DESC(rx_ring, i);

		if (!(rx_desc->wb.status_error0 &
		    (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
		     cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
			continue;

		rx_buf = &rx_ring->rx_fqes[i];
		page = __netmem_to_page(rx_buf->netmem);
		received_buf = page_address(page) + rx_buf->offset +
			       page->pp->p.offset;

		if (ice_lbtest_check_frame(received_buf))
			valid_frames++;
	}

	return valid_frames;
}

/**
 * ice_loopback_test - perform a loopback test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_loopback_test(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *test_vsi;
	u8 *tx_frame __free(kfree) = NULL;
	u8 broadcast[ETH_ALEN], ret = 0;
	int num_frames, valid_frames;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	int i;

	netdev_info(netdev, "loopback test\n");

	test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
	if (!test_vsi) {
		netdev_err(netdev, "Failed to create a VSI for the loopback test\n");
		return 1;
	}

	test_vsi->netdev = netdev;
	tx_ring = test_vsi->tx_rings[0];
	rx_ring = test_vsi->rx_rings[0];
	/* Dummy q_vector and napi. Fill the minimum required for
	 * ice_rxq_pp_create().
	 */
	rx_ring->q_vector->napi.dev = netdev;

	if (ice_lbtest_prepare_rings(test_vsi)) {
		ret = 2;
		goto lbtest_vsi_close;
	}

	if (ice_alloc_rx_bufs(rx_ring, rx_ring->count)) {
		ret = 3;
		goto lbtest_rings_dis;
	}

	/* Enable MAC loopback in firmware */
	if (ice_aq_set_mac_loopback(&pf->hw, true, NULL)) {
		ret = 4;
		goto lbtest_mac_dis;
	}

	/* Test VSI needs to receive broadcast packets */
	eth_broadcast_addr(broadcast);
	if (ice_fltr_add_mac(test_vsi, broadcast, ICE_FWD_TO_VSI)) {
		ret = 5;
		goto lbtest_mac_dis;
	}

	if (ice_lbtest_create_frame(pf, &tx_frame, ICE_LB_FRAME_SIZE)) {
		ret = 7;
		goto remove_mac_filters;
	}

	num_frames = min_t(int, tx_ring->count, 32);
	for (i = 0; i < num_frames; i++) {
		if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) {
			ret = 8;
			goto remove_mac_filters;
		}
	}

	valid_frames = ice_lbtest_receive_frames(rx_ring);
	if (!valid_frames)
		ret = 9;
	else if (valid_frames != num_frames)
		ret = 10;

remove_mac_filters:
	if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI))
		netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
lbtest_mac_dis:
	/* Disable MAC loopback after the test is completed. */
	if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
		netdev_err(netdev, "Could not disable MAC loopback\n");
lbtest_rings_dis:
	if (ice_lbtest_disable_rings(test_vsi))
		netdev_err(netdev, "Could not disable test rings\n");
lbtest_vsi_close:
	test_vsi->netdev = NULL;
	if (ice_vsi_release(test_vsi))
		netdev_err(netdev, "Failed to remove the test VSI\n");

	return ret;
}

/**
 * ice_intr_test - perform an interrupt test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_intr_test(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u16 swic_old = pf->sw_int_count;

	netdev_info(netdev, "interrupt test\n");

	wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_irq.index),
	     GLINT_DYN_CTL_SW_ITR_INDX_M |
	     GLINT_DYN_CTL_INTENA_MSK_M |
	     GLINT_DYN_CTL_SWINT_TRIG_M);

	usleep_range(1000, 2000);
	return (swic_old == pf->sw_int_count);
}

/**
 * ice_self_test - handler function for performing a self-test by ethtool
 * @netdev: network interface device structure
 * @eth_test: ethtool_test structure
 * @data: required by ethtool.self_test
 *
 * This function is called after invoking 'ethtool -t devname' command where
 * devname is the name of the network device on which ethtool should operate.
 * It performs a set of self-tests to check if a device works properly.
 */
static void
ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
	      u64 *data)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	bool if_running = netif_running(netdev);
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		netdev_info(netdev, "offline testing starting\n");

		set_bit(ICE_TESTING, pf->state);

		if (ice_active_vfs(pf)) {
			dev_warn(dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
			data[ICE_ETH_TEST_REG] = 1;
			data[ICE_ETH_TEST_EEPROM] = 1;
			data[ICE_ETH_TEST_INTR] = 1;
			data[ICE_ETH_TEST_LOOP] = 1;
			data[ICE_ETH_TEST_LINK] = 1;
			eth_test->flags |= ETH_TEST_FL_FAILED;
			clear_bit(ICE_TESTING, pf->state);
			goto skip_ol_tests;
		}
		/* If the device is online then take it offline */
		if (if_running)
			/* indicate we're in test mode */
			ice_stop(netdev);

		data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
		data[ICE_ETH_TEST_EEPROM] = ice_eeprom_test(netdev);
		data[ICE_ETH_TEST_INTR] = ice_intr_test(netdev);
		data[ICE_ETH_TEST_LOOP] = ice_loopback_test(netdev);
		data[ICE_ETH_TEST_REG] = ice_reg_test(netdev);

		if (data[ICE_ETH_TEST_LINK] ||
		    data[ICE_ETH_TEST_EEPROM] ||
		    data[ICE_ETH_TEST_LOOP] ||
		    data[ICE_ETH_TEST_INTR] ||
		    data[ICE_ETH_TEST_REG])
			eth_test->flags |= ETH_TEST_FL_FAILED;

		clear_bit(ICE_TESTING, pf->state);

		if (if_running) {
			int status = ice_open(netdev);

			if (status) {
				dev_err(dev, "Could not open device %s, err %d\n",
					pf->int_name, status);
			}
		}
	} else {
		/* Online tests */
		netdev_info(netdev, "online testing starting\n");

		data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
		if (data[ICE_ETH_TEST_LINK])
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline only tests, not run in online; pass by default */
		data[ICE_ETH_TEST_REG] = 0;
		data[ICE_ETH_TEST_EEPROM] = 0;
		data[ICE_ETH_TEST_INTR] = 0;
		data[ICE_ETH_TEST_LOOP] = 0;
	}

skip_ol_tests:
	netdev_info(netdev, "testing finished\n");
}

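/**
 * __ice_get_strings - fill the requested string set for a VSI
 * @netdev: network interface device structure
 * @stringset: string set ID (stats, self-test or private flags)
 * @data: output buffer for the strings
 * @vsi: VSI whose queue and PF stat strings are reported
 */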
static void
__ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
		  struct ice_vsi *vsi)
{
	unsigned int i;
	u8 *p = data;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ICE_VSI_STATS_LEN; i++)
			ethtool_puts(&p, ice_gstrings_vsi_stats[i].stat_string);

		if (ice_is_port_repr_netdev(netdev))
			return;

		ice_for_each_alloc_txq(vsi, i) {
			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
		}

		ice_for_each_alloc_rxq(vsi, i) {
			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
		}

		if (vsi->type != ICE_VSI_PF)
			return;

		for (i = 0; i < ICE_PF_STATS_LEN; i++)
			ethtool_puts(&p, ice_gstrings_pf_stats[i].stat_string);

		for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
			ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i);
			ethtool_sprintf(&p, "tx_priority_%u_xoff.nic", i);
		}
		for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
			ethtool_sprintf(&p, "rx_priority_%u_xon.nic", i);
			ethtool_sprintf(&p, "rx_priority_%u_xoff.nic", i);
		}
		break;
	case ETH_SS_TEST:
		memcpy(data, ice_gstrings_test, ICE_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++)
			ethtool_puts(&p, ice_gstrings_priv_flags[i].name);
		break;
	default:
		break;
	}
}

static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	__ice_get_strings(netdev, stringset, data, np->vsi);
}

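/**
 * ice_set_phys_id - blink the port identification LED
 * @netdev: network interface device structure
 * @state: LED state requested by ethtool
 *
 * Return: 0 on success, -EINVAL for unsupported states, -EIO if the
 * admin queue command fails.
 */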
1522 static int
ice_set_phys_id(struct net_device * netdev,enum ethtool_phys_id_state state)1523 ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
1524 {
1525 struct ice_netdev_priv *np = netdev_priv(netdev);
1526 bool led_active;
1527
1528 switch (state) {
1529 case ETHTOOL_ID_ACTIVE:
1530 led_active = true;
1531 break;
1532 case ETHTOOL_ID_INACTIVE:
1533 led_active = false;
1534 break;
1535 default:
1536 return -EINVAL;
1537 }
1538
1539 if (ice_aq_set_port_id_led(np->vsi->port_info, !led_active, NULL))
1540 return -EIO;
1541
1542 return 0;
1543 }
1544
1545 /**
1546 * ice_set_fec_cfg - Set link FEC options
1547 * @netdev: network interface device structure
1548 * @req_fec: FEC mode to configure
1549 */
ice_set_fec_cfg(struct net_device * netdev,enum ice_fec_mode req_fec)1550 static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
1551 {
1552 struct ice_netdev_priv *np = netdev_priv(netdev);
1553 struct ice_aqc_set_phy_cfg_data config = { 0 };
1554 struct ice_vsi *vsi = np->vsi;
1555 struct ice_port_info *pi;
1556
1557 pi = vsi->port_info;
1558 if (!pi)
1559 return -EOPNOTSUPP;
1560
1561 /* Changing the FEC parameters is not supported if not the PF VSI */
1562 if (vsi->type != ICE_VSI_PF) {
1563 netdev_info(netdev, "Changing FEC parameters only supported for PF VSI\n");
1564 return -EOPNOTSUPP;
1565 }
1566
1567 /* Proceed only if requesting different FEC mode */
1568 if (pi->phy.curr_user_fec_req == req_fec)
1569 return 0;
1570
1571 /* Copy the current user PHY configuration. The current user PHY
1572 * configuration is initialized during probe from PHY capabilities
1573 * software mode, and updated on set PHY configuration.
1574 */
1575 memcpy(&config, &pi->phy.curr_user_phy_cfg, sizeof(config));
1576
1577 ice_cfg_phy_fec(pi, &config, req_fec);
1578 config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1579
1580 if (ice_aq_set_phy_cfg(pi->hw, pi, &config, NULL))
1581 return -EAGAIN;
1582
1583 /* Save requested FEC config */
1584 pi->phy.curr_user_fec_req = req_fec;
1585
1586 return 0;
1587 }
1588
1589 /**
1590 * ice_set_fecparam - Set FEC link options
1591 * @netdev: network interface device structure
1592 * @fecparam: Ethtool structure to retrieve FEC parameters
1593 */
1594 static int
ice_set_fecparam(struct net_device * netdev,struct ethtool_fecparam * fecparam)1595 ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
1596 {
1597 struct ice_netdev_priv *np = netdev_priv(netdev);
1598 struct ice_vsi *vsi = np->vsi;
1599 enum ice_fec_mode fec;
1600
1601 switch (fecparam->fec) {
1602 case ETHTOOL_FEC_AUTO:
1603 fec = ICE_FEC_AUTO;
1604 break;
1605 case ETHTOOL_FEC_RS:
1606 fec = ICE_FEC_RS;
1607 break;
1608 case ETHTOOL_FEC_BASER:
1609 fec = ICE_FEC_BASER;
1610 break;
1611 case ETHTOOL_FEC_OFF:
1612 case ETHTOOL_FEC_NONE:
1613 fec = ICE_FEC_NONE;
1614 break;
1615 default:
1616 dev_warn(ice_pf_to_dev(vsi->back), "Unsupported FEC mode: %d\n",
1617 fecparam->fec);
1618 return -EINVAL;
1619 }
1620
1621 return ice_set_fec_cfg(netdev, fec);
1622 }
1623
1624 /**
1625 * ice_get_fecparam - Get link FEC options
1626 * @netdev: network interface device structure
1627 * @fecparam: Ethtool structure to retrieve FEC parameters
1628 */
1629 static int
ice_get_fecparam(struct net_device * netdev,struct ethtool_fecparam * fecparam)1630 ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
1631 {
1632 struct ice_netdev_priv *np = netdev_priv(netdev);
1633 struct ice_aqc_get_phy_caps_data *caps;
1634 struct ice_link_status *link_info;
1635 struct ice_vsi *vsi = np->vsi;
1636 struct ice_port_info *pi;
1637 int err;
1638
1639 pi = vsi->port_info;
1640
1641 if (!pi)
1642 return -EOPNOTSUPP;
1643 link_info = &pi->phy.link_info;
1644
1645 /* Set FEC mode based on negotiated link info */
1646 switch (link_info->fec_info) {
1647 case ICE_AQ_LINK_25G_KR_FEC_EN:
1648 fecparam->active_fec = ETHTOOL_FEC_BASER;
1649 break;
1650 case ICE_AQ_LINK_25G_RS_528_FEC_EN:
1651 case ICE_AQ_LINK_25G_RS_544_FEC_EN:
1652 fecparam->active_fec = ETHTOOL_FEC_RS;
1653 break;
1654 default:
1655 fecparam->active_fec = ETHTOOL_FEC_OFF;
1656 break;
1657 }
1658
1659 caps = kzalloc_obj(*caps);
1660 if (!caps)
1661 return -ENOMEM;
1662
1663 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
1664 caps, NULL);
1665 if (err)
1666 goto done;
1667
1668 /* Set supported/configured FEC modes based on PHY capability */
1669 if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC)
1670 fecparam->fec |= ETHTOOL_FEC_AUTO;
1671 if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
1672 caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
1673 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN ||
1674 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
1675 fecparam->fec |= ETHTOOL_FEC_BASER;
1676 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
1677 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ ||
1678 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
1679 fecparam->fec |= ETHTOOL_FEC_RS;
1680 if (caps->link_fec_options == 0)
1681 fecparam->fec |= ETHTOOL_FEC_OFF;
1682
1683 done:
1684 kfree(caps);
1685 return err;
1686 }
1687
1688 /**
1689 * ice_nway_reset - restart autonegotiation
1690 * @netdev: network interface device structure
1691 */
ice_nway_reset(struct net_device * netdev)1692 static int ice_nway_reset(struct net_device *netdev)
1693 {
1694 struct ice_netdev_priv *np = netdev_priv(netdev);
1695 struct ice_vsi *vsi = np->vsi;
1696 int err;
1697
1698 /* If VSI state is up, then restart autoneg with link up */
1699 if (!test_bit(ICE_DOWN, vsi->back->state))
1700 err = ice_set_link(vsi, true);
1701 else
1702 err = ice_set_link(vsi, false);
1703
1704 return err;
1705 }
1706
1707 /**
1708 * ice_get_priv_flags - report device private flags
1709 * @netdev: network interface device structure
1710 *
1711 * The get string set count and the string set should be matched for each
1712 * flag returned. Add new strings for each flag to the ice_gstrings_priv_flags
1713 * array.
1714 *
1715 * Returns a u32 bitmap of flags.
1716 */
1717 static u32 ice_get_priv_flags(struct net_device *netdev)
1718 {
1719 struct ice_pf *pf = ice_netdev_to_pf(netdev);
1720 u32 i, ret_flags = 0;
1721
1722 for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
1723 const struct ice_priv_flag *priv_flag;
1724
1725 priv_flag = &ice_gstrings_priv_flags[i];
1726
1727 if (test_bit(priv_flag->bitno, pf->flags))
1728 ret_flags |= BIT(i);
1729 }
1730
1731 return ret_flags;
1732 }
1733
1734 /**
1735 * ice_set_priv_flags - set private flags
1736 * @netdev: network interface device structure
1737 * @flags: bit flags to be set
1738 */
1739 static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
1740 {
1741 struct ice_netdev_priv *np = netdev_priv(netdev);
1742 DECLARE_BITMAP(change_flags, ICE_PF_FLAGS_NBITS);
1743 DECLARE_BITMAP(orig_flags, ICE_PF_FLAGS_NBITS);
1744 struct ice_vsi *vsi = np->vsi;
1745 struct ice_pf *pf = vsi->back;
1746 struct device *dev;
1747 int ret = 0;
1748 u32 i;
1749
1750 if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE))
1751 return -EINVAL;
1752
1753 dev = ice_pf_to_dev(pf);
1754 set_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
1755
1756 bitmap_copy(orig_flags, pf->flags, ICE_PF_FLAGS_NBITS);
1757 for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
1758 const struct ice_priv_flag *priv_flag;
1759
1760 priv_flag = &ice_gstrings_priv_flags[i];
1761
1762 if (flags & BIT(i))
1763 set_bit(priv_flag->bitno, pf->flags);
1764 else
1765 clear_bit(priv_flag->bitno, pf->flags);
1766 }
1767
1768 bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);
1769
1770 /* Do not allow change to link-down-on-close when Total Port Shutdown
1771 * is enabled.
1772 */
1773 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, change_flags) &&
1774 test_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) {
1775 dev_err(dev, "Setting link-down-on-close not supported on this port\n");
1776 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1777 ret = -EINVAL;
1778 goto ethtool_exit;
1779 }
1780
1781 if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) {
1782 if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) {
1783 int status;
1784
1785 /* Disable FW LLDP engine */
1786 status = ice_cfg_lldp_mib_change(&pf->hw, false);
1787
1788 /* If unregistering for LLDP events fails, this is
1789 * not an error state, as there shouldn't be any
1790 * events to respond to.
1791 */
1792 if (status)
1793 dev_info(dev, "Failed to unreg for LLDP events\n");
1794
1795 /* The AQ call to stop the FW LLDP agent will generate
1796 * an error if the agent is already stopped.
1797 */
1798 status = ice_aq_stop_lldp(&pf->hw, true, true, NULL);
1799 if (status)
1800 dev_warn(dev, "Fail to stop LLDP agent\n");
1801 /* Use case for having the FW LLDP agent stopped
1802 * will likely not need DCB, so failure to init is
1803 * not a concern of ethtool
1804 */
1805 status = ice_init_pf_dcb(pf, true);
1806 if (status)
1807 dev_warn(dev, "Fail to init DCB\n");
1808
1809 pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED;
1810 pf->dcbx_cap |= DCB_CAP_DCBX_HOST;
1811 } else {
1812 bool dcbx_agent_status;
1813 int status;
1814
1815 if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) {
1816 clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
1817 dev_err(dev, "QoS in L3 DSCP mode, FW Agent not allowed to start\n");
1818 ret = -EOPNOTSUPP;
1819 goto ethtool_exit;
1820 }
1821
1822 /* Remove rule to direct LLDP packets to default VSI.
1823 * The FW LLDP engine will now be consuming them.
1824 */
1825 ice_cfg_sw_rx_lldp(vsi->back, false);
1826
1827 /* AQ command to start FW LLDP agent will return an
1828 * error if the agent is already started
1829 */
1830 status = ice_aq_start_lldp(&pf->hw, true, NULL);
1831 if (status)
1832 dev_warn(dev, "Fail to start LLDP Agent\n");
1833
1834 /* AQ command to start FW DCBX agent will fail if
1835 * the agent is already started
1836 */
1837 status = ice_aq_start_stop_dcbx(&pf->hw, true,
1838 &dcbx_agent_status,
1839 NULL);
1840 if (status)
1841 dev_dbg(dev, "Failed to start FW DCBX\n");
1842
1843 dev_info(dev, "FW DCBX agent is %s\n",
1844 dcbx_agent_status ? "ACTIVE" : "DISABLED");
1845
1846 /* Failure to configure MIB change or init DCB is not
1847 * relevant to ethtool. Print notification that
1848 * registration/init failed but do not return error
1849 * state to ethtool
1850 */
1851 status = ice_init_pf_dcb(pf, true);
1852 if (status)
1853 dev_dbg(dev, "Fail to init DCB\n");
1854
1855 /* Register for MIB change events */
1856 status = ice_cfg_lldp_mib_change(&pf->hw, true);
1857 if (status)
1858 dev_dbg(dev, "Fail to enable MIB change events\n");
1859
1860 pf->dcbx_cap &= ~DCB_CAP_DCBX_HOST;
1861 pf->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
1862
1863 ice_nway_reset(netdev);
1864 }
1865 }
1866 /* don't allow modification of this flag when a single VF is in
1867 * promiscuous mode because it's not supported
1868 */
1869 if (test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, change_flags) &&
1870 ice_is_any_vf_in_unicast_promisc(pf)) {
1871 dev_err(dev, "Changing vf-true-promisc-support flag while VF(s) are in promiscuous mode not supported\n");
1872 /* toggle bit back to previous state */
1873 change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
1874 ret = -EAGAIN;
1875 }
1876
1877 if (test_bit(ICE_FLAG_VF_VLAN_PRUNING, change_flags) &&
1878 ice_has_vfs(pf)) {
1879 dev_err(dev, "vf-vlan-pruning: VLAN pruning cannot be changed while VFs are active.\n");
1880 /* toggle bit back to previous state */
1881 change_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags);
1882 ret = -EOPNOTSUPP;
1883 }
1884 ethtool_exit:
1885 clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
1886 return ret;
1887 }
1888
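/**
 * ice_get_sset_count - get the number of strings for a given string set
 * @netdev: network interface device structure
 * @sset: string set identifier (ETH_SS_STATS, ETH_SS_TEST, ETH_SS_PRIV_FLAGS)
 *
 * Return: number of strings in the requested set, or -EOPNOTSUPP for an
 * unsupported set.
 */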
1889 static int ice_get_sset_count(struct net_device *netdev, int sset)
1890 {
1891 switch (sset) {
1892 case ETH_SS_STATS:
1893 /* The number (and order) of strings reported *must* remain
1894 * constant for a given netdevice. This function must not
1895 * report a different number based on run time parameters
1896 * (such as the number of queues in use, or the setting of
1897 * a private ethtool flag). This is due to the nature of the
1898 * ethtool stats API.
1899 *
1900 * Userspace programs such as ethtool must make 3 separate
1901 * ioctl requests, one for size, one for the strings, and
1902 * finally one for the stats. Since these cross into
1903 * userspace, changes to the number or size could result in
1904 * undefined memory access or incorrect string<->value
1905 * correlations for statistics.
1906 *
1907 * Even if it appears to be safe, changes to the size or
1908 * order of strings will suffer from race conditions and are
1909 * not safe.
1910 */
1911 return ICE_ALL_STATS_LEN(netdev);
1912 case ETH_SS_TEST:
1913 return ICE_TEST_LEN;
1914 case ETH_SS_PRIV_FLAGS:
1915 return ICE_PRIV_FLAG_ARRAY_SIZE;
1916 default:
1917 return -EOPNOTSUPP;
1918 }
1919 }
1920
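/**
 * __ice_get_ethtool_stats - fill the ethtool stats buffer for a VSI
 * @netdev: network interface device structure
 * @stats: ethtool stats structure (unused)
 * @data: buffer to fill with statistics values
 * @vsi: VSI whose statistics are reported
 *
 * Fill @data with the VSI stats, per-queue packet/byte counters, and, for the
 * PF VSI, the PF and PFC statistics, in the same order as the stat strings.
 */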
1921 static void
1922 __ice_get_ethtool_stats(struct net_device *netdev,
1923 struct ethtool_stats __always_unused *stats, u64 *data,
1924 struct ice_vsi *vsi)
1925 {
1926 struct ice_pf *pf = vsi->back;
1927 struct ice_tx_ring *tx_ring;
1928 struct ice_rx_ring *rx_ring;
1929 unsigned int j;
1930 int i = 0;
1931 char *p;
1932
1933 if (ice_is_port_repr_netdev(netdev)) {
1934 ice_update_eth_stats(vsi);
1935
1936 for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
1937 p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
1938 data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
1939 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1940 }
1941 return;
1942 }
1943
1944 ice_update_pf_stats(pf);
1945 ice_update_vsi_stats(vsi);
1946
1947 for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
1948 p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
1949 data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
1950 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1951 }
1952
1953 /* populate per queue stats */
1954 rcu_read_lock();
1955
1956 ice_for_each_alloc_txq(vsi, j) {
1957 u64 pkts, bytes;
1958
1959 tx_ring = READ_ONCE(vsi->tx_rings[j]);
1960 if (!tx_ring || !tx_ring->ring_stats) {
1961 data[i++] = 0;
1962 data[i++] = 0;
1963 continue;
1964 }
1965
1966 ice_fetch_tx_ring_stats(tx_ring, &pkts, &bytes);
1967
1968 data[i++] = pkts;
1969 data[i++] = bytes;
1970 }
1971
1972 ice_for_each_alloc_rxq(vsi, j) {
1973 u64 pkts, bytes;
1974
1975 rx_ring = READ_ONCE(vsi->rx_rings[j]);
1976 if (!rx_ring || !rx_ring->ring_stats) {
1977 data[i++] = 0;
1978 data[i++] = 0;
1979 continue;
1980 }
1981
1982 ice_fetch_rx_ring_stats(rx_ring, &pkts, &bytes);
1983
1984 data[i++] = pkts;
1985 data[i++] = bytes;
1986 }
1987
1988 rcu_read_unlock();
1989
1990 if (vsi->type != ICE_VSI_PF)
1991 return;
1992
1993 for (j = 0; j < ICE_PF_STATS_LEN; j++) {
1994 p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset;
1995 data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
1996 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1997 }
1998
1999 for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
2000 data[i++] = pf->stats.priority_xon_tx[j];
2001 data[i++] = pf->stats.priority_xoff_tx[j];
2002 }
2003
2004 for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
2005 data[i++] = pf->stats.priority_xon_rx[j];
2006 data[i++] = pf->stats.priority_xoff_rx[j];
2007 }
2008 }
2009
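/**
 * ice_get_ethtool_stats - report statistics for a netdev
 * @netdev: network interface device structure
 * @stats: ethtool stats structure (unused)
 * @data: buffer to fill with statistics values
 *
 * Thin wrapper that reports statistics for the VSI associated with @netdev
 * via __ice_get_ethtool_stats().
 */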
2010 static void
2011 ice_get_ethtool_stats(struct net_device *netdev,
2012 struct ethtool_stats __always_unused *stats, u64 *data)
2013 {
2014 struct ice_netdev_priv *np = netdev_priv(netdev);
2015
2016 __ice_get_ethtool_stats(netdev, stats, data, np->vsi);
2017 }
2018
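/* PHY type masks used by ice_mask_min_supported_speeds() to trim the
 * reported link modes down to the device's minimum supported speed.
 */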
2019 #define ICE_PHY_TYPE_LOW_MASK_MIN_1G (ICE_PHY_TYPE_LOW_100BASE_TX | \
2020 ICE_PHY_TYPE_LOW_100M_SGMII)
2021
2022 #define ICE_PHY_TYPE_LOW_MASK_MIN_25G (ICE_PHY_TYPE_LOW_MASK_MIN_1G | \
2023 ICE_PHY_TYPE_LOW_1000BASE_T | \
2024 ICE_PHY_TYPE_LOW_1000BASE_SX | \
2025 ICE_PHY_TYPE_LOW_1000BASE_LX | \
2026 ICE_PHY_TYPE_LOW_1000BASE_KX | \
2027 ICE_PHY_TYPE_LOW_1G_SGMII | \
2028 ICE_PHY_TYPE_LOW_2500BASE_T | \
2029 ICE_PHY_TYPE_LOW_2500BASE_X | \
2030 ICE_PHY_TYPE_LOW_2500BASE_KX | \
2031 ICE_PHY_TYPE_LOW_5GBASE_T | \
2032 ICE_PHY_TYPE_LOW_5GBASE_KR | \
2033 ICE_PHY_TYPE_LOW_10GBASE_T | \
2034 ICE_PHY_TYPE_LOW_10G_SFI_DA | \
2035 ICE_PHY_TYPE_LOW_10GBASE_SR | \
2036 ICE_PHY_TYPE_LOW_10GBASE_LR | \
2037 ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \
2038 ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \
2039 ICE_PHY_TYPE_LOW_10G_SFI_C2C)
2040
2041 #define ICE_PHY_TYPE_LOW_MASK_100G (ICE_PHY_TYPE_LOW_100GBASE_CR4 | \
2042 ICE_PHY_TYPE_LOW_100GBASE_SR4 | \
2043 ICE_PHY_TYPE_LOW_100GBASE_LR4 | \
2044 ICE_PHY_TYPE_LOW_100GBASE_KR4 | \
2045 ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \
2046 ICE_PHY_TYPE_LOW_100G_CAUI4 | \
2047 ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \
2048 ICE_PHY_TYPE_LOW_100G_AUI4 | \
2049 ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \
2050 ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \
2051 ICE_PHY_TYPE_LOW_100GBASE_CP2 | \
2052 ICE_PHY_TYPE_LOW_100GBASE_SR2 | \
2053 ICE_PHY_TYPE_LOW_100GBASE_DR)
2054
2055 #define ICE_PHY_TYPE_HIGH_MASK_100G (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \
2056 ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |\
2057 ICE_PHY_TYPE_HIGH_100G_CAUI2 | \
2058 ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \
2059 ICE_PHY_TYPE_HIGH_100G_AUI2)
2060
2061 #define ICE_PHY_TYPE_HIGH_MASK_200G (ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 | \
2062 ICE_PHY_TYPE_HIGH_200G_SR4 | \
2063 ICE_PHY_TYPE_HIGH_200G_FR4 | \
2064 ICE_PHY_TYPE_HIGH_200G_LR4 | \
2065 ICE_PHY_TYPE_HIGH_200G_DR4 | \
2066 ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 | \
2067 ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC | \
2068 ICE_PHY_TYPE_HIGH_200G_AUI4)
2069
2070 /**
2071  * ice_mask_min_supported_speeds - mask out speeds below the minimum supported
2072 * @hw: pointer to the HW structure
2073 * @phy_types_high: PHY type high
2074 * @phy_types_low: PHY type low to apply minimum supported speeds mask
2075 *
2076  * Apply the minimum supported speeds mask to PHY type low. These are the
2077  * speeds reported as ethtool supported link modes.
2078 */
2079 static void
2080 ice_mask_min_supported_speeds(struct ice_hw *hw,
2081 u64 phy_types_high, u64 *phy_types_low)
2082 {
2083 /* if QSFP connection with 100G speed, minimum supported speed is 25G */
2084 if ((*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G) ||
2085 (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G) ||
2086 (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_200G))
2087 *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G;
2088 else if (!ice_is_100m_speed_supported(hw))
2089 *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G;
2090 }
2091
2092 /**
2093 * ice_linkmode_set_bit - set link mode bit
2094 * @phy_to_ethtool: PHY type to ethtool link mode struct to set
2095 * @ks: ethtool link ksettings struct to fill out
2096 * @req_speeds: speed requested by user
2097 * @advert_phy_type: advertised PHY type
2098 * @phy_type: PHY type
2099 */
2100 static void
2101 ice_linkmode_set_bit(const struct ice_phy_type_to_ethtool *phy_to_ethtool,
2102 struct ethtool_link_ksettings *ks, u32 req_speeds,
2103 u64 advert_phy_type, u32 phy_type)
2104 {
2105 linkmode_set_bit(phy_to_ethtool->link_mode, ks->link_modes.supported);
2106
2107 if (req_speeds & phy_to_ethtool->aq_link_speed ||
2108 (!req_speeds && advert_phy_type & BIT(phy_type)))
2109 linkmode_set_bit(phy_to_ethtool->link_mode,
2110 ks->link_modes.advertising);
2111 }
2112
2113 /**
2114 * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes
2115 * @netdev: network interface device structure
2116 * @ks: ethtool link ksettings struct to fill out
2117 */
2118 static void
2119 ice_phy_type_to_ethtool(struct net_device *netdev,
2120 struct ethtool_link_ksettings *ks)
2121 {
2122 struct ice_netdev_priv *np = netdev_priv(netdev);
2123 struct ice_vsi *vsi = np->vsi;
2124 struct ice_pf *pf = vsi->back;
2125 u64 advert_phy_type_lo = 0;
2126 u64 advert_phy_type_hi = 0;
2127 u64 phy_types_high = 0;
2128 u64 phy_types_low = 0;
2129 u32 req_speeds;
2130 u32 i;
2131
2132 req_speeds = vsi->port_info->phy.link_info.req_speeds;
2133
2134 /* Check if lenient mode is supported and enabled, or in strict mode.
2135 *
2136 * In lenient mode the Supported link modes are the PHY types without
2137 * media. The Advertising link mode is either 1. the user requested
2138 * speed, 2. the override PHY mask, or 3. the PHY types with media.
2139 *
2140 	 * In strict mode the Supported link modes are the PHY types with media,
2141 	 * and the Advertising link modes are the media PHY types or the speed
2142 	 * requested by the user.
2143 */
2144 if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) {
2145 phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo);
2146 phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi);
2147
2148 ice_mask_min_supported_speeds(&pf->hw, phy_types_high,
2149 &phy_types_low);
2150 /* determine advertised modes based on link override only
2151 * if it's supported and if the FW doesn't abstract the
2152 * driver from having to account for link overrides
2153 */
2154 if (ice_fw_supports_link_override(&pf->hw) &&
2155 !ice_fw_supports_report_dflt_cfg(&pf->hw)) {
2156 struct ice_link_default_override_tlv *ldo;
2157
2158 ldo = &pf->link_dflt_override;
2159 /* If override enabled and PHY mask set, then
2160 * Advertising link mode is the intersection of the PHY
2161 * types without media and the override PHY mask.
2162 */
2163 if (ldo->options & ICE_LINK_OVERRIDE_EN &&
2164 (ldo->phy_type_low || ldo->phy_type_high)) {
2165 advert_phy_type_lo =
2166 le64_to_cpu(pf->nvm_phy_type_lo) &
2167 ldo->phy_type_low;
2168 advert_phy_type_hi =
2169 le64_to_cpu(pf->nvm_phy_type_hi) &
2170 ldo->phy_type_high;
2171 }
2172 }
2173 } else {
2174 /* strict mode */
2175 phy_types_low = vsi->port_info->phy.phy_type_low;
2176 phy_types_high = vsi->port_info->phy.phy_type_high;
2177 }
2178
2179 /* If Advertising link mode PHY type is not using override PHY type,
2180 * then use PHY type with media.
2181 */
2182 if (!advert_phy_type_lo && !advert_phy_type_hi) {
2183 advert_phy_type_lo = vsi->port_info->phy.phy_type_low;
2184 advert_phy_type_hi = vsi->port_info->phy.phy_type_high;
2185 }
2186
2187 linkmode_zero(ks->link_modes.supported);
2188 linkmode_zero(ks->link_modes.advertising);
2189
2190 for (i = 0; i < ARRAY_SIZE(phy_type_low_lkup); i++) {
2191 if (phy_types_low & BIT_ULL(i))
2192 ice_linkmode_set_bit(&phy_type_low_lkup[i], ks,
2193 req_speeds, advert_phy_type_lo,
2194 i);
2195 }
2196
2197 for (i = 0; i < ARRAY_SIZE(phy_type_high_lkup); i++) {
2198 if (phy_types_high & BIT_ULL(i))
2199 ice_linkmode_set_bit(&phy_type_high_lkup[i], ks,
2200 req_speeds, advert_phy_type_hi,
2201 i);
2202 }
2203 }
2204
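/* Bounds for polling the ICE_CFG_BUSY bit: number of retries and the
 * usleep_range() window (in microseconds) between attempts.
 */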
2205 #define TEST_SET_BITS_TIMEOUT 50
2206 #define TEST_SET_BITS_SLEEP_MAX 2000
2207 #define TEST_SET_BITS_SLEEP_MIN 1000
2208
2209 /**
2210 * ice_get_settings_link_up - Get Link settings for when link is up
2211 * @ks: ethtool ksettings to fill in
2212 * @netdev: network interface device structure
2213 */
2214 static void
2215 ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
2216 struct net_device *netdev)
2217 {
2218 struct ice_netdev_priv *np = netdev_priv(netdev);
2219 struct ice_port_info *pi = np->vsi->port_info;
2220 struct ice_link_status *link_info;
2221 struct ice_vsi *vsi = np->vsi;
2222
2223 link_info = &vsi->port_info->phy.link_info;
2224
2225 /* Get supported and advertised settings from PHY ability with media */
2226 ice_phy_type_to_ethtool(netdev, ks);
2227
2228 switch (link_info->link_speed) {
2229 case ICE_AQ_LINK_SPEED_200GB:
2230 ks->base.speed = SPEED_200000;
2231 break;
2232 case ICE_AQ_LINK_SPEED_100GB:
2233 ks->base.speed = SPEED_100000;
2234 break;
2235 case ICE_AQ_LINK_SPEED_50GB:
2236 ks->base.speed = SPEED_50000;
2237 break;
2238 case ICE_AQ_LINK_SPEED_40GB:
2239 ks->base.speed = SPEED_40000;
2240 break;
2241 case ICE_AQ_LINK_SPEED_25GB:
2242 ks->base.speed = SPEED_25000;
2243 break;
2244 case ICE_AQ_LINK_SPEED_20GB:
2245 ks->base.speed = SPEED_20000;
2246 break;
2247 case ICE_AQ_LINK_SPEED_10GB:
2248 ks->base.speed = SPEED_10000;
2249 break;
2250 case ICE_AQ_LINK_SPEED_5GB:
2251 ks->base.speed = SPEED_5000;
2252 break;
2253 case ICE_AQ_LINK_SPEED_2500MB:
2254 ks->base.speed = SPEED_2500;
2255 break;
2256 case ICE_AQ_LINK_SPEED_1000MB:
2257 ks->base.speed = SPEED_1000;
2258 break;
2259 case ICE_AQ_LINK_SPEED_100MB:
2260 ks->base.speed = SPEED_100;
2261 break;
2262 default:
2263 netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n",
2264 link_info->link_speed);
2265 break;
2266 }
2267 ks->base.duplex = DUPLEX_FULL;
2268
2269 if (link_info->an_info & ICE_AQ_AN_COMPLETED)
2270 ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
2271 Autoneg);
2272
2273 /* Set flow control negotiated Rx/Tx pause */
2274 switch (pi->fc.current_mode) {
2275 case ICE_FC_FULL:
2276 ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
2277 break;
2278 case ICE_FC_TX_PAUSE:
2279 ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
2280 ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
2281 Asym_Pause);
2282 break;
2283 case ICE_FC_RX_PAUSE:
2284 ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
2285 Asym_Pause);
2286 break;
2287 case ICE_FC_PFC:
2288 default:
2289 ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause);
2290 ethtool_link_ksettings_del_link_mode(ks, lp_advertising,
2291 Asym_Pause);
2292 break;
2293 }
2294 }
2295
2296 /**
2297 * ice_get_settings_link_down - Get the Link settings when link is down
2298 * @ks: ethtool ksettings to fill in
2299 * @netdev: network interface device structure
2300 *
2301 * Reports link settings that can be determined when link is down
2302 */
2303 static void
2304 ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
2305 struct net_device *netdev)
2306 {
2307 /* link is down and the driver needs to fall back on
2308 * supported PHY types to figure out what info to display
2309 */
2310 ice_phy_type_to_ethtool(netdev, ks);
2311
2312 /* With no link, speed and duplex are unknown */
2313 ks->base.speed = SPEED_UNKNOWN;
2314 ks->base.duplex = DUPLEX_UNKNOWN;
2315 }
2316
2317 /**
2318 * ice_get_link_ksettings - Get Link Speed and Duplex settings
2319 * @netdev: network interface device structure
2320 * @ks: ethtool ksettings
2321 *
2322 * Reports speed/duplex settings based on media_type
2323 */
2324 static int
2325 ice_get_link_ksettings(struct net_device *netdev,
2326 struct ethtool_link_ksettings *ks)
2327 {
2328 struct ice_netdev_priv *np = netdev_priv(netdev);
2329 struct ice_aqc_get_phy_caps_data *caps;
2330 struct ice_link_status *hw_link_info;
2331 struct ice_vsi *vsi = np->vsi;
2332 int err;
2333
2334 ethtool_link_ksettings_zero_link_mode(ks, supported);
2335 ethtool_link_ksettings_zero_link_mode(ks, advertising);
2336 ethtool_link_ksettings_zero_link_mode(ks, lp_advertising);
2337 hw_link_info = &vsi->port_info->phy.link_info;
2338
2339 /* set speed and duplex */
2340 if (hw_link_info->link_info & ICE_AQ_LINK_UP)
2341 ice_get_settings_link_up(ks, netdev);
2342 else
2343 ice_get_settings_link_down(ks, netdev);
2344
2345 /* set autoneg settings */
2346 ks->base.autoneg = (hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
2347 AUTONEG_ENABLE : AUTONEG_DISABLE;
2348
2349 /* set media type settings */
2350 switch (vsi->port_info->phy.media_type) {
2351 case ICE_MEDIA_FIBER:
2352 ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
2353 ks->base.port = PORT_FIBRE;
2354 break;
2355 case ICE_MEDIA_BASET:
2356 ethtool_link_ksettings_add_link_mode(ks, supported, TP);
2357 ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
2358 ks->base.port = PORT_TP;
2359 break;
2360 case ICE_MEDIA_BACKPLANE:
2361 ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
2362 ethtool_link_ksettings_add_link_mode(ks, advertising,
2363 Backplane);
2364 ks->base.port = PORT_NONE;
2365 break;
2366 case ICE_MEDIA_DA:
2367 ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
2368 ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
2369 ks->base.port = PORT_DA;
2370 break;
2371 default:
2372 ks->base.port = PORT_OTHER;
2373 break;
2374 }
2375
2376 /* flow control is symmetric and always supported */
2377 ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
2378
2379 caps = kzalloc_obj(*caps);
2380 if (!caps)
2381 return -ENOMEM;
2382
2383 err = ice_aq_get_phy_caps(vsi->port_info, false,
2384 ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
2385 if (err)
2386 goto done;
2387
2388 /* Set the advertised flow control based on the PHY capability */
2389 if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) &&
2390 (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) {
2391 ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
2392 ethtool_link_ksettings_add_link_mode(ks, advertising,
2393 Asym_Pause);
2394 } else if (caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) {
2395 ethtool_link_ksettings_add_link_mode(ks, advertising,
2396 Asym_Pause);
2397 } else if (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) {
2398 ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
2399 ethtool_link_ksettings_add_link_mode(ks, advertising,
2400 Asym_Pause);
2401 } else {
2402 ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
2403 ethtool_link_ksettings_del_link_mode(ks, advertising,
2404 Asym_Pause);
2405 }
2406
2407 /* Set advertised FEC modes based on PHY capability */
2408 ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
2409
2410 if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
2411 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
2412 ethtool_link_ksettings_add_link_mode(ks, advertising,
2413 FEC_BASER);
2414 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
2415 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
2416 ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
2417
2418 err = ice_aq_get_phy_caps(vsi->port_info, false,
2419 ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL);
2420 if (err)
2421 goto done;
2422
2423 /* Set supported FEC modes based on PHY capability */
2424 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
2425
2426 if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
2427 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
2428 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
2429 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
2430 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
2431
2432 /* Set supported and advertised autoneg */
2433 if (ice_is_phy_caps_an_enabled(caps)) {
2434 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
2435 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
2436 }
2437
2438 done:
2439 kfree(caps);
2440 return err;
2441 }
2442
2443 /**
2444 * ice_speed_to_aq_link - Get AQ link speed by Ethtool forced speed
2445 * @speed: ethtool forced speed
2446 */
2447 static u16 ice_speed_to_aq_link(int speed)
2448 {
2449 int aq_speed;
2450
2451 switch (speed) {
2452 case SPEED_10:
2453 aq_speed = ICE_AQ_LINK_SPEED_10MB;
2454 break;
2455 case SPEED_100:
2456 aq_speed = ICE_AQ_LINK_SPEED_100MB;
2457 break;
2458 case SPEED_1000:
2459 aq_speed = ICE_AQ_LINK_SPEED_1000MB;
2460 break;
2461 case SPEED_2500:
2462 aq_speed = ICE_AQ_LINK_SPEED_2500MB;
2463 break;
2464 case SPEED_5000:
2465 aq_speed = ICE_AQ_LINK_SPEED_5GB;
2466 break;
2467 case SPEED_10000:
2468 aq_speed = ICE_AQ_LINK_SPEED_10GB;
2469 break;
2470 case SPEED_20000:
2471 aq_speed = ICE_AQ_LINK_SPEED_20GB;
2472 break;
2473 case SPEED_25000:
2474 aq_speed = ICE_AQ_LINK_SPEED_25GB;
2475 break;
2476 case SPEED_40000:
2477 aq_speed = ICE_AQ_LINK_SPEED_40GB;
2478 break;
2479 case SPEED_50000:
2480 aq_speed = ICE_AQ_LINK_SPEED_50GB;
2481 break;
2482 case SPEED_100000:
2483 aq_speed = ICE_AQ_LINK_SPEED_100GB;
2484 break;
2485 default:
2486 aq_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
2487 break;
2488 }
2489 return aq_speed;
2490 }
2491
2492 /**
2493 * ice_ksettings_find_adv_link_speed - Find advertising link speed
2494 * @ks: ethtool ksettings
2495 */
2496 static u16
2497 ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
2498 {
2499 const struct ethtool_forced_speed_map *map;
2500 u16 adv_link_speed = 0;
2501
2502 for (u32 i = 0; i < ARRAY_SIZE(ice_adv_lnk_speed_maps); i++) {
2503 map = ice_adv_lnk_speed_maps + i;
2504 if (linkmode_intersects(ks->link_modes.advertising, map->caps))
2505 adv_link_speed |= ice_speed_to_aq_link(map->speed);
2506 }
2507
2508 return adv_link_speed;
2509 }
2510
2511 /**
2512  * ice_setup_autoneg - configure PHY autonegotiation
2513 * @p: port info
2514 * @ks: ethtool_link_ksettings
2515 * @config: configuration that will be sent down to FW
2516 * @autoneg_enabled: autonegotiation is enabled or not
2517  * @autoneg_changed: whether the autonegotiation setting will change
2518 * @netdev: network interface device structure
2519 *
2520 * Setup PHY autonegotiation feature
2521 */
2522 static int
2523 ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
2524 struct ice_aqc_set_phy_cfg_data *config,
2525 u8 autoneg_enabled, u8 *autoneg_changed,
2526 struct net_device *netdev)
2527 {
2528 int err = 0;
2529
2530 *autoneg_changed = 0;
2531
2532 /* Check autoneg */
2533 if (autoneg_enabled == AUTONEG_ENABLE) {
2534 /* If autoneg was not already enabled */
2535 if (!(p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)) {
2536 /* If autoneg is not supported, return error */
2537 if (!ethtool_link_ksettings_test_link_mode(ks,
2538 supported,
2539 Autoneg)) {
2540 netdev_info(netdev, "Autoneg not supported on this phy.\n");
2541 err = -EINVAL;
2542 } else {
2543 /* Autoneg is allowed to change */
2544 config->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2545 *autoneg_changed = 1;
2546 }
2547 }
2548 } else {
2549 /* If autoneg is currently enabled */
2550 if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) {
2551 /* If autoneg is supported 10GBASE_T is the only PHY
2552 * that can disable it, so otherwise return error
2553 */
2554 if (ethtool_link_ksettings_test_link_mode(ks,
2555 supported,
2556 Autoneg)) {
2557 netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
2558 err = -EINVAL;
2559 } else {
2560 /* Autoneg is allowed to change */
2561 config->caps &= ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2562 *autoneg_changed = 1;
2563 }
2564 }
2565 }
2566
2567 return err;
2568 }
2569
2570 /**
2571 * ice_set_phy_type_from_speed - set phy_types based on speeds
2572 * and advertised modes
2573 * @ks: ethtool link ksettings struct
2574 * @phy_type_low: pointer to the lower part of phy_type
2575 * @phy_type_high: pointer to the higher part of phy_type
2576 * @adv_link_speed: targeted link speeds bitmap
2577 */
2578 static void
2579 ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks,
2580 u64 *phy_type_low, u64 *phy_type_high,
2581 u16 adv_link_speed)
2582 {
2583 /* Handle 1000M speed in a special way because ice_update_phy_type
2584 * enables all link modes, but having mixed copper and optical
2585 * standards is not supported.
2586 */
2587 adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB;
2588
2589 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
2590 1000baseT_Full))
2591 *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T |
2592 ICE_PHY_TYPE_LOW_1G_SGMII;
2593
2594 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
2595 1000baseKX_Full))
2596 *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX;
2597
2598 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
2599 1000baseX_Full))
2600 *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX |
2601 ICE_PHY_TYPE_LOW_1000BASE_LX;
2602
2603 ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed);
2604 }
2605
2606 /**
2607 * ice_set_link_ksettings - Set Speed and Duplex
2608 * @netdev: network interface device structure
2609 * @ks: ethtool ksettings
2610 *
2611 * Set speed/duplex per media_types advertised/forced
2612 */
2613 static int
2614 ice_set_link_ksettings(struct net_device *netdev,
2615 const struct ethtool_link_ksettings *ks)
2616 {
2617 struct ice_netdev_priv *np = netdev_priv(netdev);
2618 u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT;
2619 struct ethtool_link_ksettings copy_ks = *ks;
2620 struct ethtool_link_ksettings safe_ks = {};
2621 struct ice_aqc_get_phy_caps_data *phy_caps;
2622 struct ice_aqc_set_phy_cfg_data config;
2623 u16 adv_link_speed, curr_link_speed;
2624 struct ice_pf *pf = np->vsi->back;
2625 struct ice_port_info *pi;
2626 u8 autoneg_changed = 0;
2627 u64 phy_type_high = 0;
2628 u64 phy_type_low = 0;
2629 bool linkup;
2630 int err;
2631
2632 pi = np->vsi->port_info;
2633
2634 if (!pi)
2635 return -EIO;
2636
2637 if (pi->phy.media_type != ICE_MEDIA_BASET &&
2638 pi->phy.media_type != ICE_MEDIA_FIBER &&
2639 pi->phy.media_type != ICE_MEDIA_BACKPLANE &&
2640 pi->phy.media_type != ICE_MEDIA_DA &&
2641 pi->phy.link_info.link_info & ICE_AQ_LINK_UP)
2642 return -EOPNOTSUPP;
2643
2644 phy_caps = kzalloc_obj(*phy_caps);
2645 if (!phy_caps)
2646 return -ENOMEM;
2647
2648 /* Get the PHY capabilities based on media */
2649 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2650 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2651 phy_caps, NULL);
2652 else
2653 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2654 phy_caps, NULL);
2655 if (err)
2656 goto done;
2657
2658 /* save autoneg out of ksettings */
2659 autoneg = copy_ks.base.autoneg;
2660
2661 /* Get link modes supported by hardware.*/
2662 ice_phy_type_to_ethtool(netdev, &safe_ks);
2663
2664 /* and check against modes requested by user.
2665 * Return an error if unsupported mode was set.
2666 */
2667 if (!bitmap_subset(copy_ks.link_modes.advertising,
2668 safe_ks.link_modes.supported,
2669 __ETHTOOL_LINK_MODE_MASK_NBITS)) {
2670 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags))
2671 netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
2672 err = -EOPNOTSUPP;
2673 goto done;
2674 }
2675
2676 /* get our own copy of the bits to check against */
2677 memset(&safe_ks, 0, sizeof(safe_ks));
2678 safe_ks.base.cmd = copy_ks.base.cmd;
2679 safe_ks.base.link_mode_masks_nwords =
2680 copy_ks.base.link_mode_masks_nwords;
2681 ice_get_link_ksettings(netdev, &safe_ks);
2682
2683 /* set autoneg back to what it currently is */
2684 copy_ks.base.autoneg = safe_ks.base.autoneg;
2685 /* we don't compare the speed */
2686 copy_ks.base.speed = safe_ks.base.speed;
2687
2688 /* If copy_ks.base and safe_ks.base are not the same now, then they are
2689 * trying to set something that we do not support.
2690 */
2691 	if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(copy_ks.base))) {
2692 err = -EOPNOTSUPP;
2693 goto done;
2694 }
2695
2696 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
2697 timeout--;
2698 if (!timeout) {
2699 err = -EBUSY;
2700 goto done;
2701 }
2702 usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX);
2703 }
2704
2705 /* Copy the current user PHY configuration. The current user PHY
2706 * configuration is initialized during probe from PHY capabilities
2707 * software mode, and updated on set PHY configuration.
2708 */
2709 config = pi->phy.curr_user_phy_cfg;
2710
2711 config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2712
2713 /* Check autoneg */
2714 err = ice_setup_autoneg(pi, &safe_ks, &config, autoneg, &autoneg_changed,
2715 netdev);
2716
2717 if (err)
2718 goto done;
2719
2720 /* Call to get the current link speed */
2721 pi->phy.get_link_info = true;
2722 err = ice_get_link_status(pi, &linkup);
2723 if (err)
2724 goto done;
2725
2726 curr_link_speed = pi->phy.curr_user_speed_req;
2727 adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
2728
2729 /* If speed didn't get set, set it to what it currently is.
2730 * This is needed because if advertise is 0 (as it is when autoneg
2731 * is disabled) then speed won't get set.
2732 */
2733 if (!adv_link_speed)
2734 adv_link_speed = curr_link_speed;
2735
2736 /* Convert the advertise link speeds to their corresponded PHY_TYPE */
2737 ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high,
2738 adv_link_speed);
2739
2740 if (!autoneg_changed && adv_link_speed == curr_link_speed) {
2741 netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
2742 goto done;
2743 }
2744
2745 /* save the requested speeds */
2746 pi->phy.link_info.req_speeds = adv_link_speed;
2747
2748 /* set link and auto negotiation so changes take effect */
2749 config.caps |= ICE_AQ_PHY_ENA_LINK;
2750
2751 /* check if there is a PHY type for the requested advertised speed */
2752 if (!(phy_type_low || phy_type_high)) {
2753 netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
2754 err = -EOPNOTSUPP;
2755 goto done;
2756 }
2757
2758 /* intersect requested advertised speed PHY types with media PHY types
2759 * for set PHY configuration
2760 */
2761 config.phy_type_high = cpu_to_le64(phy_type_high) &
2762 phy_caps->phy_type_high;
2763 config.phy_type_low = cpu_to_le64(phy_type_low) &
2764 phy_caps->phy_type_low;
2765
2766 if (!(config.phy_type_high || config.phy_type_low)) {
2767 /* If there is no intersection and lenient mode is enabled, then
2768 * intersect the requested advertised speed with NVM media type
2769 * PHY types.
2770 */
2771 if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) {
2772 config.phy_type_high = cpu_to_le64(phy_type_high) &
2773 pf->nvm_phy_type_hi;
2774 config.phy_type_low = cpu_to_le64(phy_type_low) &
2775 pf->nvm_phy_type_lo;
2776 } else {
2777 netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
2778 err = -EOPNOTSUPP;
2779 goto done;
2780 }
2781 }
2782
2783 /* If link is up put link down */
2784 if (pi->phy.link_info.link_info & ICE_AQ_LINK_UP) {
2785 /* Tell the OS link is going down, the link will go
2786 * back up when fw says it is ready asynchronously
2787 */
2788 ice_print_link_msg(np->vsi, false);
2789 netif_carrier_off(netdev);
2790 netif_tx_stop_all_queues(netdev);
2791 }
2792
2793 /* make the aq call */
2794 err = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL);
2795 if (err) {
2796 		netdev_info(netdev, "Set phy config failed\n");
2797 goto done;
2798 }
2799
2800 /* Save speed request */
2801 pi->phy.curr_user_speed_req = adv_link_speed;
2802 done:
2803 kfree(phy_caps);
2804 clear_bit(ICE_CFG_BUSY, pf->state);
2805
2806 return err;
2807 }
2808
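/**
 * ice_parse_hdrs - map an ethtool flow type to ICE flow segment headers
 * @nfc: RSS hash field configuration from ethtool
 *
 * Return: bitmap of ICE_FLOW_SEG_HDR_* values for the requested flow type,
 * or ICE_FLOW_SEG_HDR_NONE if the flow type is not supported.
 */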
2809 static u32 ice_parse_hdrs(const struct ethtool_rxfh_fields *nfc)
2810 {
2811 u32 hdrs = ICE_FLOW_SEG_HDR_NONE;
2812
2813 switch (nfc->flow_type) {
2814 case TCP_V4_FLOW:
2815 hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4;
2816 break;
2817 case UDP_V4_FLOW:
2818 hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4;
2819 break;
2820 case SCTP_V4_FLOW:
2821 hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4;
2822 break;
2823 case GTPU_V4_FLOW:
2824 hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4;
2825 break;
2826 case GTPC_V4_FLOW:
2827 hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4;
2828 break;
2829 case GTPC_TEID_V4_FLOW:
2830 hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4;
2831 break;
2832 case GTPU_EH_V4_FLOW:
2833 hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4;
2834 break;
2835 case GTPU_UL_V4_FLOW:
2836 hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4;
2837 break;
2838 case GTPU_DL_V4_FLOW:
2839 hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4;
2840 break;
2841 case TCP_V6_FLOW:
2842 hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6;
2843 break;
2844 case UDP_V6_FLOW:
2845 hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6;
2846 break;
2847 case SCTP_V6_FLOW:
2848 hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6;
2849 break;
2850 case GTPU_V6_FLOW:
2851 hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6;
2852 break;
2853 case GTPC_V6_FLOW:
2854 hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6;
2855 break;
2856 case GTPC_TEID_V6_FLOW:
2857 hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6;
2858 break;
2859 case GTPU_EH_V6_FLOW:
2860 hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6;
2861 break;
2862 case GTPU_UL_V6_FLOW:
2863 hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6;
2864 break;
2865 case GTPU_DL_V6_FLOW:
2866 hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6;
2867 break;
2868 default:
2869 break;
2870 }
2871 return hdrs;
2872 }
2873
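/**
 * ice_parse_hash_flds - convert ethtool RXH_* flags to ICE hash fields
 * @nfc: RSS hash field configuration from ethtool
 * @symm: true if the VSI uses symmetric Toeplitz hashing
 *
 * Return: bitmap of ICE_FLOW_HASH_FLD_* values, or ICE_HASH_INVALID if no
 * supported field was requested.
 */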
2874 static u64 ice_parse_hash_flds(const struct ethtool_rxfh_fields *nfc, bool symm)
2875 {
2876 u64 hfld = ICE_HASH_INVALID;
2877
2878 if (nfc->data & RXH_IP_SRC || nfc->data & RXH_IP_DST) {
2879 switch (nfc->flow_type) {
2880 case TCP_V4_FLOW:
2881 case UDP_V4_FLOW:
2882 case SCTP_V4_FLOW:
2883 case GTPU_V4_FLOW:
2884 case GTPC_V4_FLOW:
2885 case GTPC_TEID_V4_FLOW:
2886 case GTPU_EH_V4_FLOW:
2887 case GTPU_UL_V4_FLOW:
2888 case GTPU_DL_V4_FLOW:
2889 if (nfc->data & RXH_IP_SRC)
2890 hfld |= ICE_FLOW_HASH_FLD_IPV4_SA;
2891 if (nfc->data & RXH_IP_DST)
2892 hfld |= ICE_FLOW_HASH_FLD_IPV4_DA;
2893 break;
2894 case TCP_V6_FLOW:
2895 case UDP_V6_FLOW:
2896 case SCTP_V6_FLOW:
2897 case GTPU_V6_FLOW:
2898 case GTPC_V6_FLOW:
2899 case GTPC_TEID_V6_FLOW:
2900 case GTPU_EH_V6_FLOW:
2901 case GTPU_UL_V6_FLOW:
2902 case GTPU_DL_V6_FLOW:
2903 if (nfc->data & RXH_IP_SRC)
2904 hfld |= ICE_FLOW_HASH_FLD_IPV6_SA;
2905 if (nfc->data & RXH_IP_DST)
2906 hfld |= ICE_FLOW_HASH_FLD_IPV6_DA;
2907 break;
2908 default:
2909 break;
2910 }
2911 }
2912
2913 if (nfc->data & RXH_L4_B_0_1 || nfc->data & RXH_L4_B_2_3) {
2914 switch (nfc->flow_type) {
2915 case TCP_V4_FLOW:
2916 case TCP_V6_FLOW:
2917 if (nfc->data & RXH_L4_B_0_1)
2918 hfld |= ICE_FLOW_HASH_FLD_TCP_SRC_PORT;
2919 if (nfc->data & RXH_L4_B_2_3)
2920 hfld |= ICE_FLOW_HASH_FLD_TCP_DST_PORT;
2921 break;
2922 case UDP_V4_FLOW:
2923 case UDP_V6_FLOW:
2924 if (nfc->data & RXH_L4_B_0_1)
2925 hfld |= ICE_FLOW_HASH_FLD_UDP_SRC_PORT;
2926 if (nfc->data & RXH_L4_B_2_3)
2927 hfld |= ICE_FLOW_HASH_FLD_UDP_DST_PORT;
2928 break;
2929 case SCTP_V4_FLOW:
2930 case SCTP_V6_FLOW:
2931 if (nfc->data & RXH_L4_B_0_1)
2932 hfld |= ICE_FLOW_HASH_FLD_SCTP_SRC_PORT;
2933 if (nfc->data & RXH_L4_B_2_3)
2934 hfld |= ICE_FLOW_HASH_FLD_SCTP_DST_PORT;
2935 break;
2936 default:
2937 break;
2938 }
2939 }
2940
2941 if (nfc->data & RXH_GTP_TEID) {
2942 switch (nfc->flow_type) {
2943 case GTPC_TEID_V4_FLOW:
2944 case GTPC_TEID_V6_FLOW:
2945 hfld |= ICE_FLOW_HASH_FLD_GTPC_TEID;
2946 break;
2947 case GTPU_V4_FLOW:
2948 case GTPU_V6_FLOW:
2949 hfld |= ICE_FLOW_HASH_FLD_GTPU_IP_TEID;
2950 break;
2951 case GTPU_EH_V4_FLOW:
2952 case GTPU_EH_V6_FLOW:
2953 hfld |= ICE_FLOW_HASH_FLD_GTPU_EH_TEID;
2954 break;
2955 case GTPU_UL_V4_FLOW:
2956 case GTPU_UL_V6_FLOW:
2957 hfld |= ICE_FLOW_HASH_FLD_GTPU_UP_TEID;
2958 break;
2959 case GTPU_DL_V4_FLOW:
2960 case GTPU_DL_V6_FLOW:
2961 hfld |= ICE_FLOW_HASH_FLD_GTPU_DWN_TEID;
2962 break;
2963 default:
2964 break;
2965 }
2966 }
2967
2968 return hfld;
2969 }
2970
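/**
 * ice_set_rxfh_fields - set the RSS hash fields for a flow type
 * @netdev: network interface device structure
 * @nfc: requested RSS hash field configuration
 * @extack: extended ACK for error reporting
 *
 * Return: 0 on success or a negative error code.
 */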
2971 static int
2972 ice_set_rxfh_fields(struct net_device *netdev,
2973 const struct ethtool_rxfh_fields *nfc,
2974 struct netlink_ext_ack *extack)
2975 {
2976 struct ice_netdev_priv *np = netdev_priv(netdev);
2977 struct ice_vsi *vsi = np->vsi;
2978 struct ice_pf *pf = vsi->back;
2979 struct ice_rss_hash_cfg cfg;
2980 struct device *dev;
2981 u64 hashed_flds;
2982 int status;
2983 bool symm;
2984 u32 hdrs;
2985
2986 dev = ice_pf_to_dev(pf);
2987 if (ice_is_safe_mode(pf)) {
2988 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
2989 vsi->vsi_num);
2990 return -EINVAL;
2991 }
2992
2993 symm = !!(vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
2994 hashed_flds = ice_parse_hash_flds(nfc, symm);
2995 if (hashed_flds == ICE_HASH_INVALID) {
2996 dev_dbg(dev, "Invalid hash fields, vsi num = %d\n",
2997 vsi->vsi_num);
2998 return -EINVAL;
2999 }
3000
3001 hdrs = ice_parse_hdrs(nfc);
3002 if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
3003 dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
3004 vsi->vsi_num);
3005 return -EINVAL;
3006 }
3007
3008 cfg.hash_flds = hashed_flds;
3009 cfg.addl_hdrs = hdrs;
3010 cfg.hdr_type = ICE_RSS_ANY_HEADERS;
3011 cfg.symm = symm;
3012
3013 status = ice_add_rss_cfg(&pf->hw, vsi, &cfg);
3014 if (status) {
3015 dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
3016 vsi->vsi_num, status);
3017 return status;
3018 }
3019
3020 return 0;
3021 }
3022
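/**
 * ice_get_rxfh_fields - report the RSS hash fields for a flow type
 * @netdev: network interface device structure
 * @nfc: RSS hash field configuration to fill in
 *
 * Return: 0 always; nfc->data is left at 0 when the configuration cannot be
 * read.
 */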
3023 static int
3024 ice_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *nfc)
3025 {
3026 struct ice_netdev_priv *np = netdev_priv(netdev);
3027 struct ice_vsi *vsi = np->vsi;
3028 struct ice_pf *pf = vsi->back;
3029 struct device *dev;
3030 u64 hash_flds;
3031 bool symm;
3032 u32 hdrs;
3033
3034 dev = ice_pf_to_dev(pf);
3035
3036 nfc->data = 0;
3037 if (ice_is_safe_mode(pf)) {
3038 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
3039 vsi->vsi_num);
3040 return 0;
3041 }
3042
3043 hdrs = ice_parse_hdrs(nfc);
3044 if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
3045 dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
3046 vsi->vsi_num);
3047 return 0;
3048 }
3049
3050 hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm);
3051 if (hash_flds == ICE_HASH_INVALID) {
3052 dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
3053 vsi->vsi_num);
3054 return 0;
3055 }
3056
3057 if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_SA ||
3058 hash_flds & ICE_FLOW_HASH_FLD_IPV6_SA)
3059 nfc->data |= (u64)RXH_IP_SRC;
3060
3061 if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_DA ||
3062 hash_flds & ICE_FLOW_HASH_FLD_IPV6_DA)
3063 nfc->data |= (u64)RXH_IP_DST;
3064
3065 if (hash_flds & ICE_FLOW_HASH_FLD_TCP_SRC_PORT ||
3066 hash_flds & ICE_FLOW_HASH_FLD_UDP_SRC_PORT ||
3067 hash_flds & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT)
3068 nfc->data |= (u64)RXH_L4_B_0_1;
3069
3070 if (hash_flds & ICE_FLOW_HASH_FLD_TCP_DST_PORT ||
3071 hash_flds & ICE_FLOW_HASH_FLD_UDP_DST_PORT ||
3072 hash_flds & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)
3073 nfc->data |= (u64)RXH_L4_B_2_3;
3074
3075 if (hash_flds & ICE_FLOW_HASH_FLD_GTPC_TEID ||
3076 hash_flds & ICE_FLOW_HASH_FLD_GTPU_IP_TEID ||
3077 hash_flds & ICE_FLOW_HASH_FLD_GTPU_EH_TEID ||
3078 hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID ||
3079 hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID)
3080 nfc->data |= (u64)RXH_GTP_TEID;
3081
3082 return 0;
3083 }
3084
3085 /**
3086 * ice_set_rxnfc - command to set Rx flow rules.
3087 * @netdev: network interface device structure
3088 * @cmd: ethtool rxnfc command
3089 *
3090 * Returns 0 for success and negative values for errors
3091 */
3092 static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
3093 {
3094 struct ice_netdev_priv *np = netdev_priv(netdev);
3095 struct ice_vsi *vsi = np->vsi;
3096
3097 switch (cmd->cmd) {
3098 case ETHTOOL_SRXCLSRLINS:
3099 return ice_add_fdir_ethtool(vsi, cmd);
3100 case ETHTOOL_SRXCLSRLDEL:
3101 return ice_del_fdir_ethtool(vsi, cmd);
3102 default:
3103 break;
3104 }
3105 return -EOPNOTSUPP;
3106 }
3107
3108 /**
3109 * ice_get_rx_ring_count - get RX ring count
3110 * @netdev: network interface device structure
3111 *
3112 * Return: number of RX rings.
3113 */
3114 static u32 ice_get_rx_ring_count(struct net_device *netdev)
3115 {
3116 struct ice_netdev_priv *np = netdev_priv(netdev);
3117 struct ice_vsi *vsi = np->vsi;
3118
3119 return vsi->rss_size;
3120 }
3121
3122 /**
3123 * ice_get_rxnfc - command to get Rx flow classification rules
3124 * @netdev: network interface device structure
3125 * @cmd: ethtool rxnfc command
3126  * @rule_locs: buffer to return Rx flow classification rules
3127  *
3128  * Returns 0 for success and negative values for errors.
3129 */
3130 static int
3131 ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
3132 u32 __always_unused *rule_locs)
3133 {
3134 struct ice_netdev_priv *np = netdev_priv(netdev);
3135 struct ice_vsi *vsi = np->vsi;
3136 int ret = -EOPNOTSUPP;
3137 struct ice_hw *hw;
3138
3139 hw = &vsi->back->hw;
3140
3141 switch (cmd->cmd) {
3142 case ETHTOOL_GRXCLSRLCNT:
3143 cmd->rule_cnt = hw->fdir_active_fltr;
3144 /* report total rule count */
3145 cmd->data = ice_get_fdir_cnt_all(hw);
3146 ret = 0;
3147 break;
3148 case ETHTOOL_GRXCLSRULE:
3149 ret = ice_get_ethtool_fdir_entry(hw, cmd);
3150 break;
3151 case ETHTOOL_GRXCLSRLALL:
3152 ret = ice_get_fdir_fltr_ids(hw, cmd, (u32 *)rule_locs);
3153 break;
3154 default:
3155 break;
3156 }
3157
3158 return ret;
3159 }
3160
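/**
 * ice_get_ringparam - report Tx/Rx descriptor ring sizes
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure to fill in
 * @kernel_ring: kernel ethtool ringparam structure to fill in
 * @extack: extended ACK for error reporting
 */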
3161 static void
3162 ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
3163 struct kernel_ethtool_ringparam *kernel_ring,
3164 struct netlink_ext_ack *extack)
3165 {
3166 struct ice_netdev_priv *np = netdev_priv(netdev);
3167 struct ice_vsi *vsi = np->vsi;
3168 struct ice_hw *hw;
3169
3170 hw = &vsi->back->hw;
3171 ring->rx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
3172 ring->tx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
3173 if (vsi->tx_rings && vsi->rx_rings) {
3174 ring->rx_pending = vsi->rx_rings[0]->count;
3175 ring->tx_pending = vsi->tx_rings[0]->count;
3176 } else {
3177 ring->rx_pending = 0;
3178 ring->tx_pending = 0;
3179 }
3180
3181 /* Rx mini and jumbo rings are not supported */
3182 ring->rx_mini_max_pending = 0;
3183 ring->rx_jumbo_max_pending = 0;
3184 ring->rx_mini_pending = 0;
3185 ring->rx_jumbo_pending = 0;
3186
3187 kernel_ring->tcp_data_split = vsi->hsplit ?
3188 ETHTOOL_TCP_DATA_SPLIT_ENABLED :
3189 ETHTOOL_TCP_DATA_SPLIT_DISABLED;
3190 }
3191
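/**
 * ice_set_ringparam - change Tx/Rx descriptor ring sizes
 * @netdev: network interface device structure
 * @ring: requested ring parameters
 * @kernel_ring: requested kernel ring parameters (TCP data split)
 * @extack: extended ACK for error reporting
 *
 * Return: 0 on success or a negative error code.
 */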
3192 static int
3193 ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
3194 struct kernel_ethtool_ringparam *kernel_ring,
3195 struct netlink_ext_ack *extack)
3196 {
3197 struct ice_netdev_priv *np = netdev_priv(netdev);
3198 struct ice_tx_ring *xdp_rings = NULL;
3199 struct ice_tx_ring *tx_rings = NULL;
3200 struct ice_rx_ring *rx_rings = NULL;
3201 struct ice_vsi *vsi = np->vsi;
3202 struct ice_pf *pf = vsi->back;
3203 int i, timeout = 50, err = 0;
3204 struct ice_hw *hw = &pf->hw;
3205 u16 new_rx_cnt, new_tx_cnt;
3206 bool hsplit;
3207
3208 if (ring->tx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
3209 ring->tx_pending < ICE_MIN_NUM_DESC ||
3210 ring->rx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
3211 ring->rx_pending < ICE_MIN_NUM_DESC) {
3212 netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
3213 ring->tx_pending, ring->rx_pending,
3214 ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC_BY_MAC(hw),
3215 ICE_REQ_DESC_MULTIPLE);
3216 return -EINVAL;
3217 }
3218
3219 	/* Return if there are no rings (device is reloading) */
3220 if (!vsi->tx_rings || !vsi->rx_rings)
3221 return -EBUSY;
3222
3223 new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
3224 if (new_tx_cnt != ring->tx_pending)
3225 netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
3226 new_tx_cnt);
3227 new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
3228 if (new_rx_cnt != ring->rx_pending)
3229 netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
3230 new_rx_cnt);
3231
3232 hsplit = kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
3233
3234 /* if nothing to do return success */
3235 if (new_tx_cnt == vsi->tx_rings[0]->count &&
3236 new_rx_cnt == vsi->rx_rings[0]->count &&
3237 hsplit == vsi->hsplit) {
3238 netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
3239 return 0;
3240 }
3241
3242 	/* If there is an AF_XDP UMEM attached to any of the Rx rings,
3243 	 * disallow changing the number of descriptors -- regardless of
3244 	 * whether the netdev is running or not.
3245 	 */
3246 if (ice_xsk_any_rx_ring_ena(vsi))
3247 return -EBUSY;
3248
3249 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3250 timeout--;
3251 if (!timeout)
3252 return -EBUSY;
3253 usleep_range(1000, 2000);
3254 }
3255
3256 /* set for the next time the netdev is started */
3257 if (!netif_running(vsi->netdev)) {
3258 ice_for_each_alloc_txq(vsi, i)
3259 vsi->tx_rings[i]->count = new_tx_cnt;
3260 ice_for_each_alloc_rxq(vsi, i)
3261 vsi->rx_rings[i]->count = new_rx_cnt;
3262 if (ice_is_xdp_ena_vsi(vsi))
3263 ice_for_each_xdp_txq(vsi, i)
3264 vsi->xdp_rings[i]->count = new_tx_cnt;
3265 vsi->num_tx_desc = (u16)new_tx_cnt;
3266 vsi->num_rx_desc = (u16)new_rx_cnt;
3267 vsi->hsplit = hsplit;
3268
3269 netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
3270 goto done;
3271 }
3272
3273 if (new_tx_cnt == vsi->tx_rings[0]->count)
3274 goto process_rx;
3275
3276 /* alloc updated Tx resources */
3277 netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
3278 vsi->tx_rings[0]->count, new_tx_cnt);
3279
3280 tx_rings = kzalloc_objs(*tx_rings, vsi->num_txq);
3281 if (!tx_rings) {
3282 err = -ENOMEM;
3283 goto done;
3284 }
3285
3286 ice_for_each_txq(vsi, i) {
3287 /* clone ring and setup updated count */
3288 tx_rings[i] = *vsi->tx_rings[i];
3289 tx_rings[i].count = new_tx_cnt;
3290 tx_rings[i].desc = NULL;
3291 tx_rings[i].tx_buf = NULL;
3292 tx_rings[i].tstamp_ring = NULL;
3293 tx_rings[i].tx_tstamps = &pf->ptp.port.tx;
3294 err = ice_setup_tx_ring(&tx_rings[i]);
3295 if (err) {
3296 while (i--)
3297 ice_clean_tx_ring(&tx_rings[i]);
3298 kfree(tx_rings);
3299 goto done;
3300 }
3301 }
3302
3303 if (!ice_is_xdp_ena_vsi(vsi))
3304 goto process_rx;
3305
3306 /* alloc updated XDP resources */
3307 netdev_info(netdev, "Changing XDP descriptor count from %d to %d\n",
3308 vsi->xdp_rings[0]->count, new_tx_cnt);
3309
3310 xdp_rings = kzalloc_objs(*xdp_rings, vsi->num_xdp_txq);
3311 if (!xdp_rings) {
3312 err = -ENOMEM;
3313 goto free_tx;
3314 }
3315
3316 ice_for_each_xdp_txq(vsi, i) {
3317 /* clone ring and setup updated count */
3318 xdp_rings[i] = *vsi->xdp_rings[i];
3319 xdp_rings[i].count = new_tx_cnt;
3320 xdp_rings[i].desc = NULL;
3321 xdp_rings[i].tx_buf = NULL;
3322 err = ice_setup_tx_ring(&xdp_rings[i]);
3323 if (err) {
3324 while (i--)
3325 ice_clean_tx_ring(&xdp_rings[i]);
3326 kfree(xdp_rings);
3327 goto free_tx;
3328 }
3329 ice_set_ring_xdp(&xdp_rings[i]);
3330 }
3331
3332 process_rx:
3333 if (new_rx_cnt == vsi->rx_rings[0]->count)
3334 goto process_link;
3335
3336 /* alloc updated Rx resources */
3337 netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
3338 vsi->rx_rings[0]->count, new_rx_cnt);
3339
3340 rx_rings = kzalloc_objs(*rx_rings, vsi->num_rxq);
3341 if (!rx_rings) {
3342 err = -ENOMEM;
3343 goto free_xdp;
3344 }
3345
3346 ice_for_each_rxq(vsi, i) {
3347 /* clone ring and setup updated count */
3348 rx_rings[i] = *vsi->rx_rings[i];
3349 rx_rings[i].count = new_rx_cnt;
3350 rx_rings[i].cached_phctime = pf->ptp.cached_phc_time;
3351 rx_rings[i].desc = NULL;
3352 rx_rings[i].xdp_buf = NULL;
3353 rx_rings[i].xdp_rxq = (struct xdp_rxq_info){ };
3354
3355 /* this is to allow wr32 to have something to write to
3356 * during early allocation of Rx buffers
3357 */
3358 rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS;
3359
3360 err = ice_setup_rx_ring(&rx_rings[i]);
3361 if (err)
3362 goto rx_unwind;
3363 rx_unwind:
3364 if (err) {
3365 while (i) {
3366 i--;
3367 ice_free_rx_ring(&rx_rings[i]);
3368 }
3369 kfree(rx_rings);
3370 err = -ENOMEM;
3371 goto free_xdp;
3372 }
3373 }
3374
3375 process_link:
3376 vsi->hsplit = hsplit;
3377
3378 	/* Bring interface down, copy in the new ring info, then restore the
3379 	 * interface. If the VSI is up, bring it down and then back up.
3380 	 */
3381 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
3382 ice_down(vsi);
3383
3384 if (tx_rings) {
3385 ice_for_each_txq(vsi, i) {
3386 ice_free_tx_ring(vsi->tx_rings[i]);
3387 *vsi->tx_rings[i] = tx_rings[i];
3388 }
3389 kfree(tx_rings);
3390 }
3391
3392 if (rx_rings) {
3393 ice_for_each_rxq(vsi, i) {
3394 ice_free_rx_ring(vsi->rx_rings[i]);
3395 /* copy the real tail offset */
3396 rx_rings[i].tail = vsi->rx_rings[i]->tail;
3397 /* this is to fake out the allocation routine
3398 * into thinking it has to realloc everything
3399 * but the recycling logic will let us re-use
3400 * the buffers allocated above
3401 */
3402 rx_rings[i].next_to_use = 0;
3403 rx_rings[i].next_to_clean = 0;
3404 *vsi->rx_rings[i] = rx_rings[i];
3405 }
3406 kfree(rx_rings);
3407 }
3408
3409 if (xdp_rings) {
3410 ice_for_each_xdp_txq(vsi, i) {
3411 ice_free_tx_ring(vsi->xdp_rings[i]);
3412 *vsi->xdp_rings[i] = xdp_rings[i];
3413 }
3414 kfree(xdp_rings);
3415 }
3416
3417 vsi->num_tx_desc = new_tx_cnt;
3418 vsi->num_rx_desc = new_rx_cnt;
3419 ice_up(vsi);
3420 }
3421 goto done;
3422
3423 free_xdp:
3424 if (xdp_rings) {
3425 ice_for_each_xdp_txq(vsi, i)
3426 ice_free_tx_ring(&xdp_rings[i]);
3427 kfree(xdp_rings);
3428 }
3429
3430 free_tx:
3431 /* error cleanup if the Rx allocations failed after getting Tx */
3432 if (tx_rings) {
3433 ice_for_each_txq(vsi, i)
3434 ice_free_tx_ring(&tx_rings[i]);
3435 kfree(tx_rings);
3436 }
3437
3438 done:
3439 clear_bit(ICE_CFG_BUSY, pf->state);
3440 return err;
3441 }
3442
3443 /**
3444 * ice_get_pauseparam - Get Flow Control status
3445 * @netdev: network interface device structure
3446 * @pause: ethernet pause (flow control) parameters
3447 *
3448 * Get requested flow control status from PHY capability.
3449 * If autoneg is true, then ethtool will send the ETHTOOL_GSET ioctl which
3450 * is handled by ice_get_link_ksettings. ice_get_link_ksettings will report
3451 * the negotiated Rx/Tx pause via lp_advertising.
3452 */
3453 static void
3454 ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
3455 {
3456 struct ice_netdev_priv *np = netdev_priv(netdev);
3457 struct ice_port_info *pi = np->vsi->port_info;
3458 struct ice_aqc_get_phy_caps_data *pcaps;
3459 struct ice_dcbx_cfg *dcbx_cfg;
3460 int status;
3461
3462 /* Initialize pause params */
3463 pause->rx_pause = 0;
3464 pause->tx_pause = 0;
3465
3466 dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
3467
3468 pcaps = kzalloc_obj(*pcaps);
3469 if (!pcaps)
3470 return;
3471
3472 /* Get current PHY config */
3473 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
3474 NULL);
3475 if (status)
3476 goto out;
3477
3478 pause->autoneg = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
3479 AUTONEG_DISABLE;
3480
3481 if (dcbx_cfg->pfc.pfcena)
3482 /* PFC enabled so report LFC as off */
3483 goto out;
3484
3485 if (pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3486 pause->tx_pause = 1;
3487 if (pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3488 pause->rx_pause = 1;
3489
3490 out:
3491 kfree(pcaps);
3492 }
3493
3494 /**
3495 * ice_set_pauseparam - Set Flow Control parameter
3496 * @netdev: network interface device structure
3497 * @pause: return Tx/Rx flow control status
3498 */
3499 static int
3500 ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
3501 {
3502 struct ice_netdev_priv *np = netdev_priv(netdev);
3503 struct ice_aqc_get_phy_caps_data *pcaps;
3504 struct ice_link_status *hw_link_info;
3505 struct ice_pf *pf = np->vsi->back;
3506 struct ice_dcbx_cfg *dcbx_cfg;
3507 struct ice_vsi *vsi = np->vsi;
3508 struct ice_hw *hw = &pf->hw;
3509 struct ice_port_info *pi;
3510 u8 aq_failures;
3511 bool link_up;
3512 u32 is_an;
3513 int err;
3514
3515 pi = vsi->port_info;
3516 hw_link_info = &pi->phy.link_info;
3517 dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
3518 link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
3519
3520 /* Changing the port's flow control is not supported if this isn't the
3521 * PF VSI
3522 */
3523 if (vsi->type != ICE_VSI_PF) {
3524 netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n");
3525 return -EOPNOTSUPP;
3526 }
3527
3528 /* Get pause param reports the configured and negotiated flow control
3529 * pause when ETHTOOL_GLINKSETTINGS is defined. Since ETHTOOL_GLINKSETTINGS
3530 * is defined, get pause param's pause->autoneg reports the SW-configured
3531 * setting, so compare pause->autoneg with the SW-configured value to
3532 * prevent the user from using set pause param to change autoneg.
3533 */
3534 pcaps = kzalloc_obj(*pcaps);
3535 if (!pcaps)
3536 return -ENOMEM;
3537
3538 /* Get current PHY config */
3539 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
3540 NULL);
3541 if (err) {
3542 kfree(pcaps);
3543 return err;
3544 }
3545
3546 is_an = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
3547 AUTONEG_DISABLE;
3548
3549 kfree(pcaps);
3550
3551 if (pause->autoneg != is_an) {
3552 netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
3553 return -EOPNOTSUPP;
3554 }
3555
3556 /* If we have link and don't have autoneg */
3557 if (!test_bit(ICE_DOWN, pf->state) &&
3558 !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
3559 /* Send message that it might not necessarily work */
3560 netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
3561 }
3562
3563 if (dcbx_cfg->pfc.pfcena) {
3564 netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n");
3565 return -EOPNOTSUPP;
3566 }
3567 if (pause->rx_pause && pause->tx_pause)
3568 pi->fc.req_mode = ICE_FC_FULL;
3569 else if (pause->rx_pause && !pause->tx_pause)
3570 pi->fc.req_mode = ICE_FC_RX_PAUSE;
3571 else if (!pause->rx_pause && pause->tx_pause)
3572 pi->fc.req_mode = ICE_FC_TX_PAUSE;
3573 else if (!pause->rx_pause && !pause->tx_pause)
3574 pi->fc.req_mode = ICE_FC_NONE;
3575 else
3576 return -EINVAL;
3577
3578 /* Set the FC mode and only restart AN if link is up */
3579 err = ice_set_fc(pi, &aq_failures, link_up);
3580
3581 if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
3582 netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
3583 err, libie_aq_str(hw->adminq.sq_last_status));
3584 err = -EAGAIN;
3585 } else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
3586 netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
3587 err, libie_aq_str(hw->adminq.sq_last_status));
3588 err = -EAGAIN;
3589 } else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
3590 netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
3591 err, libie_aq_str(hw->adminq.sq_last_status));
3592 err = -EAGAIN;
3593 }
3594
3595 return err;
3596 }
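
/* Illustrative note: with autoneg matching the PHY-reported state, the
 * req_mode selection above corresponds to ethtool requests such as
 *
 *   ethtool -A eth0 rx on tx on     -> ICE_FC_FULL
 *   ethtool -A eth0 rx on tx off    -> ICE_FC_RX_PAUSE
 *   ethtool -A eth0 rx off tx on    -> ICE_FC_TX_PAUSE
 *   ethtool -A eth0 rx off tx off   -> ICE_FC_NONE
 *
 * "eth0" is a placeholder; the request is rejected while PFC is enabled or
 * when the autoneg argument differs from the PHY-reported setting.
 */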
3597
3598 /**
3599 * ice_get_rxfh_key_size - get the RSS hash key size
3600 * @netdev: network interface device structure
3601 *
3602 * Returns the hash key size.
3603 */
3604 static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
3605 {
3606 return ICE_VSIQF_HKEY_ARRAY_SIZE;
3607 }
3608
3609 /**
3610 * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
3611 * @netdev: network interface device structure
3612 *
3613 * Returns the table size.
3614 */
3615 static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
3616 {
3617 struct ice_netdev_priv *np = netdev_priv(netdev);
3618
3619 return np->vsi->rss_table_size;
3620 }
3621
3622 /**
3623 * ice_get_rxfh - get the Rx flow hash indirection table
3624 * @netdev: network interface device structure
3625 * @rxfh: pointer to param struct (indir, key, hfunc)
3626 *
3627 * Reads the indirection table directly from the hardware.
3628 */
3629 static int
3630 ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
3631 {
3632 struct ice_netdev_priv *np = netdev_priv(netdev);
3633 struct ice_vsi *vsi = np->vsi;
3634 struct ice_pf *pf = vsi->back;
3635 u16 qcount, offset;
3636 int err, i;
3637 u8 *lut;
3638
3639 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
3640 netdev_warn(netdev, "RSS is not supported on this VSI!\n");
3641 return -EOPNOTSUPP;
3642 }
3643
3644 qcount = vsi->mqprio_qopt.qopt.count[0];
3645 offset = vsi->mqprio_qopt.qopt.offset[0];
3646
3647 rxfh->hfunc = ETH_RSS_HASH_TOP;
3648 if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
3649 rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
3650
3651 if (!rxfh->indir)
3652 return 0;
3653
3654 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
3655 if (!lut)
3656 return -ENOMEM;
3657
3658 err = ice_get_rss(vsi, rxfh->key, lut, vsi->rss_table_size);
3659 if (err)
3660 goto out;
3661
3662 if (ice_is_adq_active(pf)) {
3663 for (i = 0; i < vsi->rss_table_size; i++)
3664 rxfh->indir[i] = offset + lut[i] % qcount;
3665 goto out;
3666 }
3667
3668 for (i = 0; i < vsi->rss_table_size; i++)
3669 rxfh->indir[i] = lut[i];
3670
3671 out:
3672 kfree(lut);
3673 return err;
3674 }
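
/* Illustrative (hypothetical values): with ADQ active and TC0 described by
 * offset = 8 and qcount = 4, a hardware LUT entry of lut[i] = 10 is reported
 * to user space as indir[i] = 8 + (10 % 4) = 10, i.e. every entry is folded
 * into TC0's queue range [offset, offset + qcount).
 */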
3675
3676 /**
3677 * ice_set_rxfh - set the Rx flow hash indirection table
3678 * @netdev: network interface device structure
3679 * @rxfh: pointer to param struct (indir, key, hfunc)
3680 * @extack: extended ACK from the Netlink message
3681 *
3682 * Returns -EINVAL if the table specifies an invalid queue ID, otherwise
3683 * returns 0 after programming the table.
3684 */
3685 static int
3686 ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
3687 struct netlink_ext_ack *extack)
3688 {
3689 struct ice_netdev_priv *np = netdev_priv(netdev);
3690 u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
3691 struct ice_vsi *vsi = np->vsi;
3692 struct ice_pf *pf = vsi->back;
3693 struct device *dev;
3694 int err;
3695
3696 dev = ice_pf_to_dev(pf);
3697 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
3698 rxfh->hfunc != ETH_RSS_HASH_TOP)
3699 return -EOPNOTSUPP;
3700
3701 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
3702 /* RSS not supported; return error here */
3703 netdev_warn(netdev, "RSS is not configured on this VSI!\n");
3704 return -EIO;
3705 }
3706
3707 if (ice_is_adq_active(pf)) {
3708 netdev_err(netdev, "Cannot change RSS params with ADQ configured.\n");
3709 return -EOPNOTSUPP;
3710 }
3711
3712 /* Update the VSI's hash function */
3713 if (rxfh->input_xfrm & RXH_XFRM_SYM_XOR)
3714 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
3715
3716 err = ice_set_rss_hfunc(vsi, hfunc);
3717 if (err)
3718 return err;
3719
3720 if (rxfh->key) {
3721 if (!vsi->rss_hkey_user) {
3722 vsi->rss_hkey_user =
3723 devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE,
3724 GFP_KERNEL);
3725 if (!vsi->rss_hkey_user)
3726 return -ENOMEM;
3727 }
3728 memcpy(vsi->rss_hkey_user, rxfh->key,
3729 ICE_VSIQF_HKEY_ARRAY_SIZE);
3730
3731 err = ice_set_rss_key(vsi, vsi->rss_hkey_user);
3732 if (err)
3733 return err;
3734 }
3735
3736 if (!vsi->rss_lut_user) {
3737 vsi->rss_lut_user = devm_kzalloc(dev, vsi->rss_table_size,
3738 GFP_KERNEL);
3739 if (!vsi->rss_lut_user)
3740 return -ENOMEM;
3741 }
3742
3743 /* Each 32-bit entry in 'indir' is stored as an 8-bit LUT entry */
3744 if (rxfh->indir) {
3745 int i;
3746
3747 for (i = 0; i < vsi->rss_table_size; i++)
3748 vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]);
3749 } else {
3750 ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
3751 vsi->rss_size);
3752 }
3753
3754 err = ice_set_rss_lut(vsi, vsi->rss_lut_user, vsi->rss_table_size);
3755 if (err)
3756 return err;
3757
3758 return 0;
3759 }
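
/* Illustrative note: the key, LUT and symmetric-hash handling above maps to
 * ethtool requests such as
 *
 *   ethtool -X eth0 equal 16            # spread the LUT over 16 queues
 *   ethtool -X eth0 hkey <bytes>        # program a user-supplied hash key
 *   ethtool -X eth0 xfrm symmetric-xor  # request symmetric Toeplitz hashing
 *
 * "eth0" and <bytes> are placeholders; only the Toeplitz hash function
 * (ETH_RSS_HASH_TOP) is accepted by this callback.
 */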
3760
3761 static int
3762 ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
3763 {
3764 struct ice_pf *pf = ice_netdev_to_pf(dev);
3765
3766 /* only report timestamping if PTP is enabled */
3767 if (pf->ptp.state != ICE_PTP_READY)
3768 return ethtool_op_get_ts_info(dev, info);
3769
3770 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
3771 SOF_TIMESTAMPING_TX_HARDWARE |
3772 SOF_TIMESTAMPING_RX_HARDWARE |
3773 SOF_TIMESTAMPING_RAW_HARDWARE;
3774
3775 info->phc_index = ice_ptp_clock_index(pf);
3776
3777 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
3778
3779 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
3780
3781 return 0;
3782 }
3783
3784 /**
3785 * ice_get_combined_cnt - return the current number of combined channels
3786 * @vsi: PF VSI pointer
3787 *
3788 * Go through all queue vectors and count ones that have both Rx and Tx ring
3789 * attached
3790 */
3791 static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
3792 {
3793 u32 combined = 0;
3794 int q_idx;
3795
3796 ice_for_each_q_vector(vsi, q_idx) {
3797 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
3798
3799 combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx);
3800 }
3801
3802 return combined;
3803 }
3804
3805 /**
3806 * ice_get_channels - get the current and max supported channels
3807 * @dev: network interface device structure
3808 * @ch: ethtool channel data structure
3809 */
3810 static void
3811 ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
3812 {
3813 struct ice_netdev_priv *np = netdev_priv(dev);
3814 struct ice_vsi *vsi = np->vsi;
3815 struct ice_pf *pf = vsi->back;
3816
3817 /* report maximum channels */
3818 ch->max_rx = ice_get_max_rxq(pf);
3819 ch->max_tx = ice_get_max_txq(pf);
3820 ch->max_combined = min_t(int, ch->max_rx, ch->max_tx);
3821
3822 /* report current channels */
3823 ch->combined_count = ice_get_combined_cnt(vsi);
3824 ch->rx_count = vsi->num_rxq - ch->combined_count;
3825 ch->tx_count = vsi->num_txq - ch->combined_count;
3826
3827 /* report other queues */
3828 ch->other_count = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
3829 ch->max_other = ch->other_count;
3830 }
3831
3832 /**
3833 * ice_get_valid_rss_size - return valid number of RSS queues
3834 * @hw: pointer to the HW structure
3835 * @new_size: requested RSS queues
3836 */
3837 static int ice_get_valid_rss_size(struct ice_hw *hw, int new_size)
3838 {
3839 struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;
3840
3841 return min_t(int, new_size, BIT(caps->rss_table_entry_width));
3842 }
3843
3844 /**
3845 * ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size
3846 * @vsi: VSI to reconfigure RSS LUT on
3847 * @req_rss_size: requested range of queue numbers for hashing
3848 *
3849 * Set the VSI's RSS parameters, configure the RSS LUT based on these.
3850 */
3851 static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
3852 {
3853 struct ice_pf *pf = vsi->back;
3854 struct device *dev;
3855 struct ice_hw *hw;
3856 int err;
3857 u8 *lut;
3858
3859 dev = ice_pf_to_dev(pf);
3860 hw = &pf->hw;
3861
3862 if (!req_rss_size)
3863 return -EINVAL;
3864
3865 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
3866 if (!lut)
3867 return -ENOMEM;
3868
3869 /* set RSS LUT parameters */
3870 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
3871 vsi->rss_size = 1;
3872 else
3873 vsi->rss_size = ice_get_valid_rss_size(hw, req_rss_size);
3874
3875 /* create/set RSS LUT */
3876 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
3877 err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
3878 if (err)
3879 dev_err(dev, "Cannot set RSS lut, err %d aq_err %s\n", err,
3880 libie_aq_str(hw->adminq.sq_last_status));
3881
3882 kfree(lut);
3883 return err;
3884 }
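
/* Illustrative note: ice_fill_rss_lut() programs a round-robin default LUT,
 * so with rss_size = 4 the table becomes 0, 1, 2, 3, 0, 1, 2, 3, ... This is
 * the spread used when no user-supplied indirection table is present.
 */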
3885
3886 /**
3887 * ice_set_channels - set the number of channels
3888 * @dev: network interface device structure
3889 * @ch: ethtool channel data structure
3890 */
3891 static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
3892 {
3893 struct ice_netdev_priv *np = netdev_priv(dev);
3894 struct ice_vsi *vsi = np->vsi;
3895 struct ice_pf *pf = vsi->back;
3896 int new_rx = 0, new_tx = 0;
3897 bool locked = false;
3898 int ret = 0;
3899
3900 /* do not support changing channels in Safe Mode */
3901 if (ice_is_safe_mode(pf)) {
3902 netdev_err(dev, "Changing channel in Safe Mode is not supported\n");
3903 return -EOPNOTSUPP;
3904 }
3905 /* do not support changing other_count */
3906 if (ch->other_count != (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1U : 0U))
3907 return -EINVAL;
3908
3909 if (ice_is_adq_active(pf)) {
3910 netdev_err(dev, "Cannot set channels with ADQ configured.\n");
3911 return -EOPNOTSUPP;
3912 }
3913
3914 if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) {
3915 netdev_err(dev, "Cannot set channels when Flow Director filters are active\n");
3916 return -EOPNOTSUPP;
3917 }
3918
3919 if (ch->rx_count && ch->tx_count) {
3920 netdev_err(dev, "Dedicated RX or TX channels cannot be used simultaneously\n");
3921 return -EINVAL;
3922 }
3923
3924 new_rx = ch->combined_count + ch->rx_count;
3925 new_tx = ch->combined_count + ch->tx_count;
3926
3927 if (new_rx < vsi->tc_cfg.numtc) {
3928 netdev_err(dev, "Cannot set fewer Rx channels than Traffic Classes (%u)\n",
3929 vsi->tc_cfg.numtc);
3930 return -EINVAL;
3931 }
3932 if (new_tx < vsi->tc_cfg.numtc) {
3933 netdev_err(dev, "Cannot set fewer Tx channels than Traffic Classes (%u)\n",
3934 vsi->tc_cfg.numtc);
3935 return -EINVAL;
3936 }
3937 if (new_rx > ice_get_max_rxq(pf)) {
3938 netdev_err(dev, "Maximum allowed Rx channels is %d\n",
3939 ice_get_max_rxq(pf));
3940 return -EINVAL;
3941 }
3942 if (new_tx > ice_get_max_txq(pf)) {
3943 netdev_err(dev, "Maximum allowed Tx channels is %d\n",
3944 ice_get_max_txq(pf));
3945 return -EINVAL;
3946 }
3947
3948 if (pf->cdev_info && pf->cdev_info->adev) {
3949 mutex_lock(&pf->adev_mutex);
3950 device_lock(&pf->cdev_info->adev->dev);
3951 locked = true;
3952 if (pf->cdev_info->adev->dev.driver) {
3953 netdev_err(dev, "Cannot change channels when RDMA is active\n");
3954 ret = -EBUSY;
3955 goto adev_unlock;
3956 }
3957 }
3958
3959 ice_vsi_recfg_qs(vsi, new_rx, new_tx, locked);
3960
3961 if (!netif_is_rxfh_configured(dev)) {
3962 ret = ice_vsi_set_dflt_rss_lut(vsi, new_rx);
3963 goto adev_unlock;
3964 }
3965
3966 /* Update rss_size due to change in Rx queues */
3967 vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx);
3968
3969 adev_unlock:
3970 if (locked) {
3971 device_unlock(&pf->cdev_info->adev->dev);
3972 mutex_unlock(&pf->adev_mutex);
3973 }
3974 return ret;
3975 }
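
/* Illustrative note: the checks above back queue-count requests such as
 *
 *   ethtool -L eth0 combined 16        # 16 combined Tx/Rx channels
 *   ethtool -L eth0 combined 4 rx 8    # combined plus dedicated Rx channels
 *
 * "eth0" is a placeholder. Requests are rejected in Safe Mode, with ADQ or
 * Flow Director filters active, when both dedicated Rx and Tx counts are set,
 * or when a count exceeds ice_get_max_rxq()/ice_get_max_txq() or drops below
 * the number of traffic classes.
 */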
3976
3977 /**
3978 * ice_get_wol - get current Wake on LAN configuration
3979 * @netdev: network interface device structure
3980 * @wol: Ethtool structure to retrieve WoL settings
3981 */
3982 static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
3983 {
3984 struct ice_netdev_priv *np = netdev_priv(netdev);
3985 struct ice_pf *pf = np->vsi->back;
3986
3987 if (np->vsi->type != ICE_VSI_PF)
3988 netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n");
3989
3990 /* Get WoL settings based on the HW capability */
3991 if (ice_is_wol_supported(&pf->hw)) {
3992 wol->supported = WAKE_MAGIC;
3993 wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0;
3994 } else {
3995 wol->supported = 0;
3996 wol->wolopts = 0;
3997 }
3998 }
3999
4000 /**
4001 * ice_set_wol - set Wake on LAN on supported device
4002 * @netdev: network interface device structure
4003 * @wol: Ethtool structure to set WoL
4004 */
4005 static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
4006 {
4007 struct ice_netdev_priv *np = netdev_priv(netdev);
4008 struct ice_vsi *vsi = np->vsi;
4009 struct ice_pf *pf = vsi->back;
4010
4011 if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(&pf->hw))
4012 return -EOPNOTSUPP;
4013
4014 /* only magic packet is supported */
4015 if (wol->wolopts && wol->wolopts != WAKE_MAGIC)
4016 return -EOPNOTSUPP;
4017
4018 /* Set WoL only if there is a new value */
4019 if (pf->wol_ena != !!wol->wolopts) {
4020 pf->wol_ena = !!wol->wolopts;
4021 device_set_wakeup_enable(ice_pf_to_dev(pf), pf->wol_ena);
4022 netdev_dbg(netdev, "WoL magic packet %sabled\n",
4023 pf->wol_ena ? "en" : "dis");
4024 }
4025
4026 return 0;
4027 }
4028
4029 /**
4030 * ice_get_rc_coalesce - get ITR values for specific ring container
4031 * @ec: ethtool structure to fill with driver's coalesce settings
4032 * @rc: ring container that the ITR values will come from
4033 *
4034 * Query the device for ice_ring_container specific ITR values. This is
4035 * done per ice_ring_container because each q_vector can have 1 or more rings
4036 * and all of said ring(s) will have the same ITR values.
4037 *
4038 * Returns 0 on success, negative otherwise.
4039 */
4040 static int
4041 ice_get_rc_coalesce(struct ethtool_coalesce *ec, struct ice_ring_container *rc)
4042 {
4043 if (!rc->rx_ring)
4044 return -EINVAL;
4045
4046 switch (rc->type) {
4047 case ICE_RX_CONTAINER:
4048 ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc);
4049 ec->rx_coalesce_usecs = rc->itr_setting;
4050 ec->rx_coalesce_usecs_high = rc->rx_ring->q_vector->intrl;
4051 break;
4052 case ICE_TX_CONTAINER:
4053 ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc);
4054 ec->tx_coalesce_usecs = rc->itr_setting;
4055 break;
4056 default:
4057 dev_dbg(ice_pf_to_dev(rc->rx_ring->vsi->back), "Invalid c_type %d\n", rc->type);
4058 return -EINVAL;
4059 }
4060
4061 return 0;
4062 }
4063
4064 /**
4065 * ice_get_q_coalesce - get a queue's ITR/INTRL (coalesce) settings
4066 * @vsi: VSI associated to the queue for getting ITR/INTRL (coalesce) settings
4067 * @ec: coalesce settings to program the device with
4068 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
4069 *
4070 * Return 0 on success, and negative under the following conditions:
4071 * 1. Getting Tx or Rx ITR/INTRL (coalesce) settings failed.
4072 * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
4073 */
4074 static int
4075 ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
4076 {
4077 if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
4078 if (ice_get_rc_coalesce(ec,
4079 &vsi->rx_rings[q_num]->q_vector->rx))
4080 return -EINVAL;
4081 if (ice_get_rc_coalesce(ec,
4082 &vsi->tx_rings[q_num]->q_vector->tx))
4083 return -EINVAL;
4084 } else if (q_num < vsi->num_rxq) {
4085 if (ice_get_rc_coalesce(ec,
4086 &vsi->rx_rings[q_num]->q_vector->rx))
4087 return -EINVAL;
4088 } else if (q_num < vsi->num_txq) {
4089 if (ice_get_rc_coalesce(ec,
4090 &vsi->tx_rings[q_num]->q_vector->tx))
4091 return -EINVAL;
4092 } else {
4093 return -EINVAL;
4094 }
4095
4096 return 0;
4097 }
4098
4099 /**
4100 * __ice_get_coalesce - get ITR/INTRL values for the device
4101 * @netdev: pointer to the netdev associated with this query
4102 * @ec: ethtool structure to fill with driver's coalesce settings
4103 * @q_num: queue number to get the coalesce settings for
4104 *
4105 * If the caller passes in a negative q_num then we return coalesce settings
4106 * based on queue number 0, else use the actual q_num passed in.
4107 */
4108 static int
4109 __ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
4110 int q_num)
4111 {
4112 struct ice_netdev_priv *np = netdev_priv(netdev);
4113 struct ice_vsi *vsi = np->vsi;
4114
4115 if (q_num < 0)
4116 q_num = 0;
4117
4118 if (ice_get_q_coalesce(vsi, ec, q_num))
4119 return -EINVAL;
4120
4121 return 0;
4122 }
4123
4124 static int ice_get_coalesce(struct net_device *netdev,
4125 struct ethtool_coalesce *ec,
4126 struct kernel_ethtool_coalesce *kernel_coal,
4127 struct netlink_ext_ack *extack)
4128 {
4129 return __ice_get_coalesce(netdev, ec, -1);
4130 }
4131
4132 static int
4133 ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
4134 struct ethtool_coalesce *ec)
4135 {
4136 return __ice_get_coalesce(netdev, ec, q_num);
4137 }
4138
4139 /**
4140 * ice_set_rc_coalesce - set ITR values for specific ring container
4141 * @ec: ethtool structure from user to update ITR settings
4142 * @rc: ring container whose ITR values will be updated
4143 * @vsi: VSI associated to the ring container
4144 *
4145 * Set specific ITR values. This is done per ice_ring_container because each
4146 * q_vector can have 1 or more rings and all of said ring(s) will have the same
4147 * ITR values.
4148 *
4149 * Returns 0 on success, negative otherwise.
4150 */
4151 static int
4152 ice_set_rc_coalesce(struct ethtool_coalesce *ec,
4153 struct ice_ring_container *rc, struct ice_vsi *vsi)
4154 {
4155 const char *c_type_str = (rc->type == ICE_RX_CONTAINER) ? "rx" : "tx";
4156 u32 use_adaptive_coalesce, coalesce_usecs;
4157 struct ice_pf *pf = vsi->back;
4158 u16 itr_setting;
4159
4160 if (!rc->rx_ring)
4161 return -EINVAL;
4162
4163 switch (rc->type) {
4164 case ICE_RX_CONTAINER:
4165 {
4166 struct ice_q_vector *q_vector = rc->rx_ring->q_vector;
4167
4168 if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
4169 (ec->rx_coalesce_usecs_high &&
4170 ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
4171 netdev_info(vsi->netdev, "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n",
4172 c_type_str, pf->hw.intrl_gran,
4173 ICE_MAX_INTRL);
4174 return -EINVAL;
4175 }
4176 if (ec->rx_coalesce_usecs_high != q_vector->intrl &&
4177 (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce)) {
4178 netdev_info(vsi->netdev, "Invalid value, %s-usecs-high cannot be changed if adaptive-tx or adaptive-rx is enabled\n",
4179 c_type_str);
4180 return -EINVAL;
4181 }
4182 if (ec->rx_coalesce_usecs_high != q_vector->intrl)
4183 q_vector->intrl = ec->rx_coalesce_usecs_high;
4184
4185 use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
4186 coalesce_usecs = ec->rx_coalesce_usecs;
4187
4188 break;
4189 }
4190 case ICE_TX_CONTAINER:
4191 use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
4192 coalesce_usecs = ec->tx_coalesce_usecs;
4193
4194 break;
4195 default:
4196 dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n",
4197 rc->type);
4198 return -EINVAL;
4199 }
4200
4201 itr_setting = rc->itr_setting;
4202 if (coalesce_usecs != itr_setting && use_adaptive_coalesce) {
4203 netdev_info(vsi->netdev, "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n",
4204 c_type_str, c_type_str);
4205 return -EINVAL;
4206 }
4207
4208 if (coalesce_usecs > ICE_ITR_MAX) {
4209 netdev_info(vsi->netdev, "Invalid value, %s-usecs range is 0-%d\n",
4210 c_type_str, ICE_ITR_MAX);
4211 return -EINVAL;
4212 }
4213
4214 if (use_adaptive_coalesce) {
4215 rc->itr_mode = ITR_DYNAMIC;
4216 } else {
4217 rc->itr_mode = ITR_STATIC;
4218 /* store the user-facing value as it was set */
4219 rc->itr_setting = coalesce_usecs;
4220 /* write the change to the register */
4221 ice_write_itr(rc, coalesce_usecs);
4222 /* force writes to take effect immediately, the flush shouldn't
4223 * be done in the functions above because the intent is for
4224 * them to do lazy writes.
4225 */
4226 ice_flush(&pf->hw);
4227 }
4228
4229 return 0;
4230 }
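
/* Illustrative note: the validation above corresponds to requests such as
 *
 *   ethtool -C eth0 adaptive-rx off rx-usecs 64   # static 64 us Rx ITR
 *   ethtool -C eth0 adaptive-rx on                # dynamic (adaptive) Rx ITR
 *   ethtool -C eth0 rx-usecs-high 100             # per-vector INTRL
 *
 * "eth0" is a placeholder. A [tx|rx]-usecs change is rejected while the
 * matching adaptive mode is on, and rx-usecs-high must be 0 or fall within
 * [intrl_gran, ICE_MAX_INTRL].
 */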
4231
4232 /**
4233 * ice_set_q_coalesce - set a queue's ITR/INTRL (coalesce) settings
4234 * @vsi: VSI associated with the queue that needs updating
4235 * @ec: coalesce settings to program the device with
4236 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
4237 *
4238 * Return 0 on success, and negative under the following conditions:
4239 * 1. Setting Tx or Rx ITR/INTRL (coalesce) settings failed.
4240 * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
4241 */
4242 static int
4243 ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
4244 {
4245 if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
4246 if (ice_set_rc_coalesce(ec,
4247 &vsi->rx_rings[q_num]->q_vector->rx,
4248 vsi))
4249 return -EINVAL;
4250
4251 if (ice_set_rc_coalesce(ec,
4252 &vsi->tx_rings[q_num]->q_vector->tx,
4253 vsi))
4254 return -EINVAL;
4255 } else if (q_num < vsi->num_rxq) {
4256 if (ice_set_rc_coalesce(ec,
4257 &vsi->rx_rings[q_num]->q_vector->rx,
4258 vsi))
4259 return -EINVAL;
4260 } else if (q_num < vsi->num_txq) {
4261 if (ice_set_rc_coalesce(ec,
4262 &vsi->tx_rings[q_num]->q_vector->tx,
4263 vsi))
4264 return -EINVAL;
4265 } else {
4266 return -EINVAL;
4267 }
4268
4269 return 0;
4270 }
4271
4272 /**
4273 * ice_print_if_odd_usecs - print message if user tries to set odd [tx|rx]-usecs
4274 * @netdev: netdev used for print
4275 * @itr_setting: previous user setting
4276 * @use_adaptive_coalesce: if adaptive coalesce is enabled or being enabled
4277 * @coalesce_usecs: requested value of [tx|rx]-usecs
4278 * @c_type_str: either "rx" or "tx" to match user set field of [tx|rx]-usecs
4279 */
4280 static void
4281 ice_print_if_odd_usecs(struct net_device *netdev, u16 itr_setting,
4282 u32 use_adaptive_coalesce, u32 coalesce_usecs,
4283 const char *c_type_str)
4284 {
4285 if (use_adaptive_coalesce)
4286 return;
4287
4288 if (itr_setting != coalesce_usecs && (coalesce_usecs % 2))
4289 netdev_info(netdev, "User set %s-usecs to %d, device only supports even values. Rounding down and attempting to set %s-usecs to %d\n",
4290 c_type_str, coalesce_usecs, c_type_str,
4291 ITR_REG_ALIGN(coalesce_usecs));
4292 }
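
/* Illustrative example: the device only supports even [tx|rx]-usecs values,
 * so a request of rx-usecs 9 triggers the message above and is programmed as
 * ITR_REG_ALIGN(9), i.e. 8.
 */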
4293
4294 /**
4295 * __ice_set_coalesce - set ITR/INTRL values for the device
4296 * @netdev: pointer to the netdev associated with this request
4297 * @ec: ethtool structure containing the user's requested coalesce settings
4298 * @q_num: queue number to set the coalesce settings for
4299 *
4300 * If the caller passes in a negative q_num then we set the coalesce settings
4301 * for all Tx/Rx queues, else use the actual q_num passed in.
4302 */
4303 static int
4304 __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
4305 int q_num)
4306 {
4307 struct ice_netdev_priv *np = netdev_priv(netdev);
4308 struct ice_vsi *vsi = np->vsi;
4309
4310 if (q_num < 0) {
4311 struct ice_q_vector *q_vector = vsi->q_vectors[0];
4312 int v_idx;
4313
4314 if (q_vector) {
4315 ice_print_if_odd_usecs(netdev, q_vector->rx.itr_setting,
4316 ec->use_adaptive_rx_coalesce,
4317 ec->rx_coalesce_usecs, "rx");
4318
4319 ice_print_if_odd_usecs(netdev, q_vector->tx.itr_setting,
4320 ec->use_adaptive_tx_coalesce,
4321 ec->tx_coalesce_usecs, "tx");
4322 }
4323
4324 ice_for_each_q_vector(vsi, v_idx) {
4325 /* In some cases if DCB is configured the num_[rx|tx]q
4326 * can be less than vsi->num_q_vectors. This check
4327 * accounts for that so we don't report a false failure
4328 */
4329 if (v_idx >= vsi->num_rxq && v_idx >= vsi->num_txq)
4330 goto set_complete;
4331
4332 if (ice_set_q_coalesce(vsi, ec, v_idx))
4333 return -EINVAL;
4334
4335 ice_set_q_vector_intrl(vsi->q_vectors[v_idx]);
4336 }
4337 goto set_complete;
4338 }
4339
4340 if (ice_set_q_coalesce(vsi, ec, q_num))
4341 return -EINVAL;
4342
4343 ice_set_q_vector_intrl(vsi->q_vectors[q_num]);
4344
4345 set_complete:
4346 return 0;
4347 }
4348
4349 static int ice_set_coalesce(struct net_device *netdev,
4350 struct ethtool_coalesce *ec,
4351 struct kernel_ethtool_coalesce *kernel_coal,
4352 struct netlink_ext_ack *extack)
4353 {
4354 return __ice_set_coalesce(netdev, ec, -1);
4355 }
4356
4357 static int
4358 ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
4359 struct ethtool_coalesce *ec)
4360 {
4361 return __ice_set_coalesce(netdev, ec, q_num);
4362 }
4363
4364 static void
4365 ice_repr_get_drvinfo(struct net_device *netdev,
4366 struct ethtool_drvinfo *drvinfo)
4367 {
4368 struct ice_repr *repr = ice_netdev_to_repr(netdev);
4369
4370 if (repr->ops.ready(repr))
4371 return;
4372
4373 __ice_get_drvinfo(netdev, drvinfo, repr->src_vsi);
4374 }
4375
4376 static void
4377 ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
4378 {
4379 struct ice_repr *repr = ice_netdev_to_repr(netdev);
4380
4381 /* for port representors only ETH_SS_STATS is supported */
4382 if (repr->ops.ready(repr) || stringset != ETH_SS_STATS)
4383 return;
4384
4385 __ice_get_strings(netdev, stringset, data, repr->src_vsi);
4386 }
4387
4388 static void
4389 ice_repr_get_ethtool_stats(struct net_device *netdev,
4390 struct ethtool_stats __always_unused *stats,
4391 u64 *data)
4392 {
4393 struct ice_repr *repr = ice_netdev_to_repr(netdev);
4394
4395 if (repr->ops.ready(repr))
4396 return;
4397
4398 __ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi);
4399 }
4400
4401 static int ice_repr_get_sset_count(struct net_device *netdev, int sset)
4402 {
4403 switch (sset) {
4404 case ETH_SS_STATS:
4405 return ICE_VSI_STATS_LEN;
4406 default:
4407 return -EOPNOTSUPP;
4408 }
4409 }
4410
4411 #define ICE_I2C_EEPROM_DEV_ADDR 0xA0
4412 #define ICE_I2C_EEPROM_DEV_ADDR2 0xA2
4413 #define ICE_MODULE_TYPE_SFP 0x03
4414 #define ICE_MODULE_TYPE_QSFP_PLUS 0x0D
4415 #define ICE_MODULE_TYPE_QSFP28 0x11
4416 #define ICE_MODULE_SFF_ADDR_MODE 0x04
4417 #define ICE_MODULE_SFF_DIAG_CAPAB 0x40
4418 #define ICE_MODULE_REVISION_ADDR 0x01
4419 #define ICE_MODULE_SFF_8472_COMP 0x5E
4420 #define ICE_MODULE_SFF_8472_SWAP 0x5C
4421 #define ICE_MODULE_QSFP_MAX_LEN 640
4422
4423 /**
4424 * ice_get_module_info - get SFF module type and revision information
4425 * @netdev: network interface device structure
4426 * @modinfo: module EEPROM size and layout information structure
4427 */
4428 static int
4429 ice_get_module_info(struct net_device *netdev,
4430 struct ethtool_modinfo *modinfo)
4431 {
4432 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4433 struct ice_hw *hw = &pf->hw;
4434 u8 sff8472_comp = 0;
4435 u8 sff8472_swap = 0;
4436 u8 sff8636_rev = 0;
4437 u8 value = 0;
4438 int status;
4439
4440 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00,
4441 0, &value, 1, 0, NULL);
4442 if (status)
4443 return status;
4444
4445 switch (value) {
4446 case ICE_MODULE_TYPE_SFP:
4447 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
4448 ICE_MODULE_SFF_8472_COMP, 0x00, 0,
4449 &sff8472_comp, 1, 0, NULL);
4450 if (status)
4451 return status;
4452 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
4453 ICE_MODULE_SFF_8472_SWAP, 0x00, 0,
4454 &sff8472_swap, 1, 0, NULL);
4455 if (status)
4456 return status;
4457
4458 if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) {
4459 modinfo->type = ETH_MODULE_SFF_8079;
4460 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
4461 } else if (sff8472_comp &&
4462 (sff8472_swap & ICE_MODULE_SFF_DIAG_CAPAB)) {
4463 modinfo->type = ETH_MODULE_SFF_8472;
4464 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
4465 } else {
4466 modinfo->type = ETH_MODULE_SFF_8079;
4467 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
4468 }
4469 break;
4470 case ICE_MODULE_TYPE_QSFP_PLUS:
4471 case ICE_MODULE_TYPE_QSFP28:
4472 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
4473 ICE_MODULE_REVISION_ADDR, 0x00, 0,
4474 &sff8636_rev, 1, 0, NULL);
4475 if (status)
4476 return status;
4477 /* Check revision compliance */
4478 if (sff8636_rev > 0x02) {
4479 /* Module is SFF-8636 compliant */
4480 modinfo->type = ETH_MODULE_SFF_8636;
4481 modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
4482 } else {
4483 modinfo->type = ETH_MODULE_SFF_8436;
4484 modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
4485 }
4486 break;
4487 default:
4488 netdev_warn(netdev, "SFF Module Type not recognized.\n");
4489 return -EINVAL;
4490 }
4491 return 0;
4492 }
4493
4494 /**
4495 * ice_get_module_eeprom - fill buffer with SFF EEPROM contents
4496 * @netdev: network interface device structure
4497 * @ee: EEPROM dump request structure
4498 * @data: buffer to be filled with EEPROM contents
4499 */
4500 static int
4501 ice_get_module_eeprom(struct net_device *netdev,
4502 struct ethtool_eeprom *ee, u8 *data)
4503 {
4504 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4505 #define SFF_READ_BLOCK_SIZE 8
4506 u8 value[SFF_READ_BLOCK_SIZE] = { 0 };
4507 u8 addr = ICE_I2C_EEPROM_DEV_ADDR;
4508 struct ice_hw *hw = &pf->hw;
4509 bool is_sfp = false;
4510 unsigned int i;
4511 u16 offset = 0;
4512 u8 page = 0;
4513 int status;
4514
4515 if (!ee || !ee->len || !data)
4516 return -EINVAL;
4517
4518 status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0,
4519 NULL);
4520 if (status)
4521 return status;
4522
4523 if (value[0] == ICE_MODULE_TYPE_SFP)
4524 is_sfp = true;
4525
4526 memset(data, 0, ee->len);
4527 for (i = 0; i < ee->len; i += SFF_READ_BLOCK_SIZE) {
4528 offset = i + ee->offset;
4529 page = 0;
4530
4531 /* Check if we need to access the other memory page */
4532 if (is_sfp) {
4533 if (offset >= ETH_MODULE_SFF_8079_LEN) {
4534 offset -= ETH_MODULE_SFF_8079_LEN;
4535 addr = ICE_I2C_EEPROM_DEV_ADDR2;
4536 }
4537 } else {
4538 while (offset >= ETH_MODULE_SFF_8436_LEN) {
4539 /* Compute memory page number and offset. */
4540 offset -= ETH_MODULE_SFF_8436_LEN / 2;
4541 page++;
4542 }
4543 }
4544
4545 /* Bit 2 of EEPROM address 0x02 declares upper
4546 * pages are disabled on QSFP modules.
4547 * SFP modules only ever use page 0.
4548 */
4549 if (page == 0 || !(data[0x2] & 0x4)) {
4550 u32 copy_len;
4551
4552 status = ice_aq_sff_eeprom(hw, 0, addr, offset, page,
4553 !is_sfp, value,
4554 SFF_READ_BLOCK_SIZE,
4555 0, NULL);
4556 netdev_dbg(netdev, "SFF %02X %02X %02X %X = %02X%02X%02X%02X.%02X%02X%02X%02X (%pe)\n",
4557 addr, offset, page, is_sfp,
4558 value[0], value[1], value[2], value[3],
4559 value[4], value[5], value[6], value[7],
4560 ERR_PTR(status));
4561 if (status) {
4562 netdev_err(netdev, "%s: error reading module EEPROM: status %pe\n",
4563 __func__, ERR_PTR(status));
4564 return status;
4565 }
4566
4567 /* Make sure we have enough room for the new block */
4568 copy_len = min_t(u32, SFF_READ_BLOCK_SIZE, ee->len - i);
4569 memcpy(data + i, value, copy_len);
4570 }
4571 }
4572 return 0;
4573 }
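
/* Worked example of the paging math above (hypothetical request): on a QSFP
 * module, a read at byte 300 subtracts ETH_MODULE_SFF_8436_LEN / 2 = 128 and
 * bumps the page, so it is fetched as offset 172 on page 1. On an SFP module
 * the same byte is instead fetched as offset 44 at I2C address 0xA2
 * (ICE_I2C_EEPROM_DEV_ADDR2).
 */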
4574
4575 /**
4576 * ice_get_port_fec_stats - returns FEC correctable, uncorrectable stats per
4577 * pcsquad, pcsport
4578 * @hw: pointer to the HW struct
4579 * @pcs_quad: pcsquad for input port
4580 * @pcs_port: pcsport for input port
4581 * @fec_stats: buffer to hold FEC statistics for given port
4582 *
4583 * Return: 0 on success, negative on failure.
4584 */
4585 static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
4586 struct ethtool_fec_stats *fec_stats)
4587 {
4588 u32 fec_uncorr_low_val = 0, fec_uncorr_high_val = 0;
4589 u32 fec_corr_low_val = 0, fec_corr_high_val = 0;
4590 int err;
4591
4592 if (pcs_quad > 1 || pcs_port > 3)
4593 return -EINVAL;
4594
4595 err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_LOW,
4596 &fec_corr_low_val);
4597 if (err)
4598 return err;
4599
4600 err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_HIGH,
4601 &fec_corr_high_val);
4602 if (err)
4603 return err;
4604
4605 err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
4606 ICE_FEC_UNCORR_LOW,
4607 &fec_uncorr_low_val);
4608 if (err)
4609 return err;
4610
4611 err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
4612 ICE_FEC_UNCORR_HIGH,
4613 &fec_uncorr_high_val);
4614 if (err)
4615 return err;
4616
4617 fec_stats->corrected_blocks.total = (fec_corr_high_val << 16) +
4618 fec_corr_low_val;
4619 fec_stats->uncorrectable_blocks.total = (fec_uncorr_high_val << 16) +
4620 fec_uncorr_low_val;
4621 return 0;
4622 }
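
/* Illustrative arithmetic: each FEC counter is read back as two 16-bit
 * halves, e.g. a high word of 0x0001 and a low word of 0x0002 combine into
 * (0x0001 << 16) + 0x0002 = 65538 blocks.
 */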
4623
4624 /**
4625 * ice_get_fec_stats - returns FEC correctable, uncorrectable stats per netdev
4626 * @netdev: network interface device structure
4627 * @fec_stats: buffer to hold FEC statistics for given port
4628 * @hist: buffer to put FEC histogram statistics for given port
4629 *
4630 */
4631 static void ice_get_fec_stats(struct net_device *netdev,
4632 struct ethtool_fec_stats *fec_stats,
4633 struct ethtool_fec_hist *hist)
4634 {
4635 struct ice_netdev_priv *np = netdev_priv(netdev);
4636 struct ice_port_topology port_topology;
4637 struct ice_port_info *pi;
4638 struct ice_pf *pf;
4639 struct ice_hw *hw;
4640 int err;
4641
4642 pf = np->vsi->back;
4643 hw = &pf->hw;
4644 pi = np->vsi->port_info;
4645
4646 /* FEC statistics are not supported if this is not the PF VSI */
4647 if (np->vsi->type != ICE_VSI_PF || !pi)
4648 return;
4649
4650 err = ice_get_port_topology(hw, pi->lport, &port_topology);
4651 if (err) {
4652 netdev_info(netdev, "Failed to get port topology for Lport %d\n",
4653 pi->lport);
4654 return;
4655 }
4656
4657 /* Get FEC correctable, uncorrectable counter */
4658 err = ice_get_port_fec_stats(hw, port_topology.pcs_quad_select,
4659 port_topology.pcs_port, fec_stats);
4660 if (err)
4661 netdev_info(netdev, "FEC stats get failed Lport %d Err %d\n",
4662 pi->lport, err);
4663 }
4664
4665 static void ice_get_eth_mac_stats(struct net_device *netdev,
4666 struct ethtool_eth_mac_stats *mac_stats)
4667 {
4668 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4669 struct ice_hw_port_stats *ps = &pf->stats;
4670
4671 mac_stats->FramesTransmittedOK = ps->eth.tx_unicast +
4672 ps->eth.tx_multicast +
4673 ps->eth.tx_broadcast;
4674 mac_stats->FramesReceivedOK = ps->eth.rx_unicast +
4675 ps->eth.rx_multicast +
4676 ps->eth.rx_broadcast;
4677 mac_stats->FrameCheckSequenceErrors = ps->crc_errors;
4678 mac_stats->OctetsTransmittedOK = ps->eth.tx_bytes;
4679 mac_stats->OctetsReceivedOK = ps->eth.rx_bytes;
4680 mac_stats->MulticastFramesXmittedOK = ps->eth.tx_multicast;
4681 mac_stats->BroadcastFramesXmittedOK = ps->eth.tx_broadcast;
4682 mac_stats->MulticastFramesReceivedOK = ps->eth.rx_multicast;
4683 mac_stats->BroadcastFramesReceivedOK = ps->eth.rx_broadcast;
4684 mac_stats->InRangeLengthErrors = ps->rx_len_errors;
4685 mac_stats->FrameTooLongErrors = ps->rx_oversize;
4686 }
4687
4688 static void ice_get_pause_stats(struct net_device *netdev,
4689 struct ethtool_pause_stats *pause_stats)
4690 {
4691 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4692 struct ice_hw_port_stats *ps = &pf->stats;
4693
4694 pause_stats->tx_pause_frames = ps->link_xon_tx + ps->link_xoff_tx;
4695 pause_stats->rx_pause_frames = ps->link_xon_rx + ps->link_xoff_rx;
4696 }
4697
4698 static const struct ethtool_rmon_hist_range ice_rmon_ranges[] = {
4699 { 0, 64 },
4700 { 65, 127 },
4701 { 128, 255 },
4702 { 256, 511 },
4703 { 512, 1023 },
4704 { 1024, 1522 },
4705 { 1523, 9522 },
4706 {}
4707 };
4708
4709 static void ice_get_rmon_stats(struct net_device *netdev,
4710 struct ethtool_rmon_stats *rmon,
4711 const struct ethtool_rmon_hist_range **ranges)
4712 {
4713 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4714 struct ice_hw_port_stats *ps = &pf->stats;
4715
4716 rmon->undersize_pkts = ps->rx_undersize;
4717 rmon->oversize_pkts = ps->rx_oversize;
4718 rmon->fragments = ps->rx_fragments;
4719 rmon->jabbers = ps->rx_jabber;
4720
4721 rmon->hist[0] = ps->rx_size_64;
4722 rmon->hist[1] = ps->rx_size_127;
4723 rmon->hist[2] = ps->rx_size_255;
4724 rmon->hist[3] = ps->rx_size_511;
4725 rmon->hist[4] = ps->rx_size_1023;
4726 rmon->hist[5] = ps->rx_size_1522;
4727 rmon->hist[6] = ps->rx_size_big;
4728
4729 rmon->hist_tx[0] = ps->tx_size_64;
4730 rmon->hist_tx[1] = ps->tx_size_127;
4731 rmon->hist_tx[2] = ps->tx_size_255;
4732 rmon->hist_tx[3] = ps->tx_size_511;
4733 rmon->hist_tx[4] = ps->tx_size_1023;
4734 rmon->hist_tx[5] = ps->tx_size_1522;
4735 rmon->hist_tx[6] = ps->tx_size_big;
4736
4737 *ranges = ice_rmon_ranges;
4738 }
4739
4740 /**
* ice_get_ts_stats - provide timestamping stats
4741 * @netdev: the netdevice pointer from ethtool
4742 * @ts_stats: the ethtool data structure to fill in
4743 */
4744 static void ice_get_ts_stats(struct net_device *netdev,
4745 struct ethtool_ts_stats *ts_stats)
4746 {
4747 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4748 struct ice_ptp *ptp = &pf->ptp;
4749
4750 ts_stats->pkts = ptp->tx_hwtstamp_good;
4751 ts_stats->err = ptp->tx_hwtstamp_skipped +
4752 ptp->tx_hwtstamp_flushed +
4753 ptp->tx_hwtstamp_discarded;
4754 ts_stats->lost = ptp->tx_hwtstamp_timeouts;
4755 }
4756
4757 #define ICE_ETHTOOL_PFR (ETH_RESET_IRQ | ETH_RESET_DMA | \
4758 ETH_RESET_FILTER | ETH_RESET_OFFLOAD)
4759
4760 #define ICE_ETHTOOL_CORER ((ICE_ETHTOOL_PFR | ETH_RESET_RAM) << \
4761 ETH_RESET_SHARED_SHIFT)
4762
4763 #define ICE_ETHTOOL_GLOBR (ICE_ETHTOOL_CORER | \
4764 (ETH_RESET_MAC << ETH_RESET_SHARED_SHIFT) | \
4765 (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))
4766
4767 #define ICE_ETHTOOL_VFR ICE_ETHTOOL_PFR
4768
4769 /**
4770 * ice_ethtool_reset - triggers a given type of reset
4771 * @dev: network interface device structure
4772 * @flags: set of reset flags
4773 *
4774 * Return: 0 on success, -EOPNOTSUPP when using unsupported set of flags.
4775 */
4776 static int ice_ethtool_reset(struct net_device *dev, u32 *flags)
4777 {
4778 struct ice_pf *pf = ice_netdev_to_pf(dev);
4779 enum ice_reset_req reset;
4780
4781 switch (*flags) {
4782 case ICE_ETHTOOL_CORER:
4783 reset = ICE_RESET_CORER;
4784 break;
4785 case ICE_ETHTOOL_GLOBR:
4786 reset = ICE_RESET_GLOBR;
4787 break;
4788 case ICE_ETHTOOL_PFR:
4789 reset = ICE_RESET_PFR;
4790 break;
4791 default:
4792 netdev_info(dev, "Unsupported set of ethtool flags");
4793 return -EOPNOTSUPP;
4794 }
4795
4796 ice_schedule_reset(pf, reset);
4797
4798 *flags = 0;
4799
4800 return 0;
4801 }
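
/* Illustrative note: the flag masks above map ethtool reset requests onto ice
 * reset types, e.g.
 *
 *   ethtool --reset eth0 irq dma filter offload            -> ICE_RESET_PFR
 *   ethtool --reset eth0 irq-shared dma-shared filter-shared \
 *                        offload-shared ram-shared         -> ICE_RESET_CORER
 *
 * "eth0" is a placeholder; any other flag combination is rejected with
 * -EOPNOTSUPP.
 */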
4802
4803 /**
4804 * ice_repr_ethtool_reset - triggers a VF reset
4805 * @dev: network interface device structure
4806 * @flags: set of reset flags
4807 *
4808 * Return: 0 on success,
4809 * -EOPNOTSUPP when using unsupported set of flags
4810 * -EBUSY when VF is not ready for reset.
4811 */
4812 static int ice_repr_ethtool_reset(struct net_device *dev, u32 *flags)
4813 {
4814 struct ice_repr *repr = ice_netdev_to_repr(dev);
4815 struct ice_vf *vf;
4816
4817 if (repr->type != ICE_REPR_TYPE_VF ||
4818 *flags != ICE_ETHTOOL_VFR)
4819 return -EOPNOTSUPP;
4820
4821 vf = repr->vf;
4822
4823 if (ice_check_vf_ready_for_cfg(vf))
4824 return -EBUSY;
4825
4826 *flags = 0;
4827
4828 return ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
4829 }
4830
4831 static const struct ethtool_ops ice_ethtool_ops = {
4832 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
4833 ETHTOOL_COALESCE_USE_ADAPTIVE |
4834 ETHTOOL_COALESCE_RX_USECS_HIGH,
4835 .supported_input_xfrm = RXH_XFRM_SYM_XOR,
4836 .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
4837 .get_link_ksettings = ice_get_link_ksettings,
4838 .set_link_ksettings = ice_set_link_ksettings,
4839 .get_fec_stats = ice_get_fec_stats,
4840 .get_eth_mac_stats = ice_get_eth_mac_stats,
4841 .get_pause_stats = ice_get_pause_stats,
4842 .get_rmon_stats = ice_get_rmon_stats,
4843 .get_ts_stats = ice_get_ts_stats,
4844 .get_drvinfo = ice_get_drvinfo,
4845 .get_regs_len = ice_get_regs_len,
4846 .get_regs = ice_get_regs,
4847 .get_wol = ice_get_wol,
4848 .set_wol = ice_set_wol,
4849 .get_msglevel = ice_get_msglevel,
4850 .set_msglevel = ice_set_msglevel,
4851 .self_test = ice_self_test,
4852 .get_link = ethtool_op_get_link,
4853 .get_link_ext_stats = ice_get_link_ext_stats,
4854 .get_eeprom_len = ice_get_eeprom_len,
4855 .get_eeprom = ice_get_eeprom,
4856 .get_coalesce = ice_get_coalesce,
4857 .set_coalesce = ice_set_coalesce,
4858 .get_strings = ice_get_strings,
4859 .set_phys_id = ice_set_phys_id,
4860 .get_ethtool_stats = ice_get_ethtool_stats,
4861 .get_priv_flags = ice_get_priv_flags,
4862 .set_priv_flags = ice_set_priv_flags,
4863 .get_sset_count = ice_get_sset_count,
4864 .get_rxnfc = ice_get_rxnfc,
4865 .set_rxnfc = ice_set_rxnfc,
4866 .get_rx_ring_count = ice_get_rx_ring_count,
4867 .get_ringparam = ice_get_ringparam,
4868 .set_ringparam = ice_set_ringparam,
4869 .nway_reset = ice_nway_reset,
4870 .get_pauseparam = ice_get_pauseparam,
4871 .set_pauseparam = ice_set_pauseparam,
4872 .reset = ice_ethtool_reset,
4873 .get_rxfh_key_size = ice_get_rxfh_key_size,
4874 .get_rxfh_indir_size = ice_get_rxfh_indir_size,
4875 .get_rxfh = ice_get_rxfh,
4876 .set_rxfh = ice_set_rxfh,
4877 .get_rxfh_fields = ice_get_rxfh_fields,
4878 .set_rxfh_fields = ice_set_rxfh_fields,
4879 .get_channels = ice_get_channels,
4880 .set_channels = ice_set_channels,
4881 .get_ts_info = ice_get_ts_info,
4882 .get_per_queue_coalesce = ice_get_per_q_coalesce,
4883 .set_per_queue_coalesce = ice_set_per_q_coalesce,
4884 .get_fecparam = ice_get_fecparam,
4885 .set_fecparam = ice_set_fecparam,
4886 .get_module_info = ice_get_module_info,
4887 .get_module_eeprom = ice_get_module_eeprom,
4888 };
4889
4890 static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
4891 .get_link_ksettings = ice_get_link_ksettings,
4892 .set_link_ksettings = ice_set_link_ksettings,
4893 .get_drvinfo = ice_get_drvinfo,
4894 .get_regs_len = ice_get_regs_len,
4895 .get_regs = ice_get_regs,
4896 .get_wol = ice_get_wol,
4897 .set_wol = ice_set_wol,
4898 .get_msglevel = ice_get_msglevel,
4899 .set_msglevel = ice_set_msglevel,
4900 .get_link = ethtool_op_get_link,
4901 .get_eeprom_len = ice_get_eeprom_len,
4902 .get_eeprom = ice_get_eeprom,
4903 .get_strings = ice_get_strings,
4904 .get_ethtool_stats = ice_get_ethtool_stats,
4905 .get_sset_count = ice_get_sset_count,
4906 .get_ringparam = ice_get_ringparam,
4907 .set_ringparam = ice_set_ringparam,
4908 .nway_reset = ice_nway_reset,
4909 .get_channels = ice_get_channels,
4910 };
4911
4912 /**
4913 * ice_set_ethtool_safe_mode_ops - setup safe mode ethtool ops
4914 * @netdev: network interface device structure
4915 */
4916 void ice_set_ethtool_safe_mode_ops(struct net_device *netdev)
4917 {
4918 netdev->ethtool_ops = &ice_ethtool_safe_mode_ops;
4919 }
4920
4921 static const struct ethtool_ops ice_ethtool_repr_ops = {
4922 .get_drvinfo = ice_repr_get_drvinfo,
4923 .get_link = ethtool_op_get_link,
4924 .get_strings = ice_repr_get_strings,
4925 .get_ethtool_stats = ice_repr_get_ethtool_stats,
4926 .get_sset_count = ice_repr_get_sset_count,
4927 .reset = ice_repr_ethtool_reset,
4928 };
4929
4930 /**
4931 * ice_set_ethtool_repr_ops - setup VF's port representor ethtool ops
4932 * @netdev: network interface device structure
4933 */
4934 void ice_set_ethtool_repr_ops(struct net_device *netdev)
4935 {
4936 netdev->ethtool_ops = &ice_ethtool_repr_ops;
4937 }
4938
4939 /**
4940 * ice_set_ethtool_ops - setup netdev ethtool ops
4941 * @netdev: network interface device structure
4942 *
4943 * setup netdev ethtool ops with ice specific ops
4944 */
4945 void ice_set_ethtool_ops(struct net_device *netdev)
4946 {
4947 netdev->ethtool_ops = &ice_ethtool_ops;
4948 }
4949