// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* ethtool support for ice */

#include "ice.h"
#include "ice_ethtool.h"
#include "ice_flow.h"
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include <net/dcbnl.h>
#include <net/libeth/rx.h>

struct ice_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define ICE_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}
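
/* Each table entry built with ICE_STAT() records a statistic's name,
 * width, and byte offset inside its parent structure, so values can be
 * fetched generically. A minimal sketch of that lookup (illustrative
 * only, not the driver's actual helper):
 *
 *	char *base = (char *)vsi;	// or (char *)pf for PF stats
 *	u64 val = (s->sizeof_stat == sizeof(u64)) ?
 *		  *(u64 *)(base + s->stat_offset) :
 *		  *(u32 *)(base + s->stat_offset);
 */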

#define ICE_VSI_STAT(_name, _stat) \
	ICE_STAT(struct ice_vsi, _name, _stat)
#define ICE_PF_STAT(_name, _stat) \
	ICE_STAT(struct ice_pf, _name, _stat)

static int ice_q_stats_len(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
		(sizeof(struct ice_q_stats) / sizeof(u64)));
}

#define ICE_PF_STATS_LEN	ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN	ARRAY_SIZE(ice_gstrings_vsi_stats)

#define ICE_PFC_STATS_LEN ( \
		(sizeof_field(struct ice_pf, stats.priority_xoff_rx) + \
		 sizeof_field(struct ice_pf, stats.priority_xon_rx) + \
		 sizeof_field(struct ice_pf, stats.priority_xoff_tx) + \
		 sizeof_field(struct ice_pf, stats.priority_xon_tx)) \
		 / sizeof(u64))
#define ICE_ALL_STATS_LEN(n)	(ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \
				 ICE_VSI_STATS_LEN + ice_q_stats_len(n))
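
/* ICE_ALL_STATS_LEN() is the ETH_SS_STATS count reported for the PF
 * netdev: both fixed stat tables, one xon/xoff counter pair per user
 * priority in each direction, plus packets/bytes for every allocated
 * Tx and Rx queue. The string table emitted by __ice_get_strings()
 * further down must stay in lockstep with this count.
 */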

static const struct ice_stats ice_gstrings_vsi_stats[] = {
	ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
	ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
	ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
	ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
	ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
	ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
	ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
	ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
	ICE_VSI_STAT("rx_dropped", eth_stats.rx_discards),
	ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
	ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
	ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
	ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
	ICE_VSI_STAT("tx_linearize", tx_linearize),
	ICE_VSI_STAT("tx_busy", tx_busy),
	ICE_VSI_STAT("tx_restart", tx_restart),
};

enum ice_ethtool_test_id {
	ICE_ETH_TEST_REG = 0,
	ICE_ETH_TEST_EEPROM,
	ICE_ETH_TEST_INTR,
	ICE_ETH_TEST_LOOP,
	ICE_ETH_TEST_LINK,
};

static const char ice_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)",
	"EEPROM test (offline)",
	"Interrupt test (offline)",
	"Loopback test (offline)",
	"Link test (on/offline)",
};

#define ICE_TEST_LEN (sizeof(ice_gstrings_test) / ETH_GSTRING_LEN)

/* These PF_STATs might look like duplicates of some NETDEV_STATs,
 * but they aren't. This device is capable of supporting multiple
 * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
 * netdevs whereas the PF_STATs are for the physical function that's
 * hosting these netdevs.
 *
 * The PF_STATs are appended to the netdev stats only when ethtool -S
 * is queried on the base PF netdev.
 */
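/* As a consequence, `ethtool -S <pf-netdev>` shows both scopes side by
 * side, e.g. rx_bytes (this VSI) next to rx_bytes.nic (the whole port).
 */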
static const struct ice_stats ice_gstrings_pf_stats[] = {
	ICE_PF_STAT("rx_bytes.nic", stats.eth.rx_bytes),
	ICE_PF_STAT("tx_bytes.nic", stats.eth.tx_bytes),
	ICE_PF_STAT("rx_unicast.nic", stats.eth.rx_unicast),
	ICE_PF_STAT("tx_unicast.nic", stats.eth.tx_unicast),
	ICE_PF_STAT("rx_multicast.nic", stats.eth.rx_multicast),
	ICE_PF_STAT("tx_multicast.nic", stats.eth.tx_multicast),
	ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast),
	ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast),
	ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors),
	ICE_PF_STAT("tx_timeout.nic", tx_timeout_count),
	ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64),
	ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64),
	ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127),
	ICE_PF_STAT("tx_size_127.nic", stats.tx_size_127),
	ICE_PF_STAT("rx_size_255.nic", stats.rx_size_255),
	ICE_PF_STAT("tx_size_255.nic", stats.tx_size_255),
	ICE_PF_STAT("rx_size_511.nic", stats.rx_size_511),
	ICE_PF_STAT("tx_size_511.nic", stats.tx_size_511),
	ICE_PF_STAT("rx_size_1023.nic", stats.rx_size_1023),
	ICE_PF_STAT("tx_size_1023.nic", stats.tx_size_1023),
	ICE_PF_STAT("rx_size_1522.nic", stats.rx_size_1522),
	ICE_PF_STAT("tx_size_1522.nic", stats.tx_size_1522),
	ICE_PF_STAT("rx_size_big.nic", stats.rx_size_big),
	ICE_PF_STAT("tx_size_big.nic", stats.tx_size_big),
	ICE_PF_STAT("link_xon_rx.nic", stats.link_xon_rx),
	ICE_PF_STAT("link_xon_tx.nic", stats.link_xon_tx),
	ICE_PF_STAT("link_xoff_rx.nic", stats.link_xoff_rx),
	ICE_PF_STAT("link_xoff_tx.nic", stats.link_xoff_tx),
	ICE_PF_STAT("tx_dropped_link_down.nic", stats.tx_dropped_link_down),
	ICE_PF_STAT("rx_undersize.nic", stats.rx_undersize),
	ICE_PF_STAT("rx_fragments.nic", stats.rx_fragments),
	ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
	ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
	ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
	ICE_PF_STAT("rx_eipe_error.nic", hw_rx_eipe_error),
	ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
	ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
	ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
	ICE_PF_STAT("mac_local_faults.nic", stats.mac_local_faults),
	ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
	ICE_PF_STAT("fdir_sb_match.nic", stats.fd_sb_match),
	ICE_PF_STAT("fdir_sb_status.nic", stats.fd_sb_status),
	ICE_PF_STAT("tx_hwtstamp_skipped", ptp.tx_hwtstamp_skipped),
	ICE_PF_STAT("tx_hwtstamp_timeouts", ptp.tx_hwtstamp_timeouts),
	ICE_PF_STAT("tx_hwtstamp_flushed", ptp.tx_hwtstamp_flushed),
	ICE_PF_STAT("tx_hwtstamp_discarded", ptp.tx_hwtstamp_discarded),
	ICE_PF_STAT("late_cached_phc_updates", ptp.late_cached_phc_updates),
};

static const u32 ice_regs_dump_list[] = {
	PFGEN_STATE,
	PRTGEN_STATUS,
	QRX_CTRL(0),
	QINT_TQCTL(0),
	QINT_RQCTL(0),
	PFINT_OICR_ENA,
	QRX_ITR(0),
#define GLDCB_TLPM_PCI_DM	0x000A0180
	GLDCB_TLPM_PCI_DM,
#define GLDCB_TLPM_TC2PFC	0x000A0194
	GLDCB_TLPM_TC2PFC,
#define TCDCB_TLPM_WAIT_DM(_i)	(0x000A0080 + ((_i) * 4))
	TCDCB_TLPM_WAIT_DM(0),
	TCDCB_TLPM_WAIT_DM(1),
	TCDCB_TLPM_WAIT_DM(2),
	TCDCB_TLPM_WAIT_DM(3),
	TCDCB_TLPM_WAIT_DM(4),
	TCDCB_TLPM_WAIT_DM(5),
	TCDCB_TLPM_WAIT_DM(6),
	TCDCB_TLPM_WAIT_DM(7),
	TCDCB_TLPM_WAIT_DM(8),
	TCDCB_TLPM_WAIT_DM(9),
	TCDCB_TLPM_WAIT_DM(10),
	TCDCB_TLPM_WAIT_DM(11),
	TCDCB_TLPM_WAIT_DM(12),
	TCDCB_TLPM_WAIT_DM(13),
	TCDCB_TLPM_WAIT_DM(14),
	TCDCB_TLPM_WAIT_DM(15),
	TCDCB_TLPM_WAIT_DM(16),
	TCDCB_TLPM_WAIT_DM(17),
	TCDCB_TLPM_WAIT_DM(18),
	TCDCB_TLPM_WAIT_DM(19),
	TCDCB_TLPM_WAIT_DM(20),
	TCDCB_TLPM_WAIT_DM(21),
	TCDCB_TLPM_WAIT_DM(22),
	TCDCB_TLPM_WAIT_DM(23),
	TCDCB_TLPM_WAIT_DM(24),
	TCDCB_TLPM_WAIT_DM(25),
	TCDCB_TLPM_WAIT_DM(26),
	TCDCB_TLPM_WAIT_DM(27),
	TCDCB_TLPM_WAIT_DM(28),
	TCDCB_TLPM_WAIT_DM(29),
	TCDCB_TLPM_WAIT_DM(30),
	TCDCB_TLPM_WAIT_DM(31),
#define GLPCI_WATMK_CLNT_PIPEMON	0x000BFD90
	GLPCI_WATMK_CLNT_PIPEMON,
#define GLPCI_CUR_CLNT_COMMON	0x000BFD84
	GLPCI_CUR_CLNT_COMMON,
#define GLPCI_CUR_CLNT_PIPEMON	0x000BFD88
	GLPCI_CUR_CLNT_PIPEMON,
#define GLPCI_PCIERR	0x0009DEB0
	GLPCI_PCIERR,
#define GLPSM_DEBUG_CTL_STATUS	0x000B0600
	GLPSM_DEBUG_CTL_STATUS,
#define GLPSM0_DEBUG_FIFO_OVERFLOW_DETECT	0x000B0680
	GLPSM0_DEBUG_FIFO_OVERFLOW_DETECT,
#define GLPSM0_DEBUG_FIFO_UNDERFLOW_DETECT	0x000B0684
	GLPSM0_DEBUG_FIFO_UNDERFLOW_DETECT,
#define GLPSM0_DEBUG_DT_OUT_OF_WINDOW	0x000B0688
	GLPSM0_DEBUG_DT_OUT_OF_WINDOW,
#define GLPSM0_DEBUG_INTF_HW_ERROR_DETECT	0x000B069C
	GLPSM0_DEBUG_INTF_HW_ERROR_DETECT,
#define GLPSM0_DEBUG_MISC_HW_ERROR_DETECT	0x000B06A0
	GLPSM0_DEBUG_MISC_HW_ERROR_DETECT,
#define GLPSM1_DEBUG_FIFO_OVERFLOW_DETECT	0x000B0E80
	GLPSM1_DEBUG_FIFO_OVERFLOW_DETECT,
#define GLPSM1_DEBUG_FIFO_UNDERFLOW_DETECT	0x000B0E84
	GLPSM1_DEBUG_FIFO_UNDERFLOW_DETECT,
#define GLPSM1_DEBUG_SRL_FIFO_OVERFLOW_DETECT	0x000B0E88
	GLPSM1_DEBUG_SRL_FIFO_OVERFLOW_DETECT,
#define GLPSM1_DEBUG_SRL_FIFO_UNDERFLOW_DETECT	0x000B0E8C
	GLPSM1_DEBUG_SRL_FIFO_UNDERFLOW_DETECT,
#define GLPSM1_DEBUG_MISC_HW_ERROR_DETECT	0x000B0E90
	GLPSM1_DEBUG_MISC_HW_ERROR_DETECT,
#define GLPSM2_DEBUG_FIFO_OVERFLOW_DETECT	0x000B1680
	GLPSM2_DEBUG_FIFO_OVERFLOW_DETECT,
#define GLPSM2_DEBUG_FIFO_UNDERFLOW_DETECT	0x000B1684
	GLPSM2_DEBUG_FIFO_UNDERFLOW_DETECT,
#define GLPSM2_DEBUG_MISC_HW_ERROR_DETECT	0x000B1688
	GLPSM2_DEBUG_MISC_HW_ERROR_DETECT,
#define GLTDPU_TCLAN_COMP_BOB(_i)	(0x00049ADC + ((_i) * 4))
	GLTDPU_TCLAN_COMP_BOB(1),
	GLTDPU_TCLAN_COMP_BOB(2),
	GLTDPU_TCLAN_COMP_BOB(3),
	GLTDPU_TCLAN_COMP_BOB(4),
	GLTDPU_TCLAN_COMP_BOB(5),
	GLTDPU_TCLAN_COMP_BOB(6),
	GLTDPU_TCLAN_COMP_BOB(7),
	GLTDPU_TCLAN_COMP_BOB(8),
#define GLTDPU_TCB_CMD_BOB(_i)	(0x0004975C + ((_i) * 4))
	GLTDPU_TCB_CMD_BOB(1),
	GLTDPU_TCB_CMD_BOB(2),
	GLTDPU_TCB_CMD_BOB(3),
	GLTDPU_TCB_CMD_BOB(4),
	GLTDPU_TCB_CMD_BOB(5),
	GLTDPU_TCB_CMD_BOB(6),
	GLTDPU_TCB_CMD_BOB(7),
	GLTDPU_TCB_CMD_BOB(8),
#define GLTDPU_PSM_UPDATE_BOB(_i)	(0x00049B5C + ((_i) * 4))
	GLTDPU_PSM_UPDATE_BOB(1),
	GLTDPU_PSM_UPDATE_BOB(2),
	GLTDPU_PSM_UPDATE_BOB(3),
	GLTDPU_PSM_UPDATE_BOB(4),
	GLTDPU_PSM_UPDATE_BOB(5),
	GLTDPU_PSM_UPDATE_BOB(6),
	GLTDPU_PSM_UPDATE_BOB(7),
	GLTDPU_PSM_UPDATE_BOB(8),
#define GLTCB_CMD_IN_BOB(_i)	(0x000AE288 + ((_i) * 4))
	GLTCB_CMD_IN_BOB(1),
	GLTCB_CMD_IN_BOB(2),
	GLTCB_CMD_IN_BOB(3),
	GLTCB_CMD_IN_BOB(4),
	GLTCB_CMD_IN_BOB(5),
	GLTCB_CMD_IN_BOB(6),
	GLTCB_CMD_IN_BOB(7),
	GLTCB_CMD_IN_BOB(8),
#define GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(_i)	(0x000FC148 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(8),
#define GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(_i)	(0x000FC248 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(8),
#define GLLAN_TCLAN_CACHE_CTL_BOB_CTL(_i)	(0x000FC1C8 + ((_i) * 4))
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(1),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(2),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(3),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(4),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(5),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(6),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(7),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(8),
#define GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(_i)	(0x000FC188 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(8),
#define GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(_i)	(0x000FC288 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(8),
#define PRTDCB_TCUPM_REG_CM(_i)	(0x000BC360 + ((_i) * 4))
	PRTDCB_TCUPM_REG_CM(0),
	PRTDCB_TCUPM_REG_CM(1),
	PRTDCB_TCUPM_REG_CM(2),
	PRTDCB_TCUPM_REG_CM(3),
#define PRTDCB_TCUPM_REG_DM(_i)	(0x000BC3A0 + ((_i) * 4))
	PRTDCB_TCUPM_REG_DM(0),
	PRTDCB_TCUPM_REG_DM(1),
	PRTDCB_TCUPM_REG_DM(2),
	PRTDCB_TCUPM_REG_DM(3),
#define PRTDCB_TLPM_REG_DM(_i)	(0x000A0000 + ((_i) * 4))
	PRTDCB_TLPM_REG_DM(0),
	PRTDCB_TLPM_REG_DM(1),
	PRTDCB_TLPM_REG_DM(2),
	PRTDCB_TLPM_REG_DM(3),
};

struct ice_priv_flag {
	char name[ETH_GSTRING_LEN];
	u32 bitno;	/* bit position in pf->flags */
};

#define ICE_PRIV_FLAG(_name, _bitno) { \
	.name = _name, \
	.bitno = _bitno, \
}

static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
	ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
	ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
	ICE_PRIV_FLAG("vf-true-promisc-support",
		      ICE_FLAG_VF_TRUE_PROMISC_ENA),
	ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
	ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING),
};

#define ICE_PRIV_FLAG_ARRAY_SIZE	ARRAY_SIZE(ice_gstrings_priv_flags)

static const u32 ice_adv_lnk_speed_100[] __initconst = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

static const u32 ice_adv_lnk_speed_1000[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

static const u32 ice_adv_lnk_speed_2500[] __initconst = {
	ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

static const u32 ice_adv_lnk_speed_5000[] __initconst = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

static const u32 ice_adv_lnk_speed_10000[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
};

static const u32 ice_adv_lnk_speed_25000[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
};

static const u32 ice_adv_lnk_speed_40000[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
};

static const u32 ice_adv_lnk_speed_50000[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 ice_adv_lnk_speed_100000[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
};

static const u32 ice_adv_lnk_speed_200000[] __initconst = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

static struct ethtool_forced_speed_map ice_adv_lnk_speed_maps[] __ro_after_init = {
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 1000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 2500),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 5000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 10000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 25000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 40000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 50000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 200000),
};

void __init ice_adv_lnk_speed_maps_init(void)
{
	ethtool_forced_speed_maps_init(ice_adv_lnk_speed_maps,
				       ARRAY_SIZE(ice_adv_lnk_speed_maps));
}
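
/* ethtool_forced_speed_maps_init() expands each list of link-mode bit
 * numbers above into a linkmode bitmap once at module init, so forced
 * speed lookups later are a plain table walk rather than a per-request
 * translation.
 */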

static void
__ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo,
		  struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_orom_info *orom;
	struct ice_nvm_info *nvm;

	nvm = &hw->flash.nvm;
	orom = &hw->flash.orom;

	strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));

	/* Display NVM version (from which the firmware version can be
	 * determined) which contains more pertinent information.
	 */
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
		 nvm->eetrack, orom->major, orom->build, orom->patch);

	strscpy(drvinfo->bus_info, pci_name(pf->pdev),
		sizeof(drvinfo->bus_info));
}

static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	__ice_get_drvinfo(netdev, drvinfo, np->vsi);
	drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}

static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
	return (sizeof(ice_regs_dump_list) +
		sizeof(struct ice_regdump_to_ethtool));
}

/**
 * ice_ethtool_get_maxspeed - Get the max speed for given lport
 * @hw: pointer to the HW struct
 * @lport: logical port for which max speed is requested
 * @max_speed: return max speed for input lport
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_ethtool_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed)
{
	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = {};
	bool active_valid = false, pending_valid = true;
	u8 option_count = ICE_AQC_PORT_OPT_MAX;
	u8 active_idx = 0, pending_idx = 0;
	int status;

	status = ice_aq_get_port_options(hw, options, &option_count, lport,
					 true, &active_idx, &active_valid,
					 &pending_idx, &pending_valid);
	if (status)
		return -EIO;
	if (!active_valid)
		return -EINVAL;

	*max_speed = options[active_idx].max_lane_speed & ICE_AQC_PORT_OPT_MAX_LANE_M;
	return 0;
}

/**
 * ice_is_serdes_muxed - returns whether serdes is muxed in hardware
 * @hw: pointer to the HW struct
 *
 * Return: true when serdes is muxed, false when serdes is not muxed.
 */
static bool ice_is_serdes_muxed(struct ice_hw *hw)
{
	u32 reg_value = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);

	return FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, reg_value);
}

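/* Map a logical port to its PCS quad, PCS port within the quad, and
 * primary serdes lane. Even lports sit on quad 0, odd lports on quad 1;
 * when the serdes is muxed (25X4_QUAD mode) the odd lports 1 and 3 use
 * lanes 2 and 3 instead of 4 and 5. The SFP and QSFP variants below
 * share this mapping; they differ only in how the caller derives the
 * serdes lane count.
 */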
static int ice_map_port_topology_for_sfp(struct ice_port_topology *port_topology,
					 u8 lport, bool is_muxed)
{
	switch (lport) {
	case 0:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 0;
		port_topology->primary_serdes_lane = 0;
		break;
	case 1:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 0;
		if (is_muxed)
			port_topology->primary_serdes_lane = 2;
		else
			port_topology->primary_serdes_lane = 4;
		break;
	case 2:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 1;
		port_topology->primary_serdes_lane = 1;
		break;
	case 3:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 1;
		if (is_muxed)
			port_topology->primary_serdes_lane = 3;
		else
			port_topology->primary_serdes_lane = 5;
		break;
	case 4:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 2;
		break;
	case 5:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 6;
		break;
	case 6:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 3;
		break;
	case 7:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 7;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ice_map_port_topology_for_qsfp(struct ice_port_topology *port_topology,
					  u8 lport, bool is_muxed)
{
	switch (lport) {
	case 0:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 0;
		port_topology->primary_serdes_lane = 0;
		break;
	case 1:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 0;
		if (is_muxed)
			port_topology->primary_serdes_lane = 2;
		else
			port_topology->primary_serdes_lane = 4;
		break;
	case 2:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 1;
		port_topology->primary_serdes_lane = 1;
		break;
	case 3:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 1;
		if (is_muxed)
			port_topology->primary_serdes_lane = 3;
		else
			port_topology->primary_serdes_lane = 5;
		break;
	case 4:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 2;
		break;
	case 5:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 6;
		break;
	case 6:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 3;
		break;
	case 7:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 7;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_get_port_topology - returns physical topology like pcsquad, pcsport,
 *                         serdes number
 * @hw: pointer to the HW struct
 * @lport: logical port for which physical info requested
 * @port_topology: buffer to hold port topology
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
				 struct ice_port_topology *port_topology)
{
	struct ice_aqc_get_link_topo cmd = {};
	u16 node_handle = 0;
	u8 cage_type = 0;
	bool is_muxed;
	int err;
	u8 ctx;

	ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
	ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
	cmd.addr.topo_params.node_type_ctx = ctx;

	err = ice_aq_get_netlist_node(hw, &cmd, &cage_type, &node_handle);
	if (err)
		return -EINVAL;

	is_muxed = ice_is_serdes_muxed(hw);

	if (cage_type == 0x11 ||	/* SFP+ */
	    cage_type == 0x12) {	/* SFP28 */
		port_topology->serdes_lane_count = 1;
		err = ice_map_port_topology_for_sfp(port_topology, lport, is_muxed);
		if (err)
			return err;
	} else if (cage_type == 0x13 ||	/* QSFP */
		   cage_type == 0x14) {	/* QSFP28 */
		u8 max_speed = 0;

		err = ice_ethtool_get_maxspeed(hw, lport, &max_speed);
		if (err)
			return err;

		if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
			port_topology->serdes_lane_count = 4;
		else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G ||
			 max_speed == ICE_AQC_PORT_OPT_MAX_LANE_40G)
			port_topology->serdes_lane_count = 2;
		else
			port_topology->serdes_lane_count = 1;

		err = ice_map_port_topology_for_qsfp(port_topology, lport, is_muxed);
		if (err)
			return err;
	} else {
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_get_tx_rx_equa - read serdes Tx and Rx equalizer parameters
 * @hw: pointer to the HW struct
 * @serdes_num: represents the serdes number
 * @ptr: structure to hold all equalization parameters for the serdes
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
			      struct ice_serdes_equalization_to_ethtool *ptr)
{
	static const int tx = ICE_AQC_OP_CODE_TX_EQU;
	static const int rx = ICE_AQC_OP_CODE_RX_EQU;
	struct {
		int data_in;
		int opcode;
		int *out;
	} aq_params[] = {
		{ ICE_AQC_TX_EQU_PRE1, tx, &ptr->tx_equ_pre1 },
		{ ICE_AQC_TX_EQU_PRE3, tx, &ptr->tx_equ_pre3 },
		{ ICE_AQC_TX_EQU_ATTEN, tx, &ptr->tx_equ_atten },
		{ ICE_AQC_TX_EQU_POST1, tx, &ptr->tx_equ_post1 },
		{ ICE_AQC_TX_EQU_PRE2, tx, &ptr->tx_equ_pre2 },
		{ ICE_AQC_RX_EQU_PRE2, rx, &ptr->rx_equ_pre2 },
		{ ICE_AQC_RX_EQU_PRE1, rx, &ptr->rx_equ_pre1 },
		{ ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 },
		{ ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf },
		{ ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf },
		{ ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf },
		{ ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf },
		{ ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc },
		{ ICE_AQC_RX_EQU_CTLE_BW, rx, &ptr->rx_equ_ctle_bw },
		{ ICE_AQC_RX_EQU_DFE_GAIN, rx, &ptr->rx_equ_dfe_gain },
		{ ICE_AQC_RX_EQU_DFE_GAIN2, rx, &ptr->rx_equ_dfe_gain_2 },
		{ ICE_AQC_RX_EQU_DFE_2, rx, &ptr->rx_equ_dfe_2 },
		{ ICE_AQC_RX_EQU_DFE_3, rx, &ptr->rx_equ_dfe_3 },
		{ ICE_AQC_RX_EQU_DFE_4, rx, &ptr->rx_equ_dfe_4 },
		{ ICE_AQC_RX_EQU_DFE_5, rx, &ptr->rx_equ_dfe_5 },
		{ ICE_AQC_RX_EQU_DFE_6, rx, &ptr->rx_equ_dfe_6 },
		{ ICE_AQC_RX_EQU_DFE_7, rx, &ptr->rx_equ_dfe_7 },
		{ ICE_AQC_RX_EQU_DFE_8, rx, &ptr->rx_equ_dfe_8 },
		{ ICE_AQC_RX_EQU_DFE_9, rx, &ptr->rx_equ_dfe_9 },
		{ ICE_AQC_RX_EQU_DFE_10, rx, &ptr->rx_equ_dfe_10 },
		{ ICE_AQC_RX_EQU_DFE_11, rx, &ptr->rx_equ_dfe_11 },
		{ ICE_AQC_RX_EQU_DFE_12, rx, &ptr->rx_equ_dfe_12 },
	};
	int err;

	for (int i = 0; i < ARRAY_SIZE(aq_params); i++) {
		err = ice_aq_get_phy_equalization(hw, aq_params[i].data_in,
						  aq_params[i].opcode,
						  serdes_num, aq_params[i].out);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_get_extended_regs - dump serdes equalization parameters for each
 *                         serdes lane of the port
 * @netdev: pointer to net device structure
 * @p: output buffer to fill requested register dump
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_get_extended_regs(struct net_device *netdev, void *p)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_regdump_to_ethtool *ice_prv_regs_buf;
	struct ice_port_topology port_topology = {};
	struct ice_port_info *pi;
	struct ice_pf *pf;
	struct ice_hw *hw;
	unsigned int i;
	int err;

	pf = np->vsi->back;
	hw = &pf->hw;
	pi = np->vsi->port_info;

	/* Serdes parameters are not supported if not the PF VSI */
	if (np->vsi->type != ICE_VSI_PF || !pi)
		return -EINVAL;

	err = ice_get_port_topology(hw, pi->lport, &port_topology);
	if (err)
		return -EINVAL;
	if (port_topology.serdes_lane_count > 4)
		return -EINVAL;

	ice_prv_regs_buf = p;

	/* Get serdes equalization parameter for available serdes */
	for (i = 0; i < port_topology.serdes_lane_count; i++) {
		u8 serdes_num = 0;

		serdes_num = port_topology.primary_serdes_lane + i;
		err = ice_get_tx_rx_equa(hw, serdes_num,
					 &ice_prv_regs_buf->equalization[i]);
		if (err)
			return -EINVAL;
	}

	return 0;
}

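/**
 * ice_get_regs - dump registers for ethtool -d
 * @netdev: network interface device structure
 * @regs: ethtool register structure
 * @p: output buffer, sized by ice_get_regs_len()
 *
 * Reads every register in ice_regs_dump_list, then appends the extended
 * serdes equalization dump after the fixed list.
 */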
static void
ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_hw *hw = &pf->hw;
	u32 *regs_buf = (u32 *)p;
	unsigned int i;

	regs->version = 2;

	for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
		regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);

	ice_get_extended_regs(netdev, (void *)&regs_buf[i]);
}

static u32 ice_get_msglevel(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (pf->hw.debug_mask)
		netdev_info(netdev, "hw debug_mask: 0x%llX\n",
			    pf->hw.debug_mask);
#endif /* !CONFIG_DYNAMIC_DEBUG */

	return pf->msg_enable;
}

static void ice_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (ICE_DBG_USER & data)
		pf->hw.debug_mask = data;
	else
		pf->msg_enable = data;
#else
	pf->msg_enable = data;
#endif /* !CONFIG_DYNAMIC_DEBUG */
}

static void ice_get_link_ext_stats(struct net_device *netdev,
				   struct ethtool_link_ext_stats *stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	stats->link_down_events = pf->link_down_events;
}

static int ice_get_eeprom_len(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	return (int)pf->hw.flash.flash_size;
}

static int
ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
	       u8 *bytes)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int ret;
	u8 *buf;

	dev = ice_pf_to_dev(pf);

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
	netdev_dbg(netdev, "GEEPROM cmd 0x%08x, offset 0x%08x, len 0x%08x\n",
		   eeprom->cmd, eeprom->offset, eeprom->len);

	buf = kzalloc(eeprom->len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret) {
		dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %s\n",
			ret, libie_aq_str(hw->adminq.sq_last_status));
		goto out;
	}

	ret = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
				false);
	if (ret) {
		dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %s\n",
			ret, libie_aq_str(hw->adminq.sq_last_status));
		goto release;
	}

	memcpy(bytes, buf, eeprom->len);
release:
	ice_release_nvm(hw);
out:
	kfree(buf);
	return ret;
}

/**
 * ice_active_vfs - check if there are any active VFs
 * @pf: board private structure
 *
 * Returns true if an active VF is found, otherwise returns false
 */
static bool ice_active_vfs(struct ice_pf *pf)
{
	bool active = false;
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
			active = true;
			break;
		}
	}
	rcu_read_unlock();

	return active;
}

/**
 * ice_link_test - perform a link test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_link_test(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	bool link_up = false;
	int status;

	netdev_info(netdev, "link test\n");
	status = ice_get_link_status(np->vsi->port_info, &link_up);
	if (status) {
		netdev_err(netdev, "link query error, status = %d\n",
			   status);
		return 1;
	}

	if (!link_up)
		return 2;

	return 0;
}

/**
 * ice_eeprom_test - perform an EEPROM test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_eeprom_test(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	netdev_info(netdev, "EEPROM test\n");
	return !!(ice_nvm_validate_checksum(&pf->hw));
}

/**
 * ice_reg_pattern_test - test a register with bit patterns
 * @hw: pointer to the HW struct
 * @reg: reg to be tested
 * @mask: bits to be touched
 *
 * Returns 0 on success, 1 if a pattern write/read-back or the restore
 * of the original value fails.
 */
static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
{
	struct ice_pf *pf = (struct ice_pf *)hw->back;
	struct device *dev = ice_pf_to_dev(pf);
	static const u32 patterns[] = {
		0x5A5A5A5A, 0xA5A5A5A5,
		0x00000000, 0xFFFFFFFF
	};
	u32 val, orig_val;
	unsigned int i;

	orig_val = rd32(hw, reg);
	for (i = 0; i < ARRAY_SIZE(patterns); ++i) {
		u32 pattern = patterns[i] & mask;

		wr32(hw, reg, pattern);
		val = rd32(hw, reg);
		if (val == pattern)
			continue;
		dev_err(dev, "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
			__func__, reg, pattern, val);
		return 1;
	}

	wr32(hw, reg, orig_val);
	val = rd32(hw, reg);
	if (val != orig_val) {
		dev_err(dev, "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n",
			__func__, reg, orig_val, val);
		return 1;
	}

	return 0;
}

/**
 * ice_reg_test - perform a register test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_reg_test(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_hw *hw = np->vsi->port_info->hw;
	u32 int_elements = hw->func_caps.common_cap.num_msix_vectors ?
			   hw->func_caps.common_cap.num_msix_vectors - 1 : 1;
	struct ice_diag_reg_test_info {
		u32 address;
		u32 mask;
		u32 elem_num;
		u32 elem_size;
	} ice_reg_list[] = {
		{GLINT_ITR(0, 0), 0x00000fff, int_elements,
		 GLINT_ITR(0, 1) - GLINT_ITR(0, 0)},
		{GLINT_ITR(1, 0), 0x00000fff, int_elements,
		 GLINT_ITR(1, 1) - GLINT_ITR(1, 0)},
		{GLINT_ITR(2, 0), 0x00000fff, int_elements,
		 GLINT_ITR(2, 1) - GLINT_ITR(2, 0)},
		{GLINT_CTL, 0xffff0001, 1, 0}
	};
	unsigned int i;

	netdev_dbg(netdev, "Register test\n");
	for (i = 0; i < ARRAY_SIZE(ice_reg_list); ++i) {
		u32 j;

		for (j = 0; j < ice_reg_list[i].elem_num; ++j) {
			u32 mask = ice_reg_list[i].mask;
			u32 reg = ice_reg_list[i].address +
				  (j * ice_reg_list[i].elem_size);

			/* bail on failure (non-zero return) */
			if (ice_reg_pattern_test(hw, reg, mask))
				return 1;
		}
	}

	return 0;
}

/**
 * ice_lbtest_prepare_rings - configure Tx/Rx test rings
 * @vsi: pointer to the VSI structure
 *
 * Function configures rings of a VSI for loopback test without
 * enabling interrupts or informing the kernel about new queues.
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
{
	int status;

	status = ice_vsi_setup_tx_rings(vsi);
	if (status)
		goto err_setup_tx_ring;

	status = ice_vsi_setup_rx_rings(vsi);
	if (status)
		goto err_setup_rx_ring;

	status = ice_vsi_cfg_lan(vsi);
	if (status)
		goto err_setup_rx_ring;

	status = ice_vsi_start_all_rx_rings(vsi);
	if (status)
		goto err_start_rx_ring;

	return 0;

err_start_rx_ring:
	ice_vsi_free_rx_rings(vsi);
err_setup_rx_ring:
	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
err_setup_tx_ring:
	ice_vsi_free_tx_rings(vsi);

	return status;
}

/**
 * ice_lbtest_disable_rings - disable Tx/Rx test rings after loopback test
 * @vsi: pointer to the VSI structure
 *
 * Function stops and frees VSI rings after a loopback test.
 * Returns 0 on success, negative on failure.
 */
static int ice_lbtest_disable_rings(struct ice_vsi *vsi)
{
	int status;

	status = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
	if (status)
		netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
			   vsi->vsi_num, status);

	status = ice_vsi_stop_all_rx_rings(vsi);
	if (status)
		netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
			   vsi->vsi_num, status);

	ice_vsi_free_tx_rings(vsi);
	ice_vsi_free_rx_rings(vsi);

	return status;
}

/**
 * ice_lbtest_create_frame - create test packet
 * @pf: pointer to the PF structure
 * @ret_data: allocated frame buffer
 * @size: size of the packet data
 *
 * Function allocates a frame with a test pattern on specific offsets.
 * Returns 0 on success, non-zero on failure.
 */
static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
{
	u8 *data;

	if (!pf)
		return -EINVAL;

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Since the ethernet test frame should always be at least
	 * 64 bytes long, fill some octets in the payload with test data.
	 */
	memset(data, 0xFF, size);
	data[32] = 0xDE;
	data[42] = 0xAD;
	data[44] = 0xBE;
	data[46] = 0xEF;

	*ret_data = data;

	return 0;
}
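
/* The resulting 64-byte test frame is 0xFF everywhere except the four
 * marker bytes (offsets 32, 42, 44 and 46 spell DE-AD-BE-EF), which is
 * exactly what ice_lbtest_check_frame() below looks for, along with an
 * untouched 0xFF at offset 48.
 */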

/**
 * ice_lbtest_check_frame - verify received loopback frame
 * @frame: pointer to the raw packet data
 *
 * Function verifies received test frame with a pattern.
 * Returns true if frame matches the pattern, false otherwise.
 */
static bool ice_lbtest_check_frame(u8 *frame)
{
	/* Validate bytes of a frame under offsets chosen earlier */
	if (frame[32] == 0xDE &&
	    frame[42] == 0xAD &&
	    frame[44] == 0xBE &&
	    frame[46] == 0xEF &&
	    frame[48] == 0xFF)
		return true;

	return false;
}

/**
 * ice_diag_send - send test frames to the test ring
 * @tx_ring: pointer to the transmit ring
 * @data: pointer to the raw packet data
 * @size: size of the packet to send
 *
 * Function sends loopback packets on a test Tx ring.
 */
static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size)
{
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;
	u64 td_cmd;

	tx_desc = ICE_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_buf = &tx_ring->tx_buf[tx_ring->next_to_use];

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		return -EINVAL;

	tx_desc->buf_addr = cpu_to_le64(dma);

	/* These flags are required for a descriptor to be pushed out */
	td_cmd = (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
	tx_desc->cmd_type_offset_bsz =
		cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			    (td_cmd << ICE_TXD_QW1_CMD_S) |
			    ((u64)0 << ICE_TXD_QW1_OFFSET_S) |
			    ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			    ((u64)0 << ICE_TXD_QW1_L2TAG1_S));

	tx_buf->next_to_watch = tx_desc;

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	tx_ring->next_to_use++;
	if (tx_ring->next_to_use >= tx_ring->count)
		tx_ring->next_to_use = 0;

	writel_relaxed(tx_ring->next_to_use, tx_ring->tail);

	/* Wait until the packets get transmitted to the receive queue. */
	usleep_range(1000, 2000);
	dma_unmap_single(tx_ring->dev, dma, size, DMA_TO_DEVICE);

	return 0;
}

#define ICE_LB_FRAME_SIZE 64
/**
 * ice_lbtest_receive_frames - receive and verify test frames
 * @rx_ring: pointer to the receive ring
 *
 * Function receives loopback packets and verifies their correctness.
 * Returns number of received valid frames.
 */
static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
{
	struct libeth_fqe *rx_buf;
	int valid_frames, i;
	struct page *page;
	u8 *received_buf;

	valid_frames = 0;

	for (i = 0; i < rx_ring->count; i++) {
		union ice_32b_rx_flex_desc *rx_desc;

		rx_desc = ICE_RX_DESC(rx_ring, i);

		if (!(rx_desc->wb.status_error0 &
		      (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
		       cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
			continue;

		rx_buf = &rx_ring->rx_fqes[i];
		page = __netmem_to_page(rx_buf->netmem);
		received_buf = page_address(page) + rx_buf->offset +
			       page->pp->p.offset;

		if (ice_lbtest_check_frame(received_buf))
			valid_frames++;
	}

	return valid_frames;
}

/**
 * ice_loopback_test - perform a loopback test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_loopback_test(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *test_vsi;
	u8 *tx_frame __free(kfree) = NULL;
	u8 broadcast[ETH_ALEN], ret = 0;
	int num_frames, valid_frames;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	int i;

	netdev_info(netdev, "loopback test\n");

	test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
	if (!test_vsi) {
		netdev_err(netdev, "Failed to create a VSI for the loopback test\n");
		return 1;
	}

	test_vsi->netdev = netdev;
	tx_ring = test_vsi->tx_rings[0];
	rx_ring = test_vsi->rx_rings[0];

	if (ice_lbtest_prepare_rings(test_vsi)) {
		ret = 2;
		goto lbtest_vsi_close;
	}

	if (ice_alloc_rx_bufs(rx_ring, rx_ring->count)) {
		ret = 3;
		goto lbtest_rings_dis;
	}

	/* Enable MAC loopback in firmware */
	if (ice_aq_set_mac_loopback(&pf->hw, true, NULL)) {
		ret = 4;
		goto lbtest_mac_dis;
	}

	/* Test VSI needs to receive broadcast packets */
	eth_broadcast_addr(broadcast);
	if (ice_fltr_add_mac(test_vsi, broadcast, ICE_FWD_TO_VSI)) {
		ret = 5;
		goto lbtest_mac_dis;
	}

	if (ice_lbtest_create_frame(pf, &tx_frame, ICE_LB_FRAME_SIZE)) {
		ret = 7;
		goto remove_mac_filters;
	}

	num_frames = min_t(int, tx_ring->count, 32);
	for (i = 0; i < num_frames; i++) {
		if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) {
			ret = 8;
			goto remove_mac_filters;
		}
	}

	valid_frames = ice_lbtest_receive_frames(rx_ring);
	if (!valid_frames)
		ret = 9;
	else if (valid_frames != num_frames)
		ret = 10;

remove_mac_filters:
	if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI))
		netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
lbtest_mac_dis:
	/* Disable MAC loopback after the test is completed. */
	if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
		netdev_err(netdev, "Could not disable MAC loopback\n");
lbtest_rings_dis:
	if (ice_lbtest_disable_rings(test_vsi))
		netdev_err(netdev, "Could not disable test rings\n");
lbtest_vsi_close:
	test_vsi->netdev = NULL;
	if (ice_vsi_release(test_vsi))
		netdev_err(netdev, "Failed to remove the test VSI\n");

	return ret;
}

/**
 * ice_intr_test - perform an interrupt test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_intr_test(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u16 swic_old = pf->sw_int_count;

	netdev_info(netdev, "interrupt test\n");

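	/* Trigger a software interrupt on the MSI-X vector that owns the
	 * other-cause register; servicing it bumps pf->sw_int_count, so an
	 * unchanged counter after the sleep below means the test failed.
	 */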
	wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_irq.index),
	     GLINT_DYN_CTL_SW_ITR_INDX_M |
	     GLINT_DYN_CTL_INTENA_MSK_M |
	     GLINT_DYN_CTL_SWINT_TRIG_M);

	usleep_range(1000, 2000);
	return (swic_old == pf->sw_int_count);
}

/**
 * ice_self_test - handler function for performing a self-test by ethtool
 * @netdev: network interface device structure
 * @eth_test: ethtool_test structure
 * @data: required by ethtool.self_test
 *
 * This function is called after invoking 'ethtool -t devname' command where
 * devname is the name of the network device on which ethtool should operate.
 * It performs a set of self-tests to check if a device works properly.
 */
static void
ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
	      u64 *data)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	bool if_running = netif_running(netdev);
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		netdev_info(netdev, "offline testing starting\n");

		set_bit(ICE_TESTING, pf->state);

		if (ice_active_vfs(pf)) {
			dev_warn(dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
			data[ICE_ETH_TEST_REG] = 1;
			data[ICE_ETH_TEST_EEPROM] = 1;
			data[ICE_ETH_TEST_INTR] = 1;
			data[ICE_ETH_TEST_LOOP] = 1;
			data[ICE_ETH_TEST_LINK] = 1;
			eth_test->flags |= ETH_TEST_FL_FAILED;
			clear_bit(ICE_TESTING, pf->state);
			goto skip_ol_tests;
		}
		/* If the device is online then take it offline */
		if (if_running)
			/* indicate we're in test mode */
			ice_stop(netdev);

		data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
		data[ICE_ETH_TEST_EEPROM] = ice_eeprom_test(netdev);
		data[ICE_ETH_TEST_INTR] = ice_intr_test(netdev);
		data[ICE_ETH_TEST_LOOP] = ice_loopback_test(netdev);
		data[ICE_ETH_TEST_REG] = ice_reg_test(netdev);

		if (data[ICE_ETH_TEST_LINK] ||
		    data[ICE_ETH_TEST_EEPROM] ||
		    data[ICE_ETH_TEST_LOOP] ||
		    data[ICE_ETH_TEST_INTR] ||
		    data[ICE_ETH_TEST_REG])
			eth_test->flags |= ETH_TEST_FL_FAILED;

		clear_bit(ICE_TESTING, pf->state);

		if (if_running) {
			int status = ice_open(netdev);

			if (status) {
				dev_err(dev, "Could not open device %s, err %d\n",
					pf->int_name, status);
			}
		}
	} else {
		/* Online tests */
		netdev_info(netdev, "online testing starting\n");

		data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
		if (data[ICE_ETH_TEST_LINK])
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline only tests, not run in online; pass by default */
		data[ICE_ETH_TEST_REG] = 0;
		data[ICE_ETH_TEST_EEPROM] = 0;
		data[ICE_ETH_TEST_INTR] = 0;
		data[ICE_ETH_TEST_LOOP] = 0;
	}

skip_ol_tests:
	netdev_info(netdev, "testing finished\n");
}

static void
__ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
		  struct ice_vsi *vsi)
{
	unsigned int i;
	u8 *p = data;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ICE_VSI_STATS_LEN; i++)
			ethtool_puts(&p, ice_gstrings_vsi_stats[i].stat_string);

		if (ice_is_port_repr_netdev(netdev))
			return;

		ice_for_each_alloc_txq(vsi, i) {
			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
		}

		ice_for_each_alloc_rxq(vsi, i) {
			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
		}

		if (vsi->type != ICE_VSI_PF)
			return;

		for (i = 0; i < ICE_PF_STATS_LEN; i++)
			ethtool_puts(&p, ice_gstrings_pf_stats[i].stat_string);

		for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
			ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i);
			ethtool_sprintf(&p, "tx_priority_%u_xoff.nic", i);
		}
		for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
			ethtool_sprintf(&p, "rx_priority_%u_xon.nic", i);
			ethtool_sprintf(&p, "rx_priority_%u_xoff.nic", i);
		}
		break;
	case ETH_SS_TEST:
		memcpy(data, ice_gstrings_test, ICE_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++)
			ethtool_puts(&p, ice_gstrings_priv_flags[i].name);
		break;
	default:
		break;
	}
}

static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	__ice_get_strings(netdev, stringset, data, np->vsi);
}

static int
ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	bool led_active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		led_active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		led_active = false;
		break;
	default:
		return -EINVAL;
	}

	if (ice_aq_set_port_id_led(np->vsi->port_info, !led_active, NULL))
		return -EIO;

	return 0;
}

/**
 * ice_set_fec_cfg - Set link FEC options
 * @netdev: network interface device structure
 * @req_fec: FEC mode to configure
 */
static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_aqc_set_phy_cfg_data config = { 0 };
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;

	pi = vsi->port_info;
	if (!pi)
		return -EOPNOTSUPP;

	/* Changing the FEC parameters is not supported if not the PF VSI */
	if (vsi->type != ICE_VSI_PF) {
		netdev_info(netdev, "Changing FEC parameters only supported for PF VSI\n");
		return -EOPNOTSUPP;
	}

	/* Proceed only if requesting different FEC mode */
	if (pi->phy.curr_user_fec_req == req_fec)
		return 0;

	/* Copy the current user PHY configuration. The current user PHY
	 * configuration is initialized during probe from PHY capabilities
	 * software mode, and updated on set PHY configuration.
	 */
	memcpy(&config, &pi->phy.curr_user_phy_cfg, sizeof(config));

	ice_cfg_phy_fec(pi, &config, req_fec);
	config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (ice_aq_set_phy_cfg(pi->hw, pi, &config, NULL))
		return -EAGAIN;

	/* Save requested FEC config */
	pi->phy.curr_user_fec_req = req_fec;

	return 0;
}

/**
 * ice_set_fecparam - Set FEC link options
 * @netdev: network interface device structure
 * @fecparam: Ethtool structure to retrieve FEC parameters
 */
static int
ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	enum ice_fec_mode fec;

	switch (fecparam->fec) {
	case ETHTOOL_FEC_AUTO:
		fec = ICE_FEC_AUTO;
		break;
	case ETHTOOL_FEC_RS:
		fec = ICE_FEC_RS;
		break;
	case ETHTOOL_FEC_BASER:
		fec = ICE_FEC_BASER;
		break;
	case ETHTOOL_FEC_OFF:
	case ETHTOOL_FEC_NONE:
		fec = ICE_FEC_NONE;
		break;
	default:
		dev_warn(ice_pf_to_dev(vsi->back), "Unsupported FEC mode: %d\n",
			 fecparam->fec);
		return -EINVAL;
	}

	return ice_set_fec_cfg(netdev, fec);
}

/**
 * ice_get_fecparam - Get link FEC options
 * @netdev: network interface device structure
 * @fecparam: Ethtool structure to retrieve FEC parameters
 */
static int
ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_aqc_get_phy_caps_data *caps;
	struct ice_link_status *link_info;
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;
	int err;

	pi = vsi->port_info;

	if (!pi)
		return -EOPNOTSUPP;
	link_info = &pi->phy.link_info;

	/* Set FEC mode based on negotiated link info */
	switch (link_info->fec_info) {
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fecparam->active_fec = ETHTOOL_FEC_BASER;
		break;
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fecparam->active_fec = ETHTOOL_FEC_RS;
		break;
	default:
		fecparam->active_fec = ETHTOOL_FEC_OFF;
		break;
	}

	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps)
		return -ENOMEM;

	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
				  caps, NULL);
	if (err)
		goto done;

	/* Set supported/configured FEC modes based on PHY capability */
	if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC)
		fecparam->fec |= ETHTOOL_FEC_AUTO;
	if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fecparam->fec |= ETHTOOL_FEC_BASER;
	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
		fecparam->fec |= ETHTOOL_FEC_RS;
	if (caps->link_fec_options == 0)
		fecparam->fec |= ETHTOOL_FEC_OFF;

done:
	kfree(caps);
	return err;
}

/**
 * ice_nway_reset - restart autonegotiation
 * @netdev: network interface device structure
 */
static int ice_nway_reset(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int err;

	/* If VSI state is up, then restart autoneg with link up */
	if (!test_bit(ICE_DOWN, vsi->back->state))
		err = ice_set_link(vsi, true);
	else
		err = ice_set_link(vsi, false);

	return err;
}

/**
 * ice_get_priv_flags - report device private flags
 * @netdev: network interface device structure
 *
 * The string set count and the strings reported for ETH_SS_PRIV_FLAGS
 * must match the flags returned here; add a string to the
 * ice_gstrings_priv_flags array for each new flag.
 *
 * Returns a u32 bitmap of flags.
 */
ice_get_priv_flags(struct net_device * netdev)1713 static u32 ice_get_priv_flags(struct net_device *netdev)
1714 {
1715 struct ice_pf *pf = ice_netdev_to_pf(netdev);
1716 u32 i, ret_flags = 0;
1717
1718 for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
1719 const struct ice_priv_flag *priv_flag;
1720
1721 priv_flag = &ice_gstrings_priv_flags[i];
1722
1723 if (test_bit(priv_flag->bitno, pf->flags))
1724 ret_flags |= BIT(i);
1725 }
1726
1727 return ret_flags;
1728 }
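
/* A hedged user-space sketch of the other end of this bitmap: reading the
 * private flags with the legacy ETHTOOL_GPFLAGS ioctl. Bit i of the returned
 * word corresponds to string i of the ETH_SS_PRIV_FLAGS string set, exactly
 * as built by the loop above. Assumes the example_ethtool_ioctl() helper
 * from the earlier sketch; compiled out.
 */
#if 0
static void example_show_priv_flags(const char *ifname)
{
	struct ethtool_value eval = { .cmd = ETHTOOL_GPFLAGS };

	if (!example_ethtool_ioctl(ifname, &eval))
		printf("priv flags bitmap: 0x%08x\n", eval.data);
}
#endif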
1729
1730 /**
1731 * ice_set_priv_flags - set private flags
1732 * @netdev: network interface device structure
1733 * @flags: bit flags to be set
1734 */
1735 static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
1736 {
1737 struct ice_netdev_priv *np = netdev_priv(netdev);
1738 DECLARE_BITMAP(change_flags, ICE_PF_FLAGS_NBITS);
1739 DECLARE_BITMAP(orig_flags, ICE_PF_FLAGS_NBITS);
1740 struct ice_vsi *vsi = np->vsi;
1741 struct ice_pf *pf = vsi->back;
1742 struct device *dev;
1743 int ret = 0;
1744 u32 i;
1745
1746 if (flags >= BIT(ICE_PRIV_FLAG_ARRAY_SIZE))
1747 return -EINVAL;
1748
1749 dev = ice_pf_to_dev(pf);
1750 set_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
1751
1752 bitmap_copy(orig_flags, pf->flags, ICE_PF_FLAGS_NBITS);
1753 for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
1754 const struct ice_priv_flag *priv_flag;
1755
1756 priv_flag = &ice_gstrings_priv_flags[i];
1757
1758 if (flags & BIT(i))
1759 set_bit(priv_flag->bitno, pf->flags);
1760 else
1761 clear_bit(priv_flag->bitno, pf->flags);
1762 }
1763
1764 bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);
1765
1766 /* Do not allow change to link-down-on-close when Total Port Shutdown
1767 * is enabled.
1768 */
1769 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, change_flags) &&
1770 test_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) {
1771 dev_err(dev, "Setting link-down-on-close not supported on this port\n");
1772 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1773 ret = -EINVAL;
1774 goto ethtool_exit;
1775 }
1776
1777 if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) {
1778 if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) {
1779 int status;
1780
1781 /* Disable FW LLDP engine */
1782 status = ice_cfg_lldp_mib_change(&pf->hw, false);
1783
1784 /* If unregistering for LLDP events fails, this is
1785 * not an error state, as there shouldn't be any
1786 * events to respond to.
1787 */
1788 if (status)
1789 dev_info(dev, "Failed to unreg for LLDP events\n");
1790
1791 /* The AQ call to stop the FW LLDP agent will generate
1792 * an error if the agent is already stopped.
1793 */
1794 status = ice_aq_stop_lldp(&pf->hw, true, true, NULL);
1795 if (status)
1796 dev_warn(dev, "Fail to stop LLDP agent\n");
1797 /* Use case for having the FW LLDP agent stopped
1798 * will likely not need DCB, so failure to init is
1799 * not a concern of ethtool
1800 */
1801 status = ice_init_pf_dcb(pf, true);
1802 if (status)
1803 dev_warn(dev, "Fail to init DCB\n");
1804
1805 pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED;
1806 pf->dcbx_cap |= DCB_CAP_DCBX_HOST;
1807 } else {
1808 bool dcbx_agent_status;
1809 int status;
1810
1811 if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) {
1812 clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
1813 dev_err(dev, "QoS in L3 DSCP mode, FW Agent not allowed to start\n");
1814 ret = -EOPNOTSUPP;
1815 goto ethtool_exit;
1816 }
1817
1818 /* Remove rule to direct LLDP packets to default VSI.
1819 * The FW LLDP engine will now be consuming them.
1820 */
1821 ice_cfg_sw_rx_lldp(vsi->back, false);
1822
1823 /* AQ command to start FW LLDP agent will return an
1824 * error if the agent is already started
1825 */
1826 status = ice_aq_start_lldp(&pf->hw, true, NULL);
1827 if (status)
1828 dev_warn(dev, "Fail to start LLDP Agent\n");
1829
1830 /* AQ command to start FW DCBX agent will fail if
1831 * the agent is already started
1832 */
1833 status = ice_aq_start_stop_dcbx(&pf->hw, true,
1834 &dcbx_agent_status,
1835 NULL);
1836 if (status)
1837 dev_dbg(dev, "Failed to start FW DCBX\n");
1838
1839 dev_info(dev, "FW DCBX agent is %s\n",
1840 dcbx_agent_status ? "ACTIVE" : "DISABLED");
1841
1842 /* Failure to configure MIB change or init DCB is not
1843 * relevant to ethtool. Print notification that
1844 * registration/init failed but do not return error
1845 * state to ethtool
1846 */
1847 status = ice_init_pf_dcb(pf, true);
1848 if (status)
1849 dev_dbg(dev, "Fail to init DCB\n");
1850
1851 /* Register for MIB change events */
1852 status = ice_cfg_lldp_mib_change(&pf->hw, true);
1853 if (status)
1854 dev_dbg(dev, "Fail to enable MIB change events\n");
1855
1856 pf->dcbx_cap &= ~DCB_CAP_DCBX_HOST;
1857 pf->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
1858
1859 ice_nway_reset(netdev);
1860 }
1861 }
1862 /* don't allow modification of this flag when any VF is in
1863 * unicast promiscuous mode because it's not supported
1864 */
1865 if (test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, change_flags) &&
1866 ice_is_any_vf_in_unicast_promisc(pf)) {
1867 dev_err(dev, "Changing vf-true-promisc-support flag while VF(s) are in promiscuous mode not supported\n");
1868 /* toggle bit back to previous state */
1869 change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
1870 ret = -EAGAIN;
1871 }
1872
1873 if (test_bit(ICE_FLAG_VF_VLAN_PRUNING, change_flags) &&
1874 ice_has_vfs(pf)) {
1875 dev_err(dev, "vf-vlan-pruning: VLAN pruning cannot be changed while VFs are active.\n");
1876 /* toggle bit back to previous state */
1877 change_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags);
1878 ret = -EOPNOTSUPP;
1879 }
1880 ethtool_exit:
1881 clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
1882 return ret;
1883 }
1884
1885 static int ice_get_sset_count(struct net_device *netdev, int sset)
1886 {
1887 switch (sset) {
1888 case ETH_SS_STATS:
1889 /* The number (and order) of strings reported *must* remain
1890 * constant for a given netdevice. This function must not
1891 * report a different number based on run time parameters
1892 * (such as the number of queues in use, or the setting of
1893 * a private ethtool flag). This is due to the nature of the
1894 * ethtool stats API.
1895 *
1896 * Userspace programs such as ethtool must make 3 separate
1897 * ioctl requests, one for size, one for the strings, and
1898 * finally one for the stats. Since these cross into
1899 * userspace, changes to the number or size could result in
1900 * undefined memory access or incorrect string<->value
1901 * correlations for statistics.
1902 *
1903 * Even if it appears to be safe, changes to the size or
1904 * order of strings will suffer from race conditions and are
1905 * not safe.
1906 */
1907 return ICE_ALL_STATS_LEN(netdev);
1908 case ETH_SS_TEST:
1909 return ICE_TEST_LEN;
1910 case ETH_SS_PRIV_FLAGS:
1911 return ICE_PRIV_FLAG_ARRAY_SIZE;
1912 default:
1913 return -EOPNOTSUPP;
1914 }
1915 }
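
/* The three-ioctl sequence described in the comment above, sketched from
 * user space: size (ETHTOOL_GSSET_INFO), strings (ETHTOOL_GSTRINGS), then
 * values (ETHTOOL_GSTATS). Assumes the example_ethtool_ioctl() helper from
 * the earlier sketch; error unwinding is abbreviated. Compiled out.
 */
#if 0
#include <stdlib.h>

static void example_dump_stats(const char *ifname)
{
	struct {
		struct ethtool_sset_info hdr;
		__u32 count;	/* lands in hdr.data[0] */
	} sset = { .hdr = { .cmd = ETHTOOL_GSSET_INFO,
			    .sset_mask = 1ULL << ETH_SS_STATS } };
	struct ethtool_gstrings *strs;
	struct ethtool_stats *stats;
	__u32 i, n;

	if (example_ethtool_ioctl(ifname, &sset))	/* 1: how many? */
		return;
	n = sset.hdr.data[0];

	strs = calloc(1, sizeof(*strs) + n * ETH_GSTRING_LEN);
	stats = calloc(1, sizeof(*stats) + n * sizeof(__u64));
	if (!strs || !stats)
		return;

	strs->cmd = ETHTOOL_GSTRINGS;
	strs->string_set = ETH_SS_STATS;
	strs->len = n;
	if (example_ethtool_ioctl(ifname, strs))	/* 2: the names */
		return;

	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n;
	if (example_ethtool_ioctl(ifname, stats))	/* 3: the values */
		return;

	for (i = 0; i < n; i++)
		printf("%-32.32s %llu\n",
		       (char *)strs->data + i * ETH_GSTRING_LEN,
		       (unsigned long long)stats->data[i]);
	free(strs);
	free(stats);
}
#endif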
1916
1917 static void
1918 __ice_get_ethtool_stats(struct net_device *netdev,
1919 struct ethtool_stats __always_unused *stats, u64 *data,
1920 struct ice_vsi *vsi)
1921 {
1922 struct ice_pf *pf = vsi->back;
1923 struct ice_tx_ring *tx_ring;
1924 struct ice_rx_ring *rx_ring;
1925 unsigned int j;
1926 int i = 0;
1927 char *p;
1928
1929 ice_update_pf_stats(pf);
1930 ice_update_vsi_stats(vsi);
1931
1932 for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
1933 p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
1934 data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
1935 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1936 }
1937
1938 if (ice_is_port_repr_netdev(netdev))
1939 return;
1940
1941 /* populate per queue stats */
1942 rcu_read_lock();
1943
1944 ice_for_each_alloc_txq(vsi, j) {
1945 tx_ring = READ_ONCE(vsi->tx_rings[j]);
1946 if (tx_ring && tx_ring->ring_stats) {
1947 data[i++] = tx_ring->ring_stats->stats.pkts;
1948 data[i++] = tx_ring->ring_stats->stats.bytes;
1949 } else {
1950 data[i++] = 0;
1951 data[i++] = 0;
1952 }
1953 }
1954
1955 ice_for_each_alloc_rxq(vsi, j) {
1956 rx_ring = READ_ONCE(vsi->rx_rings[j]);
1957 if (rx_ring && rx_ring->ring_stats) {
1958 data[i++] = rx_ring->ring_stats->stats.pkts;
1959 data[i++] = rx_ring->ring_stats->stats.bytes;
1960 } else {
1961 data[i++] = 0;
1962 data[i++] = 0;
1963 }
1964 }
1965
1966 rcu_read_unlock();
1967
1968 if (vsi->type != ICE_VSI_PF)
1969 return;
1970
1971 for (j = 0; j < ICE_PF_STATS_LEN; j++) {
1972 p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset;
1973 data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
1974 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1975 }
1976
1977 for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
1978 data[i++] = pf->stats.priority_xon_tx[j];
1979 data[i++] = pf->stats.priority_xoff_tx[j];
1980 }
1981
1982 for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
1983 data[i++] = pf->stats.priority_xon_rx[j];
1984 data[i++] = pf->stats.priority_xoff_rx[j];
1985 }
1986 }
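
/* The stat tables walked above pair each name with a byte offset and a field
 * size, so a single loop can pull mixed u32/u64 counters out of any
 * structure. A minimal standalone sketch of that width-dispatch pattern
 * (the demo_* names are hypothetical); compiled out.
 */
#if 0
struct demo_counters {
	u64 rx_pkts;
	u32 tx_drops;
};

static const struct {
	const char *name;
	int size;
	int offset;
} demo_stats[] = {
	{ "rx_pkts", sizeof_field(struct demo_counters, rx_pkts),
	  offsetof(struct demo_counters, rx_pkts) },
	{ "tx_drops", sizeof_field(struct demo_counters, tx_drops),
	  offsetof(struct demo_counters, tx_drops) },
};

static void demo_fill(const struct demo_counters *c, u64 *data)
{
	unsigned int j;

	for (j = 0; j < ARRAY_SIZE(demo_stats); j++) {
		const char *p = (const char *)c + demo_stats[j].offset;

		/* widen u32 counters; read u64 counters directly */
		data[j] = demo_stats[j].size == sizeof(u64) ?
			  *(const u64 *)p : *(const u32 *)p;
	}
}
#endif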
1987
1988 static void
1989 ice_get_ethtool_stats(struct net_device *netdev,
1990 struct ethtool_stats __always_unused *stats, u64 *data)
1991 {
1992 struct ice_netdev_priv *np = netdev_priv(netdev);
1993
1994 __ice_get_ethtool_stats(netdev, stats, data, np->vsi);
1995 }
1996
1997 #define ICE_PHY_TYPE_LOW_MASK_MIN_1G (ICE_PHY_TYPE_LOW_100BASE_TX | \
1998 ICE_PHY_TYPE_LOW_100M_SGMII)
1999
2000 #define ICE_PHY_TYPE_LOW_MASK_MIN_25G (ICE_PHY_TYPE_LOW_MASK_MIN_1G | \
2001 ICE_PHY_TYPE_LOW_1000BASE_T | \
2002 ICE_PHY_TYPE_LOW_1000BASE_SX | \
2003 ICE_PHY_TYPE_LOW_1000BASE_LX | \
2004 ICE_PHY_TYPE_LOW_1000BASE_KX | \
2005 ICE_PHY_TYPE_LOW_1G_SGMII | \
2006 ICE_PHY_TYPE_LOW_2500BASE_T | \
2007 ICE_PHY_TYPE_LOW_2500BASE_X | \
2008 ICE_PHY_TYPE_LOW_2500BASE_KX | \
2009 ICE_PHY_TYPE_LOW_5GBASE_T | \
2010 ICE_PHY_TYPE_LOW_5GBASE_KR | \
2011 ICE_PHY_TYPE_LOW_10GBASE_T | \
2012 ICE_PHY_TYPE_LOW_10G_SFI_DA | \
2013 ICE_PHY_TYPE_LOW_10GBASE_SR | \
2014 ICE_PHY_TYPE_LOW_10GBASE_LR | \
2015 ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \
2016 ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \
2017 ICE_PHY_TYPE_LOW_10G_SFI_C2C)
2018
2019 #define ICE_PHY_TYPE_LOW_MASK_100G (ICE_PHY_TYPE_LOW_100GBASE_CR4 | \
2020 ICE_PHY_TYPE_LOW_100GBASE_SR4 | \
2021 ICE_PHY_TYPE_LOW_100GBASE_LR4 | \
2022 ICE_PHY_TYPE_LOW_100GBASE_KR4 | \
2023 ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \
2024 ICE_PHY_TYPE_LOW_100G_CAUI4 | \
2025 ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \
2026 ICE_PHY_TYPE_LOW_100G_AUI4 | \
2027 ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \
2028 ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \
2029 ICE_PHY_TYPE_LOW_100GBASE_CP2 | \
2030 ICE_PHY_TYPE_LOW_100GBASE_SR2 | \
2031 ICE_PHY_TYPE_LOW_100GBASE_DR)
2032
2033 #define ICE_PHY_TYPE_HIGH_MASK_100G (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \
2034 ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |\
2035 ICE_PHY_TYPE_HIGH_100G_CAUI2 | \
2036 ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \
2037 ICE_PHY_TYPE_HIGH_100G_AUI2)
2038
2039 #define ICE_PHY_TYPE_HIGH_MASK_200G (ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 | \
2040 ICE_PHY_TYPE_HIGH_200G_SR4 | \
2041 ICE_PHY_TYPE_HIGH_200G_FR4 | \
2042 ICE_PHY_TYPE_HIGH_200G_LR4 | \
2043 ICE_PHY_TYPE_HIGH_200G_DR4 | \
2044 ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 | \
2045 ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC | \
2046 ICE_PHY_TYPE_HIGH_200G_AUI4)
2047
2048 /**
2049 * ice_mask_min_supported_speeds - mask out speeds below the minimum supported
2050 * @hw: pointer to the HW structure
2051 * @phy_types_high: PHY type high
2052 * @phy_types_low: PHY type low to apply minimum supported speeds mask
2053 *
2054 * Apply minimum supported speeds mask to PHY type low. These are the speeds
2055 * for ethtool supported link mode.
2056 */
2057 static void
2058 ice_mask_min_supported_speeds(struct ice_hw *hw,
2059 u64 phy_types_high, u64 *phy_types_low)
2060 {
2061 /* if QSFP connection with 100G speed, minimum supported speed is 25G */
2062 if ((*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G) ||
2063 (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G) ||
2064 (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_200G))
2065 *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G;
2066 else if (!ice_is_100m_speed_supported(hw))
2067 *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G;
2068 }
2069
2070 /**
2071 * ice_linkmode_set_bit - set link mode bit
2072 * @phy_to_ethtool: PHY type to ethtool link mode struct to set
2073 * @ks: ethtool link ksettings struct to fill out
2074 * @req_speeds: speed requested by user
2075 * @advert_phy_type: advertised PHY type
2076 * @phy_type: PHY type
2077 */
2078 static void
2079 ice_linkmode_set_bit(const struct ice_phy_type_to_ethtool *phy_to_ethtool,
2080 struct ethtool_link_ksettings *ks, u32 req_speeds,
2081 u64 advert_phy_type, u32 phy_type)
2082 {
2083 linkmode_set_bit(phy_to_ethtool->link_mode, ks->link_modes.supported);
2084
2085 if (req_speeds & phy_to_ethtool->aq_link_speed ||
2086 (!req_speeds && advert_phy_type & BIT(phy_type)))
2087 linkmode_set_bit(phy_to_ethtool->link_mode,
2088 ks->link_modes.advertising);
2089 }
2090
2091 /**
2092 * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes
2093 * @netdev: network interface device structure
2094 * @ks: ethtool link ksettings struct to fill out
2095 */
2096 static void
2097 ice_phy_type_to_ethtool(struct net_device *netdev,
2098 struct ethtool_link_ksettings *ks)
2099 {
2100 struct ice_netdev_priv *np = netdev_priv(netdev);
2101 struct ice_vsi *vsi = np->vsi;
2102 struct ice_pf *pf = vsi->back;
2103 u64 advert_phy_type_lo = 0;
2104 u64 advert_phy_type_hi = 0;
2105 u64 phy_types_high = 0;
2106 u64 phy_types_low = 0;
2107 u32 req_speeds;
2108 u32 i;
2109
2110 req_speeds = vsi->port_info->phy.link_info.req_speeds;
2111
2112 /* Check if lenient mode is supported and enabled, or in strict mode.
2113 *
2114 * In lenient mode the Supported link modes are the PHY types without
2115 * media. The Advertising link mode is either 1. the user requested
2116 * speed, 2. the override PHY mask, or 3. the PHY types with media.
2117 *
2118 * In strict mode, Supported link modes are the PHY types with media,
2119 * and Advertising link modes are the media PHY types or the speed
2120 * requested by the user.
2121 */
2122 if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) {
2123 phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo);
2124 phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi);
2125
2126 ice_mask_min_supported_speeds(&pf->hw, phy_types_high,
2127 &phy_types_low);
2128 /* determine advertised modes based on link override only
2129 * if it's supported and if the FW doesn't abstract the
2130 * driver from having to account for link overrides
2131 */
2132 if (ice_fw_supports_link_override(&pf->hw) &&
2133 !ice_fw_supports_report_dflt_cfg(&pf->hw)) {
2134 struct ice_link_default_override_tlv *ldo;
2135
2136 ldo = &pf->link_dflt_override;
2137 /* If override enabled and PHY mask set, then
2138 * Advertising link mode is the intersection of the PHY
2139 * types without media and the override PHY mask.
2140 */
2141 if (ldo->options & ICE_LINK_OVERRIDE_EN &&
2142 (ldo->phy_type_low || ldo->phy_type_high)) {
2143 advert_phy_type_lo =
2144 le64_to_cpu(pf->nvm_phy_type_lo) &
2145 ldo->phy_type_low;
2146 advert_phy_type_hi =
2147 le64_to_cpu(pf->nvm_phy_type_hi) &
2148 ldo->phy_type_high;
2149 }
2150 }
2151 } else {
2152 /* strict mode */
2153 phy_types_low = vsi->port_info->phy.phy_type_low;
2154 phy_types_high = vsi->port_info->phy.phy_type_high;
2155 }
2156
2157 /* If Advertising link mode PHY type is not using override PHY type,
2158 * then use PHY type with media.
2159 */
2160 if (!advert_phy_type_lo && !advert_phy_type_hi) {
2161 advert_phy_type_lo = vsi->port_info->phy.phy_type_low;
2162 advert_phy_type_hi = vsi->port_info->phy.phy_type_high;
2163 }
2164
2165 linkmode_zero(ks->link_modes.supported);
2166 linkmode_zero(ks->link_modes.advertising);
2167
2168 for (i = 0; i < ARRAY_SIZE(phy_type_low_lkup); i++) {
2169 if (phy_types_low & BIT_ULL(i))
2170 ice_linkmode_set_bit(&phy_type_low_lkup[i], ks,
2171 req_speeds, advert_phy_type_lo,
2172 i);
2173 }
2174
2175 for (i = 0; i < ARRAY_SIZE(phy_type_high_lkup); i++) {
2176 if (phy_types_high & BIT_ULL(i))
2177 ice_linkmode_set_bit(&phy_type_high_lkup[i], ks,
2178 req_speeds, advert_phy_type_hi,
2179 i);
2180 }
2181 }
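
/* The lookup tables indexed above map a PHY type bit position to an ethtool
 * link mode plus its AQ speed bit, which is all ice_linkmode_set_bit()
 * needs. A hypothetical two-entry illustration of that shape (the real
 * tables and the exact field widths of struct ice_phy_type_to_ethtool live
 * elsewhere in the driver); compiled out.
 */
#if 0
static const struct ice_phy_type_to_ethtool demo_phy_lkup[] = {
	/* index == bit position of the PHY type in phy_types_low */
	[0] = {	/* assumed: 100BASE_TX at bit 0 */
		.aq_link_speed = ICE_AQ_LINK_SPEED_100MB,
		.link_mode = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
	},
	[2] = {	/* assumed: 1000BASE_T at bit 2 */
		.aq_link_speed = ICE_AQ_LINK_SPEED_1000MB,
		.link_mode = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	},
};
#endif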
2182
2183 #define TEST_SET_BITS_TIMEOUT 50
2184 #define TEST_SET_BITS_SLEEP_MAX 2000
2185 #define TEST_SET_BITS_SLEEP_MIN 1000
2186
2187 /**
2188 * ice_get_settings_link_up - Get Link settings for when link is up
2189 * @ks: ethtool ksettings to fill in
2190 * @netdev: network interface device structure
2191 */
2192 static void
2193 ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
2194 struct net_device *netdev)
2195 {
2196 struct ice_netdev_priv *np = netdev_priv(netdev);
2197 struct ice_port_info *pi = np->vsi->port_info;
2198 struct ice_link_status *link_info;
2199 struct ice_vsi *vsi = np->vsi;
2200
2201 link_info = &vsi->port_info->phy.link_info;
2202
2203 /* Get supported and advertised settings from PHY ability with media */
2204 ice_phy_type_to_ethtool(netdev, ks);
2205
2206 switch (link_info->link_speed) {
2207 case ICE_AQ_LINK_SPEED_200GB:
2208 ks->base.speed = SPEED_200000;
2209 break;
2210 case ICE_AQ_LINK_SPEED_100GB:
2211 ks->base.speed = SPEED_100000;
2212 break;
2213 case ICE_AQ_LINK_SPEED_50GB:
2214 ks->base.speed = SPEED_50000;
2215 break;
2216 case ICE_AQ_LINK_SPEED_40GB:
2217 ks->base.speed = SPEED_40000;
2218 break;
2219 case ICE_AQ_LINK_SPEED_25GB:
2220 ks->base.speed = SPEED_25000;
2221 break;
2222 case ICE_AQ_LINK_SPEED_20GB:
2223 ks->base.speed = SPEED_20000;
2224 break;
2225 case ICE_AQ_LINK_SPEED_10GB:
2226 ks->base.speed = SPEED_10000;
2227 break;
2228 case ICE_AQ_LINK_SPEED_5GB:
2229 ks->base.speed = SPEED_5000;
2230 break;
2231 case ICE_AQ_LINK_SPEED_2500MB:
2232 ks->base.speed = SPEED_2500;
2233 break;
2234 case ICE_AQ_LINK_SPEED_1000MB:
2235 ks->base.speed = SPEED_1000;
2236 break;
2237 case ICE_AQ_LINK_SPEED_100MB:
2238 ks->base.speed = SPEED_100;
2239 break;
2240 default:
2241 netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n",
2242 link_info->link_speed);
2243 break;
2244 }
2245 ks->base.duplex = DUPLEX_FULL;
2246
2247 if (link_info->an_info & ICE_AQ_AN_COMPLETED)
2248 ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
2249 Autoneg);
2250
2251 /* Set flow control negotiated Rx/Tx pause */
2252 switch (pi->fc.current_mode) {
2253 case ICE_FC_FULL:
2254 ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
2255 break;
2256 case ICE_FC_TX_PAUSE:
2257 ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
2258 ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
2259 Asym_Pause);
2260 break;
2261 case ICE_FC_RX_PAUSE:
2262 ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
2263 Asym_Pause);
2264 break;
2265 case ICE_FC_PFC:
2266 default:
2267 ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause);
2268 ethtool_link_ksettings_del_link_mode(ks, lp_advertising,
2269 Asym_Pause);
2270 break;
2271 }
2272 }
2273
2274 /**
2275 * ice_get_settings_link_down - Get the Link settings when link is down
2276 * @ks: ethtool ksettings to fill in
2277 * @netdev: network interface device structure
2278 *
2279 * Reports link settings that can be determined when link is down
2280 */
2281 static void
2282 ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
2283 struct net_device *netdev)
2284 {
2285 /* link is down and the driver needs to fall back on
2286 * supported PHY types to figure out what info to display
2287 */
2288 ice_phy_type_to_ethtool(netdev, ks);
2289
2290 /* With no link, speed and duplex are unknown */
2291 ks->base.speed = SPEED_UNKNOWN;
2292 ks->base.duplex = DUPLEX_UNKNOWN;
2293 }
2294
2295 /**
2296 * ice_get_link_ksettings - Get Link Speed and Duplex settings
2297 * @netdev: network interface device structure
2298 * @ks: ethtool ksettings
2299 *
2300 * Reports speed/duplex settings based on media_type
2301 */
2302 static int
2303 ice_get_link_ksettings(struct net_device *netdev,
2304 struct ethtool_link_ksettings *ks)
2305 {
2306 struct ice_netdev_priv *np = netdev_priv(netdev);
2307 struct ice_aqc_get_phy_caps_data *caps;
2308 struct ice_link_status *hw_link_info;
2309 struct ice_vsi *vsi = np->vsi;
2310 int err;
2311
2312 ethtool_link_ksettings_zero_link_mode(ks, supported);
2313 ethtool_link_ksettings_zero_link_mode(ks, advertising);
2314 ethtool_link_ksettings_zero_link_mode(ks, lp_advertising);
2315 hw_link_info = &vsi->port_info->phy.link_info;
2316
2317 /* set speed and duplex */
2318 if (hw_link_info->link_info & ICE_AQ_LINK_UP)
2319 ice_get_settings_link_up(ks, netdev);
2320 else
2321 ice_get_settings_link_down(ks, netdev);
2322
2323 /* set autoneg settings */
2324 ks->base.autoneg = (hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
2325 AUTONEG_ENABLE : AUTONEG_DISABLE;
2326
2327 /* set media type settings */
2328 switch (vsi->port_info->phy.media_type) {
2329 case ICE_MEDIA_FIBER:
2330 ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
2331 ks->base.port = PORT_FIBRE;
2332 break;
2333 case ICE_MEDIA_BASET:
2334 ethtool_link_ksettings_add_link_mode(ks, supported, TP);
2335 ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
2336 ks->base.port = PORT_TP;
2337 break;
2338 case ICE_MEDIA_BACKPLANE:
2339 ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
2340 ethtool_link_ksettings_add_link_mode(ks, advertising,
2341 Backplane);
2342 ks->base.port = PORT_NONE;
2343 break;
2344 case ICE_MEDIA_DA:
2345 ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
2346 ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
2347 ks->base.port = PORT_DA;
2348 break;
2349 default:
2350 ks->base.port = PORT_OTHER;
2351 break;
2352 }
2353
2354 /* flow control is symmetric and always supported */
2355 ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
2356
2357 caps = kzalloc(sizeof(*caps), GFP_KERNEL);
2358 if (!caps)
2359 return -ENOMEM;
2360
2361 err = ice_aq_get_phy_caps(vsi->port_info, false,
2362 ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
2363 if (err)
2364 goto done;
2365
2366 /* Set the advertised flow control based on the PHY capability */
2367 if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) &&
2368 (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) {
2369 ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
2370 ethtool_link_ksettings_add_link_mode(ks, advertising,
2371 Asym_Pause);
2372 } else if (caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) {
2373 ethtool_link_ksettings_add_link_mode(ks, advertising,
2374 Asym_Pause);
2375 } else if (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) {
2376 ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
2377 ethtool_link_ksettings_add_link_mode(ks, advertising,
2378 Asym_Pause);
2379 } else {
2380 ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
2381 ethtool_link_ksettings_del_link_mode(ks, advertising,
2382 Asym_Pause);
2383 }
2384
2385 /* Set advertised FEC modes based on PHY capability */
2386 ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
2387
2388 if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
2389 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
2390 ethtool_link_ksettings_add_link_mode(ks, advertising,
2391 FEC_BASER);
2392 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
2393 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
2394 ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
2395
2396 err = ice_aq_get_phy_caps(vsi->port_info, false,
2397 ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL);
2398 if (err)
2399 goto done;
2400
2401 /* Set supported FEC modes based on PHY capability */
2402 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
2403
2404 if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
2405 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
2406 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
2407 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
2408 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
2409
2410 /* Set supported and advertised autoneg */
2411 if (ice_is_phy_caps_an_enabled(caps)) {
2412 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
2413 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
2414 }
2415
2416 done:
2417 kfree(caps);
2418 return err;
2419 }
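
/* User-space sketch of the ETHTOOL_GLINKSETTINGS handshake that ultimately
 * lands in this handler: a first call with link_mode_masks_nwords == 0 makes
 * the kernel reply with the negative of the mask size it supports, and a
 * second call fetches the real data. Assumes the example_ethtool_ioctl()
 * helper from the earlier sketch; compiled out.
 */
#if 0
static void example_show_link(const char *ifname)
{
	struct {
		struct ethtool_link_settings req;
		__u32 masks[3 * 127];	/* supported/advertising/lp, worst case */
	} ecmd = { .req = { .cmd = ETHTOOL_GLINKSETTINGS } };
	int nwords;

	/* pass 1: discover how many 32-bit mask words the kernel uses */
	if (example_ethtool_ioctl(ifname, &ecmd) ||
	    ecmd.req.link_mode_masks_nwords >= 0)
		return;
	nwords = -ecmd.req.link_mode_masks_nwords;

	/* pass 2: request the masks with the agreed size */
	ecmd.req.cmd = ETHTOOL_GLINKSETTINGS;
	ecmd.req.link_mode_masks_nwords = nwords;
	if (example_ethtool_ioctl(ifname, &ecmd))
		return;

	printf("speed=%u duplex=%u autoneg=%u port=%u\n", ecmd.req.speed,
	       ecmd.req.duplex, ecmd.req.autoneg, ecmd.req.port);
}
#endif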
2420
2421 /**
2422 * ice_speed_to_aq_link - Get AQ link speed by Ethtool forced speed
2423 * @speed: ethtool forced speed
2424 */
2425 static u16 ice_speed_to_aq_link(int speed)
2426 {
2427 int aq_speed;
2428
2429 switch (speed) {
2430 case SPEED_10:
2431 aq_speed = ICE_AQ_LINK_SPEED_10MB;
2432 break;
2433 case SPEED_100:
2434 aq_speed = ICE_AQ_LINK_SPEED_100MB;
2435 break;
2436 case SPEED_1000:
2437 aq_speed = ICE_AQ_LINK_SPEED_1000MB;
2438 break;
2439 case SPEED_2500:
2440 aq_speed = ICE_AQ_LINK_SPEED_2500MB;
2441 break;
2442 case SPEED_5000:
2443 aq_speed = ICE_AQ_LINK_SPEED_5GB;
2444 break;
2445 case SPEED_10000:
2446 aq_speed = ICE_AQ_LINK_SPEED_10GB;
2447 break;
2448 case SPEED_20000:
2449 aq_speed = ICE_AQ_LINK_SPEED_20GB;
2450 break;
2451 case SPEED_25000:
2452 aq_speed = ICE_AQ_LINK_SPEED_25GB;
2453 break;
2454 case SPEED_40000:
2455 aq_speed = ICE_AQ_LINK_SPEED_40GB;
2456 break;
2457 case SPEED_50000:
2458 aq_speed = ICE_AQ_LINK_SPEED_50GB;
2459 break;
2460 case SPEED_100000:
2461 aq_speed = ICE_AQ_LINK_SPEED_100GB;
2462 break;
2463 default:
2464 aq_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
2465 break;
2466 }
2467 return aq_speed;
2468 }
2469
2470 /**
2471 * ice_ksettings_find_adv_link_speed - Find advertising link speed
2472 * @ks: ethtool ksettings
2473 */
2474 static u16
2475 ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
2476 {
2477 const struct ethtool_forced_speed_map *map;
2478 u16 adv_link_speed = 0;
2479
2480 for (u32 i = 0; i < ARRAY_SIZE(ice_adv_lnk_speed_maps); i++) {
2481 map = ice_adv_lnk_speed_maps + i;
2482 if (linkmode_intersects(ks->link_modes.advertising, map->caps))
2483 adv_link_speed |= ice_speed_to_aq_link(map->speed);
2484 }
2485
2486 return adv_link_speed;
2487 }
2488
2489 /**
2490 * ice_setup_autoneg - setup PHY autonegotiation
2491 * @p: port info
2492 * @ks: ethtool_link_ksettings
2493 * @config: configuration that will be sent down to FW
2494 * @autoneg_enabled: autonegotiation is enabled or not
2495 * @autoneg_changed: will there be a change in autonegotiation
2496 * @netdev: network interface device structure
2497 *
2498 * Setup PHY autonegotiation feature
2499 */
2500 static int
2501 ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
2502 struct ice_aqc_set_phy_cfg_data *config,
2503 u8 autoneg_enabled, u8 *autoneg_changed,
2504 struct net_device *netdev)
2505 {
2506 int err = 0;
2507
2508 *autoneg_changed = 0;
2509
2510 /* Check autoneg */
2511 if (autoneg_enabled == AUTONEG_ENABLE) {
2512 /* If autoneg was not already enabled */
2513 if (!(p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)) {
2514 /* If autoneg is not supported, return error */
2515 if (!ethtool_link_ksettings_test_link_mode(ks,
2516 supported,
2517 Autoneg)) {
2518 netdev_info(netdev, "Autoneg not supported on this phy.\n");
2519 err = -EINVAL;
2520 } else {
2521 /* Autoneg is allowed to change */
2522 config->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2523 *autoneg_changed = 1;
2524 }
2525 }
2526 } else {
2527 /* If autoneg is currently enabled */
2528 if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) {
2529 /* If autoneg is supported, 10GBASE_T is the only PHY
2530 * that can disable it; for any other PHY, return an error
2531 */
2532 if (ethtool_link_ksettings_test_link_mode(ks,
2533 supported,
2534 Autoneg)) {
2535 netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
2536 err = -EINVAL;
2537 } else {
2538 /* Autoneg is allowed to change */
2539 config->caps &= ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2540 *autoneg_changed = 1;
2541 }
2542 }
2543 }
2544
2545 return err;
2546 }
2547
2548 /**
2549 * ice_set_phy_type_from_speed - set phy_types based on speeds
2550 * and advertised modes
2551 * @ks: ethtool link ksettings struct
2552 * @phy_type_low: pointer to the lower part of phy_type
2553 * @phy_type_high: pointer to the higher part of phy_type
2554 * @adv_link_speed: targeted link speeds bitmap
2555 */
2556 static void
2557 ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks,
2558 u64 *phy_type_low, u64 *phy_type_high,
2559 u16 adv_link_speed)
2560 {
2561 /* Handle 1000M speed in a special way because ice_update_phy_type
2562 * enables all link modes, but having mixed copper and optical
2563 * standards is not supported.
2564 */
2565 adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB;
2566
2567 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
2568 1000baseT_Full))
2569 *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T |
2570 ICE_PHY_TYPE_LOW_1G_SGMII;
2571
2572 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
2573 1000baseKX_Full))
2574 *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX;
2575
2576 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
2577 1000baseX_Full))
2578 *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX |
2579 ICE_PHY_TYPE_LOW_1000BASE_LX;
2580
2581 ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed);
2582 }
2583
2584 /**
2585 * ice_set_link_ksettings - Set Speed and Duplex
2586 * @netdev: network interface device structure
2587 * @ks: ethtool ksettings
2588 *
2589 * Set speed/duplex per media_types advertised/forced
2590 */
2591 static int
2592 ice_set_link_ksettings(struct net_device *netdev,
2593 const struct ethtool_link_ksettings *ks)
2594 {
2595 struct ice_netdev_priv *np = netdev_priv(netdev);
2596 u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT;
2597 struct ethtool_link_ksettings copy_ks = *ks;
2598 struct ethtool_link_ksettings safe_ks = {};
2599 struct ice_aqc_get_phy_caps_data *phy_caps;
2600 struct ice_aqc_set_phy_cfg_data config;
2601 u16 adv_link_speed, curr_link_speed;
2602 struct ice_pf *pf = np->vsi->back;
2603 struct ice_port_info *pi;
2604 u8 autoneg_changed = 0;
2605 u64 phy_type_high = 0;
2606 u64 phy_type_low = 0;
2607 bool linkup;
2608 int err;
2609
2610 pi = np->vsi->port_info;
2611
2612 if (!pi)
2613 return -EIO;
2614
2615 if (pi->phy.media_type != ICE_MEDIA_BASET &&
2616 pi->phy.media_type != ICE_MEDIA_FIBER &&
2617 pi->phy.media_type != ICE_MEDIA_BACKPLANE &&
2618 pi->phy.media_type != ICE_MEDIA_DA &&
2619 pi->phy.link_info.link_info & ICE_AQ_LINK_UP)
2620 return -EOPNOTSUPP;
2621
2622 phy_caps = kzalloc(sizeof(*phy_caps), GFP_KERNEL);
2623 if (!phy_caps)
2624 return -ENOMEM;
2625
2626 /* Get the PHY capabilities based on media */
2627 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2628 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2629 phy_caps, NULL);
2630 else
2631 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2632 phy_caps, NULL);
2633 if (err)
2634 goto done;
2635
2636 /* save autoneg out of ksettings */
2637 autoneg = copy_ks.base.autoneg;
2638
2639 /* Get link modes supported by hardware. */
2640 ice_phy_type_to_ethtool(netdev, &safe_ks);
2641
2642 /* and check against modes requested by user.
2643 * Return an error if unsupported mode was set.
2644 */
2645 if (!bitmap_subset(copy_ks.link_modes.advertising,
2646 safe_ks.link_modes.supported,
2647 __ETHTOOL_LINK_MODE_MASK_NBITS)) {
2648 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags))
2649 netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
2650 err = -EOPNOTSUPP;
2651 goto done;
2652 }
2653
2654 /* get our own copy of the bits to check against */
2655 memset(&safe_ks, 0, sizeof(safe_ks));
2656 safe_ks.base.cmd = copy_ks.base.cmd;
2657 safe_ks.base.link_mode_masks_nwords =
2658 copy_ks.base.link_mode_masks_nwords;
2659 ice_get_link_ksettings(netdev, &safe_ks);
2660
2661 /* set autoneg back to what it currently is */
2662 copy_ks.base.autoneg = safe_ks.base.autoneg;
2663 /* we don't compare the speed */
2664 copy_ks.base.speed = safe_ks.base.speed;
2665
2666 /* If copy_ks.base and safe_ks.base are not the same now, then they are
2667 * trying to set something that we do not support.
2668 */
2669 if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(copy_ks.base))) {
2670 err = -EOPNOTSUPP;
2671 goto done;
2672 }
2673
2674 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
2675 timeout--;
2676 if (!timeout) {
2677 err = -EBUSY;
2678 goto done;
2679 }
2680 usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX);
2681 }
2682
2683 /* Copy the current user PHY configuration. The current user PHY
2684 * configuration is initialized during probe from PHY capabilities
2685 * software mode, and updated on set PHY configuration.
2686 */
2687 config = pi->phy.curr_user_phy_cfg;
2688
2689 config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2690
2691 /* Check autoneg */
2692 err = ice_setup_autoneg(pi, &safe_ks, &config, autoneg, &autoneg_changed,
2693 netdev);
2694
2695 if (err)
2696 goto done;
2697
2698 /* Call to get the current link speed */
2699 pi->phy.get_link_info = true;
2700 err = ice_get_link_status(pi, &linkup);
2701 if (err)
2702 goto done;
2703
2704 curr_link_speed = pi->phy.curr_user_speed_req;
2705 adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
2706
2707 /* If speed didn't get set, set it to what it currently is.
2708 * This is needed because if advertise is 0 (as it is when autoneg
2709 * is disabled) then speed won't get set.
2710 */
2711 if (!adv_link_speed)
2712 adv_link_speed = curr_link_speed;
2713
2714 /* Convert the advertise link speeds to their corresponded PHY_TYPE */
2715 ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high,
2716 adv_link_speed);
2717
2718 if (!autoneg_changed && adv_link_speed == curr_link_speed) {
2719 netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
2720 goto done;
2721 }
2722
2723 /* save the requested speeds */
2724 pi->phy.link_info.req_speeds = adv_link_speed;
2725
2726 /* set link and auto negotiation so changes take effect */
2727 config.caps |= ICE_AQ_PHY_ENA_LINK;
2728
2729 /* check if there is a PHY type for the requested advertised speed */
2730 if (!(phy_type_low || phy_type_high)) {
2731 netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
2732 err = -EOPNOTSUPP;
2733 goto done;
2734 }
2735
2736 /* intersect requested advertised speed PHY types with media PHY types
2737 * for set PHY configuration
2738 */
2739 config.phy_type_high = cpu_to_le64(phy_type_high) &
2740 phy_caps->phy_type_high;
2741 config.phy_type_low = cpu_to_le64(phy_type_low) &
2742 phy_caps->phy_type_low;
2743
2744 if (!(config.phy_type_high || config.phy_type_low)) {
2745 /* If there is no intersection and lenient mode is enabled, then
2746 * intersect the requested advertised speed with NVM media type
2747 * PHY types.
2748 */
2749 if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) {
2750 config.phy_type_high = cpu_to_le64(phy_type_high) &
2751 pf->nvm_phy_type_hi;
2752 config.phy_type_low = cpu_to_le64(phy_type_low) &
2753 pf->nvm_phy_type_lo;
2754 } else {
2755 netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
2756 err = -EOPNOTSUPP;
2757 goto done;
2758 }
2759 }
2760
2761 /* If link is up put link down */
2762 if (pi->phy.link_info.link_info & ICE_AQ_LINK_UP) {
2763 /* Tell the OS link is going down, the link will go
2764 * back up when fw says it is ready asynchronously
2765 */
2766 ice_print_link_msg(np->vsi, false);
2767 netif_carrier_off(netdev);
2768 netif_tx_stop_all_queues(netdev);
2769 }
2770
2771 /* make the aq call */
2772 err = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL);
2773 if (err) {
2774 netdev_info(netdev, "Set phy config failed,\n");
2775 goto done;
2776 }
2777
2778 /* Save speed request */
2779 pi->phy.curr_user_speed_req = adv_link_speed;
2780 done:
2781 kfree(phy_caps);
2782 clear_bit(ICE_CFG_BUSY, pf->state);
2783
2784 return err;
2785 }
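
/* The ICE_CFG_BUSY dance above is a common driver pattern: spin on
 * test_and_set_bit() with a bounded, sleepy retry so concurrent ethtool
 * configuration paths serialize without a dedicated mutex. Reduced to its
 * skeleton below (demo_cfg_lock() is a made-up name); compiled out.
 */
#if 0
static int demo_cfg_lock(struct ice_pf *pf)
{
	int timeout = TEST_SET_BITS_TIMEOUT;

	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
		if (!--timeout)
			return -EBUSY;	/* holder kept it too long; give up */
		usleep_range(TEST_SET_BITS_SLEEP_MIN,
			     TEST_SET_BITS_SLEEP_MAX);
	}

	/* caller now owns the section; release with
	 * clear_bit(ICE_CFG_BUSY, pf->state);
	 */
	return 0;
}
#endif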
2786
2787 static u32 ice_parse_hdrs(const struct ethtool_rxfh_fields *nfc)
2788 {
2789 u32 hdrs = ICE_FLOW_SEG_HDR_NONE;
2790
2791 switch (nfc->flow_type) {
2792 case TCP_V4_FLOW:
2793 hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4;
2794 break;
2795 case UDP_V4_FLOW:
2796 hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4;
2797 break;
2798 case SCTP_V4_FLOW:
2799 hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4;
2800 break;
2801 case GTPU_V4_FLOW:
2802 hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4;
2803 break;
2804 case GTPC_V4_FLOW:
2805 hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4;
2806 break;
2807 case GTPC_TEID_V4_FLOW:
2808 hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4;
2809 break;
2810 case GTPU_EH_V4_FLOW:
2811 hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4;
2812 break;
2813 case GTPU_UL_V4_FLOW:
2814 hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4;
2815 break;
2816 case GTPU_DL_V4_FLOW:
2817 hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4;
2818 break;
2819 case TCP_V6_FLOW:
2820 hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6;
2821 break;
2822 case UDP_V6_FLOW:
2823 hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6;
2824 break;
2825 case SCTP_V6_FLOW:
2826 hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6;
2827 break;
2828 case GTPU_V6_FLOW:
2829 hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6;
2830 break;
2831 case GTPC_V6_FLOW:
2832 hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6;
2833 break;
2834 case GTPC_TEID_V6_FLOW:
2835 hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6;
2836 break;
2837 case GTPU_EH_V6_FLOW:
2838 hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6;
2839 break;
2840 case GTPU_UL_V6_FLOW:
2841 hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6;
2842 break;
2843 case GTPU_DL_V6_FLOW:
2844 hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6;
2845 break;
2846 default:
2847 break;
2848 }
2849 return hdrs;
2850 }
2851
2852 static u64 ice_parse_hash_flds(const struct ethtool_rxfh_fields *nfc, bool symm)
2853 {
2854 u64 hfld = ICE_HASH_INVALID;
2855
2856 if (nfc->data & RXH_IP_SRC || nfc->data & RXH_IP_DST) {
2857 switch (nfc->flow_type) {
2858 case TCP_V4_FLOW:
2859 case UDP_V4_FLOW:
2860 case SCTP_V4_FLOW:
2861 case GTPU_V4_FLOW:
2862 case GTPC_V4_FLOW:
2863 case GTPC_TEID_V4_FLOW:
2864 case GTPU_EH_V4_FLOW:
2865 case GTPU_UL_V4_FLOW:
2866 case GTPU_DL_V4_FLOW:
2867 if (nfc->data & RXH_IP_SRC)
2868 hfld |= ICE_FLOW_HASH_FLD_IPV4_SA;
2869 if (nfc->data & RXH_IP_DST)
2870 hfld |= ICE_FLOW_HASH_FLD_IPV4_DA;
2871 break;
2872 case TCP_V6_FLOW:
2873 case UDP_V6_FLOW:
2874 case SCTP_V6_FLOW:
2875 case GTPU_V6_FLOW:
2876 case GTPC_V6_FLOW:
2877 case GTPC_TEID_V6_FLOW:
2878 case GTPU_EH_V6_FLOW:
2879 case GTPU_UL_V6_FLOW:
2880 case GTPU_DL_V6_FLOW:
2881 if (nfc->data & RXH_IP_SRC)
2882 hfld |= ICE_FLOW_HASH_FLD_IPV6_SA;
2883 if (nfc->data & RXH_IP_DST)
2884 hfld |= ICE_FLOW_HASH_FLD_IPV6_DA;
2885 break;
2886 default:
2887 break;
2888 }
2889 }
2890
2891 if (nfc->data & RXH_L4_B_0_1 || nfc->data & RXH_L4_B_2_3) {
2892 switch (nfc->flow_type) {
2893 case TCP_V4_FLOW:
2894 case TCP_V6_FLOW:
2895 if (nfc->data & RXH_L4_B_0_1)
2896 hfld |= ICE_FLOW_HASH_FLD_TCP_SRC_PORT;
2897 if (nfc->data & RXH_L4_B_2_3)
2898 hfld |= ICE_FLOW_HASH_FLD_TCP_DST_PORT;
2899 break;
2900 case UDP_V4_FLOW:
2901 case UDP_V6_FLOW:
2902 if (nfc->data & RXH_L4_B_0_1)
2903 hfld |= ICE_FLOW_HASH_FLD_UDP_SRC_PORT;
2904 if (nfc->data & RXH_L4_B_2_3)
2905 hfld |= ICE_FLOW_HASH_FLD_UDP_DST_PORT;
2906 break;
2907 case SCTP_V4_FLOW:
2908 case SCTP_V6_FLOW:
2909 if (nfc->data & RXH_L4_B_0_1)
2910 hfld |= ICE_FLOW_HASH_FLD_SCTP_SRC_PORT;
2911 if (nfc->data & RXH_L4_B_2_3)
2912 hfld |= ICE_FLOW_HASH_FLD_SCTP_DST_PORT;
2913 break;
2914 default:
2915 break;
2916 }
2917 }
2918
2919 if (nfc->data & RXH_GTP_TEID) {
2920 switch (nfc->flow_type) {
2921 case GTPC_TEID_V4_FLOW:
2922 case GTPC_TEID_V6_FLOW:
2923 hfld |= ICE_FLOW_HASH_FLD_GTPC_TEID;
2924 break;
2925 case GTPU_V4_FLOW:
2926 case GTPU_V6_FLOW:
2927 hfld |= ICE_FLOW_HASH_FLD_GTPU_IP_TEID;
2928 break;
2929 case GTPU_EH_V4_FLOW:
2930 case GTPU_EH_V6_FLOW:
2931 hfld |= ICE_FLOW_HASH_FLD_GTPU_EH_TEID;
2932 break;
2933 case GTPU_UL_V4_FLOW:
2934 case GTPU_UL_V6_FLOW:
2935 hfld |= ICE_FLOW_HASH_FLD_GTPU_UP_TEID;
2936 break;
2937 case GTPU_DL_V4_FLOW:
2938 case GTPU_DL_V6_FLOW:
2939 hfld |= ICE_FLOW_HASH_FLD_GTPU_DWN_TEID;
2940 break;
2941 default:
2942 break;
2943 }
2944 }
2945
2946 return hfld;
2947 }
2948
2949 static int
2950 ice_set_rxfh_fields(struct net_device *netdev,
2951 const struct ethtool_rxfh_fields *nfc,
2952 struct netlink_ext_ack *extack)
2953 {
2954 struct ice_netdev_priv *np = netdev_priv(netdev);
2955 struct ice_vsi *vsi = np->vsi;
2956 struct ice_pf *pf = vsi->back;
2957 struct ice_rss_hash_cfg cfg;
2958 struct device *dev;
2959 u64 hashed_flds;
2960 int status;
2961 bool symm;
2962 u32 hdrs;
2963
2964 dev = ice_pf_to_dev(pf);
2965 if (ice_is_safe_mode(pf)) {
2966 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
2967 vsi->vsi_num);
2968 return -EINVAL;
2969 }
2970
2971 symm = !!(vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
2972 hashed_flds = ice_parse_hash_flds(nfc, symm);
2973 if (hashed_flds == ICE_HASH_INVALID) {
2974 dev_dbg(dev, "Invalid hash fields, vsi num = %d\n",
2975 vsi->vsi_num);
2976 return -EINVAL;
2977 }
2978
2979 hdrs = ice_parse_hdrs(nfc);
2980 if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
2981 dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
2982 vsi->vsi_num);
2983 return -EINVAL;
2984 }
2985
2986 cfg.hash_flds = hashed_flds;
2987 cfg.addl_hdrs = hdrs;
2988 cfg.hdr_type = ICE_RSS_ANY_HEADERS;
2989 cfg.symm = symm;
2990
2991 status = ice_add_rss_cfg(&pf->hw, vsi, &cfg);
2992 if (status) {
2993 dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
2994 vsi->vsi_num, status);
2995 return status;
2996 }
2997
2998 return 0;
2999 }
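
/* What this handler receives originates from requests like the one below:
 * a user-space sketch selecting a 4-tuple hash for IPv4/TCP with the legacy
 * ETHTOOL_SRXFH ioctl, which the ethtool core routes to this driver op.
 * Assumes the example_ethtool_ioctl() helper from the earlier sketch;
 * compiled out.
 */
#if 0
static int example_set_tcp4_hash(const char *ifname)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_SRXFH,
		.flow_type = TCP_V4_FLOW,
		/* src/dst IP plus src/dst port, cf. ice_parse_hash_flds() */
		.data = RXH_IP_SRC | RXH_IP_DST |
			RXH_L4_B_0_1 | RXH_L4_B_2_3,
	};

	return example_ethtool_ioctl(ifname, &nfc);
}
#endif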
3000
3001 static int
3002 ice_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *nfc)
3003 {
3004 struct ice_netdev_priv *np = netdev_priv(netdev);
3005 struct ice_vsi *vsi = np->vsi;
3006 struct ice_pf *pf = vsi->back;
3007 struct device *dev;
3008 u64 hash_flds;
3009 bool symm;
3010 u32 hdrs;
3011
3012 dev = ice_pf_to_dev(pf);
3013
3014 nfc->data = 0;
3015 if (ice_is_safe_mode(pf)) {
3016 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
3017 vsi->vsi_num);
3018 return 0;
3019 }
3020
3021 hdrs = ice_parse_hdrs(nfc);
3022 if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
3023 dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
3024 vsi->vsi_num);
3025 return 0;
3026 }
3027
3028 hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm);
3029 if (hash_flds == ICE_HASH_INVALID) {
3030 dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
3031 vsi->vsi_num);
3032 return 0;
3033 }
3034
3035 if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_SA ||
3036 hash_flds & ICE_FLOW_HASH_FLD_IPV6_SA)
3037 nfc->data |= (u64)RXH_IP_SRC;
3038
3039 if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_DA ||
3040 hash_flds & ICE_FLOW_HASH_FLD_IPV6_DA)
3041 nfc->data |= (u64)RXH_IP_DST;
3042
3043 if (hash_flds & ICE_FLOW_HASH_FLD_TCP_SRC_PORT ||
3044 hash_flds & ICE_FLOW_HASH_FLD_UDP_SRC_PORT ||
3045 hash_flds & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT)
3046 nfc->data |= (u64)RXH_L4_B_0_1;
3047
3048 if (hash_flds & ICE_FLOW_HASH_FLD_TCP_DST_PORT ||
3049 hash_flds & ICE_FLOW_HASH_FLD_UDP_DST_PORT ||
3050 hash_flds & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)
3051 nfc->data |= (u64)RXH_L4_B_2_3;
3052
3053 if (hash_flds & ICE_FLOW_HASH_FLD_GTPC_TEID ||
3054 hash_flds & ICE_FLOW_HASH_FLD_GTPU_IP_TEID ||
3055 hash_flds & ICE_FLOW_HASH_FLD_GTPU_EH_TEID ||
3056 hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID ||
3057 hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID)
3058 nfc->data |= (u64)RXH_GTP_TEID;
3059
3060 return 0;
3061 }
3062
3063 /**
3064 * ice_set_rxnfc - command to set Rx flow rules.
3065 * @netdev: network interface device structure
3066 * @cmd: ethtool rxnfc command
3067 *
3068 * Returns 0 for success and negative values for errors
3069 */
3070 static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
3071 {
3072 struct ice_netdev_priv *np = netdev_priv(netdev);
3073 struct ice_vsi *vsi = np->vsi;
3074
3075 switch (cmd->cmd) {
3076 case ETHTOOL_SRXCLSRLINS:
3077 return ice_add_fdir_ethtool(vsi, cmd);
3078 case ETHTOOL_SRXCLSRLDEL:
3079 return ice_del_fdir_ethtool(vsi, cmd);
3080 default:
3081 break;
3082 }
3083 return -EOPNOTSUPP;
3084 }
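
/* A user-space sketch of inserting one Flow Director rule through
 * ETHTOOL_SRXCLSRLINS, the command dispatched above: steer TCP/IPv4 traffic
 * with destination port 80 to Rx queue 3. In ethtool flow specs, set mask
 * bits mark the field bits that participate in the match. The fixed
 * location value is illustrative and subject to driver policing. Assumes
 * the example_ethtool_ioctl() helper from the earlier sketch; compiled out.
 */
#if 0
#include <arpa/inet.h>

static int example_add_fdir_rule(const char *ifname)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_SRXCLSRLINS,
		.fs = {
			.flow_type = TCP_V4_FLOW,
			.ring_cookie = 3,	/* deliver hits to queue 3 */
			.location = 1,		/* rule slot */
		},
	};

	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
	nfc.fs.m_u.tcp_ip4_spec.pdst = 0xffff;	/* match all 16 port bits */

	return example_ethtool_ioctl(ifname, &nfc);
}
#endif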
3085
3086 /**
3087 * ice_get_rx_ring_count - get RX ring count
3088 * @netdev: network interface device structure
3089 *
3090 * Return: number of RX rings.
3091 */
3092 static u32 ice_get_rx_ring_count(struct net_device *netdev)
3093 {
3094 struct ice_netdev_priv *np = netdev_priv(netdev);
3095 struct ice_vsi *vsi = np->vsi;
3096
3097 return vsi->rss_size;
3098 }
3099
3100 /**
3101 * ice_get_rxnfc - command to get Rx flow classification rules
3102 * @netdev: network interface device structure
3103 * @cmd: ethtool rxnfc command
3104 * @rule_locs: buffer to return Rx flow classification rules
3105 *
3106 * Returns success if the command is supported.
3107 */
3108 static int
3109 ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
3110 u32 __always_unused *rule_locs)
3111 {
3112 struct ice_netdev_priv *np = netdev_priv(netdev);
3113 struct ice_vsi *vsi = np->vsi;
3114 int ret = -EOPNOTSUPP;
3115 struct ice_hw *hw;
3116
3117 hw = &vsi->back->hw;
3118
3119 switch (cmd->cmd) {
3120 case ETHTOOL_GRXCLSRLCNT:
3121 cmd->rule_cnt = hw->fdir_active_fltr;
3122 /* report total rule count */
3123 cmd->data = ice_get_fdir_cnt_all(hw);
3124 ret = 0;
3125 break;
3126 case ETHTOOL_GRXCLSRULE:
3127 ret = ice_get_ethtool_fdir_entry(hw, cmd);
3128 break;
3129 case ETHTOOL_GRXCLSRLALL:
3130 ret = ice_get_fdir_fltr_ids(hw, cmd, (u32 *)rule_locs);
3131 break;
3132 default:
3133 break;
3134 }
3135
3136 return ret;
3137 }
3138
3139 static void
3140 ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
3141 struct kernel_ethtool_ringparam *kernel_ring,
3142 struct netlink_ext_ack *extack)
3143 {
3144 struct ice_netdev_priv *np = netdev_priv(netdev);
3145 struct ice_vsi *vsi = np->vsi;
3146 struct ice_hw *hw;
3147
3148 hw = &vsi->back->hw;
3149 ring->rx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
3150 ring->tx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
3151 if (vsi->tx_rings && vsi->rx_rings) {
3152 ring->rx_pending = vsi->rx_rings[0]->count;
3153 ring->tx_pending = vsi->tx_rings[0]->count;
3154 } else {
3155 ring->rx_pending = 0;
3156 ring->tx_pending = 0;
3157 }
3158
3159 /* Rx mini and jumbo rings are not supported */
3160 ring->rx_mini_max_pending = 0;
3161 ring->rx_jumbo_max_pending = 0;
3162 ring->rx_mini_pending = 0;
3163 ring->rx_jumbo_pending = 0;
3164
3165 kernel_ring->tcp_data_split = vsi->hsplit ?
3166 ETHTOOL_TCP_DATA_SPLIT_ENABLED :
3167 ETHTOOL_TCP_DATA_SPLIT_DISABLED;
3168 }
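
/* The matching user-space read: ETHTOOL_GRINGPARAM fills the same structure
 * this handler populates, so rx_max_pending/tx_max_pending come straight
 * from ICE_MAX_NUM_DESC_BY_MAC() and the *_pending fields from ring 0.
 * Assumes the example_ethtool_ioctl() helper from the earlier sketch;
 * compiled out.
 */
#if 0
static void example_show_rings(const char *ifname)
{
	struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };

	if (!example_ethtool_ioctl(ifname, &ring))
		printf("rx %u/%u tx %u/%u\n",
		       ring.rx_pending, ring.rx_max_pending,
		       ring.tx_pending, ring.tx_max_pending);
}
#endif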
3169
3170 static int
3171 ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
3172 struct kernel_ethtool_ringparam *kernel_ring,
3173 struct netlink_ext_ack *extack)
3174 {
3175 struct ice_netdev_priv *np = netdev_priv(netdev);
3176 struct ice_tx_ring *xdp_rings = NULL;
3177 struct ice_tx_ring *tx_rings = NULL;
3178 struct ice_rx_ring *rx_rings = NULL;
3179 struct ice_vsi *vsi = np->vsi;
3180 struct ice_pf *pf = vsi->back;
3181 int i, timeout = 50, err = 0;
3182 struct ice_hw *hw = &pf->hw;
3183 u16 new_rx_cnt, new_tx_cnt;
3184 bool hsplit;
3185
3186 if (ring->tx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
3187 ring->tx_pending < ICE_MIN_NUM_DESC ||
3188 ring->rx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
3189 ring->rx_pending < ICE_MIN_NUM_DESC) {
3190 netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
3191 ring->tx_pending, ring->rx_pending,
3192 ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC_BY_MAC(hw),
3193 ICE_REQ_DESC_MULTIPLE);
3194 return -EINVAL;
3195 }
3196
3197 /* Return if there are no rings (device is reloading) */
3198 if (!vsi->tx_rings || !vsi->rx_rings)
3199 return -EBUSY;
3200
3201 new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
3202 if (new_tx_cnt != ring->tx_pending)
3203 netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
3204 new_tx_cnt);
3205 new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
3206 if (new_rx_cnt != ring->rx_pending)
3207 netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
3208 new_rx_cnt);
3209
3210 hsplit = kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
3211
3212 /* if nothing to do return success */
3213 if (new_tx_cnt == vsi->tx_rings[0]->count &&
3214 new_rx_cnt == vsi->rx_rings[0]->count &&
3215 hsplit == vsi->hsplit) {
3216 netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
3217 return 0;
3218 }
3219
3220 /* If there is an AF_XDP UMEM attached to any of the Rx rings,
3221 * disallow changing the number of descriptors -- regardless of
3222 * whether the netdev is running or not.
3223 */
3224 if (ice_xsk_any_rx_ring_ena(vsi))
3225 return -EBUSY;
3226
3227 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3228 timeout--;
3229 if (!timeout)
3230 return -EBUSY;
3231 usleep_range(1000, 2000);
3232 }
3233
3234 /* set for the next time the netdev is started */
3235 if (!netif_running(vsi->netdev)) {
3236 ice_for_each_alloc_txq(vsi, i)
3237 vsi->tx_rings[i]->count = new_tx_cnt;
3238 ice_for_each_alloc_rxq(vsi, i)
3239 vsi->rx_rings[i]->count = new_rx_cnt;
3240 if (ice_is_xdp_ena_vsi(vsi))
3241 ice_for_each_xdp_txq(vsi, i)
3242 vsi->xdp_rings[i]->count = new_tx_cnt;
3243 vsi->num_tx_desc = (u16)new_tx_cnt;
3244 vsi->num_rx_desc = (u16)new_rx_cnt;
3245 vsi->hsplit = hsplit;
3246
3247 netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
3248 goto done;
3249 }
3250
3251 if (new_tx_cnt == vsi->tx_rings[0]->count)
3252 goto process_rx;
3253
3254 /* alloc updated Tx resources */
3255 netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
3256 vsi->tx_rings[0]->count, new_tx_cnt);
3257
3258 tx_rings = kcalloc(vsi->num_txq, sizeof(*tx_rings), GFP_KERNEL);
3259 if (!tx_rings) {
3260 err = -ENOMEM;
3261 goto done;
3262 }
3263
3264 ice_for_each_txq(vsi, i) {
3265 /* clone ring and setup updated count */
3266 tx_rings[i] = *vsi->tx_rings[i];
3267 tx_rings[i].count = new_tx_cnt;
3268 tx_rings[i].desc = NULL;
3269 tx_rings[i].tx_buf = NULL;
3270 tx_rings[i].tstamp_ring = NULL;
3271 tx_rings[i].tx_tstamps = &pf->ptp.port.tx;
3272 err = ice_setup_tx_ring(&tx_rings[i]);
3273 if (err) {
3274 while (i--)
3275 ice_clean_tx_ring(&tx_rings[i]);
3276 kfree(tx_rings);
3277 goto done;
3278 }
3279 }
3280
3281 if (!ice_is_xdp_ena_vsi(vsi))
3282 goto process_rx;
3283
3284 /* alloc updated XDP resources */
3285 netdev_info(netdev, "Changing XDP descriptor count from %d to %d\n",
3286 vsi->xdp_rings[0]->count, new_tx_cnt);
3287
3288 xdp_rings = kcalloc(vsi->num_xdp_txq, sizeof(*xdp_rings), GFP_KERNEL);
3289 if (!xdp_rings) {
3290 err = -ENOMEM;
3291 goto free_tx;
3292 }
3293
3294 ice_for_each_xdp_txq(vsi, i) {
3295 /* clone ring and setup updated count */
3296 xdp_rings[i] = *vsi->xdp_rings[i];
3297 xdp_rings[i].count = new_tx_cnt;
3298 xdp_rings[i].desc = NULL;
3299 xdp_rings[i].tx_buf = NULL;
3300 err = ice_setup_tx_ring(&xdp_rings[i]);
3301 if (err) {
3302 while (i--)
3303 ice_clean_tx_ring(&xdp_rings[i]);
3304 kfree(xdp_rings);
3305 goto free_tx;
3306 }
3307 ice_set_ring_xdp(&xdp_rings[i]);
3308 }
3309
3310 process_rx:
3311 if (new_rx_cnt == vsi->rx_rings[0]->count)
3312 goto process_link;
3313
3314 /* alloc updated Rx resources */
3315 netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
3316 vsi->rx_rings[0]->count, new_rx_cnt);
3317
3318 rx_rings = kcalloc(vsi->num_rxq, sizeof(*rx_rings), GFP_KERNEL);
3319 if (!rx_rings) {
3320 err = -ENOMEM;
3321 goto done;
3322 }
3323
3324 ice_for_each_rxq(vsi, i) {
3325 /* clone ring and setup updated count */
3326 rx_rings[i] = *vsi->rx_rings[i];
3327 rx_rings[i].count = new_rx_cnt;
3328 rx_rings[i].cached_phctime = pf->ptp.cached_phc_time;
3329 rx_rings[i].desc = NULL;
3330 rx_rings[i].xdp_buf = NULL;
3331
3332 /* this is to allow wr32 to have something to write to
3333 * during early allocation of Rx buffers
3334 */
3335 rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS;
3336
3337 err = ice_setup_rx_ring(&rx_rings[i]);
3338 if (err)
3339 goto rx_unwind;
3340 rx_unwind:
3341 if (err) {
3342 while (i) {
3343 i--;
3344 ice_free_rx_ring(&rx_rings[i]);
3345 }
3346 kfree(rx_rings);
3347 err = -ENOMEM;
3348 goto free_tx;
3349 }
3350 }
3351
3352 process_link:
3353 vsi->hsplit = hsplit;
3354
3355 /* Bring the interface down, copy in the new ring info, then restore the
3356 * interface. If the VSI is up, bring it down and then back up.
3357 */
3358 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
3359 ice_down(vsi);
3360
3361 if (tx_rings) {
3362 ice_for_each_txq(vsi, i) {
3363 ice_free_tx_ring(vsi->tx_rings[i]);
3364 *vsi->tx_rings[i] = tx_rings[i];
3365 }
3366 kfree(tx_rings);
3367 }
3368
3369 if (rx_rings) {
3370 ice_for_each_rxq(vsi, i) {
3371 ice_free_rx_ring(vsi->rx_rings[i]);
3372 /* copy the real tail offset */
3373 rx_rings[i].tail = vsi->rx_rings[i]->tail;
3374 /* this is to fake out the allocation routine
3375 * into thinking it has to realloc everything
3376 * but the recycling logic will let us re-use
3377 * the buffers allocated above
3378 */
3379 rx_rings[i].next_to_use = 0;
3380 rx_rings[i].next_to_clean = 0;
3381 rx_rings[i].next_to_alloc = 0;
3382 *vsi->rx_rings[i] = rx_rings[i];
3383 }
3384 kfree(rx_rings);
3385 }
3386
3387 if (xdp_rings) {
3388 ice_for_each_xdp_txq(vsi, i) {
3389 ice_free_tx_ring(vsi->xdp_rings[i]);
3390 *vsi->xdp_rings[i] = xdp_rings[i];
3391 }
3392 kfree(xdp_rings);
3393 }
3394
3395 vsi->num_tx_desc = new_tx_cnt;
3396 vsi->num_rx_desc = new_rx_cnt;
3397 ice_up(vsi);
3398 }
3399 goto done;
3400
3401 free_tx:
3402 /* error cleanup if the Rx allocations failed after getting Tx */
3403 if (tx_rings) {
3404 ice_for_each_txq(vsi, i)
3405 ice_free_tx_ring(&tx_rings[i]);
3406 kfree(tx_rings);
3407 }
3408
3409 done:
3410 clear_bit(ICE_CFG_BUSY, pf->state);
3411 return err;
3412 }
3413
3414 /**
3415 * ice_get_pauseparam - Get Flow Control status
3416 * @netdev: network interface device structure
3417 * @pause: ethernet pause (flow control) parameters
3418 *
3419 * Get requested flow control status from PHY capability.
3420 * If autoneg is true, then ethtool will send the ETHTOOL_GSET ioctl which
3421 * is handled by ice_get_link_ksettings. ice_get_link_ksettings will report
3422 * the negotiated Rx/Tx pause via lp_advertising.
3423 */
3424 static void
3425 ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
3426 {
3427 struct ice_netdev_priv *np = netdev_priv(netdev);
3428 struct ice_port_info *pi = np->vsi->port_info;
3429 struct ice_aqc_get_phy_caps_data *pcaps;
3430 struct ice_dcbx_cfg *dcbx_cfg;
3431 int status;
3432
3433 /* Initialize pause params */
3434 pause->rx_pause = 0;
3435 pause->tx_pause = 0;
3436
3437 dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
3438
3439 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3440 if (!pcaps)
3441 return;
3442
3443 /* Get current PHY config */
3444 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
3445 NULL);
3446 if (status)
3447 goto out;
3448
3449 pause->autoneg = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
3450 AUTONEG_DISABLE;
3451
3452 if (dcbx_cfg->pfc.pfcena)
3453 /* PFC enabled so report LFC as off */
3454 goto out;
3455
3456 if (pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3457 pause->tx_pause = 1;
3458 if (pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3459 pause->rx_pause = 1;
3460
3461 out:
3462 kfree(pcaps);
3463 }
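
/* Editor's note: illustrative sketch, not driver code. It shows the
 * mapping ice_get_pauseparam() performs above: each direction of link
 * flow control (LFC) is reported from its own PHY capability bit, and an
 * enabled PFC configuration masks LFC entirely. The TOY_EN_* bits are
 * hypothetical stand-ins for the ICE_AQC_PHY_EN_*_LINK_PAUSE bits.
 */
#define TOY_EN_TX_PAUSE	BIT(0)
#define TOY_EN_RX_PAUSE	BIT(1)

static void __maybe_unused
toy_caps_to_pause(u8 caps, bool pfc_ena, struct ethtool_pauseparam *pause)
{
	pause->tx_pause = 0;
	pause->rx_pause = 0;

	/* PFC and LFC are mutually exclusive; report LFC as off */
	if (pfc_ena)
		return;

	pause->tx_pause = !!(caps & TOY_EN_TX_PAUSE);
	pause->rx_pause = !!(caps & TOY_EN_RX_PAUSE);
}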
3464
3465 /**
3466 * ice_set_pauseparam - Set Flow Control parameter
3467 * @netdev: network interface device structure
3468 * @pause: return Tx/Rx flow control status
3469 */
3470 static int
3471 ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
3472 {
3473 struct ice_netdev_priv *np = netdev_priv(netdev);
3474 struct ice_aqc_get_phy_caps_data *pcaps;
3475 struct ice_link_status *hw_link_info;
3476 struct ice_pf *pf = np->vsi->back;
3477 struct ice_dcbx_cfg *dcbx_cfg;
3478 struct ice_vsi *vsi = np->vsi;
3479 struct ice_hw *hw = &pf->hw;
3480 struct ice_port_info *pi;
3481 u8 aq_failures;
3482 bool link_up;
3483 u32 is_an;
3484 int err;
3485
3486 pi = vsi->port_info;
3487 hw_link_info = &pi->phy.link_info;
3488 dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
3489 link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
3490
3491 /* Changing the port's flow control is not supported if this isn't the
3492 * PF VSI
3493 */
3494 if (vsi->type != ICE_VSI_PF) {
3495 netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n");
3496 return -EOPNOTSUPP;
3497 }
3498
3499 /* Get pause param reports the configured and negotiated flow control
3500 * pause when ETHTOOL_GLINKSETTINGS is defined. Since ETHTOOL_GLINKSETTINGS
3501 * is defined, get pause param reports the SW configured setting in
3502 * pause->autoneg, so compare pause->autoneg with the SW configured value
3503 * to prevent the user from using set pause param to change autoneg.
3504 */
3505 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3506 if (!pcaps)
3507 return -ENOMEM;
3508
3509 /* Get current PHY config */
3510 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
3511 NULL);
3512 if (err) {
3513 kfree(pcaps);
3514 return err;
3515 }
3516
3517 is_an = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
3518 AUTONEG_DISABLE;
3519
3520 kfree(pcaps);
3521
3522 if (pause->autoneg != is_an) {
3523 netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
3524 return -EOPNOTSUPP;
3525 }
3526
3527 /* If we have link and don't have autoneg */
3528 if (!test_bit(ICE_DOWN, pf->state) &&
3529 !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
3530 /* Send message that it might not necessarily work */
3531 netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
3532 }
3533
3534 if (dcbx_cfg->pfc.pfcena) {
3535 netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n");
3536 return -EOPNOTSUPP;
3537 }
3538 if (pause->rx_pause && pause->tx_pause)
3539 pi->fc.req_mode = ICE_FC_FULL;
3540 else if (pause->rx_pause && !pause->tx_pause)
3541 pi->fc.req_mode = ICE_FC_RX_PAUSE;
3542 else if (!pause->rx_pause && pause->tx_pause)
3543 pi->fc.req_mode = ICE_FC_TX_PAUSE;
3544 else if (!pause->rx_pause && !pause->tx_pause)
3545 pi->fc.req_mode = ICE_FC_NONE;
3546 else
3547 return -EINVAL;
3548
3549 /* Set the FC mode and only restart AN if link is up */
3550 err = ice_set_fc(pi, &aq_failures, link_up);
3551
3552 if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
3553 netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
3554 err, libie_aq_str(hw->adminq.sq_last_status));
3555 err = -EAGAIN;
3556 } else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
3557 netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
3558 err, libie_aq_str(hw->adminq.sq_last_status));
3559 err = -EAGAIN;
3560 } else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
3561 netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
3562 err, libie_aq_str(hw->adminq.sq_last_status));
3563 err = -EAGAIN;
3564 }
3565
3566 return err;
3567 }
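
/* Editor's note: illustrative sketch, not driver code. The four
 * (rx_pause, tx_pause) combinations handled in ice_set_pauseparam()
 * above map one-to-one onto the requested flow-control modes, which is
 * why the driver's trailing "else return -EINVAL" can never be reached
 * and exists purely as a defensive default.
 */
static enum ice_fc_mode __maybe_unused
toy_pause_to_fc_mode(bool rx_pause, bool tx_pause)
{
	if (rx_pause && tx_pause)
		return ICE_FC_FULL;
	if (rx_pause)
		return ICE_FC_RX_PAUSE;
	if (tx_pause)
		return ICE_FC_TX_PAUSE;
	return ICE_FC_NONE;
}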
3568
3569 /**
3570 * ice_get_rxfh_key_size - get the RSS hash key size
3571 * @netdev: network interface device structure
3572 *
3573 * Returns the RSS hash key size.
3574 */
3575 static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
3576 {
3577 return ICE_VSIQF_HKEY_ARRAY_SIZE;
3578 }
3579
3580 /**
3581 * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
3582 * @netdev: network interface device structure
3583 *
3584 * Returns the table size.
3585 */
3586 static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
3587 {
3588 struct ice_netdev_priv *np = netdev_priv(netdev);
3589
3590 return np->vsi->rss_table_size;
3591 }
3592
3593 /**
3594 * ice_get_rxfh - get the Rx flow hash indirection table
3595 * @netdev: network interface device structure
3596 * @rxfh: pointer to param struct (indir, key, hfunc)
3597 *
3598 * Reads the indirection table directly from the hardware.
3599 */
3600 static int
3601 ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
3602 {
3603 struct ice_netdev_priv *np = netdev_priv(netdev);
3604 struct ice_vsi *vsi = np->vsi;
3605 struct ice_pf *pf = vsi->back;
3606 u16 qcount, offset;
3607 int err, i;
3608 u8 *lut;
3609
3610 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
3611 netdev_warn(netdev, "RSS is not supported on this VSI!\n");
3612 return -EOPNOTSUPP;
3613 }
3614
3615 qcount = vsi->mqprio_qopt.qopt.count[0];
3616 offset = vsi->mqprio_qopt.qopt.offset[0];
3617
3618 rxfh->hfunc = ETH_RSS_HASH_TOP;
3619 if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
3620 rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
3621
3622 if (!rxfh->indir)
3623 return 0;
3624
3625 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
3626 if (!lut)
3627 return -ENOMEM;
3628
3629 err = ice_get_rss(vsi, rxfh->key, lut, vsi->rss_table_size);
3630 if (err)
3631 goto out;
3632
3633 if (ice_is_adq_active(pf)) {
3634 for (i = 0; i < vsi->rss_table_size; i++)
3635 rxfh->indir[i] = offset + lut[i] % qcount;
3636 goto out;
3637 }
3638
3639 for (i = 0; i < vsi->rss_table_size; i++)
3640 rxfh->indir[i] = lut[i];
3641
3642 out:
3643 kfree(lut);
3644 return err;
3645 }
3646
3647 /**
3648 * ice_set_rxfh - set the Rx flow hash indirection table
3649 * @netdev: network interface device structure
3650 * @rxfh: pointer to param struct (indir, key, hfunc)
3651 * @extack: extended ACK from the Netlink message
3652 *
3653 * Returns -EINVAL if the table specifies an invalid queue ID, otherwise
3654 * returns 0 after programming the table.
3655 */
3656 static int
3657 ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
3658 struct netlink_ext_ack *extack)
3659 {
3660 struct ice_netdev_priv *np = netdev_priv(netdev);
3661 u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
3662 struct ice_vsi *vsi = np->vsi;
3663 struct ice_pf *pf = vsi->back;
3664 struct device *dev;
3665 int err;
3666
3667 dev = ice_pf_to_dev(pf);
3668 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
3669 rxfh->hfunc != ETH_RSS_HASH_TOP)
3670 return -EOPNOTSUPP;
3671
3672 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
3673 /* RSS is not supported; return an error */
3674 netdev_warn(netdev, "RSS is not configured on this VSI!\n");
3675 return -EIO;
3676 }
3677
3678 if (ice_is_adq_active(pf)) {
3679 netdev_err(netdev, "Cannot change RSS params with ADQ configured.\n");
3680 return -EOPNOTSUPP;
3681 }
3682
3683 /* Update the VSI's hash function */
3684 if (rxfh->input_xfrm & RXH_XFRM_SYM_XOR)
3685 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
3686
3687 err = ice_set_rss_hfunc(vsi, hfunc);
3688 if (err)
3689 return err;
3690
3691 if (rxfh->key) {
3692 if (!vsi->rss_hkey_user) {
3693 vsi->rss_hkey_user =
3694 devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE,
3695 GFP_KERNEL);
3696 if (!vsi->rss_hkey_user)
3697 return -ENOMEM;
3698 }
3699 memcpy(vsi->rss_hkey_user, rxfh->key,
3700 ICE_VSIQF_HKEY_ARRAY_SIZE);
3701
3702 err = ice_set_rss_key(vsi, vsi->rss_hkey_user);
3703 if (err)
3704 return err;
3705 }
3706
3707 if (!vsi->rss_lut_user) {
3708 vsi->rss_lut_user = devm_kzalloc(dev, vsi->rss_table_size,
3709 GFP_KERNEL);
3710 if (!vsi->rss_lut_user)
3711 return -ENOMEM;
3712 }
3713
3714 /* Each 32-bit value pointed to by 'indir' is stored as a LUT entry */
3715 if (rxfh->indir) {
3716 int i;
3717
3718 for (i = 0; i < vsi->rss_table_size; i++)
3719 vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]);
3720 } else {
3721 ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
3722 vsi->rss_size);
3723 }
3724
3725 err = ice_set_rss_lut(vsi, vsi->rss_lut_user, vsi->rss_table_size);
3726 if (err)
3727 return err;
3728
3729 return 0;
3730 }
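
/* Editor's note: illustrative sketch, not driver code. When no
 * indirection table is supplied, ice_set_rxfh() above falls back to
 * ice_fill_rss_lut(), whose effect is the simple round-robin fill
 * sketched here: hash bucket i steers to queue (i % rss_size), spreading
 * buckets evenly across the active queues.
 */
static void __maybe_unused
toy_fill_rss_lut(u8 *lut, u16 lut_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < lut_size; i++)
		lut[i] = i % rss_size;
}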
3731
3732 static int
3733 ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
3734 {
3735 struct ice_pf *pf = ice_netdev_to_pf(dev);
3736
3737 /* only report timestamping if PTP is enabled */
3738 if (pf->ptp.state != ICE_PTP_READY)
3739 return ethtool_op_get_ts_info(dev, info);
3740
3741 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
3742 SOF_TIMESTAMPING_TX_HARDWARE |
3743 SOF_TIMESTAMPING_RX_HARDWARE |
3744 SOF_TIMESTAMPING_RAW_HARDWARE;
3745
3746 info->phc_index = ice_ptp_clock_index(pf);
3747
3748 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
3749
3750 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
3751
3752 return 0;
3753 }
3754
3755 /**
3756 * ice_get_max_txq - return the maximum number of Tx queues in a PF
3757 * @pf: PF structure
3758 */
3759 static int ice_get_max_txq(struct ice_pf *pf)
3760 {
3761 return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_txq);
3762 }
3763
3764 /**
3765 * ice_get_max_rxq - return the maximum number of Rx queues in a PF
3766 * @pf: PF structure
3767 */
3768 static int ice_get_max_rxq(struct ice_pf *pf)
3769 {
3770 return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_rxq);
3771 }
3772
3773 /**
3774 * ice_get_combined_cnt - return the current number of combined channels
3775 * @vsi: PF VSI pointer
3776 *
3777 * Go through all queue vectors and count ones that have both Rx and Tx ring
3778 * attached
3779 */
3780 static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
3781 {
3782 u32 combined = 0;
3783 int q_idx;
3784
3785 ice_for_each_q_vector(vsi, q_idx) {
3786 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
3787
3788 combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx);
3789 }
3790
3791 return combined;
3792 }
3793
3794 /**
3795 * ice_get_channels - get the current and max supported channels
3796 * @dev: network interface device structure
3797 * @ch: ethtool channel data structure
3798 */
3799 static void
3800 ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
3801 {
3802 struct ice_netdev_priv *np = netdev_priv(dev);
3803 struct ice_vsi *vsi = np->vsi;
3804 struct ice_pf *pf = vsi->back;
3805
3806 /* report maximum channels */
3807 ch->max_rx = ice_get_max_rxq(pf);
3808 ch->max_tx = ice_get_max_txq(pf);
3809 ch->max_combined = min_t(int, ch->max_rx, ch->max_tx);
3810
3811 /* report current channels */
3812 ch->combined_count = ice_get_combined_cnt(vsi);
3813 ch->rx_count = vsi->num_rxq - ch->combined_count;
3814 ch->tx_count = vsi->num_txq - ch->combined_count;
3815
3816 /* report other queues */
3817 ch->other_count = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
3818 ch->max_other = ch->other_count;
3819 }
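
/* Editor's note: illustrative sketch, not driver code. It restates the
 * accounting behind ice_get_combined_cnt() and ice_get_channels() above:
 * every Rx/Tx ring pair sharing one interrupt vector counts as a
 * "combined" channel, and whatever remains on either side is reported as
 * a dedicated Rx or Tx channel.
 */
static u32 __maybe_unused
toy_count_combined(const u16 *tx_per_vec, const u16 *rx_per_vec, int n_vec)
{
	u32 combined = 0;
	int v;

	for (v = 0; v < n_vec; v++)
		combined += min(tx_per_vec[v], rx_per_vec[v]);

	return combined;
}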
3820
3821 /**
3822 * ice_get_valid_rss_size - return valid number of RSS queues
3823 * @hw: pointer to the HW structure
3824 * @new_size: requested RSS queues
3825 */
3826 static int ice_get_valid_rss_size(struct ice_hw *hw, int new_size)
3827 {
3828 struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;
3829
3830 return min_t(int, new_size, BIT(caps->rss_table_entry_width));
3831 }
3832
3833 /**
3834 * ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size
3835 * @vsi: VSI to reconfigure RSS LUT on
3836 * @req_rss_size: requested range of queue numbers for hashing
3837 *
3838 * Set the VSI's RSS parameters, configure the RSS LUT based on these.
3839 */
3840 static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
3841 {
3842 struct ice_pf *pf = vsi->back;
3843 struct device *dev;
3844 struct ice_hw *hw;
3845 int err;
3846 u8 *lut;
3847
3848 dev = ice_pf_to_dev(pf);
3849 hw = &pf->hw;
3850
3851 if (!req_rss_size)
3852 return -EINVAL;
3853
3854 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
3855 if (!lut)
3856 return -ENOMEM;
3857
3858 /* set RSS LUT parameters */
3859 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
3860 vsi->rss_size = 1;
3861 else
3862 vsi->rss_size = ice_get_valid_rss_size(hw, req_rss_size);
3863
3864 /* create/set RSS LUT */
3865 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
3866 err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
3867 if (err)
3868 dev_err(dev, "Cannot set RSS lut, err %d aq_err %s\n", err,
3869 libie_aq_str(hw->adminq.sq_last_status));
3870
3871 kfree(lut);
3872 return err;
3873 }
3874
3875 /**
3876 * ice_set_channels - set the number channels
3877 * @dev: network interface device structure
3878 * @ch: ethtool channel data structure
3879 */
3880 static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
3881 {
3882 struct ice_netdev_priv *np = netdev_priv(dev);
3883 struct ice_vsi *vsi = np->vsi;
3884 struct ice_pf *pf = vsi->back;
3885 int new_rx = 0, new_tx = 0;
3886 bool locked = false;
3887 int ret = 0;
3888
3889 /* do not support changing channels in Safe Mode */
3890 if (ice_is_safe_mode(pf)) {
3891 netdev_err(dev, "Changing channel in Safe Mode is not supported\n");
3892 return -EOPNOTSUPP;
3893 }
3894 /* do not support changing other_count */
3895 if (ch->other_count != (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1U : 0U))
3896 return -EINVAL;
3897
3898 if (ice_is_adq_active(pf)) {
3899 netdev_err(dev, "Cannot set channels with ADQ configured.\n");
3900 return -EOPNOTSUPP;
3901 }
3902
3903 if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) {
3904 netdev_err(dev, "Cannot set channels when Flow Director filters are active\n");
3905 return -EOPNOTSUPP;
3906 }
3907
3908 if (ch->rx_count && ch->tx_count) {
3909 netdev_err(dev, "Dedicated RX or TX channels cannot be used simultaneously\n");
3910 return -EINVAL;
3911 }
3912
3913 new_rx = ch->combined_count + ch->rx_count;
3914 new_tx = ch->combined_count + ch->tx_count;
3915
3916 if (new_rx < vsi->tc_cfg.numtc) {
3917 netdev_err(dev, "Cannot set less Rx channels, than Traffic Classes you have (%u)\n",
3918 vsi->tc_cfg.numtc);
3919 return -EINVAL;
3920 }
3921 if (new_tx < vsi->tc_cfg.numtc) {
3922 netdev_err(dev, "Cannot set less Tx channels, than Traffic Classes you have (%u)\n",
3923 vsi->tc_cfg.numtc);
3924 return -EINVAL;
3925 }
3926 if (new_rx > ice_get_max_rxq(pf)) {
3927 netdev_err(dev, "Maximum allowed Rx channels is %d\n",
3928 ice_get_max_rxq(pf));
3929 return -EINVAL;
3930 }
3931 if (new_tx > ice_get_max_txq(pf)) {
3932 netdev_err(dev, "Maximum allowed Tx channels is %d\n",
3933 ice_get_max_txq(pf));
3934 return -EINVAL;
3935 }
3936
3937 if (pf->cdev_info && pf->cdev_info->adev) {
3938 mutex_lock(&pf->adev_mutex);
3939 device_lock(&pf->cdev_info->adev->dev);
3940 locked = true;
3941 if (pf->cdev_info->adev->dev.driver) {
3942 netdev_err(dev, "Cannot change channels when RDMA is active\n");
3943 ret = -EBUSY;
3944 goto adev_unlock;
3945 }
3946 }
3947
3948 ice_vsi_recfg_qs(vsi, new_rx, new_tx, locked);
3949
3950 if (!netif_is_rxfh_configured(dev)) {
3951 ret = ice_vsi_set_dflt_rss_lut(vsi, new_rx);
3952 goto adev_unlock;
3953 }
3954
3955 /* Update rss_size due to change in Rx queues */
3956 vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx);
3957
3958 adev_unlock:
3959 if (locked) {
3960 device_unlock(&pf->cdev_info->adev->dev);
3961 mutex_unlock(&pf->adev_mutex);
3962 }
3963 return ret;
3964 }
3965
3966 /**
3967 * ice_get_wol - get current Wake on LAN configuration
3968 * @netdev: network interface device structure
3969 * @wol: Ethtool structure to retrieve WoL settings
3970 */
3971 static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
3972 {
3973 struct ice_netdev_priv *np = netdev_priv(netdev);
3974 struct ice_pf *pf = np->vsi->back;
3975
3976 if (np->vsi->type != ICE_VSI_PF)
3977 netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n");
3978
3979 /* Get WoL settings based on the HW capability */
3980 if (ice_is_wol_supported(&pf->hw)) {
3981 wol->supported = WAKE_MAGIC;
3982 wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0;
3983 } else {
3984 wol->supported = 0;
3985 wol->wolopts = 0;
3986 }
3987 }
3988
3989 /**
3990 * ice_set_wol - set Wake on LAN on supported device
3991 * @netdev: network interface device structure
3992 * @wol: Ethtool structure to set WoL
3993 */
3994 static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
3995 {
3996 struct ice_netdev_priv *np = netdev_priv(netdev);
3997 struct ice_vsi *vsi = np->vsi;
3998 struct ice_pf *pf = vsi->back;
3999
4000 if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(&pf->hw))
4001 return -EOPNOTSUPP;
4002
4003 /* only magic packet is supported */
4004 if (wol->wolopts && wol->wolopts != WAKE_MAGIC)
4005 return -EOPNOTSUPP;
4006
4007 /* Set WoL only if there is a new value */
4008 if (pf->wol_ena != !!wol->wolopts) {
4009 pf->wol_ena = !!wol->wolopts;
4010 device_set_wakeup_enable(ice_pf_to_dev(pf), pf->wol_ena);
4011 netdev_dbg(netdev, "WoL magic packet %sabled\n",
4012 pf->wol_ena ? "en" : "dis");
4013 }
4014
4015 return 0;
4016 }
4017
4018 /**
4019 * ice_get_rc_coalesce - get ITR values for specific ring container
4020 * @ec: ethtool structure to fill with driver's coalesce settings
4021 * @rc: ring container that the ITR values will come from
4022 *
4023 * Query the device for ice_ring_container specific ITR values. This is
4024 * done per ice_ring_container because each q_vector can have 1 or more rings
4025 * and all of said ring(s) will have the same ITR values.
4026 *
4027 * Returns 0 on success, negative otherwise.
4028 */
4029 static int
4030 ice_get_rc_coalesce(struct ethtool_coalesce *ec, struct ice_ring_container *rc)
4031 {
4032 if (!rc->rx_ring)
4033 return -EINVAL;
4034
4035 switch (rc->type) {
4036 case ICE_RX_CONTAINER:
4037 ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc);
4038 ec->rx_coalesce_usecs = rc->itr_setting;
4039 ec->rx_coalesce_usecs_high = rc->rx_ring->q_vector->intrl;
4040 break;
4041 case ICE_TX_CONTAINER:
4042 ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc);
4043 ec->tx_coalesce_usecs = rc->itr_setting;
4044 break;
4045 default:
4046 dev_dbg(ice_pf_to_dev(rc->rx_ring->vsi->back), "Invalid c_type %d\n", rc->type);
4047 return -EINVAL;
4048 }
4049
4050 return 0;
4051 }
4052
4053 /**
4054 * ice_get_q_coalesce - get a queue's ITR/INTRL (coalesce) settings
4055 * @vsi: VSI associated to the queue for getting ITR/INTRL (coalesce) settings
4056 * @ec: coalesce settings to program the device with
4057 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
4058 *
4059 * Return 0 on success, and negative under the following conditions:
4060 * 1. Getting Tx or Rx ITR/INTRL (coalesce) settings failed.
4061 * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
4062 */
4063 static int
4064 ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
4065 {
4066 if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
4067 if (ice_get_rc_coalesce(ec,
4068 &vsi->rx_rings[q_num]->q_vector->rx))
4069 return -EINVAL;
4070 if (ice_get_rc_coalesce(ec,
4071 &vsi->tx_rings[q_num]->q_vector->tx))
4072 return -EINVAL;
4073 } else if (q_num < vsi->num_rxq) {
4074 if (ice_get_rc_coalesce(ec,
4075 &vsi->rx_rings[q_num]->q_vector->rx))
4076 return -EINVAL;
4077 } else if (q_num < vsi->num_txq) {
4078 if (ice_get_rc_coalesce(ec,
4079 &vsi->tx_rings[q_num]->q_vector->tx))
4080 return -EINVAL;
4081 } else {
4082 return -EINVAL;
4083 }
4084
4085 return 0;
4086 }
4087
4088 /**
4089 * __ice_get_coalesce - get ITR/INTRL values for the device
4090 * @netdev: pointer to the netdev associated with this query
4091 * @ec: ethtool structure to fill with driver's coalesce settings
4092 * @q_num: queue number to get the coalesce settings for
4093 *
4094 * If the caller passes in a negative q_num then we return coalesce settings
4095 * based on queue number 0, else use the actual q_num passed in.
4096 */
4097 static int
4098 __ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
4099 int q_num)
4100 {
4101 struct ice_netdev_priv *np = netdev_priv(netdev);
4102 struct ice_vsi *vsi = np->vsi;
4103
4104 if (q_num < 0)
4105 q_num = 0;
4106
4107 if (ice_get_q_coalesce(vsi, ec, q_num))
4108 return -EINVAL;
4109
4110 return 0;
4111 }
4112
4113 static int ice_get_coalesce(struct net_device *netdev,
4114 struct ethtool_coalesce *ec,
4115 struct kernel_ethtool_coalesce *kernel_coal,
4116 struct netlink_ext_ack *extack)
4117 {
4118 return __ice_get_coalesce(netdev, ec, -1);
4119 }
4120
4121 static int
4122 ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
4123 struct ethtool_coalesce *ec)
4124 {
4125 return __ice_get_coalesce(netdev, ec, q_num);
4126 }
4127
4128 /**
4129 * ice_set_rc_coalesce - set ITR values for specific ring container
4130 * @ec: ethtool structure from user to update ITR settings
4131 * @rc: ring container that the ITR values will come from
4132 * @vsi: VSI associated to the ring container
4133 *
4134 * Set specific ITR values. This is done per ice_ring_container because each
4135 * q_vector can have 1 or more rings and all of said ring(s) will have the same
4136 * ITR values.
4137 *
4138 * Returns 0 on success, negative otherwise.
4139 */
4140 static int
4141 ice_set_rc_coalesce(struct ethtool_coalesce *ec,
4142 struct ice_ring_container *rc, struct ice_vsi *vsi)
4143 {
4144 const char *c_type_str = (rc->type == ICE_RX_CONTAINER) ? "rx" : "tx";
4145 u32 use_adaptive_coalesce, coalesce_usecs;
4146 struct ice_pf *pf = vsi->back;
4147 u16 itr_setting;
4148
4149 if (!rc->rx_ring)
4150 return -EINVAL;
4151
4152 switch (rc->type) {
4153 case ICE_RX_CONTAINER:
4154 {
4155 struct ice_q_vector *q_vector = rc->rx_ring->q_vector;
4156
4157 if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
4158 (ec->rx_coalesce_usecs_high &&
4159 ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
4160 netdev_info(vsi->netdev, "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n",
4161 c_type_str, pf->hw.intrl_gran,
4162 ICE_MAX_INTRL);
4163 return -EINVAL;
4164 }
4165 if (ec->rx_coalesce_usecs_high != q_vector->intrl &&
4166 (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce)) {
4167 netdev_info(vsi->netdev, "Invalid value, %s-usecs-high cannot be changed if adaptive-tx or adaptive-rx is enabled\n",
4168 c_type_str);
4169 return -EINVAL;
4170 }
4171 if (ec->rx_coalesce_usecs_high != q_vector->intrl)
4172 q_vector->intrl = ec->rx_coalesce_usecs_high;
4173
4174 use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
4175 coalesce_usecs = ec->rx_coalesce_usecs;
4176
4177 break;
4178 }
4179 case ICE_TX_CONTAINER:
4180 use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
4181 coalesce_usecs = ec->tx_coalesce_usecs;
4182
4183 break;
4184 default:
4185 dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n",
4186 rc->type);
4187 return -EINVAL;
4188 }
4189
4190 itr_setting = rc->itr_setting;
4191 if (coalesce_usecs != itr_setting && use_adaptive_coalesce) {
4192 netdev_info(vsi->netdev, "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n",
4193 c_type_str, c_type_str);
4194 return -EINVAL;
4195 }
4196
4197 if (coalesce_usecs > ICE_ITR_MAX) {
4198 netdev_info(vsi->netdev, "Invalid value, %s-usecs range is 0-%d\n",
4199 c_type_str, ICE_ITR_MAX);
4200 return -EINVAL;
4201 }
4202
4203 if (use_adaptive_coalesce) {
4204 rc->itr_mode = ITR_DYNAMIC;
4205 } else {
4206 rc->itr_mode = ITR_STATIC;
4207 /* store the user-facing value the way it was set */
4208 rc->itr_setting = coalesce_usecs;
4209 /* write the change to the register */
4210 ice_write_itr(rc, coalesce_usecs);
4211 /* force writes to take effect immediately; the flush shouldn't
4212 * be done in the functions above because the intent is for
4213 * them to do lazy writes.
4214 */
4215 ice_flush(&pf->hw);
4216 }
4217
4218 return 0;
4219 }
4220
4221 /**
4222 * ice_set_q_coalesce - set a queue's ITR/INTRL (coalesce) settings
4223 * @vsi: VSI associated to the queue that need updating
4224 * @ec: coalesce settings to program the device with
4225 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
4226 *
4227 * Return 0 on success, and negative under the following conditions:
4228 * 1. Setting Tx or Rx ITR/INTRL (coalesce) settings failed.
4229 * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
4230 */
4231 static int
4232 ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
4233 {
4234 if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
4235 if (ice_set_rc_coalesce(ec,
4236 &vsi->rx_rings[q_num]->q_vector->rx,
4237 vsi))
4238 return -EINVAL;
4239
4240 if (ice_set_rc_coalesce(ec,
4241 &vsi->tx_rings[q_num]->q_vector->tx,
4242 vsi))
4243 return -EINVAL;
4244 } else if (q_num < vsi->num_rxq) {
4245 if (ice_set_rc_coalesce(ec,
4246 &vsi->rx_rings[q_num]->q_vector->rx,
4247 vsi))
4248 return -EINVAL;
4249 } else if (q_num < vsi->num_txq) {
4250 if (ice_set_rc_coalesce(ec,
4251 &vsi->tx_rings[q_num]->q_vector->tx,
4252 vsi))
4253 return -EINVAL;
4254 } else {
4255 return -EINVAL;
4256 }
4257
4258 return 0;
4259 }
4260
4261 /**
4262 * ice_print_if_odd_usecs - print message if user tries to set odd [tx|rx]-usecs
4263 * @netdev: netdev used for print
4264 * @itr_setting: previous user setting
4265 * @use_adaptive_coalesce: if adaptive coalesce is enabled or being enabled
4266 * @coalesce_usecs: requested value of [tx|rx]-usecs
4267 * @c_type_str: either "rx" or "tx" to match user set field of [tx|rx]-usecs
4268 */
4269 static void
4270 ice_print_if_odd_usecs(struct net_device *netdev, u16 itr_setting,
4271 u32 use_adaptive_coalesce, u32 coalesce_usecs,
4272 const char *c_type_str)
4273 {
4274 if (use_adaptive_coalesce)
4275 return;
4276
4277 if (itr_setting != coalesce_usecs && (coalesce_usecs % 2))
4278 netdev_info(netdev, "User set %s-usecs to %d, device only supports even values. Rounding down and attempting to set %s-usecs to %d\n",
4279 c_type_str, coalesce_usecs, c_type_str,
4280 ITR_REG_ALIGN(coalesce_usecs));
4281 }
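
/* Editor's note: illustrative sketch, not driver code. The ITR registers
 * count in 2 usec units, so the ITR_REG_ALIGN() used above effectively
 * clears the low bit of the requested value, rounding an odd
 * [tx|rx]-usecs setting down to the next even value (e.g. 25 -> 24).
 */
static u16 __maybe_unused toy_itr_align(u16 usecs)
{
	return usecs & ~(u16)1;
}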
4282
4283 /**
4284 * __ice_set_coalesce - set ITR/INTRL values for the device
4285 * @netdev: pointer to the netdev associated with this query
4286 * @ec: ethtool structure to fill with driver's coalesce settings
4287 * @q_num: queue number to get the coalesce settings for
4288 *
4289 * If the caller passes in a negative q_num then we set the coalesce settings
4290 * for all Tx/Rx queues, else use the actual q_num passed in.
4291 */
4292 static int
4293 __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
4294 int q_num)
4295 {
4296 struct ice_netdev_priv *np = netdev_priv(netdev);
4297 struct ice_vsi *vsi = np->vsi;
4298
4299 if (q_num < 0) {
4300 struct ice_q_vector *q_vector = vsi->q_vectors[0];
4301 int v_idx;
4302
4303 if (q_vector) {
4304 ice_print_if_odd_usecs(netdev, q_vector->rx.itr_setting,
4305 ec->use_adaptive_rx_coalesce,
4306 ec->rx_coalesce_usecs, "rx");
4307
4308 ice_print_if_odd_usecs(netdev, q_vector->tx.itr_setting,
4309 ec->use_adaptive_tx_coalesce,
4310 ec->tx_coalesce_usecs, "tx");
4311 }
4312
4313 ice_for_each_q_vector(vsi, v_idx) {
4314 /* In some cases if DCB is configured the num_[rx|tx]q
4315 * can be less than vsi->num_q_vectors. This check
4316 * accounts for that so we don't report a false failure
4317 */
4318 if (v_idx >= vsi->num_rxq && v_idx >= vsi->num_txq)
4319 goto set_complete;
4320
4321 if (ice_set_q_coalesce(vsi, ec, v_idx))
4322 return -EINVAL;
4323
4324 ice_set_q_vector_intrl(vsi->q_vectors[v_idx]);
4325 }
4326 goto set_complete;
4327 }
4328
4329 if (ice_set_q_coalesce(vsi, ec, q_num))
4330 return -EINVAL;
4331
4332 ice_set_q_vector_intrl(vsi->q_vectors[q_num]);
4333
4334 set_complete:
4335 return 0;
4336 }
4337
4338 static int ice_set_coalesce(struct net_device *netdev,
4339 struct ethtool_coalesce *ec,
4340 struct kernel_ethtool_coalesce *kernel_coal,
4341 struct netlink_ext_ack *extack)
4342 {
4343 return __ice_set_coalesce(netdev, ec, -1);
4344 }
4345
4346 static int
4347 ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
4348 struct ethtool_coalesce *ec)
4349 {
4350 return __ice_set_coalesce(netdev, ec, q_num);
4351 }
4352
4353 static void
4354 ice_repr_get_drvinfo(struct net_device *netdev,
4355 struct ethtool_drvinfo *drvinfo)
4356 {
4357 struct ice_repr *repr = ice_netdev_to_repr(netdev);
4358
4359 if (repr->ops.ready(repr))
4360 return;
4361
4362 __ice_get_drvinfo(netdev, drvinfo, repr->src_vsi);
4363 }
4364
4365 static void
4366 ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
4367 {
4368 struct ice_repr *repr = ice_netdev_to_repr(netdev);
4369
4370 /* for port representors only ETH_SS_STATS is supported */
4371 if (repr->ops.ready(repr) || stringset != ETH_SS_STATS)
4372 return;
4373
4374 __ice_get_strings(netdev, stringset, data, repr->src_vsi);
4375 }
4376
4377 static void
4378 ice_repr_get_ethtool_stats(struct net_device *netdev,
4379 struct ethtool_stats __always_unused *stats,
4380 u64 *data)
4381 {
4382 struct ice_repr *repr = ice_netdev_to_repr(netdev);
4383
4384 if (repr->ops.ready(repr))
4385 return;
4386
4387 __ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi);
4388 }
4389
4390 static int ice_repr_get_sset_count(struct net_device *netdev, int sset)
4391 {
4392 switch (sset) {
4393 case ETH_SS_STATS:
4394 return ICE_VSI_STATS_LEN;
4395 default:
4396 return -EOPNOTSUPP;
4397 }
4398 }
4399
4400 #define ICE_I2C_EEPROM_DEV_ADDR 0xA0
4401 #define ICE_I2C_EEPROM_DEV_ADDR2 0xA2
4402 #define ICE_MODULE_TYPE_SFP 0x03
4403 #define ICE_MODULE_TYPE_QSFP_PLUS 0x0D
4404 #define ICE_MODULE_TYPE_QSFP28 0x11
4405 #define ICE_MODULE_SFF_ADDR_MODE 0x04
4406 #define ICE_MODULE_SFF_DIAG_CAPAB 0x40
4407 #define ICE_MODULE_REVISION_ADDR 0x01
4408 #define ICE_MODULE_SFF_8472_COMP 0x5E
4409 #define ICE_MODULE_SFF_8472_SWAP 0x5C
4410 #define ICE_MODULE_QSFP_MAX_LEN 640
4411
4412 /**
4413 * ice_get_module_info - get SFF module type and revision information
4414 * @netdev: network interface device structure
4415 * @modinfo: module EEPROM size and layout information structure
4416 */
4417 static int
4418 ice_get_module_info(struct net_device *netdev,
4419 struct ethtool_modinfo *modinfo)
4420 {
4421 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4422 struct ice_hw *hw = &pf->hw;
4423 u8 sff8472_comp = 0;
4424 u8 sff8472_swap = 0;
4425 u8 sff8636_rev = 0;
4426 u8 value = 0;
4427 int status;
4428
4429 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00,
4430 0, &value, 1, 0, NULL);
4431 if (status)
4432 return status;
4433
4434 switch (value) {
4435 case ICE_MODULE_TYPE_SFP:
4436 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
4437 ICE_MODULE_SFF_8472_COMP, 0x00, 0,
4438 &sff8472_comp, 1, 0, NULL);
4439 if (status)
4440 return status;
4441 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
4442 ICE_MODULE_SFF_8472_SWAP, 0x00, 0,
4443 &sff8472_swap, 1, 0, NULL);
4444 if (status)
4445 return status;
4446
4447 if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) {
4448 modinfo->type = ETH_MODULE_SFF_8079;
4449 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
4450 } else if (sff8472_comp &&
4451 (sff8472_swap & ICE_MODULE_SFF_DIAG_CAPAB)) {
4452 modinfo->type = ETH_MODULE_SFF_8472;
4453 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
4454 } else {
4455 modinfo->type = ETH_MODULE_SFF_8079;
4456 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
4457 }
4458 break;
4459 case ICE_MODULE_TYPE_QSFP_PLUS:
4460 case ICE_MODULE_TYPE_QSFP28:
4461 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
4462 ICE_MODULE_REVISION_ADDR, 0x00, 0,
4463 &sff8636_rev, 1, 0, NULL);
4464 if (status)
4465 return status;
4466 /* Check revision compliance */
4467 if (sff8636_rev > 0x02) {
4468 /* Module is SFF-8636 compliant */
4469 modinfo->type = ETH_MODULE_SFF_8636;
4470 modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
4471 } else {
4472 modinfo->type = ETH_MODULE_SFF_8436;
4473 modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
4474 }
4475 break;
4476 default:
4477 netdev_warn(netdev, "SFF Module Type not recognized.\n");
4478 return -EINVAL;
4479 }
4480 return 0;
4481 }
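
/* Editor's note: illustrative sketch, not driver code. Byte 0 of the
 * module EEPROM identifies the transceiver family, which fixes how many
 * bytes ethtool may expose, as ice_get_module_info() decides above (the
 * driver additionally narrows SFP down to SFF-8079 when no diagnostic
 * page is implemented).
 */
static u32 __maybe_unused toy_module_eeprom_len(u8 identifier)
{
	switch (identifier) {
	case ICE_MODULE_TYPE_SFP:		/* 0x03 */
		return ETH_MODULE_SFF_8472_LEN;
	case ICE_MODULE_TYPE_QSFP_PLUS:		/* 0x0D */
	case ICE_MODULE_TYPE_QSFP28:		/* 0x11 */
		return ICE_MODULE_QSFP_MAX_LEN;
	default:
		return 0;			/* unrecognized module */
	}
}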
4482
4483 /**
4484 * ice_get_module_eeprom - fill buffer with SFF EEPROM contents
4485 * @netdev: network interface device structure
4486 * @ee: EEPROM dump request structure
4487 * @data: buffer to be filled with EEPROM contents
4488 */
4489 static int
4490 ice_get_module_eeprom(struct net_device *netdev,
4491 struct ethtool_eeprom *ee, u8 *data)
4492 {
4493 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4494 #define SFF_READ_BLOCK_SIZE 8
4495 u8 value[SFF_READ_BLOCK_SIZE] = { 0 };
4496 u8 addr = ICE_I2C_EEPROM_DEV_ADDR;
4497 struct ice_hw *hw = &pf->hw;
4498 bool is_sfp = false;
4499 unsigned int i, j;
4500 u16 offset = 0;
4501 u8 page = 0;
4502 int status;
4503
4504 if (!ee || !ee->len || !data)
4505 return -EINVAL;
4506
4507 status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0,
4508 NULL);
4509 if (status)
4510 return status;
4511
4512 if (value[0] == ICE_MODULE_TYPE_SFP)
4513 is_sfp = true;
4514
4515 memset(data, 0, ee->len);
4516 for (i = 0; i < ee->len; i += SFF_READ_BLOCK_SIZE) {
4517 offset = i + ee->offset;
4518 page = 0;
4519
4520 /* Check if we need to access the other memory page */
4521 if (is_sfp) {
4522 if (offset >= ETH_MODULE_SFF_8079_LEN) {
4523 offset -= ETH_MODULE_SFF_8079_LEN;
4524 addr = ICE_I2C_EEPROM_DEV_ADDR2;
4525 }
4526 } else {
4527 while (offset >= ETH_MODULE_SFF_8436_LEN) {
4528 /* Compute memory page number and offset. */
4529 offset -= ETH_MODULE_SFF_8436_LEN / 2;
4530 page++;
4531 }
4532 }
4533
4534 /* Bit 2 of EEPROM address 0x02 declares upper
4535 * pages are disabled on QSFP modules.
4536 * SFP modules only ever use page 0.
4537 */
4538 if (page == 0 || !(data[0x2] & 0x4)) {
4539 u32 copy_len;
4540
4541 /* If i2c bus is busy due to slow page change or
4542 * link management access, the call can fail. This is normal,
4543 * so we retry it a few times.
4544 */
4545 for (j = 0; j < 4; j++) {
4546 status = ice_aq_sff_eeprom(hw, 0, addr, offset, page,
4547 !is_sfp, value,
4548 SFF_READ_BLOCK_SIZE,
4549 0, NULL);
4550 netdev_dbg(netdev, "SFF %02X %02X %02X %X = %02X%02X%02X%02X.%02X%02X%02X%02X (%X)\n",
4551 addr, offset, page, is_sfp,
4552 value[0], value[1], value[2], value[3],
4553 value[4], value[5], value[6], value[7],
4554 status);
4555 if (status) {
4556 usleep_range(1500, 2500);
4557 memset(value, 0, SFF_READ_BLOCK_SIZE);
4558 continue;
4559 }
4560 break;
4561 }
4562
4563 /* Make sure we have enough room for the new block */
4564 copy_len = min_t(u32, SFF_READ_BLOCK_SIZE, ee->len - i);
4565 memcpy(data + i, value, copy_len);
4566 }
4567 }
4568 return 0;
4569 }
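
/* Editor's note: illustrative sketch, not driver code. It isolates the
 * address/page math from the read loop above: SFP modules rebase offsets
 * past the first 256 bytes onto a second I2C device address, while QSFP
 * modules keep one address but fold every additional 128 bytes into the
 * next upper memory page.
 */
static void __maybe_unused
toy_sff_locate(bool is_sfp, u16 flat_off, u8 *addr, u8 *page, u16 *offset)
{
	*addr = ICE_I2C_EEPROM_DEV_ADDR;
	*page = 0;
	*offset = flat_off;

	if (is_sfp) {
		if (*offset >= ETH_MODULE_SFF_8079_LEN) {
			*offset -= ETH_MODULE_SFF_8079_LEN;
			*addr = ICE_I2C_EEPROM_DEV_ADDR2;
		}
	} else {
		while (*offset >= ETH_MODULE_SFF_8436_LEN) {
			*offset -= ETH_MODULE_SFF_8436_LEN / 2;
			(*page)++;
		}
	}
}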
4570
4571 /**
4572 * ice_get_port_fec_stats - returns FEC correctable, uncorrectable stats per
4573 * pcsquad, pcsport
4574 * @hw: pointer to the HW struct
4575 * @pcs_quad: pcsquad for input port
4576 * @pcs_port: pcsport for input port
4577 * @fec_stats: buffer to hold FEC statistics for given port
4578 *
4579 * Return: 0 on success, negative on failure.
4580 */
4581 static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
4582 struct ethtool_fec_stats *fec_stats)
4583 {
4584 u32 fec_uncorr_low_val = 0, fec_uncorr_high_val = 0;
4585 u32 fec_corr_low_val = 0, fec_corr_high_val = 0;
4586 int err;
4587
4588 if (pcs_quad > 1 || pcs_port > 3)
4589 return -EINVAL;
4590
4591 err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_LOW,
4592 &fec_corr_low_val);
4593 if (err)
4594 return err;
4595
4596 err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_HIGH,
4597 &fec_corr_high_val);
4598 if (err)
4599 return err;
4600
4601 err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
4602 ICE_FEC_UNCORR_LOW,
4603 &fec_uncorr_low_val);
4604 if (err)
4605 return err;
4606
4607 err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
4608 ICE_FEC_UNCORR_HIGH,
4609 &fec_uncorr_high_val);
4610 if (err)
4611 return err;
4612
4613 fec_stats->corrected_blocks.total = (fec_corr_high_val << 16) +
4614 fec_corr_low_val;
4615 fec_stats->uncorrectable_blocks.total = (fec_uncorr_high_val << 16) +
4616 fec_uncorr_low_val;
4617 return 0;
4618 }
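
/* Editor's note: illustrative sketch, not driver code. The FEC block
 * counters are fetched as two 16-bit halves over separate AQ reads, then
 * recombined into a single 32-bit total exactly as done above.
 */
static u32 __maybe_unused toy_fec_combine(u32 high16, u32 low16)
{
	return (high16 << 16) + low16;
}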
4619
4620 /**
4621 * ice_get_fec_stats - returns FEC correctable, uncorrectable stats per netdev
4622 * @netdev: network interface device structure
4623 * @fec_stats: buffer to hold FEC statistics for given port
4624 * @hist: buffer to put FEC histogram statistics for given port
4626 */
4627 static void ice_get_fec_stats(struct net_device *netdev,
4628 struct ethtool_fec_stats *fec_stats,
4629 struct ethtool_fec_hist *hist)
4630 {
4631 struct ice_netdev_priv *np = netdev_priv(netdev);
4632 struct ice_port_topology port_topology;
4633 struct ice_port_info *pi;
4634 struct ice_pf *pf;
4635 struct ice_hw *hw;
4636 int err;
4637
4638 pf = np->vsi->back;
4639 hw = &pf->hw;
4640 pi = np->vsi->port_info;
4641
4642 /* Serdes parameters are not supported if not the PF VSI */
4643 if (np->vsi->type != ICE_VSI_PF || !pi)
4644 return;
4645
4646 err = ice_get_port_topology(hw, pi->lport, &port_topology);
4647 if (err) {
4648 netdev_info(netdev, "Extended register dump failed Lport %d\n",
4649 pi->lport);
4650 return;
4651 }
4652
4653 /* Get FEC correctable, uncorrectable counter */
4654 err = ice_get_port_fec_stats(hw, port_topology.pcs_quad_select,
4655 port_topology.pcs_port, fec_stats);
4656 if (err)
4657 netdev_info(netdev, "FEC stats get failed Lport %d Err %d\n",
4658 pi->lport, err);
4659 }
4660
4661 static void ice_get_eth_mac_stats(struct net_device *netdev,
4662 struct ethtool_eth_mac_stats *mac_stats)
4663 {
4664 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4665 struct ice_hw_port_stats *ps = &pf->stats;
4666
4667 mac_stats->FramesTransmittedOK = ps->eth.tx_unicast +
4668 ps->eth.tx_multicast +
4669 ps->eth.tx_broadcast;
4670 mac_stats->FramesReceivedOK = ps->eth.rx_unicast +
4671 ps->eth.rx_multicast +
4672 ps->eth.rx_broadcast;
4673 mac_stats->FrameCheckSequenceErrors = ps->crc_errors;
4674 mac_stats->OctetsTransmittedOK = ps->eth.tx_bytes;
4675 mac_stats->OctetsReceivedOK = ps->eth.rx_bytes;
4676 mac_stats->MulticastFramesXmittedOK = ps->eth.tx_multicast;
4677 mac_stats->BroadcastFramesXmittedOK = ps->eth.tx_broadcast;
4678 mac_stats->MulticastFramesReceivedOK = ps->eth.rx_multicast;
4679 mac_stats->BroadcastFramesReceivedOK = ps->eth.rx_broadcast;
4680 mac_stats->InRangeLengthErrors = ps->rx_len_errors;
4681 mac_stats->FrameTooLongErrors = ps->rx_oversize;
4682 }
4683
4684 static void ice_get_pause_stats(struct net_device *netdev,
4685 struct ethtool_pause_stats *pause_stats)
4686 {
4687 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4688 struct ice_hw_port_stats *ps = &pf->stats;
4689
4690 pause_stats->tx_pause_frames = ps->link_xon_tx + ps->link_xoff_tx;
4691 pause_stats->rx_pause_frames = ps->link_xon_rx + ps->link_xoff_rx;
4692 }
4693
4694 static const struct ethtool_rmon_hist_range ice_rmon_ranges[] = {
4695 { 0, 64 },
4696 { 65, 127 },
4697 { 128, 255 },
4698 { 256, 511 },
4699 { 512, 1023 },
4700 { 1024, 1522 },
4701 { 1523, 9522 },
4702 {}
4703 };
4704
4705 static void ice_get_rmon_stats(struct net_device *netdev,
4706 struct ethtool_rmon_stats *rmon,
4707 const struct ethtool_rmon_hist_range **ranges)
4708 {
4709 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4710 struct ice_hw_port_stats *ps = &pf->stats;
4711
4712 rmon->undersize_pkts = ps->rx_undersize;
4713 rmon->oversize_pkts = ps->rx_oversize;
4714 rmon->fragments = ps->rx_fragments;
4715 rmon->jabbers = ps->rx_jabber;
4716
4717 rmon->hist[0] = ps->rx_size_64;
4718 rmon->hist[1] = ps->rx_size_127;
4719 rmon->hist[2] = ps->rx_size_255;
4720 rmon->hist[3] = ps->rx_size_511;
4721 rmon->hist[4] = ps->rx_size_1023;
4722 rmon->hist[5] = ps->rx_size_1522;
4723 rmon->hist[6] = ps->rx_size_big;
4724
4725 rmon->hist_tx[0] = ps->tx_size_64;
4726 rmon->hist_tx[1] = ps->tx_size_127;
4727 rmon->hist_tx[2] = ps->tx_size_255;
4728 rmon->hist_tx[3] = ps->tx_size_511;
4729 rmon->hist_tx[4] = ps->tx_size_1023;
4730 rmon->hist_tx[5] = ps->tx_size_1522;
4731 rmon->hist_tx[6] = ps->tx_size_big;
4732
4733 *ranges = ice_rmon_ranges;
4734 }
4735
4736 /**
* ice_get_ts_stats - provide timestamping stats
4737 * @netdev: the netdevice pointer from ethtool
4738 * @ts_stats: the ethtool data structure to fill in
4739 */
4740 static void ice_get_ts_stats(struct net_device *netdev,
4741 struct ethtool_ts_stats *ts_stats)
4742 {
4743 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4744 struct ice_ptp *ptp = &pf->ptp;
4745
4746 ts_stats->pkts = ptp->tx_hwtstamp_good;
4747 ts_stats->err = ptp->tx_hwtstamp_skipped +
4748 ptp->tx_hwtstamp_flushed +
4749 ptp->tx_hwtstamp_discarded;
4750 ts_stats->lost = ptp->tx_hwtstamp_timeouts;
4751 }
4752
4753 #define ICE_ETHTOOL_PFR (ETH_RESET_IRQ | ETH_RESET_DMA | \
4754 ETH_RESET_FILTER | ETH_RESET_OFFLOAD)
4755
4756 #define ICE_ETHTOOL_CORER ((ICE_ETHTOOL_PFR | ETH_RESET_RAM) << \
4757 ETH_RESET_SHARED_SHIFT)
4758
4759 #define ICE_ETHTOOL_GLOBR (ICE_ETHTOOL_CORER | \
4760 (ETH_RESET_MAC << ETH_RESET_SHARED_SHIFT) | \
4761 (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))
4762
4763 #define ICE_ETHTOOL_VFR ICE_ETHTOOL_PFR
4764
4765 /**
4766 * ice_ethtool_reset - triggers a given type of reset
4767 * @dev: network interface device structure
4768 * @flags: set of reset flags
4769 *
4770 * Return: 0 on success, -EOPNOTSUPP when using unsupported set of flags.
4771 */
4772 static int ice_ethtool_reset(struct net_device *dev, u32 *flags)
4773 {
4774 struct ice_pf *pf = ice_netdev_to_pf(dev);
4775 enum ice_reset_req reset;
4776
4777 switch (*flags) {
4778 case ICE_ETHTOOL_CORER:
4779 reset = ICE_RESET_CORER;
4780 break;
4781 case ICE_ETHTOOL_GLOBR:
4782 reset = ICE_RESET_GLOBR;
4783 break;
4784 case ICE_ETHTOOL_PFR:
4785 reset = ICE_RESET_PFR;
4786 break;
4787 default:
4788 netdev_info(dev, "Unsupported set of ethtool flags");
4789 return -EOPNOTSUPP;
4790 }
4791
4792 ice_schedule_reset(pf, reset);
4793
4794 *flags = 0;
4795
4796 return 0;
4797 }
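
/* Editor's note: illustrative sketch, not driver code. In the ethtool
 * reset ABI used above, bits in the low half of *flags request resets of
 * components private to this function, while the same bits shifted by
 * ETH_RESET_SHARED_SHIFT request resets of components shared with other
 * functions; the switch statement matches exact combinations only.
 */
static bool __maybe_unused toy_reset_touches_shared(u32 flags)
{
	return (flags >> ETH_RESET_SHARED_SHIFT) != 0;
}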
4798
4799 /**
4800 * ice_repr_ethtool_reset - triggers a VF reset
4801 * @dev: network interface device structure
4802 * @flags: set of reset flags
4803 *
4804 * Return: 0 on success,
4805 * -EOPNOTSUPP when using unsupported set of flags
4806 * -EBUSY when VF is not ready for reset.
4807 */
4808 static int ice_repr_ethtool_reset(struct net_device *dev, u32 *flags)
4809 {
4810 struct ice_repr *repr = ice_netdev_to_repr(dev);
4811 struct ice_vf *vf;
4812
4813 if (repr->type != ICE_REPR_TYPE_VF ||
4814 *flags != ICE_ETHTOOL_VFR)
4815 return -EOPNOTSUPP;
4816
4817 vf = repr->vf;
4818
4819 if (ice_check_vf_ready_for_cfg(vf))
4820 return -EBUSY;
4821
4822 *flags = 0;
4823
4824 return ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
4825 }
4826
4827 static const struct ethtool_ops ice_ethtool_ops = {
4828 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
4829 ETHTOOL_COALESCE_USE_ADAPTIVE |
4830 ETHTOOL_COALESCE_RX_USECS_HIGH,
4831 .supported_input_xfrm = RXH_XFRM_SYM_XOR,
4832 .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
4833 .get_link_ksettings = ice_get_link_ksettings,
4834 .set_link_ksettings = ice_set_link_ksettings,
4835 .get_fec_stats = ice_get_fec_stats,
4836 .get_eth_mac_stats = ice_get_eth_mac_stats,
4837 .get_pause_stats = ice_get_pause_stats,
4838 .get_rmon_stats = ice_get_rmon_stats,
4839 .get_ts_stats = ice_get_ts_stats,
4840 .get_drvinfo = ice_get_drvinfo,
4841 .get_regs_len = ice_get_regs_len,
4842 .get_regs = ice_get_regs,
4843 .get_wol = ice_get_wol,
4844 .set_wol = ice_set_wol,
4845 .get_msglevel = ice_get_msglevel,
4846 .set_msglevel = ice_set_msglevel,
4847 .self_test = ice_self_test,
4848 .get_link = ethtool_op_get_link,
4849 .get_link_ext_stats = ice_get_link_ext_stats,
4850 .get_eeprom_len = ice_get_eeprom_len,
4851 .get_eeprom = ice_get_eeprom,
4852 .get_coalesce = ice_get_coalesce,
4853 .set_coalesce = ice_set_coalesce,
4854 .get_strings = ice_get_strings,
4855 .set_phys_id = ice_set_phys_id,
4856 .get_ethtool_stats = ice_get_ethtool_stats,
4857 .get_priv_flags = ice_get_priv_flags,
4858 .set_priv_flags = ice_set_priv_flags,
4859 .get_sset_count = ice_get_sset_count,
4860 .get_rxnfc = ice_get_rxnfc,
4861 .set_rxnfc = ice_set_rxnfc,
4862 .get_rx_ring_count = ice_get_rx_ring_count,
4863 .get_ringparam = ice_get_ringparam,
4864 .set_ringparam = ice_set_ringparam,
4865 .nway_reset = ice_nway_reset,
4866 .get_pauseparam = ice_get_pauseparam,
4867 .set_pauseparam = ice_set_pauseparam,
4868 .reset = ice_ethtool_reset,
4869 .get_rxfh_key_size = ice_get_rxfh_key_size,
4870 .get_rxfh_indir_size = ice_get_rxfh_indir_size,
4871 .get_rxfh = ice_get_rxfh,
4872 .set_rxfh = ice_set_rxfh,
4873 .get_rxfh_fields = ice_get_rxfh_fields,
4874 .set_rxfh_fields = ice_set_rxfh_fields,
4875 .get_channels = ice_get_channels,
4876 .set_channels = ice_set_channels,
4877 .get_ts_info = ice_get_ts_info,
4878 .get_per_queue_coalesce = ice_get_per_q_coalesce,
4879 .set_per_queue_coalesce = ice_set_per_q_coalesce,
4880 .get_fecparam = ice_get_fecparam,
4881 .set_fecparam = ice_set_fecparam,
4882 .get_module_info = ice_get_module_info,
4883 .get_module_eeprom = ice_get_module_eeprom,
4884 };
4885
4886 static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
4887 .get_link_ksettings = ice_get_link_ksettings,
4888 .set_link_ksettings = ice_set_link_ksettings,
4889 .get_drvinfo = ice_get_drvinfo,
4890 .get_regs_len = ice_get_regs_len,
4891 .get_regs = ice_get_regs,
4892 .get_wol = ice_get_wol,
4893 .set_wol = ice_set_wol,
4894 .get_msglevel = ice_get_msglevel,
4895 .set_msglevel = ice_set_msglevel,
4896 .get_link = ethtool_op_get_link,
4897 .get_eeprom_len = ice_get_eeprom_len,
4898 .get_eeprom = ice_get_eeprom,
4899 .get_strings = ice_get_strings,
4900 .get_ethtool_stats = ice_get_ethtool_stats,
4901 .get_sset_count = ice_get_sset_count,
4902 .get_ringparam = ice_get_ringparam,
4903 .set_ringparam = ice_set_ringparam,
4904 .nway_reset = ice_nway_reset,
4905 .get_channels = ice_get_channels,
4906 };
4907
4908 /**
4909 * ice_set_ethtool_safe_mode_ops - setup safe mode ethtool ops
4910 * @netdev: network interface device structure
4911 */
4912 void ice_set_ethtool_safe_mode_ops(struct net_device *netdev)
4913 {
4914 netdev->ethtool_ops = &ice_ethtool_safe_mode_ops;
4915 }
4916
4917 static const struct ethtool_ops ice_ethtool_repr_ops = {
4918 .get_drvinfo = ice_repr_get_drvinfo,
4919 .get_link = ethtool_op_get_link,
4920 .get_strings = ice_repr_get_strings,
4921 .get_ethtool_stats = ice_repr_get_ethtool_stats,
4922 .get_sset_count = ice_repr_get_sset_count,
4923 .reset = ice_repr_ethtool_reset,
4924 };
4925
4926 /**
4927 * ice_set_ethtool_repr_ops - setup VF's port representor ethtool ops
4928 * @netdev: network interface device structure
4929 */
4930 void ice_set_ethtool_repr_ops(struct net_device *netdev)
4931 {
4932 netdev->ethtool_ops = &ice_ethtool_repr_ops;
4933 }
4934
4935 /**
4936 * ice_set_ethtool_ops - setup netdev ethtool ops
4937 * @netdev: network interface device structure
4938 *
4939 * setup netdev ethtool ops with ice specific ops
4940 */
4941 void ice_set_ethtool_ops(struct net_device *netdev)
4942 {
4943 netdev->ethtool_ops = &ice_ethtool_ops;
4944 }
4945