// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* ethtool support for ice */

#include "ice.h"
#include "ice_ethtool.h"
#include "ice_flow.h"
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include <net/dcbnl.h>
#include <net/libeth/rx.h>

struct ice_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define ICE_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

#define ICE_VSI_STAT(_name, _stat) \
	ICE_STAT(struct ice_vsi, _name, _stat)
#define ICE_PF_STAT(_name, _stat) \
	ICE_STAT(struct ice_pf, _name, _stat)
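
/* For illustration only (not part of the driver): ICE_VSI_STAT("tx_busy",
 * tx_busy) expands to an ice_stats entry recording the name, size and byte
 * offset of ice_vsi::tx_busy. A stat value can then be read generically from
 * the containing structure, roughly like this (assuming a valid "vsi"
 * pointer and index "i"):
 *
 *	const struct ice_stats *s = &ice_gstrings_vsi_stats[i];
 *	char *base = (char *)vsi;
 *	u64 val = (s->sizeof_stat == sizeof(u64)) ?
 *		  *(u64 *)(base + s->stat_offset) :
 *		  *(u32 *)(base + s->stat_offset);
 */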

static int ice_q_stats_len(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	/* One packet count and one byte count per queue */
	return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) * 2);
}

#define ICE_PF_STATS_LEN	ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN	ARRAY_SIZE(ice_gstrings_vsi_stats)

#define ICE_PFC_STATS_LEN ( \
		(sizeof_field(struct ice_pf, stats.priority_xoff_rx) + \
		 sizeof_field(struct ice_pf, stats.priority_xon_rx) + \
		 sizeof_field(struct ice_pf, stats.priority_xoff_tx) + \
		 sizeof_field(struct ice_pf, stats.priority_xon_tx)) \
		 / sizeof(u64))
#define ICE_ALL_STATS_LEN(n)	(ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \
				 ICE_VSI_STATS_LEN + ice_q_stats_len(n))
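
/* A quick sanity check of the arithmetic above (illustrative; assumes the
 * priority_x{on,off}_{rx,tx} members are u64[8] arrays, one counter per
 * user priority): each sizeof_field() is then 8 * sizeof(u64), so
 * ICE_PFC_STATS_LEN = (4 * 8 * sizeof(u64)) / sizeof(u64) = 32, matching
 * the 8 xon plus 8 xoff strings emitted per direction in
 * __ice_get_strings() below.
 */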

static const struct ice_stats ice_gstrings_vsi_stats[] = {
	ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
	ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
	ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
	ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
	ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
	ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
	ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
	ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
	ICE_VSI_STAT("rx_dropped", eth_stats.rx_discards),
	ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
	ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
	ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
	ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
	ICE_VSI_STAT("tx_linearize", tx_linearize),
	ICE_VSI_STAT("tx_busy", tx_busy),
	ICE_VSI_STAT("tx_restart", tx_restart),
};

enum ice_ethtool_test_id {
	ICE_ETH_TEST_REG = 0,
	ICE_ETH_TEST_EEPROM,
	ICE_ETH_TEST_INTR,
	ICE_ETH_TEST_LOOP,
	ICE_ETH_TEST_LINK,
};

static const char ice_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)",
	"EEPROM test (offline)",
	"Interrupt test (offline)",
	"Loopback test (offline)",
	"Link test (on/offline)",
};

#define ICE_TEST_LEN	(sizeof(ice_gstrings_test) / ETH_GSTRING_LEN)

/* These PF_STATs might look like duplicates of some NETDEV_STATs,
 * but they aren't. This device is capable of supporting multiple
 * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
 * netdevs whereas the PF_STATs are for the physical function that's
 * hosting these netdevs.
 *
 * The PF_STATs are appended to the netdev stats only when ethtool -S
 * is queried on the base PF netdev.
 */
static const struct ice_stats ice_gstrings_pf_stats[] = {
	ICE_PF_STAT("rx_bytes.nic", stats.eth.rx_bytes),
	ICE_PF_STAT("tx_bytes.nic", stats.eth.tx_bytes),
	ICE_PF_STAT("rx_unicast.nic", stats.eth.rx_unicast),
	ICE_PF_STAT("tx_unicast.nic", stats.eth.tx_unicast),
	ICE_PF_STAT("rx_multicast.nic", stats.eth.rx_multicast),
	ICE_PF_STAT("tx_multicast.nic", stats.eth.tx_multicast),
	ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast),
	ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast),
	ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors),
	ICE_PF_STAT("tx_timeout.nic", tx_timeout_count),
	ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64),
	ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64),
	ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127),
	ICE_PF_STAT("tx_size_127.nic", stats.tx_size_127),
	ICE_PF_STAT("rx_size_255.nic", stats.rx_size_255),
	ICE_PF_STAT("tx_size_255.nic", stats.tx_size_255),
	ICE_PF_STAT("rx_size_511.nic", stats.rx_size_511),
	ICE_PF_STAT("tx_size_511.nic", stats.tx_size_511),
	ICE_PF_STAT("rx_size_1023.nic", stats.rx_size_1023),
	ICE_PF_STAT("tx_size_1023.nic", stats.tx_size_1023),
	ICE_PF_STAT("rx_size_1522.nic", stats.rx_size_1522),
	ICE_PF_STAT("tx_size_1522.nic", stats.tx_size_1522),
	ICE_PF_STAT("rx_size_big.nic", stats.rx_size_big),
	ICE_PF_STAT("tx_size_big.nic", stats.tx_size_big),
	ICE_PF_STAT("link_xon_rx.nic", stats.link_xon_rx),
	ICE_PF_STAT("link_xon_tx.nic", stats.link_xon_tx),
	ICE_PF_STAT("link_xoff_rx.nic", stats.link_xoff_rx),
	ICE_PF_STAT("link_xoff_tx.nic", stats.link_xoff_tx),
	ICE_PF_STAT("tx_dropped_link_down.nic", stats.tx_dropped_link_down),
	ICE_PF_STAT("rx_undersize.nic", stats.rx_undersize),
	ICE_PF_STAT("rx_fragments.nic", stats.rx_fragments),
	ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
	ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
	ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
	ICE_PF_STAT("rx_eipe_error.nic", hw_rx_eipe_error),
	ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
	ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
	ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
	ICE_PF_STAT("mac_local_faults.nic", stats.mac_local_faults),
	ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
	ICE_PF_STAT("fdir_sb_match.nic", stats.fd_sb_match),
	ICE_PF_STAT("fdir_sb_status.nic", stats.fd_sb_status),
	ICE_PF_STAT("tx_hwtstamp_skipped", ptp.tx_hwtstamp_skipped),
	ICE_PF_STAT("tx_hwtstamp_timeouts", ptp.tx_hwtstamp_timeouts),
	ICE_PF_STAT("tx_hwtstamp_flushed", ptp.tx_hwtstamp_flushed),
	ICE_PF_STAT("tx_hwtstamp_discarded", ptp.tx_hwtstamp_discarded),
	ICE_PF_STAT("late_cached_phc_updates", ptp.late_cached_phc_updates),
};

static const u32 ice_regs_dump_list[] = {
	PFGEN_STATE,
	PRTGEN_STATUS,
	QRX_CTRL(0),
	QINT_TQCTL(0),
	QINT_RQCTL(0),
	PFINT_OICR_ENA,
	QRX_ITR(0),
#define GLDCB_TLPM_PCI_DM	0x000A0180
	GLDCB_TLPM_PCI_DM,
#define GLDCB_TLPM_TC2PFC	0x000A0194
	GLDCB_TLPM_TC2PFC,
#define TCDCB_TLPM_WAIT_DM(_i)	(0x000A0080 + ((_i) * 4))
	TCDCB_TLPM_WAIT_DM(0),
	TCDCB_TLPM_WAIT_DM(1),
	TCDCB_TLPM_WAIT_DM(2),
	TCDCB_TLPM_WAIT_DM(3),
	TCDCB_TLPM_WAIT_DM(4),
	TCDCB_TLPM_WAIT_DM(5),
	TCDCB_TLPM_WAIT_DM(6),
	TCDCB_TLPM_WAIT_DM(7),
	TCDCB_TLPM_WAIT_DM(8),
	TCDCB_TLPM_WAIT_DM(9),
	TCDCB_TLPM_WAIT_DM(10),
	TCDCB_TLPM_WAIT_DM(11),
	TCDCB_TLPM_WAIT_DM(12),
	TCDCB_TLPM_WAIT_DM(13),
	TCDCB_TLPM_WAIT_DM(14),
	TCDCB_TLPM_WAIT_DM(15),
	TCDCB_TLPM_WAIT_DM(16),
	TCDCB_TLPM_WAIT_DM(17),
	TCDCB_TLPM_WAIT_DM(18),
	TCDCB_TLPM_WAIT_DM(19),
	TCDCB_TLPM_WAIT_DM(20),
	TCDCB_TLPM_WAIT_DM(21),
	TCDCB_TLPM_WAIT_DM(22),
	TCDCB_TLPM_WAIT_DM(23),
	TCDCB_TLPM_WAIT_DM(24),
	TCDCB_TLPM_WAIT_DM(25),
	TCDCB_TLPM_WAIT_DM(26),
	TCDCB_TLPM_WAIT_DM(27),
	TCDCB_TLPM_WAIT_DM(28),
	TCDCB_TLPM_WAIT_DM(29),
	TCDCB_TLPM_WAIT_DM(30),
	TCDCB_TLPM_WAIT_DM(31),
#define GLPCI_WATMK_CLNT_PIPEMON	0x000BFD90
	GLPCI_WATMK_CLNT_PIPEMON,
#define GLPCI_CUR_CLNT_COMMON	0x000BFD84
	GLPCI_CUR_CLNT_COMMON,
#define GLPCI_CUR_CLNT_PIPEMON	0x000BFD88
	GLPCI_CUR_CLNT_PIPEMON,
#define GLPCI_PCIERR	0x0009DEB0
	GLPCI_PCIERR,
#define GLPSM_DEBUG_CTL_STATUS	0x000B0600
	GLPSM_DEBUG_CTL_STATUS,
#define GLPSM0_DEBUG_FIFO_OVERFLOW_DETECT	0x000B0680
	GLPSM0_DEBUG_FIFO_OVERFLOW_DETECT,
#define GLPSM0_DEBUG_FIFO_UNDERFLOW_DETECT	0x000B0684
	GLPSM0_DEBUG_FIFO_UNDERFLOW_DETECT,
#define GLPSM0_DEBUG_DT_OUT_OF_WINDOW	0x000B0688
	GLPSM0_DEBUG_DT_OUT_OF_WINDOW,
#define GLPSM0_DEBUG_INTF_HW_ERROR_DETECT	0x000B069C
	GLPSM0_DEBUG_INTF_HW_ERROR_DETECT,
#define GLPSM0_DEBUG_MISC_HW_ERROR_DETECT	0x000B06A0
	GLPSM0_DEBUG_MISC_HW_ERROR_DETECT,
#define GLPSM1_DEBUG_FIFO_OVERFLOW_DETECT	0x000B0E80
	GLPSM1_DEBUG_FIFO_OVERFLOW_DETECT,
#define GLPSM1_DEBUG_FIFO_UNDERFLOW_DETECT	0x000B0E84
	GLPSM1_DEBUG_FIFO_UNDERFLOW_DETECT,
#define GLPSM1_DEBUG_SRL_FIFO_OVERFLOW_DETECT	0x000B0E88
	GLPSM1_DEBUG_SRL_FIFO_OVERFLOW_DETECT,
#define GLPSM1_DEBUG_SRL_FIFO_UNDERFLOW_DETECT	0x000B0E8C
	GLPSM1_DEBUG_SRL_FIFO_UNDERFLOW_DETECT,
#define GLPSM1_DEBUG_MISC_HW_ERROR_DETECT	0x000B0E90
	GLPSM1_DEBUG_MISC_HW_ERROR_DETECT,
#define GLPSM2_DEBUG_FIFO_OVERFLOW_DETECT	0x000B1680
	GLPSM2_DEBUG_FIFO_OVERFLOW_DETECT,
#define GLPSM2_DEBUG_FIFO_UNDERFLOW_DETECT	0x000B1684
	GLPSM2_DEBUG_FIFO_UNDERFLOW_DETECT,
#define GLPSM2_DEBUG_MISC_HW_ERROR_DETECT	0x000B1688
	GLPSM2_DEBUG_MISC_HW_ERROR_DETECT,
#define GLTDPU_TCLAN_COMP_BOB(_i)	(0x00049ADC + ((_i) * 4))
	GLTDPU_TCLAN_COMP_BOB(1),
	GLTDPU_TCLAN_COMP_BOB(2),
	GLTDPU_TCLAN_COMP_BOB(3),
	GLTDPU_TCLAN_COMP_BOB(4),
	GLTDPU_TCLAN_COMP_BOB(5),
	GLTDPU_TCLAN_COMP_BOB(6),
	GLTDPU_TCLAN_COMP_BOB(7),
	GLTDPU_TCLAN_COMP_BOB(8),
#define GLTDPU_TCB_CMD_BOB(_i)	(0x0004975C + ((_i) * 4))
	GLTDPU_TCB_CMD_BOB(1),
	GLTDPU_TCB_CMD_BOB(2),
	GLTDPU_TCB_CMD_BOB(3),
	GLTDPU_TCB_CMD_BOB(4),
	GLTDPU_TCB_CMD_BOB(5),
	GLTDPU_TCB_CMD_BOB(6),
	GLTDPU_TCB_CMD_BOB(7),
	GLTDPU_TCB_CMD_BOB(8),
#define GLTDPU_PSM_UPDATE_BOB(_i)	(0x00049B5C + ((_i) * 4))
	GLTDPU_PSM_UPDATE_BOB(1),
	GLTDPU_PSM_UPDATE_BOB(2),
	GLTDPU_PSM_UPDATE_BOB(3),
	GLTDPU_PSM_UPDATE_BOB(4),
	GLTDPU_PSM_UPDATE_BOB(5),
	GLTDPU_PSM_UPDATE_BOB(6),
	GLTDPU_PSM_UPDATE_BOB(7),
	GLTDPU_PSM_UPDATE_BOB(8),
#define GLTCB_CMD_IN_BOB(_i)	(0x000AE288 + ((_i) * 4))
	GLTCB_CMD_IN_BOB(1),
	GLTCB_CMD_IN_BOB(2),
	GLTCB_CMD_IN_BOB(3),
	GLTCB_CMD_IN_BOB(4),
	GLTCB_CMD_IN_BOB(5),
	GLTCB_CMD_IN_BOB(6),
	GLTCB_CMD_IN_BOB(7),
	GLTCB_CMD_IN_BOB(8),
#define GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(_i)	(0x000FC148 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(8),
#define GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(_i)	(0x000FC248 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(8),
#define GLLAN_TCLAN_CACHE_CTL_BOB_CTL(_i)	(0x000FC1C8 + ((_i) * 4))
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(1),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(2),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(3),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(4),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(5),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(6),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(7),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(8),
#define GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(_i)	(0x000FC188 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(8),
#define GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(_i)	(0x000FC288 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(8),
#define PRTDCB_TCUPM_REG_CM(_i)	(0x000BC360 + ((_i) * 4))
	PRTDCB_TCUPM_REG_CM(0),
	PRTDCB_TCUPM_REG_CM(1),
	PRTDCB_TCUPM_REG_CM(2),
	PRTDCB_TCUPM_REG_CM(3),
#define PRTDCB_TCUPM_REG_DM(_i)	(0x000BC3A0 + ((_i) * 4))
	PRTDCB_TCUPM_REG_DM(0),
	PRTDCB_TCUPM_REG_DM(1),
	PRTDCB_TCUPM_REG_DM(2),
	PRTDCB_TCUPM_REG_DM(3),
#define PRTDCB_TLPM_REG_DM(_i)	(0x000A0000 + ((_i) * 4))
	PRTDCB_TLPM_REG_DM(0),
	PRTDCB_TLPM_REG_DM(1),
	PRTDCB_TLPM_REG_DM(2),
	PRTDCB_TLPM_REG_DM(3),
};

struct ice_priv_flag {
	char name[ETH_GSTRING_LEN];
	u32 bitno;	/* bit position in pf->flags */
};

#define ICE_PRIV_FLAG(_name, _bitno) { \
	.name = _name, \
	.bitno = _bitno, \
}
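
/* Illustrative only: ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING)
 * ties ethtool private flag bit i (the array index below) to bit
 * ICE_FLAG_VF_VLAN_PRUNING in pf->flags. The translation between the two bit
 * spaces is done in ice_get_priv_flags()/ice_set_priv_flags(), roughly:
 *
 *	if (test_bit(ice_gstrings_priv_flags[i].bitno, pf->flags))
 *		ret_flags |= BIT(i);
 */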

static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
	ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
	ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
	ICE_PRIV_FLAG("vf-true-promisc-support",
		      ICE_FLAG_VF_TRUE_PROMISC_ENA),
	ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
	ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING),
};

#define ICE_PRIV_FLAG_ARRAY_SIZE	ARRAY_SIZE(ice_gstrings_priv_flags)

static const u32 ice_adv_lnk_speed_100[] __initconst = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

static const u32 ice_adv_lnk_speed_1000[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

static const u32 ice_adv_lnk_speed_2500[] __initconst = {
	ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

static const u32 ice_adv_lnk_speed_5000[] __initconst = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

static const u32 ice_adv_lnk_speed_10000[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
};

static const u32 ice_adv_lnk_speed_25000[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
};

static const u32 ice_adv_lnk_speed_40000[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
};

static const u32 ice_adv_lnk_speed_50000[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 ice_adv_lnk_speed_100000[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
};

static const u32 ice_adv_lnk_speed_200000[] __initconst = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

static struct ethtool_forced_speed_map ice_adv_lnk_speed_maps[] __ro_after_init = {
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 1000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 2500),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 5000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 10000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 25000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 40000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 50000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 200000),
};

void __init ice_adv_lnk_speed_maps_init(void)
{
	ethtool_forced_speed_maps_init(ice_adv_lnk_speed_maps,
				       ARRAY_SIZE(ice_adv_lnk_speed_maps));
}
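
/* Illustrative only (assumes a valid "ksettings" request): once initialized,
 * the maps above let a forced-speed request be translated into the advertised
 * link modes for that speed, roughly:
 *
 *	const struct ethtool_forced_speed_map *map;
 *	u32 i;
 *
 *	for (i = 0; i < ARRAY_SIZE(ice_adv_lnk_speed_maps); i++) {
 *		map = &ice_adv_lnk_speed_maps[i];
 *		if (map->speed == ksettings->base.speed) {
 *			linkmode_copy(ksettings->link_modes.advertising,
 *				      map->caps);
 *			break;
 *		}
 *	}
 */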

static void
__ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo,
		  struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_orom_info *orom;
	struct ice_nvm_info *nvm;

	nvm = &hw->flash.nvm;
	orom = &hw->flash.orom;

	strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));

	/* Display NVM version (from which the firmware version can be
	 * determined) which contains more pertinent information.
	 */
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
		 nvm->eetrack, orom->major, orom->build, orom->patch);

	strscpy(drvinfo->bus_info, pci_name(pf->pdev),
		sizeof(drvinfo->bus_info));
}

static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	__ice_get_drvinfo(netdev, drvinfo, np->vsi);
	drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}

static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
	return (sizeof(ice_regs_dump_list) +
		sizeof(struct ice_regdump_to_ethtool));
}
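
/* Informal layout sketch of the ethtool register dump buffer sized above:
 *
 *	[0 .. ARRAY_SIZE(ice_regs_dump_list) - 1]  raw 32-bit register reads
 *	[immediately after]                        struct ice_regdump_to_ethtool
 *	                                           (serdes equalization data)
 *
 * ice_get_regs() below fills the first part and hands the tail of the
 * buffer to ice_get_extended_regs().
 */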

/**
 * ice_ethtool_get_maxspeed - Get the max speed for given lport
 * @hw: pointer to the HW struct
 * @lport: logical port for which max speed is requested
 * @max_speed: return max speed for input lport
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_ethtool_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed)
{
	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = {};
	bool active_valid = false, pending_valid = true;
	u8 option_count = ICE_AQC_PORT_OPT_MAX;
	u8 active_idx = 0, pending_idx = 0;
	int status;

	status = ice_aq_get_port_options(hw, options, &option_count, lport,
					 true, &active_idx, &active_valid,
					 &pending_idx, &pending_valid);
	if (status)
		return -EIO;
	if (!active_valid)
		return -EINVAL;

	*max_speed = options[active_idx].max_lane_speed & ICE_AQC_PORT_OPT_MAX_LANE_M;
	return 0;
}

/**
 * ice_is_serdes_muxed - returns whether serdes is muxed in hardware
 * @hw: pointer to the HW struct
 *
 * Return: true when serdes is muxed, false when serdes is not muxed.
 */
static bool ice_is_serdes_muxed(struct ice_hw *hw)
{
	u32 reg_value = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);

	return FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, reg_value);
}

static int ice_map_port_topology_for_sfp(struct ice_port_topology *port_topology,
					 u8 lport, bool is_muxed)
{
	switch (lport) {
	case 0:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 0;
		port_topology->primary_serdes_lane = 0;
		break;
	case 1:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 0;
		if (is_muxed)
			port_topology->primary_serdes_lane = 2;
		else
			port_topology->primary_serdes_lane = 4;
		break;
	case 2:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 1;
		port_topology->primary_serdes_lane = 1;
		break;
	case 3:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 1;
		if (is_muxed)
			port_topology->primary_serdes_lane = 3;
		else
			port_topology->primary_serdes_lane = 5;
		break;
	case 4:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 2;
		break;
	case 5:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 6;
		break;
	case 6:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 3;
		break;
	case 7:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 7;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ice_map_port_topology_for_qsfp(struct ice_port_topology *port_topology,
					  u8 lport, bool is_muxed)
{
	switch (lport) {
	case 0:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 0;
		port_topology->primary_serdes_lane = 0;
		break;
	case 1:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 0;
		if (is_muxed)
			port_topology->primary_serdes_lane = 2;
		else
			port_topology->primary_serdes_lane = 4;
		break;
	case 2:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 1;
		port_topology->primary_serdes_lane = 1;
		break;
	case 3:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 1;
		if (is_muxed)
			port_topology->primary_serdes_lane = 3;
		else
			port_topology->primary_serdes_lane = 5;
		break;
	case 4:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 2;
		break;
	case 5:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 6;
		break;
	case 6:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 3;
		break;
	case 7:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 7;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_get_port_topology - returns the physical topology (PCS quad, PCS port
 * and serdes number) for a logical port
 * @hw: pointer to the HW struct
 * @lport: logical port for which physical info is requested
 * @port_topology: buffer to hold port topology
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
				 struct ice_port_topology *port_topology)
{
	struct ice_aqc_get_link_topo cmd = {};
	u16 node_handle = 0;
	u8 cage_type = 0;
	bool is_muxed;
	int err;
	u8 ctx;

	ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
	ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
	cmd.addr.topo_params.node_type_ctx = ctx;

	err = ice_aq_get_netlist_node(hw, &cmd, &cage_type, &node_handle);
	if (err)
		return -EINVAL;

	is_muxed = ice_is_serdes_muxed(hw);

	if (cage_type == 0x11 ||	/* SFP+ */
	    cage_type == 0x12) {	/* SFP28 */
		port_topology->serdes_lane_count = 1;
		err = ice_map_port_topology_for_sfp(port_topology, lport, is_muxed);
		if (err)
			return err;
	} else if (cage_type == 0x13 ||	/* QSFP */
		   cage_type == 0x14) {	/* QSFP28 */
		u8 max_speed = 0;

		err = ice_ethtool_get_maxspeed(hw, lport, &max_speed);
		if (err)
			return err;

		if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
			port_topology->serdes_lane_count = 4;
		else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G ||
			 max_speed == ICE_AQC_PORT_OPT_MAX_LANE_40G)
			port_topology->serdes_lane_count = 2;
		else
			port_topology->serdes_lane_count = 1;

		err = ice_map_port_topology_for_qsfp(port_topology, lport, is_muxed);
		if (err)
			return err;
	} else {
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_get_tx_rx_equa - read serdes Tx/Rx equalization parameters
 * @hw: pointer to the HW struct
 * @serdes_num: represents the serdes number
 * @ptr: structure to hold all serdes parameters for the given serdes
 *
 * Return: 0 on success, negative on failure. On success, @ptr holds every
 * supported serdes equalization parameter for the given serdes number.
 */
static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
			      struct ice_serdes_equalization_to_ethtool *ptr)
{
	static const int tx = ICE_AQC_OP_CODE_TX_EQU;
	static const int rx = ICE_AQC_OP_CODE_RX_EQU;
	struct {
		int data_in;
		int opcode;
		int *out;
	} aq_params[] = {
		{ ICE_AQC_TX_EQU_PRE1, tx, &ptr->tx_equ_pre1 },
		{ ICE_AQC_TX_EQU_PRE3, tx, &ptr->tx_equ_pre3 },
		{ ICE_AQC_TX_EQU_ATTEN, tx, &ptr->tx_equ_atten },
		{ ICE_AQC_TX_EQU_POST1, tx, &ptr->tx_equ_post1 },
		{ ICE_AQC_TX_EQU_PRE2, tx, &ptr->tx_equ_pre2 },
		{ ICE_AQC_RX_EQU_PRE2, rx, &ptr->rx_equ_pre2 },
		{ ICE_AQC_RX_EQU_PRE1, rx, &ptr->rx_equ_pre1 },
		{ ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 },
		{ ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf },
		{ ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf },
		{ ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf },
		{ ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf },
		{ ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc },
		{ ICE_AQC_RX_EQU_CTLE_BW, rx, &ptr->rx_equ_ctle_bw },
		{ ICE_AQC_RX_EQU_DFE_GAIN, rx, &ptr->rx_equ_dfe_gain },
		{ ICE_AQC_RX_EQU_DFE_GAIN2, rx, &ptr->rx_equ_dfe_gain_2 },
		{ ICE_AQC_RX_EQU_DFE_2, rx, &ptr->rx_equ_dfe_2 },
		{ ICE_AQC_RX_EQU_DFE_3, rx, &ptr->rx_equ_dfe_3 },
		{ ICE_AQC_RX_EQU_DFE_4, rx, &ptr->rx_equ_dfe_4 },
		{ ICE_AQC_RX_EQU_DFE_5, rx, &ptr->rx_equ_dfe_5 },
		{ ICE_AQC_RX_EQU_DFE_6, rx, &ptr->rx_equ_dfe_6 },
		{ ICE_AQC_RX_EQU_DFE_7, rx, &ptr->rx_equ_dfe_7 },
		{ ICE_AQC_RX_EQU_DFE_8, rx, &ptr->rx_equ_dfe_8 },
		{ ICE_AQC_RX_EQU_DFE_9, rx, &ptr->rx_equ_dfe_9 },
		{ ICE_AQC_RX_EQU_DFE_10, rx, &ptr->rx_equ_dfe_10 },
		{ ICE_AQC_RX_EQU_DFE_11, rx, &ptr->rx_equ_dfe_11 },
		{ ICE_AQC_RX_EQU_DFE_12, rx, &ptr->rx_equ_dfe_12 },
	};
	int err;

	for (int i = 0; i < ARRAY_SIZE(aq_params); i++) {
		err = ice_aq_get_phy_equalization(hw, aq_params[i].data_in,
						  aq_params[i].opcode,
						  serdes_num, aq_params[i].out);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_get_extended_regs - returns FEC correctable, uncorrectable stats per
 * PCS quad and PCS port
 * @netdev: pointer to net device structure
 * @p: output buffer to fill requested register dump
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_get_extended_regs(struct net_device *netdev, void *p)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_regdump_to_ethtool *ice_prv_regs_buf;
	struct ice_port_topology port_topology = {};
	struct ice_port_info *pi;
	struct ice_pf *pf;
	struct ice_hw *hw;
	unsigned int i;
	int err;

	pf = np->vsi->back;
	hw = &pf->hw;
	pi = np->vsi->port_info;

	/* Serdes parameters are not supported if not the PF VSI */
	if (np->vsi->type != ICE_VSI_PF || !pi)
		return -EINVAL;

	err = ice_get_port_topology(hw, pi->lport, &port_topology);
	if (err)
		return -EINVAL;
	if (port_topology.serdes_lane_count > 4)
		return -EINVAL;

	ice_prv_regs_buf = p;

	/* Get serdes equalization parameter for available serdes */
	for (i = 0; i < port_topology.serdes_lane_count; i++) {
		u8 serdes_num = 0;

		serdes_num = port_topology.primary_serdes_lane + i;
		err = ice_get_tx_rx_equa(hw, serdes_num,
					 &ice_prv_regs_buf->equalization[i]);
		if (err)
			return -EINVAL;
	}

	return 0;
}

static void
ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_hw *hw = &pf->hw;
	u32 *regs_buf = (u32 *)p;
	unsigned int i;

	regs->version = 2;

	for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
		regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);

	ice_get_extended_regs(netdev, (void *)&regs_buf[i]);
}

static u32 ice_get_msglevel(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (pf->hw.debug_mask)
		netdev_info(netdev, "hw debug_mask: 0x%llX\n",
			    pf->hw.debug_mask);
#endif /* !CONFIG_DYNAMIC_DEBUG */

	return pf->msg_enable;
}

static void ice_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (ICE_DBG_USER & data)
		pf->hw.debug_mask = data;
	else
		pf->msg_enable = data;
#else
	pf->msg_enable = data;
#endif /* !CONFIG_DYNAMIC_DEBUG */
}

static void ice_get_link_ext_stats(struct net_device *netdev,
				   struct ethtool_link_ext_stats *stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	stats->link_down_events = pf->link_down_events;
}

static int ice_get_eeprom_len(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	return (int)pf->hw.flash.flash_size;
}

static int
ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
	       u8 *bytes)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int ret;
	u8 *buf;

	dev = ice_pf_to_dev(pf);

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
	netdev_dbg(netdev, "GEEPROM cmd 0x%08x, offset 0x%08x, len 0x%08x\n",
		   eeprom->cmd, eeprom->offset, eeprom->len);

	buf = kzalloc(eeprom->len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret) {
		dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %s\n",
			ret, libie_aq_str(hw->adminq.sq_last_status));
		goto out;
	}

	ret = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
				false);
	if (ret) {
		dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %s\n",
			ret, libie_aq_str(hw->adminq.sq_last_status));
		goto release;
	}

	memcpy(bytes, buf, eeprom->len);
release:
	ice_release_nvm(hw);
out:
	kfree(buf);
	return ret;
}

/**
 * ice_active_vfs - check if there are any active VFs
 * @pf: board private structure
 *
 * Returns true if an active VF is found, otherwise returns false
 */
static bool ice_active_vfs(struct ice_pf *pf)
{
	bool active = false;
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
			active = true;
			break;
		}
	}
	rcu_read_unlock();

	return active;
}

/**
 * ice_link_test - perform a link test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_link_test(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	bool link_up = false;
	int status;

	netdev_info(netdev, "link test\n");
	status = ice_get_link_status(np->vsi->port_info, &link_up);
	if (status) {
		netdev_err(netdev, "link query error, status = %d\n",
			   status);
		return 1;
	}

	if (!link_up)
		return 2;

	return 0;
}

/**
 * ice_eeprom_test - perform an EEPROM test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_eeprom_test(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	netdev_info(netdev, "EEPROM test\n");
	return !!(ice_nvm_validate_checksum(&pf->hw));
}

/**
 * ice_reg_pattern_test
 * @hw: pointer to the HW struct
 * @reg: reg to be tested
 * @mask: bits to be touched
 */
static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
{
	struct ice_pf *pf = (struct ice_pf *)hw->back;
	struct device *dev = ice_pf_to_dev(pf);
	static const u32 patterns[] = {
		0x5A5A5A5A, 0xA5A5A5A5,
		0x00000000, 0xFFFFFFFF
	};
	u32 val, orig_val;
	unsigned int i;

	orig_val = rd32(hw, reg);
	for (i = 0; i < ARRAY_SIZE(patterns); ++i) {
		u32 pattern = patterns[i] & mask;

		wr32(hw, reg, pattern);
		val = rd32(hw, reg);
		if (val == pattern)
			continue;
		dev_err(dev, "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
			__func__, reg, pattern, val);
		return 1;
	}

	wr32(hw, reg, orig_val);
	val = rd32(hw, reg);
	if (val != orig_val) {
		dev_err(dev, "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n",
			__func__, reg, orig_val, val);
		return 1;
	}

	return 0;
}

/**
 * ice_reg_test - perform a register test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_reg_test(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_hw *hw = np->vsi->port_info->hw;
	u32 int_elements = hw->func_caps.common_cap.num_msix_vectors ?
		hw->func_caps.common_cap.num_msix_vectors - 1 : 1;
	struct ice_diag_reg_test_info {
		u32 address;
		u32 mask;
		u32 elem_num;
		u32 elem_size;
	} ice_reg_list[] = {
		{GLINT_ITR(0, 0), 0x00000fff, int_elements,
			GLINT_ITR(0, 1) - GLINT_ITR(0, 0)},
		{GLINT_ITR(1, 0), 0x00000fff, int_elements,
			GLINT_ITR(1, 1) - GLINT_ITR(1, 0)},
		{GLINT_ITR(2, 0), 0x00000fff, int_elements,
			GLINT_ITR(2, 1) - GLINT_ITR(2, 0)},
		{GLINT_CTL, 0xffff0001, 1, 0}
	};
	unsigned int i;

	netdev_dbg(netdev, "Register test\n");
	for (i = 0; i < ARRAY_SIZE(ice_reg_list); ++i) {
		u32 j;

		for (j = 0; j < ice_reg_list[i].elem_num; ++j) {
			u32 mask = ice_reg_list[i].mask;
			u32 reg = ice_reg_list[i].address +
				  (j * ice_reg_list[i].elem_size);

			/* bail on failure (non-zero return) */
			if (ice_reg_pattern_test(hw, reg, mask))
				return 1;
		}
	}

	return 0;
}

/**
 * ice_lbtest_prepare_rings - configure Tx/Rx test rings
 * @vsi: pointer to the VSI structure
 *
 * Function configures rings of a VSI for loopback test without
 * enabling interrupts or informing the kernel about new queues.
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
{
	int status;

	status = ice_vsi_setup_tx_rings(vsi);
	if (status)
		goto err_setup_tx_ring;

	status = ice_vsi_setup_rx_rings(vsi);
	if (status)
		goto err_setup_rx_ring;

	status = ice_vsi_cfg_lan(vsi);
	if (status)
		goto err_setup_rx_ring;

	status = ice_vsi_start_all_rx_rings(vsi);
	if (status)
		goto err_start_rx_ring;

	return 0;

err_start_rx_ring:
	ice_vsi_free_rx_rings(vsi);
err_setup_rx_ring:
	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
err_setup_tx_ring:
	ice_vsi_free_tx_rings(vsi);

	return status;
}

/**
 * ice_lbtest_disable_rings - disable Tx/Rx test rings after loopback test
 * @vsi: pointer to the VSI structure
 *
 * Function stops and frees VSI rings after a loopback test.
 * Returns 0 on success, negative on failure.
 */
static int ice_lbtest_disable_rings(struct ice_vsi *vsi)
{
	int status;

	status = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
	if (status)
		netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
			   vsi->vsi_num, status);

	status = ice_vsi_stop_all_rx_rings(vsi);
	if (status)
		netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
			   vsi->vsi_num, status);

	ice_vsi_free_tx_rings(vsi);
	ice_vsi_free_rx_rings(vsi);

	return status;
}

/**
 * ice_lbtest_create_frame - create test packet
 * @pf: pointer to the PF structure
 * @ret_data: allocated frame buffer
 * @size: size of the packet data
 *
 * Function allocates a frame with a test pattern at specific offsets.
 * Returns 0 on success, non-zero on failure.
 */
static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
{
	u8 *data;

	if (!pf)
		return -EINVAL;

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Since the Ethernet test frame should always be at least
	 * 64 bytes long, fill some octets in the payload with test data.
	 */
	memset(data, 0xFF, size);
	data[32] = 0xDE;
	data[42] = 0xAD;
	data[44] = 0xBE;
	data[46] = 0xEF;

	*ret_data = data;

	return 0;
}
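
/* Test frame layout (informal summary of the pattern above): the buffer is
 * memset to 0xFF, then 0xDE/0xAD/0xBE/0xEF are written at offsets
 * 32/42/44/46. ice_lbtest_check_frame() below validates exactly those
 * offsets, plus offset 48, which must still hold the 0xFF fill.
 */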

/**
 * ice_lbtest_check_frame - verify received loopback frame
 * @frame: pointer to the raw packet data
 *
 * Function verifies the received test frame against the expected pattern.
 * Returns true if the frame matches the pattern, false otherwise.
 */
static bool ice_lbtest_check_frame(u8 *frame)
{
	/* Validate bytes of a frame under offsets chosen earlier */
	if (frame[32] == 0xDE &&
	    frame[42] == 0xAD &&
	    frame[44] == 0xBE &&
	    frame[46] == 0xEF &&
	    frame[48] == 0xFF)
		return true;

	return false;
}

/**
 * ice_diag_send - send test frames to the test ring
 * @tx_ring: pointer to the transmit ring
 * @data: pointer to the raw packet data
 * @size: size of the packet to send
 *
 * Function sends loopback packets on a test Tx ring.
 */
static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size)
{
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;
	u64 td_cmd;

	tx_desc = ICE_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_buf = &tx_ring->tx_buf[tx_ring->next_to_use];

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		return -EINVAL;

	tx_desc->buf_addr = cpu_to_le64(dma);

	/* These flags are required for a descriptor to be pushed out */
	td_cmd = (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
	tx_desc->cmd_type_offset_bsz =
		cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			    (td_cmd << ICE_TXD_QW1_CMD_S) |
			    ((u64)0 << ICE_TXD_QW1_OFFSET_S) |
			    ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			    ((u64)0 << ICE_TXD_QW1_L2TAG1_S));

	tx_buf->next_to_watch = tx_desc;

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	tx_ring->next_to_use++;
	if (tx_ring->next_to_use >= tx_ring->count)
		tx_ring->next_to_use = 0;

	writel_relaxed(tx_ring->next_to_use, tx_ring->tail);

	/* Wait until the packets get transmitted to the receive queue. */
	usleep_range(1000, 2000);
	dma_unmap_single(tx_ring->dev, dma, size, DMA_TO_DEVICE);

	return 0;
}
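
/* Informal sketch of the descriptor quad-word packed above, keyed by the
 * ICE_TXD_QW1_*_S shifts (bit ranges listed for orientation only; the
 * authoritative layout lives with the shift definitions):
 *
 *	bits  0..3	DTYPE		ICE_TX_DESC_DTYPE_DATA
 *	bits  4..15	CMD		EOP | RS
 *	bits 16..33	OFFSET		0 (no header offsets for a raw frame)
 *	bits 34..47	TX_BUF_SZ	frame size
 *	bits 48..63	L2TAG1		0 (no VLAN tag)
 */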

#define ICE_LB_FRAME_SIZE 64
/**
 * ice_lbtest_receive_frames - receive and verify test frames
 * @rx_ring: pointer to the receive ring
 *
 * Function receives loopback packets and verifies their correctness.
 * Returns number of received valid frames.
 */
static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
{
	struct libeth_fqe *rx_buf;
	int valid_frames, i;
	struct page *page;
	u8 *received_buf;

	valid_frames = 0;

	for (i = 0; i < rx_ring->count; i++) {
		union ice_32b_rx_flex_desc *rx_desc;

		rx_desc = ICE_RX_DESC(rx_ring, i);

		if (!(rx_desc->wb.status_error0 &
		    (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
		     cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
			continue;

		rx_buf = &rx_ring->rx_fqes[i];
		page = __netmem_to_page(rx_buf->netmem);
		received_buf = page_address(page) + rx_buf->offset +
			       page->pp->p.offset;

		if (ice_lbtest_check_frame(received_buf))
			valid_frames++;
	}

	return valid_frames;
}

/**
 * ice_loopback_test - perform a loopback test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_loopback_test(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *test_vsi;
	u8 *tx_frame __free(kfree) = NULL;
	u8 broadcast[ETH_ALEN], ret = 0;
	int num_frames, valid_frames;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	int i;

	netdev_info(netdev, "loopback test\n");

	test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
	if (!test_vsi) {
		netdev_err(netdev, "Failed to create a VSI for the loopback test\n");
		return 1;
	}

	test_vsi->netdev = netdev;
	tx_ring = test_vsi->tx_rings[0];
	rx_ring = test_vsi->rx_rings[0];
	/* Dummy q_vector and napi. Fill the minimum required for
	 * ice_rxq_pp_create().
	 */
	rx_ring->q_vector->napi.dev = netdev;

	if (ice_lbtest_prepare_rings(test_vsi)) {
		ret = 2;
		goto lbtest_vsi_close;
	}

	if (ice_alloc_rx_bufs(rx_ring, rx_ring->count)) {
		ret = 3;
		goto lbtest_rings_dis;
	}

	/* Enable MAC loopback in firmware */
	if (ice_aq_set_mac_loopback(&pf->hw, true, NULL)) {
		ret = 4;
		goto lbtest_mac_dis;
	}

	/* Test VSI needs to receive broadcast packets */
	eth_broadcast_addr(broadcast);
	if (ice_fltr_add_mac(test_vsi, broadcast, ICE_FWD_TO_VSI)) {
		ret = 5;
		goto lbtest_mac_dis;
	}

	if (ice_lbtest_create_frame(pf, &tx_frame, ICE_LB_FRAME_SIZE)) {
		ret = 7;
		goto remove_mac_filters;
	}

	num_frames = min_t(int, tx_ring->count, 32);
	for (i = 0; i < num_frames; i++) {
		if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) {
			ret = 8;
			goto remove_mac_filters;
		}
	}

	valid_frames = ice_lbtest_receive_frames(rx_ring);
	if (!valid_frames)
		ret = 9;
	else if (valid_frames != num_frames)
		ret = 10;

remove_mac_filters:
	if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI))
		netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
lbtest_mac_dis:
	/* Disable MAC loopback after the test is completed. */
	if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
		netdev_err(netdev, "Could not disable MAC loopback\n");
lbtest_rings_dis:
	if (ice_lbtest_disable_rings(test_vsi))
		netdev_err(netdev, "Could not disable test rings\n");
lbtest_vsi_close:
	test_vsi->netdev = NULL;
	if (ice_vsi_release(test_vsi))
		netdev_err(netdev, "Failed to remove the test VSI\n");

	return ret;
}

/**
 * ice_intr_test - perform an interrupt test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_intr_test(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u16 swic_old = pf->sw_int_count;

	netdev_info(netdev, "interrupt test\n");

	wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_irq.index),
	     GLINT_DYN_CTL_SW_ITR_INDX_M |
	     GLINT_DYN_CTL_INTENA_MSK_M |
	     GLINT_DYN_CTL_SWINT_TRIG_M);

	usleep_range(1000, 2000);

	/* Fail (return 1) if the triggered software interrupt was not counted */
	return (swic_old == pf->sw_int_count);
}

/**
 * ice_self_test - handler function for performing a self-test by ethtool
 * @netdev: network interface device structure
 * @eth_test: ethtool_test structure
 * @data: required by ethtool.self_test
 *
 * This function is called after invoking 'ethtool -t devname' command where
 * devname is the name of the network device on which ethtool should operate.
 * It performs a set of self-tests to check if a device works properly.
 */
static void
ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
	      u64 *data)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	bool if_running = netif_running(netdev);
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		netdev_info(netdev, "offline testing starting\n");

		set_bit(ICE_TESTING, pf->state);

		if (ice_active_vfs(pf)) {
			dev_warn(dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
			data[ICE_ETH_TEST_REG] = 1;
			data[ICE_ETH_TEST_EEPROM] = 1;
			data[ICE_ETH_TEST_INTR] = 1;
			data[ICE_ETH_TEST_LOOP] = 1;
			data[ICE_ETH_TEST_LINK] = 1;
			eth_test->flags |= ETH_TEST_FL_FAILED;
			clear_bit(ICE_TESTING, pf->state);
			goto skip_ol_tests;
		}
		/* If the device is online then take it offline */
		if (if_running)
			/* indicate we're in test mode */
			ice_stop(netdev);

		data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
		data[ICE_ETH_TEST_EEPROM] = ice_eeprom_test(netdev);
		data[ICE_ETH_TEST_INTR] = ice_intr_test(netdev);
		data[ICE_ETH_TEST_LOOP] = ice_loopback_test(netdev);
		data[ICE_ETH_TEST_REG] = ice_reg_test(netdev);

		if (data[ICE_ETH_TEST_LINK] ||
		    data[ICE_ETH_TEST_EEPROM] ||
		    data[ICE_ETH_TEST_LOOP] ||
		    data[ICE_ETH_TEST_INTR] ||
		    data[ICE_ETH_TEST_REG])
			eth_test->flags |= ETH_TEST_FL_FAILED;

		clear_bit(ICE_TESTING, pf->state);

		if (if_running) {
			int status = ice_open(netdev);

			if (status) {
				dev_err(dev, "Could not open device %s, err %d\n",
					pf->int_name, status);
			}
		}
	} else {
		/* Online tests */
		netdev_info(netdev, "online testing starting\n");

		data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
		if (data[ICE_ETH_TEST_LINK])
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline only tests, not run in online; pass by default */
		data[ICE_ETH_TEST_REG] = 0;
		data[ICE_ETH_TEST_EEPROM] = 0;
		data[ICE_ETH_TEST_INTR] = 0;
		data[ICE_ETH_TEST_LOOP] = 0;
	}

skip_ol_tests:
	netdev_info(netdev, "testing finished\n");
}

static void
__ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
		  struct ice_vsi *vsi)
{
	unsigned int i;
	u8 *p = data;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ICE_VSI_STATS_LEN; i++)
			ethtool_puts(&p, ice_gstrings_vsi_stats[i].stat_string);

		if (ice_is_port_repr_netdev(netdev))
			return;

		ice_for_each_alloc_txq(vsi, i) {
			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
		}

		ice_for_each_alloc_rxq(vsi, i) {
			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
		}

		if (vsi->type != ICE_VSI_PF)
			return;

		for (i = 0; i < ICE_PF_STATS_LEN; i++)
			ethtool_puts(&p, ice_gstrings_pf_stats[i].stat_string);

		for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
			ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i);
			ethtool_sprintf(&p, "tx_priority_%u_xoff.nic", i);
		}
		for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
			ethtool_sprintf(&p, "rx_priority_%u_xon.nic", i);
			ethtool_sprintf(&p, "rx_priority_%u_xoff.nic", i);
		}
		break;
	case ETH_SS_TEST:
		memcpy(data, ice_gstrings_test, ICE_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++)
			ethtool_puts(&p, ice_gstrings_priv_flags[i].name);
		break;
	default:
		break;
	}
}

static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	__ice_get_strings(netdev, stringset, data, np->vsi);
}

static int
ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	bool led_active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		led_active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		led_active = false;
		break;
	default:
		return -EINVAL;
	}

	if (ice_aq_set_port_id_led(np->vsi->port_info, !led_active, NULL))
		return -EIO;

	return 0;
}

/**
 * ice_set_fec_cfg - Set link FEC options
 * @netdev: network interface device structure
 * @req_fec: FEC mode to configure
 */
static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_aqc_set_phy_cfg_data config = { 0 };
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;

	pi = vsi->port_info;
	if (!pi)
		return -EOPNOTSUPP;

	/* Changing the FEC parameters is not supported if not the PF VSI */
	if (vsi->type != ICE_VSI_PF) {
		netdev_info(netdev, "Changing FEC parameters only supported for PF VSI\n");
		return -EOPNOTSUPP;
	}

	/* Proceed only if requesting different FEC mode */
	if (pi->phy.curr_user_fec_req == req_fec)
		return 0;

	/* Copy the current user PHY configuration. The current user PHY
	 * configuration is initialized during probe from PHY capabilities
	 * software mode, and updated on set PHY configuration.
	 */
	memcpy(&config, &pi->phy.curr_user_phy_cfg, sizeof(config));

	ice_cfg_phy_fec(pi, &config, req_fec);
	config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (ice_aq_set_phy_cfg(pi->hw, pi, &config, NULL))
		return -EAGAIN;

	/* Save requested FEC config */
	pi->phy.curr_user_fec_req = req_fec;

	return 0;
}

/**
 * ice_set_fecparam - Set FEC link options
 * @netdev: network interface device structure
 * @fecparam: Ethtool structure to retrieve FEC parameters
 */
static int
ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	enum ice_fec_mode fec;

	switch (fecparam->fec) {
	case ETHTOOL_FEC_AUTO:
		fec = ICE_FEC_AUTO;
		break;
	case ETHTOOL_FEC_RS:
		fec = ICE_FEC_RS;
		break;
	case ETHTOOL_FEC_BASER:
		fec = ICE_FEC_BASER;
		break;
	case ETHTOOL_FEC_OFF:
	case ETHTOOL_FEC_NONE:
		fec = ICE_FEC_NONE;
		break;
	default:
		dev_warn(ice_pf_to_dev(vsi->back), "Unsupported FEC mode: %d\n",
			 fecparam->fec);
		return -EINVAL;
	}

	return ice_set_fec_cfg(netdev, fec);
}

/**
 * ice_get_fecparam - Get link FEC options
 * @netdev: network interface device structure
 * @fecparam: Ethtool structure to retrieve FEC parameters
 */
static int
ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_aqc_get_phy_caps_data *caps;
	struct ice_link_status *link_info;
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;
	int err;

	pi = vsi->port_info;

	if (!pi)
		return -EOPNOTSUPP;
	link_info = &pi->phy.link_info;

	/* Set FEC mode based on negotiated link info */
	switch (link_info->fec_info) {
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fecparam->active_fec = ETHTOOL_FEC_BASER;
		break;
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fecparam->active_fec = ETHTOOL_FEC_RS;
		break;
	default:
		fecparam->active_fec = ETHTOOL_FEC_OFF;
		break;
	}

	caps = kzalloc_obj(*caps);
	if (!caps)
		return -ENOMEM;

	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
				  caps, NULL);
	if (err)
		goto done;

	/* Set supported/configured FEC modes based on PHY capability */
	if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC)
		fecparam->fec |= ETHTOOL_FEC_AUTO;
	if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fecparam->fec |= ETHTOOL_FEC_BASER;
	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
		fecparam->fec |= ETHTOOL_FEC_RS;
	if (caps->link_fec_options == 0)
		fecparam->fec |= ETHTOOL_FEC_OFF;

done:
	kfree(caps);
	return err;
}

/**
 * ice_nway_reset - restart autonegotiation
 * @netdev: network interface device structure
 */
static int ice_nway_reset(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int err;

	/* If VSI state is up, then restart autoneg with link up */
	if (!test_bit(ICE_DOWN, vsi->back->state))
		err = ice_set_link(vsi, true);
	else
		err = ice_set_link(vsi, false);

	return err;
}
1706
1707 /**
1708 * ice_get_priv_flags - report device private flags
1709 * @netdev: network interface device structure
1710 *
1711 * The get string set count and the string set should be matched for each
1712 * flag returned. Add new strings for each flag to the ice_gstrings_priv_flags
1713 * array.
1714 *
1715 * Returns a u32 bitmap of flags.
1716 */
1717 static u32 ice_get_priv_flags(struct net_device *netdev)
1718 {
1719 struct ice_pf *pf = ice_netdev_to_pf(netdev);
1720 u32 i, ret_flags = 0;
1721
1722 for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
1723 const struct ice_priv_flag *priv_flag;
1724
1725 priv_flag = &ice_gstrings_priv_flags[i];
1726
1727 if (test_bit(priv_flag->bitno, pf->flags))
1728 ret_flags |= BIT(i);
1729 }
1730
1731 return ret_flags;
1732 }
1733
1734 /**
1735 * ice_set_priv_flags - set private flags
1736 * @netdev: network interface device structure
1737 * @flags: bit flags to be set
1738 */
1739 static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
1740 {
1741 struct ice_netdev_priv *np = netdev_priv(netdev);
1742 DECLARE_BITMAP(change_flags, ICE_PF_FLAGS_NBITS);
1743 DECLARE_BITMAP(orig_flags, ICE_PF_FLAGS_NBITS);
1744 struct ice_vsi *vsi = np->vsi;
1745 struct ice_pf *pf = vsi->back;
1746 struct device *dev;
1747 int ret = 0;
1748 u32 i;
1749
1750 if (flags >= BIT(ICE_PRIV_FLAG_ARRAY_SIZE))
1751 return -EINVAL;
1752
1753 dev = ice_pf_to_dev(pf);
1754 set_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
1755
1756 bitmap_copy(orig_flags, pf->flags, ICE_PF_FLAGS_NBITS);
1757 for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
1758 const struct ice_priv_flag *priv_flag;
1759
1760 priv_flag = &ice_gstrings_priv_flags[i];
1761
1762 if (flags & BIT(i))
1763 set_bit(priv_flag->bitno, pf->flags);
1764 else
1765 clear_bit(priv_flag->bitno, pf->flags);
1766 }
1767
1768 bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);
1769
1770 /* Do not allow change to link-down-on-close when Total Port Shutdown
1771 * is enabled.
1772 */
1773 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, change_flags) &&
1774 test_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) {
1775 dev_err(dev, "Setting link-down-on-close not supported on this port\n");
1776 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1777 ret = -EINVAL;
1778 goto ethtool_exit;
1779 }
1780
1781 if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) {
1782 if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) {
1783 int status;
1784
1785 /* Disable FW LLDP engine */
1786 status = ice_cfg_lldp_mib_change(&pf->hw, false);
1787
1788 /* If unregistering for LLDP events fails, this is
1789 * not an error state, as there shouldn't be any
1790 * events to respond to.
1791 */
1792 if (status)
1793 dev_info(dev, "Failed to unreg for LLDP events\n");
1794
1795 /* The AQ call to stop the FW LLDP agent will generate
1796 * an error if the agent is already stopped.
1797 */
1798 status = ice_aq_stop_lldp(&pf->hw, true, true, NULL);
1799 if (status)
1800 dev_warn(dev, "Fail to stop LLDP agent\n");
1801 /* Use case for having the FW LLDP agent stopped
1802 * will likely not need DCB, so failure to init is
1803 * not a concern of ethtool
1804 */
1805 status = ice_init_pf_dcb(pf, true);
1806 if (status)
1807 dev_warn(dev, "Fail to init DCB\n");
1808
1809 pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED;
1810 pf->dcbx_cap |= DCB_CAP_DCBX_HOST;
1811 } else {
1812 bool dcbx_agent_status;
1813 int status;
1814
1815 if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) {
1816 clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
1817 dev_err(dev, "QoS in L3 DSCP mode, FW Agent not allowed to start\n");
1818 ret = -EOPNOTSUPP;
1819 goto ethtool_exit;
1820 }
1821
1822 /* Remove rule to direct LLDP packets to default VSI.
1823 * The FW LLDP engine will now be consuming them.
1824 */
1825 ice_cfg_sw_rx_lldp(vsi->back, false);
1826
1827 /* AQ command to start FW LLDP agent will return an
1828 * error if the agent is already started
1829 */
1830 status = ice_aq_start_lldp(&pf->hw, true, NULL);
1831 if (status)
1832 dev_warn(dev, "Fail to start LLDP Agent\n");
1833
1834 /* AQ command to start FW DCBX agent will fail if
1835 * the agent is already started
1836 */
1837 status = ice_aq_start_stop_dcbx(&pf->hw, true,
1838 &dcbx_agent_status,
1839 NULL);
1840 if (status)
1841 dev_dbg(dev, "Failed to start FW DCBX\n");
1842
1843 dev_info(dev, "FW DCBX agent is %s\n",
1844 dcbx_agent_status ? "ACTIVE" : "DISABLED");
1845
1846 /* Failure to configure MIB change or init DCB is not
1847 * relevant to ethtool. Print notification that
1848 * registration/init failed but do not return error
1849 * state to ethtool
1850 */
1851 status = ice_init_pf_dcb(pf, true);
1852 if (status)
1853 dev_dbg(dev, "Fail to init DCB\n");
1854
1855 /* Register for MIB change events */
1856 status = ice_cfg_lldp_mib_change(&pf->hw, true);
1857 if (status)
1858 dev_dbg(dev, "Fail to enable MIB change events\n");
1859
1860 pf->dcbx_cap &= ~DCB_CAP_DCBX_HOST;
1861 pf->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
1862
1863 ice_nway_reset(netdev);
1864 }
1865 }
1866 /* don't allow modification of this flag while any VF is in
1867 * unicast promiscuous mode, because that is not supported
1868 */
1869 if (test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, change_flags) &&
1870 ice_is_any_vf_in_unicast_promisc(pf)) {
1871 dev_err(dev, "Changing vf-true-promisc-support flag while VF(s) are in promiscuous mode not supported\n");
1872 /* toggle bit back to previous state */
1873 change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
1874 ret = -EAGAIN;
1875 }
1876
1877 if (test_bit(ICE_FLAG_VF_VLAN_PRUNING, change_flags) &&
1878 ice_has_vfs(pf)) {
1879 dev_err(dev, "vf-vlan-pruning: VLAN pruning cannot be changed while VFs are active.\n");
1880 /* toggle bit back to previous state */
1881 change_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags);
1882 ret = -EOPNOTSUPP;
1883 }
1884 ethtool_exit:
1885 clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
1886 return ret;
1887 }
1888
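/**
 * ice_get_sset_count - get the number of strings in a given string set
 * @netdev: network interface device structure
 * @sset: string set identifier (ETH_SS_STATS, ETH_SS_TEST, ETH_SS_PRIV_FLAGS)
 *
 * Returns the length of the requested string set, or -EOPNOTSUPP for an
 * unsupported string set.
 */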
1889 static int ice_get_sset_count(struct net_device *netdev, int sset)
1890 {
1891 switch (sset) {
1892 case ETH_SS_STATS:
1893 /* The number (and order) of strings reported *must* remain
1894 * constant for a given netdevice. This function must not
1895 * report a different number based on run time parameters
1896 * (such as the number of queues in use, or the setting of
1897 * a private ethtool flag). This is due to the nature of the
1898 * ethtool stats API.
1899 *
1900 * Userspace programs such as ethtool must make 3 separate
1901 * ioctl requests, one for size, one for the strings, and
1902 * finally one for the stats. Since these cross into
1903 * userspace, changes to the number or size could result in
1904 * undefined memory access or incorrect string<->value
1905 * correlations for statistics.
1906 *
1907 * Even if it appears to be safe, changes to the size or
1908 * order of strings will suffer from race conditions and are
1909 * not safe.
1910 */
1911 return ICE_ALL_STATS_LEN(netdev);
1912 case ETH_SS_TEST:
1913 return ICE_TEST_LEN;
1914 case ETH_SS_PRIV_FLAGS:
1915 return ICE_PRIV_FLAG_ARRAY_SIZE;
1916 default:
1917 return -EOPNOTSUPP;
1918 }
1919 }
1920
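/**
 * __ice_get_ethtool_stats - fill the ethtool stats buffer for a VSI
 * @netdev: network interface device structure
 * @stats: ethtool stats command structure (unused)
 * @data: buffer into which statistic values are written
 * @vsi: VSI whose statistics are reported
 *
 * Fills @data with the VSI statistics, the per-queue packet and byte
 * counts and, for the PF VSI only, the PF-wide and per-priority PFC
 * statistics, in the same order as the stat strings are reported.
 */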
1921 static void
1922 __ice_get_ethtool_stats(struct net_device *netdev,
1923 struct ethtool_stats __always_unused *stats, u64 *data,
1924 struct ice_vsi *vsi)
1925 {
1926 struct ice_pf *pf = vsi->back;
1927 struct ice_tx_ring *tx_ring;
1928 struct ice_rx_ring *rx_ring;
1929 unsigned int j;
1930 int i = 0;
1931 char *p;
1932
1933 ice_update_pf_stats(pf);
1934 ice_update_vsi_stats(vsi);
1935
1936 for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
1937 p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
1938 data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
1939 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1940 }
1941
1942 if (ice_is_port_repr_netdev(netdev))
1943 return;
1944
1945 /* populate per queue stats */
1946 rcu_read_lock();
1947
1948 ice_for_each_alloc_txq(vsi, j) {
1949 u64 pkts, bytes;
1950
1951 tx_ring = READ_ONCE(vsi->tx_rings[j]);
1952 if (!tx_ring || !tx_ring->ring_stats) {
1953 data[i++] = 0;
1954 data[i++] = 0;
1955 continue;
1956 }
1957
1958 ice_fetch_tx_ring_stats(tx_ring, &pkts, &bytes);
1959
1960 data[i++] = pkts;
1961 data[i++] = bytes;
1962 }
1963
1964 ice_for_each_alloc_rxq(vsi, j) {
1965 u64 pkts, bytes;
1966
1967 rx_ring = READ_ONCE(vsi->rx_rings[j]);
1968 if (!rx_ring || !rx_ring->ring_stats) {
1969 data[i++] = 0;
1970 data[i++] = 0;
1971 continue;
1972 }
1973
1974 ice_fetch_rx_ring_stats(rx_ring, &pkts, &bytes);
1975
1976 data[i++] = pkts;
1977 data[i++] = bytes;
1978 }
1979
1980 rcu_read_unlock();
1981
1982 if (vsi->type != ICE_VSI_PF)
1983 return;
1984
1985 for (j = 0; j < ICE_PF_STATS_LEN; j++) {
1986 p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset;
1987 data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
1988 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1989 }
1990
1991 for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
1992 data[i++] = pf->stats.priority_xon_tx[j];
1993 data[i++] = pf->stats.priority_xoff_tx[j];
1994 }
1995
1996 for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
1997 data[i++] = pf->stats.priority_xon_rx[j];
1998 data[i++] = pf->stats.priority_xoff_rx[j];
1999 }
2000 }
2001
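/**
 * ice_get_ethtool_stats - report device statistics for "ethtool -S"
 * @netdev: network interface device structure
 * @stats: ethtool stats command structure (unused)
 * @data: buffer into which statistic values are written
 */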
2002 static void
2003 ice_get_ethtool_stats(struct net_device *netdev,
2004 struct ethtool_stats __always_unused *stats, u64 *data)
2005 {
2006 struct ice_netdev_priv *np = netdev_priv(netdev);
2007
2008 __ice_get_ethtool_stats(netdev, stats, data, np->vsi);
2009 }
2010
2011 #define ICE_PHY_TYPE_LOW_MASK_MIN_1G (ICE_PHY_TYPE_LOW_100BASE_TX | \
2012 ICE_PHY_TYPE_LOW_100M_SGMII)
2013
2014 #define ICE_PHY_TYPE_LOW_MASK_MIN_25G (ICE_PHY_TYPE_LOW_MASK_MIN_1G | \
2015 ICE_PHY_TYPE_LOW_1000BASE_T | \
2016 ICE_PHY_TYPE_LOW_1000BASE_SX | \
2017 ICE_PHY_TYPE_LOW_1000BASE_LX | \
2018 ICE_PHY_TYPE_LOW_1000BASE_KX | \
2019 ICE_PHY_TYPE_LOW_1G_SGMII | \
2020 ICE_PHY_TYPE_LOW_2500BASE_T | \
2021 ICE_PHY_TYPE_LOW_2500BASE_X | \
2022 ICE_PHY_TYPE_LOW_2500BASE_KX | \
2023 ICE_PHY_TYPE_LOW_5GBASE_T | \
2024 ICE_PHY_TYPE_LOW_5GBASE_KR | \
2025 ICE_PHY_TYPE_LOW_10GBASE_T | \
2026 ICE_PHY_TYPE_LOW_10G_SFI_DA | \
2027 ICE_PHY_TYPE_LOW_10GBASE_SR | \
2028 ICE_PHY_TYPE_LOW_10GBASE_LR | \
2029 ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \
2030 ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \
2031 ICE_PHY_TYPE_LOW_10G_SFI_C2C)
2032
2033 #define ICE_PHY_TYPE_LOW_MASK_100G (ICE_PHY_TYPE_LOW_100GBASE_CR4 | \
2034 ICE_PHY_TYPE_LOW_100GBASE_SR4 | \
2035 ICE_PHY_TYPE_LOW_100GBASE_LR4 | \
2036 ICE_PHY_TYPE_LOW_100GBASE_KR4 | \
2037 ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \
2038 ICE_PHY_TYPE_LOW_100G_CAUI4 | \
2039 ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \
2040 ICE_PHY_TYPE_LOW_100G_AUI4 | \
2041 ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \
2042 ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \
2043 ICE_PHY_TYPE_LOW_100GBASE_CP2 | \
2044 ICE_PHY_TYPE_LOW_100GBASE_SR2 | \
2045 ICE_PHY_TYPE_LOW_100GBASE_DR)
2046
2047 #define ICE_PHY_TYPE_HIGH_MASK_100G (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \
2048 ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |\
2049 ICE_PHY_TYPE_HIGH_100G_CAUI2 | \
2050 ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \
2051 ICE_PHY_TYPE_HIGH_100G_AUI2)
2052
2053 #define ICE_PHY_TYPE_HIGH_MASK_200G (ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 | \
2054 ICE_PHY_TYPE_HIGH_200G_SR4 | \
2055 ICE_PHY_TYPE_HIGH_200G_FR4 | \
2056 ICE_PHY_TYPE_HIGH_200G_LR4 | \
2057 ICE_PHY_TYPE_HIGH_200G_DR4 | \
2058 ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 | \
2059 ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC | \
2060 ICE_PHY_TYPE_HIGH_200G_AUI4)
2061
2062 /**
2063 * ice_mask_min_supported_speeds - mask out speeds below the supported minimum
2064 * @hw: pointer to the HW structure
2065 * @phy_types_high: PHY type high
2066 * @phy_types_low: PHY type low to apply minimum supported speeds mask
2067 *
2068 * Apply minimum supported speeds mask to PHY type low. These are the speeds
2069 * for ethtool supported link mode.
2070 */
2071 static void
2072 ice_mask_min_supported_speeds(struct ice_hw *hw,
2073 u64 phy_types_high, u64 *phy_types_low)
2074 {
2075 /* if the port supports 100G or 200G speeds, the minimum supported speed is 25G */
2076 if ((*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G) ||
2077 (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G) ||
2078 (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_200G))
2079 *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G;
2080 else if (!ice_is_100m_speed_supported(hw))
2081 *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G;
2082 }
2083
2084 /**
2085 * ice_linkmode_set_bit - set link mode bit
2086 * @phy_to_ethtool: PHY type to ethtool link mode struct to set
2087 * @ks: ethtool link ksettings struct to fill out
2088 * @req_speeds: speed requested by user
2089 * @advert_phy_type: advertised PHY type
2090 * @phy_type: PHY type
2091 */
2092 static void
2093 ice_linkmode_set_bit(const struct ice_phy_type_to_ethtool *phy_to_ethtool,
2094 struct ethtool_link_ksettings *ks, u32 req_speeds,
2095 u64 advert_phy_type, u32 phy_type)
2096 {
2097 linkmode_set_bit(phy_to_ethtool->link_mode, ks->link_modes.supported);
2098
2099 if (req_speeds & phy_to_ethtool->aq_link_speed ||
2100 (!req_speeds && advert_phy_type & BIT(phy_type)))
2101 linkmode_set_bit(phy_to_ethtool->link_mode,
2102 ks->link_modes.advertising);
2103 }
2104
2105 /**
2106 * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes
2107 * @netdev: network interface device structure
2108 * @ks: ethtool link ksettings struct to fill out
2109 */
2110 static void
2111 ice_phy_type_to_ethtool(struct net_device *netdev,
2112 struct ethtool_link_ksettings *ks)
2113 {
2114 struct ice_netdev_priv *np = netdev_priv(netdev);
2115 struct ice_vsi *vsi = np->vsi;
2116 struct ice_pf *pf = vsi->back;
2117 u64 advert_phy_type_lo = 0;
2118 u64 advert_phy_type_hi = 0;
2119 u64 phy_types_high = 0;
2120 u64 phy_types_low = 0;
2121 u32 req_speeds;
2122 u32 i;
2123
2124 req_speeds = vsi->port_info->phy.link_info.req_speeds;
2125
2126 /* Check whether lenient mode is supported and enabled; otherwise use strict mode.
2127 *
2128 * In lenient mode the Supported link modes are the PHY types without
2129 * media. The Advertising link mode is either 1. the user requested
2130 * speed, 2. the override PHY mask, or 3. the PHY types with media.
2131 *
2132 * In strict mode the Supported link modes are the PHY types with media,
2133 * and the Advertising link modes are the media PHY types or the speed
2134 * requested by the user.
2135 */
2136 if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) {
2137 phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo);
2138 phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi);
2139
2140 ice_mask_min_supported_speeds(&pf->hw, phy_types_high,
2141 &phy_types_low);
2142 /* determine advertised modes from the link override only
2143 * if it is supported and the FW does not shield the
2144 * driver from having to account for link overrides
2145 */
2146 if (ice_fw_supports_link_override(&pf->hw) &&
2147 !ice_fw_supports_report_dflt_cfg(&pf->hw)) {
2148 struct ice_link_default_override_tlv *ldo;
2149
2150 ldo = &pf->link_dflt_override;
2151 /* If override enabled and PHY mask set, then
2152 * Advertising link mode is the intersection of the PHY
2153 * types without media and the override PHY mask.
2154 */
2155 if (ldo->options & ICE_LINK_OVERRIDE_EN &&
2156 (ldo->phy_type_low || ldo->phy_type_high)) {
2157 advert_phy_type_lo =
2158 le64_to_cpu(pf->nvm_phy_type_lo) &
2159 ldo->phy_type_low;
2160 advert_phy_type_hi =
2161 le64_to_cpu(pf->nvm_phy_type_hi) &
2162 ldo->phy_type_high;
2163 }
2164 }
2165 } else {
2166 /* strict mode */
2167 phy_types_low = vsi->port_info->phy.phy_type_low;
2168 phy_types_high = vsi->port_info->phy.phy_type_high;
2169 }
2170
2171 /* If Advertising link mode PHY type is not using override PHY type,
2172 * then use PHY type with media.
2173 */
2174 if (!advert_phy_type_lo && !advert_phy_type_hi) {
2175 advert_phy_type_lo = vsi->port_info->phy.phy_type_low;
2176 advert_phy_type_hi = vsi->port_info->phy.phy_type_high;
2177 }
2178
2179 linkmode_zero(ks->link_modes.supported);
2180 linkmode_zero(ks->link_modes.advertising);
2181
2182 for (i = 0; i < ARRAY_SIZE(phy_type_low_lkup); i++) {
2183 if (phy_types_low & BIT_ULL(i))
2184 ice_linkmode_set_bit(&phy_type_low_lkup[i], ks,
2185 req_speeds, advert_phy_type_lo,
2186 i);
2187 }
2188
2189 for (i = 0; i < ARRAY_SIZE(phy_type_high_lkup); i++) {
2190 if (phy_types_high & BIT_ULL(i))
2191 ice_linkmode_set_bit(&phy_type_high_lkup[i], ks,
2192 req_speeds, advert_phy_type_hi,
2193 i);
2194 }
2195 }
2196
2197 #define TEST_SET_BITS_TIMEOUT 50
2198 #define TEST_SET_BITS_SLEEP_MAX 2000
2199 #define TEST_SET_BITS_SLEEP_MIN 1000
2200
2201 /**
2202 * ice_get_settings_link_up - Get Link settings for when link is up
2203 * @ks: ethtool ksettings to fill in
2204 * @netdev: network interface device structure
2205 */
2206 static void
2207 ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
2208 struct net_device *netdev)
2209 {
2210 struct ice_netdev_priv *np = netdev_priv(netdev);
2211 struct ice_port_info *pi = np->vsi->port_info;
2212 struct ice_link_status *link_info;
2213 struct ice_vsi *vsi = np->vsi;
2214
2215 link_info = &vsi->port_info->phy.link_info;
2216
2217 /* Get supported and advertised settings from PHY ability with media */
2218 ice_phy_type_to_ethtool(netdev, ks);
2219
2220 switch (link_info->link_speed) {
2221 case ICE_AQ_LINK_SPEED_200GB:
2222 ks->base.speed = SPEED_200000;
2223 break;
2224 case ICE_AQ_LINK_SPEED_100GB:
2225 ks->base.speed = SPEED_100000;
2226 break;
2227 case ICE_AQ_LINK_SPEED_50GB:
2228 ks->base.speed = SPEED_50000;
2229 break;
2230 case ICE_AQ_LINK_SPEED_40GB:
2231 ks->base.speed = SPEED_40000;
2232 break;
2233 case ICE_AQ_LINK_SPEED_25GB:
2234 ks->base.speed = SPEED_25000;
2235 break;
2236 case ICE_AQ_LINK_SPEED_20GB:
2237 ks->base.speed = SPEED_20000;
2238 break;
2239 case ICE_AQ_LINK_SPEED_10GB:
2240 ks->base.speed = SPEED_10000;
2241 break;
2242 case ICE_AQ_LINK_SPEED_5GB:
2243 ks->base.speed = SPEED_5000;
2244 break;
2245 case ICE_AQ_LINK_SPEED_2500MB:
2246 ks->base.speed = SPEED_2500;
2247 break;
2248 case ICE_AQ_LINK_SPEED_1000MB:
2249 ks->base.speed = SPEED_1000;
2250 break;
2251 case ICE_AQ_LINK_SPEED_100MB:
2252 ks->base.speed = SPEED_100;
2253 break;
2254 default:
2255 netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n",
2256 link_info->link_speed);
2257 break;
2258 }
2259 ks->base.duplex = DUPLEX_FULL;
2260
2261 if (link_info->an_info & ICE_AQ_AN_COMPLETED)
2262 ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
2263 Autoneg);
2264
2265 /* Set flow control negotiated Rx/Tx pause */
2266 switch (pi->fc.current_mode) {
2267 case ICE_FC_FULL:
2268 ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
2269 break;
2270 case ICE_FC_TX_PAUSE:
2271 ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
2272 ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
2273 Asym_Pause);
2274 break;
2275 case ICE_FC_RX_PAUSE:
2276 ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
2277 Asym_Pause);
2278 break;
2279 case ICE_FC_PFC:
2280 default:
2281 ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause);
2282 ethtool_link_ksettings_del_link_mode(ks, lp_advertising,
2283 Asym_Pause);
2284 break;
2285 }
2286 }
2287
2288 /**
2289 * ice_get_settings_link_down - Get the Link settings when link is down
2290 * @ks: ethtool ksettings to fill in
2291 * @netdev: network interface device structure
2292 *
2293 * Reports link settings that can be determined when link is down
2294 */
2295 static void
2296 ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
2297 struct net_device *netdev)
2298 {
2299 /* link is down and the driver needs to fall back on
2300 * supported PHY types to figure out what info to display
2301 */
2302 ice_phy_type_to_ethtool(netdev, ks);
2303
2304 /* With no link, speed and duplex are unknown */
2305 ks->base.speed = SPEED_UNKNOWN;
2306 ks->base.duplex = DUPLEX_UNKNOWN;
2307 }
2308
2309 /**
2310 * ice_get_link_ksettings - Get Link Speed and Duplex settings
2311 * @netdev: network interface device structure
2312 * @ks: ethtool ksettings
2313 *
2314 * Reports speed/duplex settings based on media_type
2315 */
2316 static int
2317 ice_get_link_ksettings(struct net_device *netdev,
2318 struct ethtool_link_ksettings *ks)
2319 {
2320 struct ice_netdev_priv *np = netdev_priv(netdev);
2321 struct ice_aqc_get_phy_caps_data *caps;
2322 struct ice_link_status *hw_link_info;
2323 struct ice_vsi *vsi = np->vsi;
2324 int err;
2325
2326 ethtool_link_ksettings_zero_link_mode(ks, supported);
2327 ethtool_link_ksettings_zero_link_mode(ks, advertising);
2328 ethtool_link_ksettings_zero_link_mode(ks, lp_advertising);
2329 hw_link_info = &vsi->port_info->phy.link_info;
2330
2331 /* set speed and duplex */
2332 if (hw_link_info->link_info & ICE_AQ_LINK_UP)
2333 ice_get_settings_link_up(ks, netdev);
2334 else
2335 ice_get_settings_link_down(ks, netdev);
2336
2337 /* set autoneg settings */
2338 ks->base.autoneg = (hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
2339 AUTONEG_ENABLE : AUTONEG_DISABLE;
2340
2341 /* set media type settings */
2342 switch (vsi->port_info->phy.media_type) {
2343 case ICE_MEDIA_FIBER:
2344 ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
2345 ks->base.port = PORT_FIBRE;
2346 break;
2347 case ICE_MEDIA_BASET:
2348 ethtool_link_ksettings_add_link_mode(ks, supported, TP);
2349 ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
2350 ks->base.port = PORT_TP;
2351 break;
2352 case ICE_MEDIA_BACKPLANE:
2353 ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
2354 ethtool_link_ksettings_add_link_mode(ks, advertising,
2355 Backplane);
2356 ks->base.port = PORT_NONE;
2357 break;
2358 case ICE_MEDIA_DA:
2359 ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
2360 ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
2361 ks->base.port = PORT_DA;
2362 break;
2363 default:
2364 ks->base.port = PORT_OTHER;
2365 break;
2366 }
2367
2368 /* flow control is symmetric and always supported */
2369 ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
2370
2371 caps = kzalloc_obj(*caps);
2372 if (!caps)
2373 return -ENOMEM;
2374
2375 err = ice_aq_get_phy_caps(vsi->port_info, false,
2376 ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
2377 if (err)
2378 goto done;
2379
2380 /* Set the advertised flow control based on the PHY capability */
2381 if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) &&
2382 (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) {
2383 ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
2384 ethtool_link_ksettings_add_link_mode(ks, advertising,
2385 Asym_Pause);
2386 } else if (caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) {
2387 ethtool_link_ksettings_add_link_mode(ks, advertising,
2388 Asym_Pause);
2389 } else if (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) {
2390 ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
2391 ethtool_link_ksettings_add_link_mode(ks, advertising,
2392 Asym_Pause);
2393 } else {
2394 ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
2395 ethtool_link_ksettings_del_link_mode(ks, advertising,
2396 Asym_Pause);
2397 }
2398
2399 /* Set advertised FEC modes based on PHY capability */
2400 ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
2401
2402 if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
2403 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
2404 ethtool_link_ksettings_add_link_mode(ks, advertising,
2405 FEC_BASER);
2406 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
2407 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
2408 ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
2409
2410 err = ice_aq_get_phy_caps(vsi->port_info, false,
2411 ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL);
2412 if (err)
2413 goto done;
2414
2415 /* Set supported FEC modes based on PHY capability */
2416 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
2417
2418 if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
2419 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
2420 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
2421 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
2422 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
2423
2424 /* Set supported and advertised autoneg */
2425 if (ice_is_phy_caps_an_enabled(caps)) {
2426 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
2427 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
2428 }
2429
2430 done:
2431 kfree(caps);
2432 return err;
2433 }
2434
2435 /**
2436 * ice_speed_to_aq_link - Get AQ link speed by Ethtool forced speed
2437 * @speed: ethtool forced speed
2438 */
2439 static u16 ice_speed_to_aq_link(int speed)
2440 {
2441 int aq_speed;
2442
2443 switch (speed) {
2444 case SPEED_10:
2445 aq_speed = ICE_AQ_LINK_SPEED_10MB;
2446 break;
2447 case SPEED_100:
2448 aq_speed = ICE_AQ_LINK_SPEED_100MB;
2449 break;
2450 case SPEED_1000:
2451 aq_speed = ICE_AQ_LINK_SPEED_1000MB;
2452 break;
2453 case SPEED_2500:
2454 aq_speed = ICE_AQ_LINK_SPEED_2500MB;
2455 break;
2456 case SPEED_5000:
2457 aq_speed = ICE_AQ_LINK_SPEED_5GB;
2458 break;
2459 case SPEED_10000:
2460 aq_speed = ICE_AQ_LINK_SPEED_10GB;
2461 break;
2462 case SPEED_20000:
2463 aq_speed = ICE_AQ_LINK_SPEED_20GB;
2464 break;
2465 case SPEED_25000:
2466 aq_speed = ICE_AQ_LINK_SPEED_25GB;
2467 break;
2468 case SPEED_40000:
2469 aq_speed = ICE_AQ_LINK_SPEED_40GB;
2470 break;
2471 case SPEED_50000:
2472 aq_speed = ICE_AQ_LINK_SPEED_50GB;
2473 break;
2474 case SPEED_100000:
2475 aq_speed = ICE_AQ_LINK_SPEED_100GB;
2476 break;
2477 default:
2478 aq_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
2479 break;
2480 }
2481 return aq_speed;
2482 }
2483
2484 /**
2485 * ice_ksettings_find_adv_link_speed - Find advertising link speed
2486 * @ks: ethtool ksettings
2487 */
2488 static u16
2489 ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
2490 {
2491 const struct ethtool_forced_speed_map *map;
2492 u16 adv_link_speed = 0;
2493
2494 for (u32 i = 0; i < ARRAY_SIZE(ice_adv_lnk_speed_maps); i++) {
2495 map = ice_adv_lnk_speed_maps + i;
2496 if (linkmode_intersects(ks->link_modes.advertising, map->caps))
2497 adv_link_speed |= ice_speed_to_aq_link(map->speed);
2498 }
2499
2500 return adv_link_speed;
2501 }
2502
2503 /**
2504 * ice_setup_autoneg - configure PHY autonegotiation
2505 * @p: port info
2506 * @ks: ethtool_link_ksettings
2507 * @config: configuration that will be sent down to FW
2508 * @autoneg_enabled: autonegotiation is enabled or not
2509 * @autoneg_changed: set to 1 if the autonegotiation setting will change
2510 * @netdev: network interface device structure
2511 *
2512 * Setup PHY autonegotiation feature
2513 */
2514 static int
2515 ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
2516 struct ice_aqc_set_phy_cfg_data *config,
2517 u8 autoneg_enabled, u8 *autoneg_changed,
2518 struct net_device *netdev)
2519 {
2520 int err = 0;
2521
2522 *autoneg_changed = 0;
2523
2524 /* Check autoneg */
2525 if (autoneg_enabled == AUTONEG_ENABLE) {
2526 /* If autoneg was not already enabled */
2527 if (!(p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)) {
2528 /* If autoneg is not supported, return error */
2529 if (!ethtool_link_ksettings_test_link_mode(ks,
2530 supported,
2531 Autoneg)) {
2532 netdev_info(netdev, "Autoneg not supported on this phy.\n");
2533 err = -EINVAL;
2534 } else {
2535 /* Autoneg is allowed to change */
2536 config->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2537 *autoneg_changed = 1;
2538 }
2539 }
2540 } else {
2541 /* If autoneg is currently enabled */
2542 if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) {
2543 /* Among PHYs that support autoneg, 10GBASE_T is the
2544 * only one that may disable it, so otherwise return error
2545 */
2546 if (ethtool_link_ksettings_test_link_mode(ks,
2547 supported,
2548 Autoneg)) {
2549 netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
2550 err = -EINVAL;
2551 } else {
2552 /* Autoneg is allowed to change */
2553 config->caps &= ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2554 *autoneg_changed = 1;
2555 }
2556 }
2557 }
2558
2559 return err;
2560 }
2561
2562 /**
2563 * ice_set_phy_type_from_speed - set phy_types based on speeds
2564 * and advertised modes
2565 * @ks: ethtool link ksettings struct
2566 * @phy_type_low: pointer to the lower part of phy_type
2567 * @phy_type_high: pointer to the higher part of phy_type
2568 * @adv_link_speed: targeted link speeds bitmap
2569 */
2570 static void
2571 ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks,
2572 u64 *phy_type_low, u64 *phy_type_high,
2573 u16 adv_link_speed)
2574 {
2575 /* Handle 1000M speed in a special way because ice_update_phy_type
2576 * enables all link modes, but having mixed copper and optical
2577 * standards is not supported.
2578 */
2579 adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB;
2580
2581 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
2582 1000baseT_Full))
2583 *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T |
2584 ICE_PHY_TYPE_LOW_1G_SGMII;
2585
2586 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
2587 1000baseKX_Full))
2588 *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX;
2589
2590 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
2591 1000baseX_Full))
2592 *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX |
2593 ICE_PHY_TYPE_LOW_1000BASE_LX;
2594
2595 ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed);
2596 }
2597
2598 /**
2599 * ice_set_link_ksettings - Set Speed and Duplex
2600 * @netdev: network interface device structure
2601 * @ks: ethtool ksettings
2602 *
2603 * Set speed/duplex per media_types advertised/forced
2604 */
2605 static int
2606 ice_set_link_ksettings(struct net_device *netdev,
2607 const struct ethtool_link_ksettings *ks)
2608 {
2609 struct ice_netdev_priv *np = netdev_priv(netdev);
2610 u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT;
2611 struct ethtool_link_ksettings copy_ks = *ks;
2612 struct ethtool_link_ksettings safe_ks = {};
2613 struct ice_aqc_get_phy_caps_data *phy_caps;
2614 struct ice_aqc_set_phy_cfg_data config;
2615 u16 adv_link_speed, curr_link_speed;
2616 struct ice_pf *pf = np->vsi->back;
2617 struct ice_port_info *pi;
2618 u8 autoneg_changed = 0;
2619 u64 phy_type_high = 0;
2620 u64 phy_type_low = 0;
2621 bool linkup;
2622 int err;
2623
2624 pi = np->vsi->port_info;
2625
2626 if (!pi)
2627 return -EIO;
2628
2629 if (pi->phy.media_type != ICE_MEDIA_BASET &&
2630 pi->phy.media_type != ICE_MEDIA_FIBER &&
2631 pi->phy.media_type != ICE_MEDIA_BACKPLANE &&
2632 pi->phy.media_type != ICE_MEDIA_DA &&
2633 pi->phy.link_info.link_info & ICE_AQ_LINK_UP)
2634 return -EOPNOTSUPP;
2635
2636 phy_caps = kzalloc_obj(*phy_caps);
2637 if (!phy_caps)
2638 return -ENOMEM;
2639
2640 /* Get the PHY capabilities based on media */
2641 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2642 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2643 phy_caps, NULL);
2644 else
2645 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2646 phy_caps, NULL);
2647 if (err)
2648 goto done;
2649
2650 /* save autoneg out of ksettings */
2651 autoneg = copy_ks.base.autoneg;
2652
2653 /* Get link modes supported by hardware. */
2654 ice_phy_type_to_ethtool(netdev, &safe_ks);
2655
2656 /* and check against modes requested by user.
2657 * Return an error if unsupported mode was set.
2658 */
2659 if (!bitmap_subset(copy_ks.link_modes.advertising,
2660 safe_ks.link_modes.supported,
2661 __ETHTOOL_LINK_MODE_MASK_NBITS)) {
2662 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags))
2663 netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
2664 err = -EOPNOTSUPP;
2665 goto done;
2666 }
2667
2668 /* get our own copy of the bits to check against */
2669 memset(&safe_ks, 0, sizeof(safe_ks));
2670 safe_ks.base.cmd = copy_ks.base.cmd;
2671 safe_ks.base.link_mode_masks_nwords =
2672 copy_ks.base.link_mode_masks_nwords;
2673 ice_get_link_ksettings(netdev, &safe_ks);
2674
2675 /* set autoneg back to what it currently is */
2676 copy_ks.base.autoneg = safe_ks.base.autoneg;
2677 /* we don't compare the speed */
2678 copy_ks.base.speed = safe_ks.base.speed;
2679
2680 /* If copy_ks.base and safe_ks.base are not the same now, then they are
2681 * trying to set something that we do not support.
2682 */
2683 if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(copy_ks.base))) {
2684 err = -EOPNOTSUPP;
2685 goto done;
2686 }
2687
2688 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
2689 timeout--;
2690 if (!timeout) {
2691 err = -EBUSY;
2692 goto done;
2693 }
2694 usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX);
2695 }
2696
2697 /* Copy the current user PHY configuration. The current user PHY
2698 * configuration is initialized during probe from PHY capabilities
2699 * software mode, and updated on set PHY configuration.
2700 */
2701 config = pi->phy.curr_user_phy_cfg;
2702
2703 config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2704
2705 /* Check autoneg */
2706 err = ice_setup_autoneg(pi, &safe_ks, &config, autoneg, &autoneg_changed,
2707 netdev);
2708
2709 if (err)
2710 goto done;
2711
2712 /* Call to get the current link speed */
2713 pi->phy.get_link_info = true;
2714 err = ice_get_link_status(pi, &linkup);
2715 if (err)
2716 goto done;
2717
2718 curr_link_speed = pi->phy.curr_user_speed_req;
2719 adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
2720
2721 /* If speed didn't get set, set it to what it currently is.
2722 * This is needed because if advertise is 0 (as it is when autoneg
2723 * is disabled) then speed won't get set.
2724 */
2725 if (!adv_link_speed)
2726 adv_link_speed = curr_link_speed;
2727
2728 /* Convert the advertised link speeds to their corresponding PHY_TYPE */
2729 ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high,
2730 adv_link_speed);
2731
2732 if (!autoneg_changed && adv_link_speed == curr_link_speed) {
2733 netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
2734 goto done;
2735 }
2736
2737 /* save the requested speeds */
2738 pi->phy.link_info.req_speeds = adv_link_speed;
2739
2740 /* set link and auto negotiation so changes take effect */
2741 config.caps |= ICE_AQ_PHY_ENA_LINK;
2742
2743 /* check if there is a PHY type for the requested advertised speed */
2744 if (!(phy_type_low || phy_type_high)) {
2745 netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
2746 err = -EOPNOTSUPP;
2747 goto done;
2748 }
2749
2750 /* intersect requested advertised speed PHY types with media PHY types
2751 * for set PHY configuration
2752 */
2753 config.phy_type_high = cpu_to_le64(phy_type_high) &
2754 phy_caps->phy_type_high;
2755 config.phy_type_low = cpu_to_le64(phy_type_low) &
2756 phy_caps->phy_type_low;
2757
2758 if (!(config.phy_type_high || config.phy_type_low)) {
2759 /* If there is no intersection and lenient mode is enabled, then
2760 * intersect the requested advertised speed with NVM media type
2761 * PHY types.
2762 */
2763 if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) {
2764 config.phy_type_high = cpu_to_le64(phy_type_high) &
2765 pf->nvm_phy_type_hi;
2766 config.phy_type_low = cpu_to_le64(phy_type_low) &
2767 pf->nvm_phy_type_lo;
2768 } else {
2769 netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
2770 err = -EOPNOTSUPP;
2771 goto done;
2772 }
2773 }
2774
2775 /* If link is up put link down */
2776 if (pi->phy.link_info.link_info & ICE_AQ_LINK_UP) {
2777 /* Tell the OS link is going down, the link will go
2778 * back up when fw says it is ready asynchronously
2779 */
2780 ice_print_link_msg(np->vsi, false);
2781 netif_carrier_off(netdev);
2782 netif_tx_stop_all_queues(netdev);
2783 }
2784
2785 /* make the aq call */
2786 err = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL);
2787 if (err) {
2788 netdev_info(netdev, "Set phy config failed\n");
2789 goto done;
2790 }
2791
2792 /* Save speed request */
2793 pi->phy.curr_user_speed_req = adv_link_speed;
2794 done:
2795 kfree(phy_caps);
2796 clear_bit(ICE_CFG_BUSY, pf->state);
2797
2798 return err;
2799 }
2800
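/**
 * ice_parse_hdrs - translate an ethtool flow type into protocol header flags
 * @nfc: ethtool RSS hash fields structure
 *
 * Returns the ICE_FLOW_SEG_HDR_* bits that correspond to @nfc->flow_type,
 * or ICE_FLOW_SEG_HDR_NONE if the flow type is not supported.
 */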
2801 static u32 ice_parse_hdrs(const struct ethtool_rxfh_fields *nfc)
2802 {
2803 u32 hdrs = ICE_FLOW_SEG_HDR_NONE;
2804
2805 switch (nfc->flow_type) {
2806 case TCP_V4_FLOW:
2807 hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4;
2808 break;
2809 case UDP_V4_FLOW:
2810 hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4;
2811 break;
2812 case SCTP_V4_FLOW:
2813 hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4;
2814 break;
2815 case GTPU_V4_FLOW:
2816 hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4;
2817 break;
2818 case GTPC_V4_FLOW:
2819 hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4;
2820 break;
2821 case GTPC_TEID_V4_FLOW:
2822 hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4;
2823 break;
2824 case GTPU_EH_V4_FLOW:
2825 hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4;
2826 break;
2827 case GTPU_UL_V4_FLOW:
2828 hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4;
2829 break;
2830 case GTPU_DL_V4_FLOW:
2831 hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4;
2832 break;
2833 case TCP_V6_FLOW:
2834 hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6;
2835 break;
2836 case UDP_V6_FLOW:
2837 hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6;
2838 break;
2839 case SCTP_V6_FLOW:
2840 hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6;
2841 break;
2842 case GTPU_V6_FLOW:
2843 hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6;
2844 break;
2845 case GTPC_V6_FLOW:
2846 hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6;
2847 break;
2848 case GTPC_TEID_V6_FLOW:
2849 hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6;
2850 break;
2851 case GTPU_EH_V6_FLOW:
2852 hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6;
2853 break;
2854 case GTPU_UL_V6_FLOW:
2855 hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6;
2856 break;
2857 case GTPU_DL_V6_FLOW:
2858 hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6;
2859 break;
2860 default:
2861 break;
2862 }
2863 return hdrs;
2864 }
2865
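/**
 * ice_parse_hash_flds - translate ethtool RXH_* options into flow hash fields
 * @nfc: ethtool RSS hash fields structure
 * @symm: true when symmetric Toeplitz hashing is configured
 *
 * Returns the ICE_FLOW_HASH_FLD_* bitmap that matches the RXH_* bits set
 * in @nfc->data for @nfc->flow_type, or ICE_HASH_INVALID if none match.
 */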
2866 static u64 ice_parse_hash_flds(const struct ethtool_rxfh_fields *nfc, bool symm)
2867 {
2868 u64 hfld = ICE_HASH_INVALID;
2869
2870 if (nfc->data & RXH_IP_SRC || nfc->data & RXH_IP_DST) {
2871 switch (nfc->flow_type) {
2872 case TCP_V4_FLOW:
2873 case UDP_V4_FLOW:
2874 case SCTP_V4_FLOW:
2875 case GTPU_V4_FLOW:
2876 case GTPC_V4_FLOW:
2877 case GTPC_TEID_V4_FLOW:
2878 case GTPU_EH_V4_FLOW:
2879 case GTPU_UL_V4_FLOW:
2880 case GTPU_DL_V4_FLOW:
2881 if (nfc->data & RXH_IP_SRC)
2882 hfld |= ICE_FLOW_HASH_FLD_IPV4_SA;
2883 if (nfc->data & RXH_IP_DST)
2884 hfld |= ICE_FLOW_HASH_FLD_IPV4_DA;
2885 break;
2886 case TCP_V6_FLOW:
2887 case UDP_V6_FLOW:
2888 case SCTP_V6_FLOW:
2889 case GTPU_V6_FLOW:
2890 case GTPC_V6_FLOW:
2891 case GTPC_TEID_V6_FLOW:
2892 case GTPU_EH_V6_FLOW:
2893 case GTPU_UL_V6_FLOW:
2894 case GTPU_DL_V6_FLOW:
2895 if (nfc->data & RXH_IP_SRC)
2896 hfld |= ICE_FLOW_HASH_FLD_IPV6_SA;
2897 if (nfc->data & RXH_IP_DST)
2898 hfld |= ICE_FLOW_HASH_FLD_IPV6_DA;
2899 break;
2900 default:
2901 break;
2902 }
2903 }
2904
2905 if (nfc->data & RXH_L4_B_0_1 || nfc->data & RXH_L4_B_2_3) {
2906 switch (nfc->flow_type) {
2907 case TCP_V4_FLOW:
2908 case TCP_V6_FLOW:
2909 if (nfc->data & RXH_L4_B_0_1)
2910 hfld |= ICE_FLOW_HASH_FLD_TCP_SRC_PORT;
2911 if (nfc->data & RXH_L4_B_2_3)
2912 hfld |= ICE_FLOW_HASH_FLD_TCP_DST_PORT;
2913 break;
2914 case UDP_V4_FLOW:
2915 case UDP_V6_FLOW:
2916 if (nfc->data & RXH_L4_B_0_1)
2917 hfld |= ICE_FLOW_HASH_FLD_UDP_SRC_PORT;
2918 if (nfc->data & RXH_L4_B_2_3)
2919 hfld |= ICE_FLOW_HASH_FLD_UDP_DST_PORT;
2920 break;
2921 case SCTP_V4_FLOW:
2922 case SCTP_V6_FLOW:
2923 if (nfc->data & RXH_L4_B_0_1)
2924 hfld |= ICE_FLOW_HASH_FLD_SCTP_SRC_PORT;
2925 if (nfc->data & RXH_L4_B_2_3)
2926 hfld |= ICE_FLOW_HASH_FLD_SCTP_DST_PORT;
2927 break;
2928 default:
2929 break;
2930 }
2931 }
2932
2933 if (nfc->data & RXH_GTP_TEID) {
2934 switch (nfc->flow_type) {
2935 case GTPC_TEID_V4_FLOW:
2936 case GTPC_TEID_V6_FLOW:
2937 hfld |= ICE_FLOW_HASH_FLD_GTPC_TEID;
2938 break;
2939 case GTPU_V4_FLOW:
2940 case GTPU_V6_FLOW:
2941 hfld |= ICE_FLOW_HASH_FLD_GTPU_IP_TEID;
2942 break;
2943 case GTPU_EH_V4_FLOW:
2944 case GTPU_EH_V6_FLOW:
2945 hfld |= ICE_FLOW_HASH_FLD_GTPU_EH_TEID;
2946 break;
2947 case GTPU_UL_V4_FLOW:
2948 case GTPU_UL_V6_FLOW:
2949 hfld |= ICE_FLOW_HASH_FLD_GTPU_UP_TEID;
2950 break;
2951 case GTPU_DL_V4_FLOW:
2952 case GTPU_DL_V6_FLOW:
2953 hfld |= ICE_FLOW_HASH_FLD_GTPU_DWN_TEID;
2954 break;
2955 default:
2956 break;
2957 }
2958 }
2959
2960 return hfld;
2961 }
2962
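/**
 * ice_set_rxfh_fields - set the RSS hash fields for a flow type
 * @netdev: network interface device structure
 * @nfc: ethtool RSS hash fields to configure
 * @extack: netlink extended ACK for error reporting
 *
 * Translates the requested RXH_* options (e.g. from
 * "ethtool -N <dev> rx-flow-hash tcp4 sdfn") into an RSS hash
 * configuration and programs it with ice_add_rss_cfg().
 *
 * Returns 0 on success and a negative value on error.
 */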
2963 static int
2964 ice_set_rxfh_fields(struct net_device *netdev,
2965 const struct ethtool_rxfh_fields *nfc,
2966 struct netlink_ext_ack *extack)
2967 {
2968 struct ice_netdev_priv *np = netdev_priv(netdev);
2969 struct ice_vsi *vsi = np->vsi;
2970 struct ice_pf *pf = vsi->back;
2971 struct ice_rss_hash_cfg cfg;
2972 struct device *dev;
2973 u64 hashed_flds;
2974 int status;
2975 bool symm;
2976 u32 hdrs;
2977
2978 dev = ice_pf_to_dev(pf);
2979 if (ice_is_safe_mode(pf)) {
2980 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
2981 vsi->vsi_num);
2982 return -EINVAL;
2983 }
2984
2985 symm = !!(vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
2986 hashed_flds = ice_parse_hash_flds(nfc, symm);
2987 if (hashed_flds == ICE_HASH_INVALID) {
2988 dev_dbg(dev, "Invalid hash fields, vsi num = %d\n",
2989 vsi->vsi_num);
2990 return -EINVAL;
2991 }
2992
2993 hdrs = ice_parse_hdrs(nfc);
2994 if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
2995 dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
2996 vsi->vsi_num);
2997 return -EINVAL;
2998 }
2999
3000 cfg.hash_flds = hashed_flds;
3001 cfg.addl_hdrs = hdrs;
3002 cfg.hdr_type = ICE_RSS_ANY_HEADERS;
3003 cfg.symm = symm;
3004
3005 status = ice_add_rss_cfg(&pf->hw, vsi, &cfg);
3006 if (status) {
3007 dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
3008 vsi->vsi_num, status);
3009 return status;
3010 }
3011
3012 return 0;
3013 }
3014
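/**
 * ice_get_rxfh_fields - report the RSS hash fields for a flow type
 * @netdev: network interface device structure
 * @nfc: ethtool RSS hash fields structure to fill in
 *
 * Reports which RXH_* fields are currently hashed for @nfc->flow_type
 * (e.g. for "ethtool -n <dev> rx-flow-hash tcp4"). Always returns 0;
 * @nfc->data is left at zero when the query cannot be answered.
 */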
3015 static int
3016 ice_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *nfc)
3017 {
3018 struct ice_netdev_priv *np = netdev_priv(netdev);
3019 struct ice_vsi *vsi = np->vsi;
3020 struct ice_pf *pf = vsi->back;
3021 struct device *dev;
3022 u64 hash_flds;
3023 bool symm;
3024 u32 hdrs;
3025
3026 dev = ice_pf_to_dev(pf);
3027
3028 nfc->data = 0;
3029 if (ice_is_safe_mode(pf)) {
3030 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
3031 vsi->vsi_num);
3032 return 0;
3033 }
3034
3035 hdrs = ice_parse_hdrs(nfc);
3036 if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
3037 dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
3038 vsi->vsi_num);
3039 return 0;
3040 }
3041
3042 hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm);
3043 if (hash_flds == ICE_HASH_INVALID) {
3044 dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
3045 vsi->vsi_num);
3046 return 0;
3047 }
3048
3049 if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_SA ||
3050 hash_flds & ICE_FLOW_HASH_FLD_IPV6_SA)
3051 nfc->data |= (u64)RXH_IP_SRC;
3052
3053 if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_DA ||
3054 hash_flds & ICE_FLOW_HASH_FLD_IPV6_DA)
3055 nfc->data |= (u64)RXH_IP_DST;
3056
3057 if (hash_flds & ICE_FLOW_HASH_FLD_TCP_SRC_PORT ||
3058 hash_flds & ICE_FLOW_HASH_FLD_UDP_SRC_PORT ||
3059 hash_flds & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT)
3060 nfc->data |= (u64)RXH_L4_B_0_1;
3061
3062 if (hash_flds & ICE_FLOW_HASH_FLD_TCP_DST_PORT ||
3063 hash_flds & ICE_FLOW_HASH_FLD_UDP_DST_PORT ||
3064 hash_flds & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)
3065 nfc->data |= (u64)RXH_L4_B_2_3;
3066
3067 if (hash_flds & ICE_FLOW_HASH_FLD_GTPC_TEID ||
3068 hash_flds & ICE_FLOW_HASH_FLD_GTPU_IP_TEID ||
3069 hash_flds & ICE_FLOW_HASH_FLD_GTPU_EH_TEID ||
3070 hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID ||
3071 hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID)
3072 nfc->data |= (u64)RXH_GTP_TEID;
3073
3074 return 0;
3075 }
3076
3077 /**
3078 * ice_set_rxnfc - command to set Rx flow rules.
3079 * @netdev: network interface device structure
3080 * @cmd: ethtool rxnfc command
3081 *
3082 * Returns 0 for success and negative values for errors
3083 */
3084 static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
3085 {
3086 struct ice_netdev_priv *np = netdev_priv(netdev);
3087 struct ice_vsi *vsi = np->vsi;
3088
3089 switch (cmd->cmd) {
3090 case ETHTOOL_SRXCLSRLINS:
3091 return ice_add_fdir_ethtool(vsi, cmd);
3092 case ETHTOOL_SRXCLSRLDEL:
3093 return ice_del_fdir_ethtool(vsi, cmd);
3094 default:
3095 break;
3096 }
3097 return -EOPNOTSUPP;
3098 }
3099
3100 /**
3101 * ice_get_rx_ring_count - get RX ring count
3102 * @netdev: network interface device structure
3103 *
3104 * Return: number of RX rings.
3105 */
3106 static u32 ice_get_rx_ring_count(struct net_device *netdev)
3107 {
3108 struct ice_netdev_priv *np = netdev_priv(netdev);
3109 struct ice_vsi *vsi = np->vsi;
3110
3111 return vsi->rss_size;
3112 }
3113
3114 /**
3115 * ice_get_rxnfc - command to get Rx flow classification rules
3116 * @netdev: network interface device structure
3117 * @cmd: ethtool rxnfc command
3118 * @rule_locs: buffer to return Rx flow classification rules
3119 *
3120 * Returns 0 for success and negative values for errors.
3121 */
3122 static int
3123 ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
3124 u32 __always_unused *rule_locs)
3125 {
3126 struct ice_netdev_priv *np = netdev_priv(netdev);
3127 struct ice_vsi *vsi = np->vsi;
3128 int ret = -EOPNOTSUPP;
3129 struct ice_hw *hw;
3130
3131 hw = &vsi->back->hw;
3132
3133 switch (cmd->cmd) {
3134 case ETHTOOL_GRXCLSRLCNT:
3135 cmd->rule_cnt = hw->fdir_active_fltr;
3136 /* report total rule count */
3137 cmd->data = ice_get_fdir_cnt_all(hw);
3138 ret = 0;
3139 break;
3140 case ETHTOOL_GRXCLSRULE:
3141 ret = ice_get_ethtool_fdir_entry(hw, cmd);
3142 break;
3143 case ETHTOOL_GRXCLSRLALL:
3144 ret = ice_get_fdir_fltr_ids(hw, cmd, (u32 *)rule_locs);
3145 break;
3146 default:
3147 break;
3148 }
3149
3150 return ret;
3151 }
3152
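/**
 * ice_get_ringparam - report ring sizes for "ethtool -g"
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure to fill in
 * @kernel_ring: kernel ringparam structure (reports TCP data split state)
 * @extack: netlink extended ACK (unused)
 */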
3153 static void
3154 ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
3155 struct kernel_ethtool_ringparam *kernel_ring,
3156 struct netlink_ext_ack *extack)
3157 {
3158 struct ice_netdev_priv *np = netdev_priv(netdev);
3159 struct ice_vsi *vsi = np->vsi;
3160 struct ice_hw *hw;
3161
3162 hw = &vsi->back->hw;
3163 ring->rx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
3164 ring->tx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
3165 if (vsi->tx_rings && vsi->rx_rings) {
3166 ring->rx_pending = vsi->rx_rings[0]->count;
3167 ring->tx_pending = vsi->tx_rings[0]->count;
3168 } else {
3169 ring->rx_pending = 0;
3170 ring->tx_pending = 0;
3171 }
3172
3173 /* Rx mini and jumbo rings are not supported */
3174 ring->rx_mini_max_pending = 0;
3175 ring->rx_jumbo_max_pending = 0;
3176 ring->rx_mini_pending = 0;
3177 ring->rx_jumbo_pending = 0;
3178
3179 kernel_ring->tcp_data_split = vsi->hsplit ?
3180 ETHTOOL_TCP_DATA_SPLIT_ENABLED :
3181 ETHTOOL_TCP_DATA_SPLIT_DISABLED;
3182 }
3183
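/**
 * ice_set_ringparam - change Tx/Rx ring sizes for "ethtool -G"
 * @netdev: network interface device structure
 * @ring: requested descriptor counts
 * @kernel_ring: kernel ringparam structure (TCP data split request)
 * @extack: netlink extended ACK for error reporting
 *
 * Requested counts are rounded up to a multiple of ICE_REQ_DESC_MULTIPLE.
 * New rings are allocated before the old ones are freed so that a failed
 * resize leaves the interface usable.
 *
 * Returns 0 on success and a negative value on error.
 */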
3184 static int
3185 ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
3186 struct kernel_ethtool_ringparam *kernel_ring,
3187 struct netlink_ext_ack *extack)
3188 {
3189 struct ice_netdev_priv *np = netdev_priv(netdev);
3190 struct ice_tx_ring *xdp_rings = NULL;
3191 struct ice_tx_ring *tx_rings = NULL;
3192 struct ice_rx_ring *rx_rings = NULL;
3193 struct ice_vsi *vsi = np->vsi;
3194 struct ice_pf *pf = vsi->back;
3195 int i, timeout = 50, err = 0;
3196 struct ice_hw *hw = &pf->hw;
3197 u16 new_rx_cnt, new_tx_cnt;
3198 bool hsplit;
3199
3200 if (ring->tx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
3201 ring->tx_pending < ICE_MIN_NUM_DESC ||
3202 ring->rx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
3203 ring->rx_pending < ICE_MIN_NUM_DESC) {
3204 netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
3205 ring->tx_pending, ring->rx_pending,
3206 ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC_BY_MAC(hw),
3207 ICE_REQ_DESC_MULTIPLE);
3208 return -EINVAL;
3209 }
3210
3211 /* Return if there are no rings (device is reloading) */
3212 if (!vsi->tx_rings || !vsi->rx_rings)
3213 return -EBUSY;
3214
3215 new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
3216 if (new_tx_cnt != ring->tx_pending)
3217 netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
3218 new_tx_cnt);
3219 new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
3220 if (new_rx_cnt != ring->rx_pending)
3221 netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
3222 new_rx_cnt);
3223
3224 hsplit = kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
3225
3226 /* if nothing to do return success */
3227 if (new_tx_cnt == vsi->tx_rings[0]->count &&
3228 new_rx_cnt == vsi->rx_rings[0]->count &&
3229 hsplit == vsi->hsplit) {
3230 netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
3231 return 0;
3232 }
3233
3234 /* If an AF_XDP UMEM is attached to any of the Rx rings,
3235 * disallow changing the number of descriptors -- regardless
3236 * of whether the netdev is running or not.
3237 */
3238 if (ice_xsk_any_rx_ring_ena(vsi))
3239 return -EBUSY;
3240
3241 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3242 timeout--;
3243 if (!timeout)
3244 return -EBUSY;
3245 usleep_range(1000, 2000);
3246 }
3247
3248 /* set for the next time the netdev is started */
3249 if (!netif_running(vsi->netdev)) {
3250 ice_for_each_alloc_txq(vsi, i)
3251 vsi->tx_rings[i]->count = new_tx_cnt;
3252 ice_for_each_alloc_rxq(vsi, i)
3253 vsi->rx_rings[i]->count = new_rx_cnt;
3254 if (ice_is_xdp_ena_vsi(vsi))
3255 ice_for_each_xdp_txq(vsi, i)
3256 vsi->xdp_rings[i]->count = new_tx_cnt;
3257 vsi->num_tx_desc = (u16)new_tx_cnt;
3258 vsi->num_rx_desc = (u16)new_rx_cnt;
3259 vsi->hsplit = hsplit;
3260
3261 netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
3262 goto done;
3263 }
3264
3265 if (new_tx_cnt == vsi->tx_rings[0]->count)
3266 goto process_rx;
3267
3268 /* alloc updated Tx resources */
3269 netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
3270 vsi->tx_rings[0]->count, new_tx_cnt);
3271
3272 tx_rings = kzalloc_objs(*tx_rings, vsi->num_txq);
3273 if (!tx_rings) {
3274 err = -ENOMEM;
3275 goto done;
3276 }
3277
3278 ice_for_each_txq(vsi, i) {
3279 /* clone ring and setup updated count */
3280 tx_rings[i] = *vsi->tx_rings[i];
3281 tx_rings[i].count = new_tx_cnt;
3282 tx_rings[i].desc = NULL;
3283 tx_rings[i].tx_buf = NULL;
3284 tx_rings[i].tstamp_ring = NULL;
3285 tx_rings[i].tx_tstamps = &pf->ptp.port.tx;
3286 err = ice_setup_tx_ring(&tx_rings[i]);
3287 if (err) {
3288 while (i--)
3289 ice_clean_tx_ring(&tx_rings[i]);
3290 kfree(tx_rings);
3291 goto done;
3292 }
3293 }
3294
3295 if (!ice_is_xdp_ena_vsi(vsi))
3296 goto process_rx;
3297
3298 /* alloc updated XDP resources */
3299 netdev_info(netdev, "Changing XDP descriptor count from %d to %d\n",
3300 vsi->xdp_rings[0]->count, new_tx_cnt);
3301
3302 xdp_rings = kzalloc_objs(*xdp_rings, vsi->num_xdp_txq);
3303 if (!xdp_rings) {
3304 err = -ENOMEM;
3305 goto free_tx;
3306 }
3307
3308 ice_for_each_xdp_txq(vsi, i) {
3309 /* clone ring and setup updated count */
3310 xdp_rings[i] = *vsi->xdp_rings[i];
3311 xdp_rings[i].count = new_tx_cnt;
3312 xdp_rings[i].desc = NULL;
3313 xdp_rings[i].tx_buf = NULL;
3314 err = ice_setup_tx_ring(&xdp_rings[i]);
3315 if (err) {
3316 while (i--)
3317 ice_clean_tx_ring(&xdp_rings[i]);
3318 kfree(xdp_rings);
3319 goto free_tx;
3320 }
3321 ice_set_ring_xdp(&xdp_rings[i]);
3322 }
3323
3324 process_rx:
3325 if (new_rx_cnt == vsi->rx_rings[0]->count)
3326 goto process_link;
3327
3328 /* alloc updated Rx resources */
3329 netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
3330 vsi->rx_rings[0]->count, new_rx_cnt);
3331
3332 rx_rings = kzalloc_objs(*rx_rings, vsi->num_rxq);
3333 if (!rx_rings) {
3334 err = -ENOMEM;
3335 goto free_xdp;
3336 }
3337
3338 ice_for_each_rxq(vsi, i) {
3339 /* clone ring and setup updated count */
3340 rx_rings[i] = *vsi->rx_rings[i];
3341 rx_rings[i].count = new_rx_cnt;
3342 rx_rings[i].cached_phctime = pf->ptp.cached_phc_time;
3343 rx_rings[i].desc = NULL;
3344 rx_rings[i].xdp_buf = NULL;
3345 rx_rings[i].xdp_rxq = (struct xdp_rxq_info){ };
3346
3347 /* this is to allow wr32 to have something to write to
3348 * during early allocation of Rx buffers
3349 */
3350 rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS;
3351
3352 err = ice_setup_rx_ring(&rx_rings[i]);
3353 if (err) {
3357 while (i) {
3358 i--;
3359 ice_free_rx_ring(&rx_rings[i]);
3360 }
3361 kfree(rx_rings);
3362 err = -ENOMEM;
3363 goto free_xdp;
3364 }
3365 }
3366
3367 process_link:
3368 vsi->hsplit = hsplit;
3369
3370 /* Bring interface down, copy in the new ring info, then restore the
3371 * interface. If VSI is up, bring it down and then back up
3372 */
3373 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
3374 ice_down(vsi);
3375
3376 if (tx_rings) {
3377 ice_for_each_txq(vsi, i) {
3378 ice_free_tx_ring(vsi->tx_rings[i]);
3379 *vsi->tx_rings[i] = tx_rings[i];
3380 }
3381 kfree(tx_rings);
3382 }
3383
3384 if (rx_rings) {
3385 ice_for_each_rxq(vsi, i) {
3386 ice_free_rx_ring(vsi->rx_rings[i]);
3387 /* copy the real tail offset */
3388 rx_rings[i].tail = vsi->rx_rings[i]->tail;
3389 /* this is to fake out the allocation routine
3390 * into thinking it has to realloc everything
3391 * but the recycling logic will let us re-use
3392 * the buffers allocated above
3393 */
3394 rx_rings[i].next_to_use = 0;
3395 rx_rings[i].next_to_clean = 0;
3396 *vsi->rx_rings[i] = rx_rings[i];
3397 }
3398 kfree(rx_rings);
3399 }
3400
3401 if (xdp_rings) {
3402 ice_for_each_xdp_txq(vsi, i) {
3403 ice_free_tx_ring(vsi->xdp_rings[i]);
3404 *vsi->xdp_rings[i] = xdp_rings[i];
3405 }
3406 kfree(xdp_rings);
3407 }
3408
3409 vsi->num_tx_desc = new_tx_cnt;
3410 vsi->num_rx_desc = new_rx_cnt;
3411 ice_up(vsi);
3412 }
3413 goto done;
3414
3415 free_xdp:
3416 if (xdp_rings) {
3417 ice_for_each_xdp_txq(vsi, i)
3418 ice_free_tx_ring(&xdp_rings[i]);
3419 kfree(xdp_rings);
3420 }
3421
3422 free_tx:
3423 /* error cleanup if the Rx allocations failed after getting Tx */
3424 if (tx_rings) {
3425 ice_for_each_txq(vsi, i)
3426 ice_free_tx_ring(&tx_rings[i]);
3427 kfree(tx_rings);
3428 }
3429
3430 done:
3431 clear_bit(ICE_CFG_BUSY, pf->state);
3432 return err;
3433 }
3434
3435 /**
3436 * ice_get_pauseparam - Get Flow Control status
3437 * @netdev: network interface device structure
3438 * @pause: ethernet pause (flow control) parameters
3439 *
3440 * Get requested flow control status from PHY capability.
3441 * If autoneg is true, then ethtool will send the ETHTOOL_GSET ioctl which
3442 * is handled by ice_get_link_ksettings. ice_get_link_ksettings will report
3443 * the negotiated Rx/Tx pause via lp_advertising.
3444 */
3445 static void
3446 ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
3447 {
3448 struct ice_netdev_priv *np = netdev_priv(netdev);
3449 struct ice_port_info *pi = np->vsi->port_info;
3450 struct ice_aqc_get_phy_caps_data *pcaps;
3451 struct ice_dcbx_cfg *dcbx_cfg;
3452 int status;
3453
3454 /* Initialize pause params */
3455 pause->rx_pause = 0;
3456 pause->tx_pause = 0;
3457
3458 dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
3459
3460 pcaps = kzalloc_obj(*pcaps);
3461 if (!pcaps)
3462 return;
3463
3464 /* Get current PHY config */
3465 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
3466 NULL);
3467 if (status)
3468 goto out;
3469
3470 pause->autoneg = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
3471 AUTONEG_DISABLE;
3472
3473 if (dcbx_cfg->pfc.pfcena)
3474 /* PFC enabled so report LFC as off */
3475 goto out;
3476
3477 if (pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3478 pause->tx_pause = 1;
3479 if (pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3480 pause->rx_pause = 1;
3481
3482 out:
3483 kfree(pcaps);
3484 }
3485
3486 /**
3487 * ice_set_pauseparam - Set Flow Control parameter
3488 * @netdev: network interface device structure
3489 * @pause: requested Ethernet pause (flow control) parameters
3490 */
3491 static int
3492 ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
3493 {
3494 struct ice_netdev_priv *np = netdev_priv(netdev);
3495 struct ice_aqc_get_phy_caps_data *pcaps;
3496 struct ice_link_status *hw_link_info;
3497 struct ice_pf *pf = np->vsi->back;
3498 struct ice_dcbx_cfg *dcbx_cfg;
3499 struct ice_vsi *vsi = np->vsi;
3500 struct ice_hw *hw = &pf->hw;
3501 struct ice_port_info *pi;
3502 u8 aq_failures;
3503 bool link_up;
3504 u32 is_an;
3505 int err;
3506
3507 pi = vsi->port_info;
3508 hw_link_info = &pi->phy.link_info;
3509 dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
3510 link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
3511
3512 /* Changing the port's flow control is not supported if this isn't the
3513 * PF VSI
3514 */
3515 if (vsi->type != ICE_VSI_PF) {
3516 netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n");
3517 return -EOPNOTSUPP;
3518 }
3519
3520 /* Get pause param reports the configured and negotiated flow control
3521 * pause when ETHTOOL_GLINKSETTINGS is defined. Since ETHTOOL_GLINKSETTINGS
3522 * is defined, get pause param's pause->autoneg reports the SW-configured
3523 * setting, so compare pause->autoneg with the SW-configured value to
3524 * prevent the user from using set pause param to change autoneg.
3525 */
3526 pcaps = kzalloc_obj(*pcaps);
3527 if (!pcaps)
3528 return -ENOMEM;
3529
3530 /* Get current PHY config */
3531 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
3532 NULL);
3533 if (err) {
3534 kfree(pcaps);
3535 return err;
3536 }
3537
3538 is_an = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
3539 AUTONEG_DISABLE;
3540
3541 kfree(pcaps);
3542
3543 if (pause->autoneg != is_an) {
3544 netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
3545 return -EOPNOTSUPP;
3546 }
3547
3548 /* If we have link and don't have autoneg */
3549 if (!test_bit(ICE_DOWN, pf->state) &&
3550 !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
3551 /* Send a message that it might not necessarily work */
3552 netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
3553 }
3554
3555 if (dcbx_cfg->pfc.pfcena) {
3556 netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n");
3557 return -EOPNOTSUPP;
3558 }
3559 if (pause->rx_pause && pause->tx_pause)
3560 pi->fc.req_mode = ICE_FC_FULL;
3561 else if (pause->rx_pause && !pause->tx_pause)
3562 pi->fc.req_mode = ICE_FC_RX_PAUSE;
3563 else if (!pause->rx_pause && pause->tx_pause)
3564 pi->fc.req_mode = ICE_FC_TX_PAUSE;
3565 else if (!pause->rx_pause && !pause->tx_pause)
3566 pi->fc.req_mode = ICE_FC_NONE;
3567 else
3568 return -EINVAL;
3569
3570 /* Set the FC mode and only restart AN if link is up */
3571 err = ice_set_fc(pi, &aq_failures, link_up);
3572
3573 if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
3574 netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
3575 err, libie_aq_str(hw->adminq.sq_last_status));
3576 err = -EAGAIN;
3577 } else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
3578 netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
3579 err, libie_aq_str(hw->adminq.sq_last_status));
3580 err = -EAGAIN;
3581 } else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
3582 netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
3583 err, libie_aq_str(hw->adminq.sq_last_status));
3584 err = -EAGAIN;
3585 }
3586
3587 return err;
3588 }
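/* Example (illustrative only): exercising the two pause handlers above from
 * userspace via the legacy ioctl (headers as in the ring-resize sketch
 * earlier); "eth0" and the missing error handling are assumptions.
 *
 *	struct ethtool_pauseparam pp = { .cmd = ETHTOOL_GPAUSEPARAM };
 *	struct ifreq ifr = {};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&pp;
 *	ioctl(fd, SIOCETHTOOL, &ifr);	// ice_get_pauseparam()
 *	pp.cmd = ETHTOOL_SPAUSEPARAM;
 *	pp.rx_pause = 1;
 *	pp.tx_pause = 0;		// maps to ICE_FC_RX_PAUSE above
 *	ioctl(fd, SIOCETHTOOL, &ifr);	// ice_set_pauseparam()
 */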
3589
3590 /**
3591 * ice_get_rxfh_key_size - get the RSS hash key size
3592 * @netdev: network interface device structure
3593 *
3594 * Returns the RSS hash key size.
3595 */
3596 static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
3597 {
3598 return ICE_VSIQF_HKEY_ARRAY_SIZE;
3599 }
3600
3601 /**
3602 * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
3603 * @netdev: network interface device structure
3604 *
3605 * Returns the table size.
3606 */
3607 static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
3608 {
3609 struct ice_netdev_priv *np = netdev_priv(netdev);
3610
3611 return np->vsi->rss_table_size;
3612 }
3613
3614 /**
3615 * ice_get_rxfh - get the Rx flow hash indirection table
3616 * @netdev: network interface device structure
3617 * @rxfh: pointer to param struct (indir, key, hfunc)
3618 *
3619 * Reads the indirection table directly from the hardware.
3620 */
3621 static int
3622 ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
3623 {
3624 struct ice_netdev_priv *np = netdev_priv(netdev);
3625 struct ice_vsi *vsi = np->vsi;
3626 struct ice_pf *pf = vsi->back;
3627 u16 qcount, offset;
3628 int err, i;
3629 u8 *lut;
3630
3631 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
3632 netdev_warn(netdev, "RSS is not supported on this VSI!\n");
3633 return -EOPNOTSUPP;
3634 }
3635
3636 qcount = vsi->mqprio_qopt.qopt.count[0];
3637 offset = vsi->mqprio_qopt.qopt.offset[0];
3638
3639 rxfh->hfunc = ETH_RSS_HASH_TOP;
3640 if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
3641 rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
3642
3643 if (!rxfh->indir)
3644 return 0;
3645
3646 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
3647 if (!lut)
3648 return -ENOMEM;
3649
3650 err = ice_get_rss(vsi, rxfh->key, lut, vsi->rss_table_size);
3651 if (err)
3652 goto out;
3653
3654 if (ice_is_adq_active(pf)) {
3655 for (i = 0; i < vsi->rss_table_size; i++)
3656 rxfh->indir[i] = offset + lut[i] % qcount;
3657 goto out;
3658 }
3659
3660 for (i = 0; i < vsi->rss_table_size; i++)
3661 rxfh->indir[i] = lut[i];
3662
3663 out:
3664 kfree(lut);
3665 return err;
3666 }
3667
3668 /**
3669 * ice_set_rxfh - set the Rx flow hash indirection table
3670 * @netdev: network interface device structure
3671 * @rxfh: pointer to param struct (indir, key, hfunc)
3672 * @extack: extended ACK from the Netlink message
3673 *
3674 * Returns -EINVAL if the table specifies an invalid queue ID, otherwise
3675 * returns 0 after programming the table.
3676 */
3677 static int
3678 ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
3679 struct netlink_ext_ack *extack)
3680 {
3681 struct ice_netdev_priv *np = netdev_priv(netdev);
3682 u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
3683 struct ice_vsi *vsi = np->vsi;
3684 struct ice_pf *pf = vsi->back;
3685 struct device *dev;
3686 int err;
3687
3688 dev = ice_pf_to_dev(pf);
3689 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
3690 rxfh->hfunc != ETH_RSS_HASH_TOP)
3691 return -EOPNOTSUPP;
3692
3693 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
3694 /* RSS is not supported; return an error */
3695 netdev_warn(netdev, "RSS is not configured on this VSI!\n");
3696 return -EIO;
3697 }
3698
3699 if (ice_is_adq_active(pf)) {
3700 netdev_err(netdev, "Cannot change RSS params with ADQ configured.\n");
3701 return -EOPNOTSUPP;
3702 }
3703
3704 /* Update the VSI's hash function */
3705 if (rxfh->input_xfrm & RXH_XFRM_SYM_XOR)
3706 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
3707
3708 err = ice_set_rss_hfunc(vsi, hfunc);
3709 if (err)
3710 return err;
3711
3712 if (rxfh->key) {
3713 if (!vsi->rss_hkey_user) {
3714 vsi->rss_hkey_user =
3715 devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE,
3716 GFP_KERNEL);
3717 if (!vsi->rss_hkey_user)
3718 return -ENOMEM;
3719 }
3720 memcpy(vsi->rss_hkey_user, rxfh->key,
3721 ICE_VSIQF_HKEY_ARRAY_SIZE);
3722
3723 err = ice_set_rss_key(vsi, vsi->rss_hkey_user);
3724 if (err)
3725 return err;
3726 }
3727
3728 if (!vsi->rss_lut_user) {
3729 vsi->rss_lut_user = devm_kzalloc(dev, vsi->rss_table_size,
3730 GFP_KERNEL);
3731 if (!vsi->rss_lut_user)
3732 return -ENOMEM;
3733 }
3734
3735 /* Each 32-bit entry pointed to by 'indir' is stored as an 8-bit LUT entry */
3736 if (rxfh->indir) {
3737 int i;
3738
3739 for (i = 0; i < vsi->rss_table_size; i++)
3740 vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]);
3741 } else {
3742 ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
3743 vsi->rss_size);
3744 }
3745
3746 err = ice_set_rss_lut(vsi, vsi->rss_lut_user, vsi->rss_table_size);
3747 if (err)
3748 return err;
3749
3750 return 0;
3751 }
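/* Example (illustrative only): reading back the RSS configuration exposed by
 * ice_get_rxfh(). ETHTOOL_GRSSH is issued twice: first with zero sizes to
 * learn indir_size/key_size, then with a buffer big enough for both.
 * Assumes "eth0"; error handling omitted, <stdlib.h> needed for calloc.
 *
 *	struct ethtool_rxfh probe = { .cmd = ETHTOOL_GRSSH };
 *	struct ethtool_rxfh *rss;
 *	struct ifreq ifr = {};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&probe;
 *	ioctl(fd, SIOCETHTOOL, &ifr);	// kernel fills indir_size and key_size
 *
 *	rss = calloc(1, sizeof(*rss) +
 *		     probe.indir_size * sizeof(__u32) + probe.key_size);
 *	rss->cmd = ETHTOOL_GRSSH;
 *	rss->indir_size = probe.indir_size;
 *	rss->key_size = probe.key_size;
 *	ifr.ifr_data = (void *)rss;
 *	ioctl(fd, SIOCETHTOOL, &ifr);	// indir table then key in rss_config[]
 */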
3752
3753 static int
3754 ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
3755 {
3756 struct ice_pf *pf = ice_netdev_to_pf(dev);
3757
3758 /* only report timestamping if PTP is enabled */
3759 if (pf->ptp.state != ICE_PTP_READY)
3760 return ethtool_op_get_ts_info(dev, info);
3761
3762 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
3763 SOF_TIMESTAMPING_TX_HARDWARE |
3764 SOF_TIMESTAMPING_RX_HARDWARE |
3765 SOF_TIMESTAMPING_RAW_HARDWARE;
3766
3767 info->phc_index = ice_ptp_clock_index(pf);
3768
3769 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
3770
3771 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
3772
3773 return 0;
3774 }
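/* Example (illustrative only): the tx_types/rx_filters advertised above are
 * what a SIOCSHWTSTAMP request can ask for. A minimal sketch enabling Tx
 * and Rx hardware timestamps (add <linux/net_tstamp.h> to the headers from
 * the earlier sketches); "eth0" is an assumption.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = {};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */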
3775
3776 /**
3777 * ice_get_max_txq - return the maximum number of Tx queues in a PF
3778 * @pf: PF structure
3779 */
3780 static int ice_get_max_txq(struct ice_pf *pf)
3781 {
3782 return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_txq);
3783 }
3784
3785 /**
3786 * ice_get_max_rxq - return the maximum number of Rx queues in a PF
3787 * @pf: PF structure
3788 */
3789 static int ice_get_max_rxq(struct ice_pf *pf)
3790 {
3791 return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_rxq);
3792 }
3793
3794 /**
3795 * ice_get_combined_cnt - return the current number of combined channels
3796 * @vsi: PF VSI pointer
3797 *
3798 * Go through all queue vectors and count ones that have both Rx and Tx ring
3799 * attached
3800 */
3801 static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
3802 {
3803 u32 combined = 0;
3804 int q_idx;
3805
3806 ice_for_each_q_vector(vsi, q_idx) {
3807 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
3808
3809 combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx);
3810 }
3811
3812 return combined;
3813 }
3814
3815 /**
3816 * ice_get_channels - get the current and max supported channels
3817 * @dev: network interface device structure
3818 * @ch: ethtool channel data structure
3819 */
3820 static void
3821 ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
3822 {
3823 struct ice_netdev_priv *np = netdev_priv(dev);
3824 struct ice_vsi *vsi = np->vsi;
3825 struct ice_pf *pf = vsi->back;
3826
3827 /* report maximum channels */
3828 ch->max_rx = ice_get_max_rxq(pf);
3829 ch->max_tx = ice_get_max_txq(pf);
3830 ch->max_combined = min_t(int, ch->max_rx, ch->max_tx);
3831
3832 /* report current channels */
3833 ch->combined_count = ice_get_combined_cnt(vsi);
3834 ch->rx_count = vsi->num_rxq - ch->combined_count;
3835 ch->tx_count = vsi->num_txq - ch->combined_count;
3836
3837 /* report other queues */
3838 ch->other_count = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
3839 ch->max_other = ch->other_count;
3840 }
3841
3842 /**
3843 * ice_get_valid_rss_size - return valid number of RSS queues
3844 * @hw: pointer to the HW structure
3845 * @new_size: requested RSS queues
3846 */
3847 static int ice_get_valid_rss_size(struct ice_hw *hw, int new_size)
3848 {
3849 struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;
3850
3851 return min_t(int, new_size, BIT(caps->rss_table_entry_width));
3852 }
3853
3854 /**
3855 * ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size
3856 * @vsi: VSI to reconfigure RSS LUT on
3857 * @req_rss_size: requested range of queue numbers for hashing
3858 *
3859 * Set the VSI's RSS parameters, configure the RSS LUT based on these.
3860 */
3861 static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
3862 {
3863 struct ice_pf *pf = vsi->back;
3864 struct device *dev;
3865 struct ice_hw *hw;
3866 int err;
3867 u8 *lut;
3868
3869 dev = ice_pf_to_dev(pf);
3870 hw = &pf->hw;
3871
3872 if (!req_rss_size)
3873 return -EINVAL;
3874
3875 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
3876 if (!lut)
3877 return -ENOMEM;
3878
3879 /* set RSS LUT parameters */
3880 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
3881 vsi->rss_size = 1;
3882 else
3883 vsi->rss_size = ice_get_valid_rss_size(hw, req_rss_size);
3884
3885 /* create/set RSS LUT */
3886 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
3887 err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
3888 if (err)
3889 dev_err(dev, "Cannot set RSS lut, err %d aq_err %s\n", err,
3890 libie_aq_str(hw->adminq.sq_last_status));
3891
3892 kfree(lut);
3893 return err;
3894 }
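/* ice_fill_rss_lut() lives elsewhere in the driver (ice_lib.c); as a rough
 * sketch of the behavior relied on here, it spreads LUT entries round-robin
 * across the active queues:
 *
 *	for (i = 0; i < rss_table_size; i++)
 *		lut[i] = i % rss_size;
 *
 * e.g. with rss_table_size = 512 and rss_size = 8, each queue index appears
 * 64 times in the table. (Approximation for illustration, not the verbatim
 * helper.)
 */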
3895
3896 /**
3897 * ice_set_channels - set the number of channels
3898 * @dev: network interface device structure
3899 * @ch: ethtool channel data structure
3900 */
3901 static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
3902 {
3903 struct ice_netdev_priv *np = netdev_priv(dev);
3904 struct ice_vsi *vsi = np->vsi;
3905 struct ice_pf *pf = vsi->back;
3906 int new_rx = 0, new_tx = 0;
3907 bool locked = false;
3908 int ret = 0;
3909
3910 /* do not support changing channels in Safe Mode */
3911 if (ice_is_safe_mode(pf)) {
3912 netdev_err(dev, "Changing channel in Safe Mode is not supported\n");
3913 return -EOPNOTSUPP;
3914 }
3915 /* do not support changing other_count */
3916 if (ch->other_count != (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1U : 0U))
3917 return -EINVAL;
3918
3919 if (ice_is_adq_active(pf)) {
3920 netdev_err(dev, "Cannot set channels with ADQ configured.\n");
3921 return -EOPNOTSUPP;
3922 }
3923
3924 if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) {
3925 netdev_err(dev, "Cannot set channels when Flow Director filters are active\n");
3926 return -EOPNOTSUPP;
3927 }
3928
3929 if (ch->rx_count && ch->tx_count) {
3930 netdev_err(dev, "Dedicated RX or TX channels cannot be used simultaneously\n");
3931 return -EINVAL;
3932 }
3933
3934 new_rx = ch->combined_count + ch->rx_count;
3935 new_tx = ch->combined_count + ch->tx_count;
3936
3937 if (new_rx < vsi->tc_cfg.numtc) {
3938 netdev_err(dev, "Cannot set less Rx channels, than Traffic Classes you have (%u)\n",
3939 vsi->tc_cfg.numtc);
3940 return -EINVAL;
3941 }
3942 if (new_tx < vsi->tc_cfg.numtc) {
3943 netdev_err(dev, "Cannot set less Tx channels, than Traffic Classes you have (%u)\n",
3944 vsi->tc_cfg.numtc);
3945 return -EINVAL;
3946 }
3947 if (new_rx > ice_get_max_rxq(pf)) {
3948 netdev_err(dev, "Maximum allowed Rx channels is %d\n",
3949 ice_get_max_rxq(pf));
3950 return -EINVAL;
3951 }
3952 if (new_tx > ice_get_max_txq(pf)) {
3953 netdev_err(dev, "Maximum allowed Tx channels is %d\n",
3954 ice_get_max_txq(pf));
3955 return -EINVAL;
3956 }
3957
3958 if (pf->cdev_info && pf->cdev_info->adev) {
3959 mutex_lock(&pf->adev_mutex);
3960 device_lock(&pf->cdev_info->adev->dev);
3961 locked = true;
3962 if (pf->cdev_info->adev->dev.driver) {
3963 netdev_err(dev, "Cannot change channels when RDMA is active\n");
3964 ret = -EBUSY;
3965 goto adev_unlock;
3966 }
3967 }
3968
3969 ice_vsi_recfg_qs(vsi, new_rx, new_tx, locked);
3970
3971 if (!netif_is_rxfh_configured(dev)) {
3972 ret = ice_vsi_set_dflt_rss_lut(vsi, new_rx);
3973 goto adev_unlock;
3974 }
3975
3976 /* Update rss_size due to change in Rx queues */
3977 vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx);
3978
3979 adev_unlock:
3980 if (locked) {
3981 device_unlock(&pf->cdev_info->adev->dev);
3982 mutex_unlock(&pf->adev_mutex);
3983 }
3984 return ret;
3985 }
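/* Example (illustrative only): the userspace equivalent of
 * "ethtool -L eth0 combined 8", which invokes ice_set_channels() above
 * (headers as in the earlier sketches; "eth0" assumed).
 *
 *	struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
 *	struct ifreq ifr = {};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&ch;
 *	ioctl(fd, SIOCETHTOOL, &ifr);	// current and max counts
 *	ch.cmd = ETHTOOL_SCHANNELS;
 *	ch.combined_count = 8;		// keep other_count as reported
 *	ioctl(fd, SIOCETHTOOL, &ifr);
 */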
3986
3987 /**
3988 * ice_get_wol - get current Wake on LAN configuration
3989 * @netdev: network interface device structure
3990 * @wol: Ethtool structure to retrieve WoL settings
3991 */
3992 static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
3993 {
3994 struct ice_netdev_priv *np = netdev_priv(netdev);
3995 struct ice_pf *pf = np->vsi->back;
3996
3997 if (np->vsi->type != ICE_VSI_PF)
3998 netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n");
3999
4000 /* Get WoL settings based on the HW capability */
4001 if (ice_is_wol_supported(&pf->hw)) {
4002 wol->supported = WAKE_MAGIC;
4003 wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0;
4004 } else {
4005 wol->supported = 0;
4006 wol->wolopts = 0;
4007 }
4008 }
4009
4010 /**
4011 * ice_set_wol - set Wake on LAN on supported device
4012 * @netdev: network interface device structure
4013 * @wol: Ethtool structure to set WoL
4014 */
4015 static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
4016 {
4017 struct ice_netdev_priv *np = netdev_priv(netdev);
4018 struct ice_vsi *vsi = np->vsi;
4019 struct ice_pf *pf = vsi->back;
4020
4021 if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(&pf->hw))
4022 return -EOPNOTSUPP;
4023
4024 /* only magic packet is supported */
4025 if (wol->wolopts && wol->wolopts != WAKE_MAGIC)
4026 return -EOPNOTSUPP;
4027
4028 /* Set WoL only if there is a new value */
4029 if (pf->wol_ena != !!wol->wolopts) {
4030 pf->wol_ena = !!wol->wolopts;
4031 device_set_wakeup_enable(ice_pf_to_dev(pf), pf->wol_ena);
4032 netdev_dbg(netdev, "WoL magic packet %sabled\n",
4033 pf->wol_ena ? "en" : "dis");
4034 }
4035
4036 return 0;
4037 }
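/* Example (illustrative only): enabling magic-packet wakeup, the only WoL
 * mode the handlers above support ("ethtool -s eth0 wol g"); "eth0" and the
 * missing error handling are assumptions.
 *
 *	struct ethtool_wolinfo w = { .cmd = ETHTOOL_GWOL };
 *	struct ifreq ifr = {};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&w;
 *	ioctl(fd, SIOCETHTOOL, &ifr);	// ice_get_wol()
 *	if (w.supported & WAKE_MAGIC) {
 *		w.cmd = ETHTOOL_SWOL;
 *		w.wolopts = WAKE_MAGIC;
 *		ioctl(fd, SIOCETHTOOL, &ifr);	// ice_set_wol()
 *	}
 */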
4038
4039 /**
4040 * ice_get_rc_coalesce - get ITR values for specific ring container
4041 * @ec: ethtool structure to fill with driver's coalesce settings
4042 * @rc: ring container that the ITR values will come from
4043 *
4044 * Query the device for ice_ring_container specific ITR values. This is
4045 * done per ice_ring_container because each q_vector can have 1 or more rings
4046 * and all of said ring(s) will have the same ITR values.
4047 *
4048 * Returns 0 on success, negative otherwise.
4049 */
4050 static int
4051 ice_get_rc_coalesce(struct ethtool_coalesce *ec, struct ice_ring_container *rc)
4052 {
4053 if (!rc->rx_ring)
4054 return -EINVAL;
4055
4056 switch (rc->type) {
4057 case ICE_RX_CONTAINER:
4058 ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc);
4059 ec->rx_coalesce_usecs = rc->itr_setting;
4060 ec->rx_coalesce_usecs_high = rc->rx_ring->q_vector->intrl;
4061 break;
4062 case ICE_TX_CONTAINER:
4063 ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc);
4064 ec->tx_coalesce_usecs = rc->itr_setting;
4065 break;
4066 default:
4067 dev_dbg(ice_pf_to_dev(rc->rx_ring->vsi->back), "Invalid c_type %d\n", rc->type);
4068 return -EINVAL;
4069 }
4070
4071 return 0;
4072 }
4073
4074 /**
4075 * ice_get_q_coalesce - get a queue's ITR/INTRL (coalesce) settings
4076 * @vsi: VSI associated to the queue for getting ITR/INTRL (coalesce) settings
4077 * @ec: coalesce settings to program the device with
4078 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
4079 *
4080 * Return 0 on success, and negative under the following conditions:
4081 * 1. Getting Tx or Rx ITR/INTRL (coalesce) settings failed.
4082 * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
4083 */
4084 static int
4085 ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
4086 {
4087 if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
4088 if (ice_get_rc_coalesce(ec,
4089 &vsi->rx_rings[q_num]->q_vector->rx))
4090 return -EINVAL;
4091 if (ice_get_rc_coalesce(ec,
4092 &vsi->tx_rings[q_num]->q_vector->tx))
4093 return -EINVAL;
4094 } else if (q_num < vsi->num_rxq) {
4095 if (ice_get_rc_coalesce(ec,
4096 &vsi->rx_rings[q_num]->q_vector->rx))
4097 return -EINVAL;
4098 } else if (q_num < vsi->num_txq) {
4099 if (ice_get_rc_coalesce(ec,
4100 &vsi->tx_rings[q_num]->q_vector->tx))
4101 return -EINVAL;
4102 } else {
4103 return -EINVAL;
4104 }
4105
4106 return 0;
4107 }
4108
4109 /**
4110 * __ice_get_coalesce - get ITR/INTRL values for the device
4111 * @netdev: pointer to the netdev associated with this query
4112 * @ec: ethtool structure to fill with driver's coalesce settings
4113 * @q_num: queue number to get the coalesce settings for
4114 *
4115 * If the caller passes in a negative q_num then we return coalesce settings
4116 * based on queue number 0, else use the actual q_num passed in.
4117 */
4118 static int
4119 __ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
4120 int q_num)
4121 {
4122 struct ice_netdev_priv *np = netdev_priv(netdev);
4123 struct ice_vsi *vsi = np->vsi;
4124
4125 if (q_num < 0)
4126 q_num = 0;
4127
4128 if (ice_get_q_coalesce(vsi, ec, q_num))
4129 return -EINVAL;
4130
4131 return 0;
4132 }
4133
4134 static int ice_get_coalesce(struct net_device *netdev,
4135 struct ethtool_coalesce *ec,
4136 struct kernel_ethtool_coalesce *kernel_coal,
4137 struct netlink_ext_ack *extack)
4138 {
4139 return __ice_get_coalesce(netdev, ec, -1);
4140 }
4141
4142 static int
4143 ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
4144 struct ethtool_coalesce *ec)
4145 {
4146 return __ice_get_coalesce(netdev, ec, q_num);
4147 }
4148
4149 /**
4150 * ice_set_rc_coalesce - set ITR values for specific ring container
4151 * @ec: ethtool structure from user to update ITR settings
4152 * @rc: ring container to apply the ITR values to
4153 * @vsi: VSI associated to the ring container
4154 *
4155 * Set specific ITR values. This is done per ice_ring_container because each
4156 * q_vector can have 1 or more rings and all of said ring(s) will have the same
4157 * ITR values.
4158 *
4159 * Returns 0 on success, negative otherwise.
4160 */
4161 static int
4162 ice_set_rc_coalesce(struct ethtool_coalesce *ec,
4163 struct ice_ring_container *rc, struct ice_vsi *vsi)
4164 {
4165 const char *c_type_str = (rc->type == ICE_RX_CONTAINER) ? "rx" : "tx";
4166 u32 use_adaptive_coalesce, coalesce_usecs;
4167 struct ice_pf *pf = vsi->back;
4168 u16 itr_setting;
4169
4170 if (!rc->rx_ring)
4171 return -EINVAL;
4172
4173 switch (rc->type) {
4174 case ICE_RX_CONTAINER:
4175 {
4176 struct ice_q_vector *q_vector = rc->rx_ring->q_vector;
4177
4178 if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
4179 (ec->rx_coalesce_usecs_high &&
4180 ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
4181 netdev_info(vsi->netdev, "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n",
4182 c_type_str, pf->hw.intrl_gran,
4183 ICE_MAX_INTRL);
4184 return -EINVAL;
4185 }
4186 if (ec->rx_coalesce_usecs_high != q_vector->intrl &&
4187 (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce)) {
4188 netdev_info(vsi->netdev, "Invalid value, %s-usecs-high cannot be changed if adaptive-tx or adaptive-rx is enabled\n",
4189 c_type_str);
4190 return -EINVAL;
4191 }
4192 if (ec->rx_coalesce_usecs_high != q_vector->intrl)
4193 q_vector->intrl = ec->rx_coalesce_usecs_high;
4194
4195 use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
4196 coalesce_usecs = ec->rx_coalesce_usecs;
4197
4198 break;
4199 }
4200 case ICE_TX_CONTAINER:
4201 use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
4202 coalesce_usecs = ec->tx_coalesce_usecs;
4203
4204 break;
4205 default:
4206 dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n",
4207 rc->type);
4208 return -EINVAL;
4209 }
4210
4211 itr_setting = rc->itr_setting;
4212 if (coalesce_usecs != itr_setting && use_adaptive_coalesce) {
4213 netdev_info(vsi->netdev, "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n",
4214 c_type_str, c_type_str);
4215 return -EINVAL;
4216 }
4217
4218 if (coalesce_usecs > ICE_ITR_MAX) {
4219 netdev_info(vsi->netdev, "Invalid value, %s-usecs range is 0-%d\n",
4220 c_type_str, ICE_ITR_MAX);
4221 return -EINVAL;
4222 }
4223
4224 if (use_adaptive_coalesce) {
4225 rc->itr_mode = ITR_DYNAMIC;
4226 } else {
4227 rc->itr_mode = ITR_STATIC;
4228 /* store the user-facing value as it was set */
4229 rc->itr_setting = coalesce_usecs;
4230 /* write the change to the register */
4231 ice_write_itr(rc, coalesce_usecs);
4232 /* force writes to take effect immediately; the flush shouldn't
4233 * be done in the functions above because the intent is for
4234 * them to do lazy writes.
4235 */
4236 ice_flush(&pf->hw);
4237 }
4238
4239 return 0;
4240 }
4241
4242 /**
4243 * ice_set_q_coalesce - set a queue's ITR/INTRL (coalesce) settings
4244 * @vsi: VSI associated to the queue that need updating
4245 * @ec: coalesce settings to program the device with
4246 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
4247 *
4248 * Return 0 on success, and negative under the following conditions:
4249 * 1. Setting Tx or Rx ITR/INTRL (coalesce) settings failed.
4250 * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
4251 */
4252 static int
4253 ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
4254 {
4255 if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
4256 if (ice_set_rc_coalesce(ec,
4257 &vsi->rx_rings[q_num]->q_vector->rx,
4258 vsi))
4259 return -EINVAL;
4260
4261 if (ice_set_rc_coalesce(ec,
4262 &vsi->tx_rings[q_num]->q_vector->tx,
4263 vsi))
4264 return -EINVAL;
4265 } else if (q_num < vsi->num_rxq) {
4266 if (ice_set_rc_coalesce(ec,
4267 &vsi->rx_rings[q_num]->q_vector->rx,
4268 vsi))
4269 return -EINVAL;
4270 } else if (q_num < vsi->num_txq) {
4271 if (ice_set_rc_coalesce(ec,
4272 &vsi->tx_rings[q_num]->q_vector->tx,
4273 vsi))
4274 return -EINVAL;
4275 } else {
4276 return -EINVAL;
4277 }
4278
4279 return 0;
4280 }
4281
4282 /**
4283 * ice_print_if_odd_usecs - print message if user tries to set odd [tx|rx]-usecs
4284 * @netdev: netdev used for print
4285 * @itr_setting: previous user setting
4286 * @use_adaptive_coalesce: if adaptive coalesce is enabled or being enabled
4287 * @coalesce_usecs: requested value of [tx|rx]-usecs
4288 * @c_type_str: either "rx" or "tx" to match user set field of [tx|rx]-usecs
4289 */
4290 static void
4291 ice_print_if_odd_usecs(struct net_device *netdev, u16 itr_setting,
4292 u32 use_adaptive_coalesce, u32 coalesce_usecs,
4293 const char *c_type_str)
4294 {
4295 if (use_adaptive_coalesce)
4296 return;
4297
4298 if (itr_setting != coalesce_usecs && (coalesce_usecs % 2))
4299 netdev_info(netdev, "User set %s-usecs to %d, device only supports even values. Rounding down and attempting to set %s-usecs to %d\n",
4300 c_type_str, coalesce_usecs, c_type_str,
4301 ITR_REG_ALIGN(coalesce_usecs));
4302 }
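/* Worked example: requesting rx-usecs 25 with adaptive-rx disabled trips the
 * message above, since the device only supports even ITR values;
 * ITR_REG_ALIGN(25) rounds down, so 24 is what actually gets programmed.
 */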
4303
4304 /**
4305 * __ice_set_coalesce - set ITR/INTRL values for the device
4306 * @netdev: pointer to the netdev associated with this request
4307 * @ec: ethtool structure containing the user's coalesce settings
4308 * @q_num: queue number to set the coalesce settings for
4309 *
4310 * If the caller passes in a negative q_num then we set the coalesce settings
4311 * for all Tx/Rx queues, else use the actual q_num passed in.
4312 */
4313 static int
4314 __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
4315 int q_num)
4316 {
4317 struct ice_netdev_priv *np = netdev_priv(netdev);
4318 struct ice_vsi *vsi = np->vsi;
4319
4320 if (q_num < 0) {
4321 struct ice_q_vector *q_vector = vsi->q_vectors[0];
4322 int v_idx;
4323
4324 if (q_vector) {
4325 ice_print_if_odd_usecs(netdev, q_vector->rx.itr_setting,
4326 ec->use_adaptive_rx_coalesce,
4327 ec->rx_coalesce_usecs, "rx");
4328
4329 ice_print_if_odd_usecs(netdev, q_vector->tx.itr_setting,
4330 ec->use_adaptive_tx_coalesce,
4331 ec->tx_coalesce_usecs, "tx");
4332 }
4333
4334 ice_for_each_q_vector(vsi, v_idx) {
4335 /* In some cases if DCB is configured the num_[rx|tx]q
4336 * can be less than vsi->num_q_vectors. This check
4337 * accounts for that so we don't report a false failure
4338 */
4339 if (v_idx >= vsi->num_rxq && v_idx >= vsi->num_txq)
4340 goto set_complete;
4341
4342 if (ice_set_q_coalesce(vsi, ec, v_idx))
4343 return -EINVAL;
4344
4345 ice_set_q_vector_intrl(vsi->q_vectors[v_idx]);
4346 }
4347 goto set_complete;
4348 }
4349
4350 if (ice_set_q_coalesce(vsi, ec, q_num))
4351 return -EINVAL;
4352
4353 ice_set_q_vector_intrl(vsi->q_vectors[q_num]);
4354
4355 set_complete:
4356 return 0;
4357 }
4358
4359 static int ice_set_coalesce(struct net_device *netdev,
4360 struct ethtool_coalesce *ec,
4361 struct kernel_ethtool_coalesce *kernel_coal,
4362 struct netlink_ext_ack *extack)
4363 {
4364 return __ice_set_coalesce(netdev, ec, -1);
4365 }
4366
4367 static int
4368 ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
4369 struct ethtool_coalesce *ec)
4370 {
4371 return __ice_set_coalesce(netdev, ec, q_num);
4372 }
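/* Example (illustrative only): static interrupt moderation from userspace,
 * which reaches __ice_set_coalesce() with q_num == -1; the per-queue form
 * ("ethtool --per-queue eth0 queue_mask 0x1 --coalesce ...") reaches
 * ice_set_per_q_coalesce() instead. Headers as in the earlier sketches.
 *
 *	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
 *	struct ifreq ifr = {};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&ec;
 *	ioctl(fd, SIOCETHTOOL, &ifr);
 *	ec.cmd = ETHTOOL_SCOALESCE;
 *	ec.use_adaptive_rx_coalesce = 0;
 *	ec.rx_coalesce_usecs = 50;	// static ITR: even, <= ICE_ITR_MAX
 *	ioctl(fd, SIOCETHTOOL, &ifr);
 */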
4373
4374 static void
4375 ice_repr_get_drvinfo(struct net_device *netdev,
4376 struct ethtool_drvinfo *drvinfo)
4377 {
4378 struct ice_repr *repr = ice_netdev_to_repr(netdev);
4379
4380 if (repr->ops.ready(repr))
4381 return;
4382
4383 __ice_get_drvinfo(netdev, drvinfo, repr->src_vsi);
4384 }
4385
4386 static void
4387 ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
4388 {
4389 struct ice_repr *repr = ice_netdev_to_repr(netdev);
4390
4391 /* for port representors only ETH_SS_STATS is supported */
4392 if (repr->ops.ready(repr) || stringset != ETH_SS_STATS)
4393 return;
4394
4395 __ice_get_strings(netdev, stringset, data, repr->src_vsi);
4396 }
4397
4398 static void
4399 ice_repr_get_ethtool_stats(struct net_device *netdev,
4400 struct ethtool_stats __always_unused *stats,
4401 u64 *data)
4402 {
4403 struct ice_repr *repr = ice_netdev_to_repr(netdev);
4404
4405 if (repr->ops.ready(repr))
4406 return;
4407
4408 __ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi);
4409 }
4410
4411 static int ice_repr_get_sset_count(struct net_device *netdev, int sset)
4412 {
4413 switch (sset) {
4414 case ETH_SS_STATS:
4415 return ICE_VSI_STATS_LEN;
4416 default:
4417 return -EOPNOTSUPP;
4418 }
4419 }
4420
4421 #define ICE_I2C_EEPROM_DEV_ADDR 0xA0
4422 #define ICE_I2C_EEPROM_DEV_ADDR2 0xA2
4423 #define ICE_MODULE_TYPE_SFP 0x03
4424 #define ICE_MODULE_TYPE_QSFP_PLUS 0x0D
4425 #define ICE_MODULE_TYPE_QSFP28 0x11
4426 #define ICE_MODULE_SFF_ADDR_MODE 0x04
4427 #define ICE_MODULE_SFF_DIAG_CAPAB 0x40
4428 #define ICE_MODULE_REVISION_ADDR 0x01
4429 #define ICE_MODULE_SFF_8472_COMP 0x5E
4430 #define ICE_MODULE_SFF_8472_SWAP 0x5C
4431 #define ICE_MODULE_QSFP_MAX_LEN 640
4432
4433 /**
4434 * ice_get_module_info - get SFF module type and revision information
4435 * @netdev: network interface device structure
4436 * @modinfo: module EEPROM size and layout information structure
4437 */
4438 static int
4439 ice_get_module_info(struct net_device *netdev,
4440 struct ethtool_modinfo *modinfo)
4441 {
4442 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4443 struct ice_hw *hw = &pf->hw;
4444 u8 sff8472_comp = 0;
4445 u8 sff8472_swap = 0;
4446 u8 sff8636_rev = 0;
4447 u8 value = 0;
4448 int status;
4449
4450 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00,
4451 0, &value, 1, 0, NULL);
4452 if (status)
4453 return status;
4454
4455 switch (value) {
4456 case ICE_MODULE_TYPE_SFP:
4457 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
4458 ICE_MODULE_SFF_8472_COMP, 0x00, 0,
4459 &sff8472_comp, 1, 0, NULL);
4460 if (status)
4461 return status;
4462 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
4463 ICE_MODULE_SFF_8472_SWAP, 0x00, 0,
4464 &sff8472_swap, 1, 0, NULL);
4465 if (status)
4466 return status;
4467
4468 if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) {
4469 modinfo->type = ETH_MODULE_SFF_8079;
4470 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
4471 } else if (sff8472_comp &&
4472 (sff8472_swap & ICE_MODULE_SFF_DIAG_CAPAB)) {
4473 modinfo->type = ETH_MODULE_SFF_8472;
4474 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
4475 } else {
4476 modinfo->type = ETH_MODULE_SFF_8079;
4477 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
4478 }
4479 break;
4480 case ICE_MODULE_TYPE_QSFP_PLUS:
4481 case ICE_MODULE_TYPE_QSFP28:
4482 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
4483 ICE_MODULE_REVISION_ADDR, 0x00, 0,
4484 &sff8636_rev, 1, 0, NULL);
4485 if (status)
4486 return status;
4487 /* Check revision compliance */
4488 if (sff8636_rev > 0x02) {
4489 /* Module is SFF-8636 compliant */
4490 modinfo->type = ETH_MODULE_SFF_8636;
4491 modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
4492 } else {
4493 modinfo->type = ETH_MODULE_SFF_8436;
4494 modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
4495 }
4496 break;
4497 default:
4498 netdev_warn(netdev, "SFF Module Type not recognized.\n");
4499 return -EINVAL;
4500 }
4501 return 0;
4502 }
4503
4504 /**
4505 * ice_get_module_eeprom - fill buffer with SFF EEPROM contents
4506 * @netdev: network interface device structure
4507 * @ee: EEPROM dump request structure
4508 * @data: buffer to be filled with EEPROM contents
4509 */
4510 static int
4511 ice_get_module_eeprom(struct net_device *netdev,
4512 struct ethtool_eeprom *ee, u8 *data)
4513 {
4514 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4515 #define SFF_READ_BLOCK_SIZE 8
4516 u8 value[SFF_READ_BLOCK_SIZE] = { 0 };
4517 u8 addr = ICE_I2C_EEPROM_DEV_ADDR;
4518 struct ice_hw *hw = &pf->hw;
4519 bool is_sfp = false;
4520 unsigned int i;
4521 u16 offset = 0;
4522 u8 page = 0;
4523 int status;
4524
4525 if (!ee || !ee->len || !data)
4526 return -EINVAL;
4527
4528 status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0,
4529 NULL);
4530 if (status)
4531 return status;
4532
4533 if (value[0] == ICE_MODULE_TYPE_SFP)
4534 is_sfp = true;
4535
4536 memset(data, 0, ee->len);
4537 for (i = 0; i < ee->len; i += SFF_READ_BLOCK_SIZE) {
4538 offset = i + ee->offset;
4539 page = 0;
4540
4541 /* Check if we need to access the other memory page */
4542 if (is_sfp) {
4543 if (offset >= ETH_MODULE_SFF_8079_LEN) {
4544 offset -= ETH_MODULE_SFF_8079_LEN;
4545 addr = ICE_I2C_EEPROM_DEV_ADDR2;
4546 }
4547 } else {
4548 while (offset >= ETH_MODULE_SFF_8436_LEN) {
4549 /* Compute memory page number and offset. */
4550 offset -= ETH_MODULE_SFF_8436_LEN / 2;
4551 page++;
4552 }
4553 }
4554
4555 /* Bit 2 of EEPROM address 0x02 declares upper
4556 * pages are disabled on QSFP modules.
4557 * SFP modules only ever use page 0.
4558 */
4559 if (page == 0 || !(data[0x2] & 0x4)) {
4560 u32 copy_len;
4561
4562 status = ice_aq_sff_eeprom(hw, 0, addr, offset, page,
4563 !is_sfp, value,
4564 SFF_READ_BLOCK_SIZE,
4565 0, NULL);
4566 netdev_dbg(netdev, "SFF %02X %02X %02X %X = %02X%02X%02X%02X.%02X%02X%02X%02X (%pe)\n",
4567 addr, offset, page, is_sfp,
4568 value[0], value[1], value[2], value[3],
4569 value[4], value[5], value[6], value[7],
4570 ERR_PTR(status));
4571 if (status) {
4572 netdev_err(netdev, "%s: error reading module EEPROM: status %pe\n",
4573 __func__, ERR_PTR(status));
4574 return status;
4575 }
4576
4577 /* Make sure we have enough room for the new block */
4578 copy_len = min_t(u32, SFF_READ_BLOCK_SIZE, ee->len - i);
4579 memcpy(data + i, value, copy_len);
4580 }
4581 }
4582 return 0;
4583 }
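/* Example (illustrative only): the two handlers above back "ethtool -m eth0".
 * A sketch that sizes and then dumps the module EEPROM (headers as in the
 * earlier sketches, plus <stdlib.h> for calloc); error handling omitted.
 *
 *	struct ethtool_modinfo mi = { .cmd = ETHTOOL_GMODULEINFO };
 *	struct ethtool_eeprom *ee;
 *	struct ifreq ifr = {};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&mi;
 *	ioctl(fd, SIOCETHTOOL, &ifr);	// ice_get_module_info()
 *
 *	ee = calloc(1, sizeof(*ee) + mi.eeprom_len);
 *	ee->cmd = ETHTOOL_GMODULEEEPROM;
 *	ee->len = mi.eeprom_len;
 *	ifr.ifr_data = (void *)ee;
 *	ioctl(fd, SIOCETHTOOL, &ifr);	// ice_get_module_eeprom(), ee->data[]
 */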
4584
4585 /**
4586 * ice_get_port_fec_stats - returns FEC correctable, uncorrectable stats per
4587 * pcsquad, pcsport
4588 * @hw: pointer to the HW struct
4589 * @pcs_quad: pcsquad for input port
4590 * @pcs_port: pcsport for input port
4591 * @fec_stats: buffer to hold FEC statistics for given port
4592 *
4593 * Return: 0 on success, negative on failure.
4594 */
4595 static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
4596 struct ethtool_fec_stats *fec_stats)
4597 {
4598 u32 fec_uncorr_low_val = 0, fec_uncorr_high_val = 0;
4599 u32 fec_corr_low_val = 0, fec_corr_high_val = 0;
4600 int err;
4601
4602 if (pcs_quad > 1 || pcs_port > 3)
4603 return -EINVAL;
4604
4605 err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_LOW,
4606 &fec_corr_low_val);
4607 if (err)
4608 return err;
4609
4610 err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_HIGH,
4611 &fec_corr_high_val);
4612 if (err)
4613 return err;
4614
4615 err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
4616 ICE_FEC_UNCORR_LOW,
4617 &fec_uncorr_low_val);
4618 if (err)
4619 return err;
4620
4621 err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
4622 ICE_FEC_UNCORR_HIGH,
4623 &fec_uncorr_high_val);
4624 if (err)
4625 return err;
4626
4627 fec_stats->corrected_blocks.total = (fec_corr_high_val << 16) +
4628 fec_corr_low_val;
4629 fec_stats->uncorrectable_blocks.total = (fec_uncorr_high_val << 16) +
4630 fec_uncorr_low_val;
4631 return 0;
4632 }
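/* Each counter is read as two 16-bit halves and recombined above; e.g.
 * fec_corr_high_val = 0x0001 and fec_corr_low_val = 0x8000 yield
 * (0x0001 << 16) + 0x8000 = 0x18000 = 98304 corrected blocks.
 */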
4633
4634 /**
4635 * ice_get_fec_stats - returns FEC correctable, uncorrectable stats per netdev
4636 * @netdev: network interface device structure
4637 * @fec_stats: buffer to hold FEC statistics for given port
4638 * @hist: buffer to put FEC histogram statistics for given port
4639 *
4640 */
4641 static void ice_get_fec_stats(struct net_device *netdev,
4642 struct ethtool_fec_stats *fec_stats,
4643 struct ethtool_fec_hist *hist)
4644 {
4645 struct ice_netdev_priv *np = netdev_priv(netdev);
4646 struct ice_port_topology port_topology;
4647 struct ice_port_info *pi;
4648 struct ice_pf *pf;
4649 struct ice_hw *hw;
4650 int err;
4651
4652 pf = np->vsi->back;
4653 hw = &pf->hw;
4654 pi = np->vsi->port_info;
4655
4656 /* Serdes parameters are not supported if not the PF VSI */
4657 if (np->vsi->type != ICE_VSI_PF || !pi)
4658 return;
4659
4660 err = ice_get_port_topology(hw, pi->lport, &port_topology);
4661 if (err) {
4662 netdev_info(netdev, "Extended register dump failed Lport %d\n",
4663 pi->lport);
4664 return;
4665 }
4666
4667 /* Get FEC correctable, uncorrectable counter */
4668 err = ice_get_port_fec_stats(hw, port_topology.pcs_quad_select,
4669 port_topology.pcs_port, fec_stats);
4670 if (err)
4671 netdev_info(netdev, "FEC stats get failed Lport %d Err %d\n",
4672 pi->lport, err);
4673 }
4674
4675 static void ice_get_eth_mac_stats(struct net_device *netdev,
4676 struct ethtool_eth_mac_stats *mac_stats)
4677 {
4678 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4679 struct ice_hw_port_stats *ps = &pf->stats;
4680
4681 mac_stats->FramesTransmittedOK = ps->eth.tx_unicast +
4682 ps->eth.tx_multicast +
4683 ps->eth.tx_broadcast;
4684 mac_stats->FramesReceivedOK = ps->eth.rx_unicast +
4685 ps->eth.rx_multicast +
4686 ps->eth.rx_broadcast;
4687 mac_stats->FrameCheckSequenceErrors = ps->crc_errors;
4688 mac_stats->OctetsTransmittedOK = ps->eth.tx_bytes;
4689 mac_stats->OctetsReceivedOK = ps->eth.rx_bytes;
4690 mac_stats->MulticastFramesXmittedOK = ps->eth.tx_multicast;
4691 mac_stats->BroadcastFramesXmittedOK = ps->eth.tx_broadcast;
4692 mac_stats->MulticastFramesReceivedOK = ps->eth.rx_multicast;
4693 mac_stats->BroadcastFramesReceivedOK = ps->eth.rx_broadcast;
4694 mac_stats->InRangeLengthErrors = ps->rx_len_errors;
4695 mac_stats->FrameTooLongErrors = ps->rx_oversize;
4696 }
4697
4698 static void ice_get_pause_stats(struct net_device *netdev,
4699 struct ethtool_pause_stats *pause_stats)
4700 {
4701 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4702 struct ice_hw_port_stats *ps = &pf->stats;
4703
4704 pause_stats->tx_pause_frames = ps->link_xon_tx + ps->link_xoff_tx;
4705 pause_stats->rx_pause_frames = ps->link_xon_rx + ps->link_xoff_rx;
4706 }
4707
4708 static const struct ethtool_rmon_hist_range ice_rmon_ranges[] = {
4709 { 0, 64 },
4710 { 65, 127 },
4711 { 128, 255 },
4712 { 256, 511 },
4713 { 512, 1023 },
4714 { 1024, 1522 },
4715 { 1523, 9522 },
4716 {}
4717 };
4718
4719 static void ice_get_rmon_stats(struct net_device *netdev,
4720 struct ethtool_rmon_stats *rmon,
4721 const struct ethtool_rmon_hist_range **ranges)
4722 {
4723 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4724 struct ice_hw_port_stats *ps = &pf->stats;
4725
4726 rmon->undersize_pkts = ps->rx_undersize;
4727 rmon->oversize_pkts = ps->rx_oversize;
4728 rmon->fragments = ps->rx_fragments;
4729 rmon->jabbers = ps->rx_jabber;
4730
4731 rmon->hist[0] = ps->rx_size_64;
4732 rmon->hist[1] = ps->rx_size_127;
4733 rmon->hist[2] = ps->rx_size_255;
4734 rmon->hist[3] = ps->rx_size_511;
4735 rmon->hist[4] = ps->rx_size_1023;
4736 rmon->hist[5] = ps->rx_size_1522;
4737 rmon->hist[6] = ps->rx_size_big;
4738
4739 rmon->hist_tx[0] = ps->tx_size_64;
4740 rmon->hist_tx[1] = ps->tx_size_127;
4741 rmon->hist_tx[2] = ps->tx_size_255;
4742 rmon->hist_tx[3] = ps->tx_size_511;
4743 rmon->hist_tx[4] = ps->tx_size_1023;
4744 rmon->hist_tx[5] = ps->tx_size_1522;
4745 rmon->hist_tx[6] = ps->tx_size_big;
4746
4747 *ranges = ice_rmon_ranges;
4748 }
4749
4750 /** ice_get_ts_stats - provide timestamping stats
4751 * @netdev: the netdevice pointer from ethtool
4752 * @ts_stats: the ethtool data structure to fill in
4753 */
4754 static void ice_get_ts_stats(struct net_device *netdev,
4755 struct ethtool_ts_stats *ts_stats)
4756 {
4757 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4758 struct ice_ptp *ptp = &pf->ptp;
4759
4760 ts_stats->pkts = ptp->tx_hwtstamp_good;
4761 ts_stats->err = ptp->tx_hwtstamp_skipped +
4762 ptp->tx_hwtstamp_flushed +
4763 ptp->tx_hwtstamp_discarded;
4764 ts_stats->lost = ptp->tx_hwtstamp_timeouts;
4765 }
4766
4767 #define ICE_ETHTOOL_PFR (ETH_RESET_IRQ | ETH_RESET_DMA | \
4768 ETH_RESET_FILTER | ETH_RESET_OFFLOAD)
4769
4770 #define ICE_ETHTOOL_CORER ((ICE_ETHTOOL_PFR | ETH_RESET_RAM) << \
4771 ETH_RESET_SHARED_SHIFT)
4772
4773 #define ICE_ETHTOOL_GLOBR (ICE_ETHTOOL_CORER | \
4774 (ETH_RESET_MAC << ETH_RESET_SHARED_SHIFT) | \
4775 (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))
4776
4777 #define ICE_ETHTOOL_VFR ICE_ETHTOOL_PFR
4778
4779 /**
4780 * ice_ethtool_reset - triggers a given type of reset
4781 * @dev: network interface device structure
4782 * @flags: set of reset flags
4783 *
4784 * Return: 0 on success, -EOPNOTSUPP when using unsupported set of flags.
4785 */
4786 static int ice_ethtool_reset(struct net_device *dev, u32 *flags)
4787 {
4788 struct ice_pf *pf = ice_netdev_to_pf(dev);
4789 enum ice_reset_req reset;
4790
4791 switch (*flags) {
4792 case ICE_ETHTOOL_CORER:
4793 reset = ICE_RESET_CORER;
4794 break;
4795 case ICE_ETHTOOL_GLOBR:
4796 reset = ICE_RESET_GLOBR;
4797 break;
4798 case ICE_ETHTOOL_PFR:
4799 reset = ICE_RESET_PFR;
4800 break;
4801 default:
4802 netdev_info(dev, "Unsupported set of ethtool flags");
4803 return -EOPNOTSUPP;
4804 }
4805
4806 ice_schedule_reset(pf, reset);
4807
4808 *flags = 0;
4809
4810 return 0;
4811 }
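/* Example (illustrative only): requesting a PF reset from userspace, which
 * ice_ethtool_reset() maps to ICE_RESET_PFR. ETHTOOL_RESET passes the flag
 * mask in a plain ethtool_value; "eth0" assumed, headers as earlier.
 *
 *	struct ethtool_value ev = {
 *		.cmd = ETHTOOL_RESET,
 *		.data = ETH_RESET_IRQ | ETH_RESET_DMA |
 *			ETH_RESET_FILTER | ETH_RESET_OFFLOAD,	// == ICE_ETHTOOL_PFR
 *	};
 *	struct ifreq ifr = {};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&ev;
 *	ioctl(fd, SIOCETHTOOL, &ifr);	// ev.data is cleared on success
 */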
4812
4813 /**
4814 * ice_repr_ethtool_reset - triggers a VF reset
4815 * @dev: network interface device structure
4816 * @flags: set of reset flags
4817 *
4818 * Return: 0 on success,
4819 * -EOPNOTSUPP when using unsupported set of flags
4820 * -EBUSY when VF is not ready for reset.
4821 */
4822 static int ice_repr_ethtool_reset(struct net_device *dev, u32 *flags)
4823 {
4824 struct ice_repr *repr = ice_netdev_to_repr(dev);
4825 struct ice_vf *vf;
4826
4827 if (repr->type != ICE_REPR_TYPE_VF ||
4828 *flags != ICE_ETHTOOL_VFR)
4829 return -EOPNOTSUPP;
4830
4831 vf = repr->vf;
4832
4833 if (ice_check_vf_ready_for_cfg(vf))
4834 return -EBUSY;
4835
4836 *flags = 0;
4837
4838 return ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
4839 }
4840
4841 static const struct ethtool_ops ice_ethtool_ops = {
4842 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
4843 ETHTOOL_COALESCE_USE_ADAPTIVE |
4844 ETHTOOL_COALESCE_RX_USECS_HIGH,
4845 .supported_input_xfrm = RXH_XFRM_SYM_XOR,
4846 .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
4847 .get_link_ksettings = ice_get_link_ksettings,
4848 .set_link_ksettings = ice_set_link_ksettings,
4849 .get_fec_stats = ice_get_fec_stats,
4850 .get_eth_mac_stats = ice_get_eth_mac_stats,
4851 .get_pause_stats = ice_get_pause_stats,
4852 .get_rmon_stats = ice_get_rmon_stats,
4853 .get_ts_stats = ice_get_ts_stats,
4854 .get_drvinfo = ice_get_drvinfo,
4855 .get_regs_len = ice_get_regs_len,
4856 .get_regs = ice_get_regs,
4857 .get_wol = ice_get_wol,
4858 .set_wol = ice_set_wol,
4859 .get_msglevel = ice_get_msglevel,
4860 .set_msglevel = ice_set_msglevel,
4861 .self_test = ice_self_test,
4862 .get_link = ethtool_op_get_link,
4863 .get_link_ext_stats = ice_get_link_ext_stats,
4864 .get_eeprom_len = ice_get_eeprom_len,
4865 .get_eeprom = ice_get_eeprom,
4866 .get_coalesce = ice_get_coalesce,
4867 .set_coalesce = ice_set_coalesce,
4868 .get_strings = ice_get_strings,
4869 .set_phys_id = ice_set_phys_id,
4870 .get_ethtool_stats = ice_get_ethtool_stats,
4871 .get_priv_flags = ice_get_priv_flags,
4872 .set_priv_flags = ice_set_priv_flags,
4873 .get_sset_count = ice_get_sset_count,
4874 .get_rxnfc = ice_get_rxnfc,
4875 .set_rxnfc = ice_set_rxnfc,
4876 .get_rx_ring_count = ice_get_rx_ring_count,
4877 .get_ringparam = ice_get_ringparam,
4878 .set_ringparam = ice_set_ringparam,
4879 .nway_reset = ice_nway_reset,
4880 .get_pauseparam = ice_get_pauseparam,
4881 .set_pauseparam = ice_set_pauseparam,
4882 .reset = ice_ethtool_reset,
4883 .get_rxfh_key_size = ice_get_rxfh_key_size,
4884 .get_rxfh_indir_size = ice_get_rxfh_indir_size,
4885 .get_rxfh = ice_get_rxfh,
4886 .set_rxfh = ice_set_rxfh,
4887 .get_rxfh_fields = ice_get_rxfh_fields,
4888 .set_rxfh_fields = ice_set_rxfh_fields,
4889 .get_channels = ice_get_channels,
4890 .set_channels = ice_set_channels,
4891 .get_ts_info = ice_get_ts_info,
4892 .get_per_queue_coalesce = ice_get_per_q_coalesce,
4893 .set_per_queue_coalesce = ice_set_per_q_coalesce,
4894 .get_fecparam = ice_get_fecparam,
4895 .set_fecparam = ice_set_fecparam,
4896 .get_module_info = ice_get_module_info,
4897 .get_module_eeprom = ice_get_module_eeprom,
4898 };
4899
4900 static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
4901 .get_link_ksettings = ice_get_link_ksettings,
4902 .set_link_ksettings = ice_set_link_ksettings,
4903 .get_drvinfo = ice_get_drvinfo,
4904 .get_regs_len = ice_get_regs_len,
4905 .get_regs = ice_get_regs,
4906 .get_wol = ice_get_wol,
4907 .set_wol = ice_set_wol,
4908 .get_msglevel = ice_get_msglevel,
4909 .set_msglevel = ice_set_msglevel,
4910 .get_link = ethtool_op_get_link,
4911 .get_eeprom_len = ice_get_eeprom_len,
4912 .get_eeprom = ice_get_eeprom,
4913 .get_strings = ice_get_strings,
4914 .get_ethtool_stats = ice_get_ethtool_stats,
4915 .get_sset_count = ice_get_sset_count,
4916 .get_ringparam = ice_get_ringparam,
4917 .set_ringparam = ice_set_ringparam,
4918 .nway_reset = ice_nway_reset,
4919 .get_channels = ice_get_channels,
4920 };
4921
4922 /**
4923 * ice_set_ethtool_safe_mode_ops - setup safe mode ethtool ops
4924 * @netdev: network interface device structure
4925 */
4926 void ice_set_ethtool_safe_mode_ops(struct net_device *netdev)
4927 {
4928 netdev->ethtool_ops = &ice_ethtool_safe_mode_ops;
4929 }
4930
4931 static const struct ethtool_ops ice_ethtool_repr_ops = {
4932 .get_drvinfo = ice_repr_get_drvinfo,
4933 .get_link = ethtool_op_get_link,
4934 .get_strings = ice_repr_get_strings,
4935 .get_ethtool_stats = ice_repr_get_ethtool_stats,
4936 .get_sset_count = ice_repr_get_sset_count,
4937 .reset = ice_repr_ethtool_reset,
4938 };
4939
4940 /**
4941 * ice_set_ethtool_repr_ops - setup VF's port representor ethtool ops
4942 * @netdev: network interface device structure
4943 */
4944 void ice_set_ethtool_repr_ops(struct net_device *netdev)
4945 {
4946 netdev->ethtool_ops = &ice_ethtool_repr_ops;
4947 }
4948
4949 /**
4950 * ice_set_ethtool_ops - setup netdev ethtool ops
4951 * @netdev: network interface device structure
4952 *
4953 * setup netdev ethtool ops with ice specific ops
4954 */
4955 void ice_set_ethtool_ops(struct net_device *netdev)
4956 {
4957 netdev->ethtool_ops = &ice_ethtool_ops;
4958 }
4959