// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* ethtool support for ice */

#include "ice.h"
#include "ice_ethtool.h"
#include "ice_flow.h"
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include <net/dcbnl.h>
#include <net/libeth/rx.h>

struct ice_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define ICE_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

#define ICE_VSI_STAT(_name, _stat) \
	ICE_STAT(struct ice_vsi, _name, _stat)
#define ICE_PF_STAT(_name, _stat) \
	ICE_STAT(struct ice_pf, _name, _stat)

static int ice_q_stats_len(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	/* One packet count and one byte count per queue */
	return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) * 2);
}

#define ICE_PF_STATS_LEN	ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN	ARRAY_SIZE(ice_gstrings_vsi_stats)

#define ICE_PFC_STATS_LEN ( \
		(sizeof_field(struct ice_pf, stats.priority_xoff_rx) + \
		 sizeof_field(struct ice_pf, stats.priority_xon_rx) + \
		 sizeof_field(struct ice_pf, stats.priority_xoff_tx) + \
		 sizeof_field(struct ice_pf, stats.priority_xon_tx)) \
		 / sizeof(u64))
#define ICE_ALL_STATS_LEN(n)	(ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \
				 ICE_VSI_STATS_LEN + ice_q_stats_len(n))

static const struct ice_stats ice_gstrings_vsi_stats[] = {
	ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
	ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
	ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
	ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
	ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
	ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
	ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
	ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
	ICE_VSI_STAT("rx_dropped", eth_stats.rx_discards),
	ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
	ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
	ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
	ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
	ICE_VSI_STAT("tx_linearize", tx_linearize),
	ICE_VSI_STAT("tx_busy", tx_busy),
	ICE_VSI_STAT("tx_restart", tx_restart),
};

enum ice_ethtool_test_id {
	ICE_ETH_TEST_REG = 0,
	ICE_ETH_TEST_EEPROM,
	ICE_ETH_TEST_INTR,
	ICE_ETH_TEST_LOOP,
	ICE_ETH_TEST_LINK,
};

static const char ice_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)",
	"EEPROM test (offline)",
	"Interrupt test (offline)",
	"Loopback test (offline)",
	"Link test (on/offline)",
};

#define ICE_TEST_LEN (sizeof(ice_gstrings_test) / ETH_GSTRING_LEN)

/* These PF_STATs might look like duplicates of some NETDEV_STATs,
 * but they aren't. This device is capable of supporting multiple
 * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
 * netdevs whereas the PF_STATs are for the physical function that's
 * hosting these netdevs.
 *
 * The PF_STATs are appended to the netdev stats only when ethtool -S
 * is queried on the base PF netdev.
 */
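/* For illustration only (hypothetical interface name and counter values),
 * a query on the PF netdev therefore reports both flavors:
 *
 *   $ ethtool -S eth0
 *        rx_unicast: 1024          <- VSI counter
 *        ...
 *        rx_unicast.nic: 4096      <- PF counter, note the ".nic" suffix
 */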
static const struct ice_stats ice_gstrings_pf_stats[] = {
	ICE_PF_STAT("rx_bytes.nic", stats.eth.rx_bytes),
	ICE_PF_STAT("tx_bytes.nic", stats.eth.tx_bytes),
	ICE_PF_STAT("rx_unicast.nic", stats.eth.rx_unicast),
	ICE_PF_STAT("tx_unicast.nic", stats.eth.tx_unicast),
	ICE_PF_STAT("rx_multicast.nic", stats.eth.rx_multicast),
	ICE_PF_STAT("tx_multicast.nic", stats.eth.tx_multicast),
	ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast),
	ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast),
	ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors),
	ICE_PF_STAT("tx_timeout.nic", tx_timeout_count),
	ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64),
	ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64),
	ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127),
	ICE_PF_STAT("tx_size_127.nic", stats.tx_size_127),
	ICE_PF_STAT("rx_size_255.nic", stats.rx_size_255),
	ICE_PF_STAT("tx_size_255.nic", stats.tx_size_255),
	ICE_PF_STAT("rx_size_511.nic", stats.rx_size_511),
	ICE_PF_STAT("tx_size_511.nic", stats.tx_size_511),
	ICE_PF_STAT("rx_size_1023.nic", stats.rx_size_1023),
	ICE_PF_STAT("tx_size_1023.nic", stats.tx_size_1023),
	ICE_PF_STAT("rx_size_1522.nic", stats.rx_size_1522),
	ICE_PF_STAT("tx_size_1522.nic", stats.tx_size_1522),
	ICE_PF_STAT("rx_size_big.nic", stats.rx_size_big),
	ICE_PF_STAT("tx_size_big.nic", stats.tx_size_big),
	ICE_PF_STAT("link_xon_rx.nic", stats.link_xon_rx),
	ICE_PF_STAT("link_xon_tx.nic", stats.link_xon_tx),
	ICE_PF_STAT("link_xoff_rx.nic", stats.link_xoff_rx),
	ICE_PF_STAT("link_xoff_tx.nic", stats.link_xoff_tx),
	ICE_PF_STAT("tx_dropped_link_down.nic", stats.tx_dropped_link_down),
	ICE_PF_STAT("rx_undersize.nic", stats.rx_undersize),
	ICE_PF_STAT("rx_fragments.nic", stats.rx_fragments),
	ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
	ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
	ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
	ICE_PF_STAT("rx_eipe_error.nic", hw_rx_eipe_error),
	ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
	ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
	ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
	ICE_PF_STAT("mac_local_faults.nic", stats.mac_local_faults),
	ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
	ICE_PF_STAT("fdir_sb_match.nic", stats.fd_sb_match),
	ICE_PF_STAT("fdir_sb_status.nic", stats.fd_sb_status),
	ICE_PF_STAT("tx_hwtstamp_skipped", ptp.tx_hwtstamp_skipped),
	ICE_PF_STAT("tx_hwtstamp_timeouts", ptp.tx_hwtstamp_timeouts),
	ICE_PF_STAT("tx_hwtstamp_flushed", ptp.tx_hwtstamp_flushed),
	ICE_PF_STAT("tx_hwtstamp_discarded", ptp.tx_hwtstamp_discarded),
	ICE_PF_STAT("late_cached_phc_updates", ptp.late_cached_phc_updates),
};

static const u32 ice_regs_dump_list[] = {
	PFGEN_STATE,
	PRTGEN_STATUS,
	QRX_CTRL(0),
	QINT_TQCTL(0),
	QINT_RQCTL(0),
	PFINT_OICR_ENA,
	QRX_ITR(0),
#define GLDCB_TLPM_PCI_DM	0x000A0180
	GLDCB_TLPM_PCI_DM,
#define GLDCB_TLPM_TC2PFC	0x000A0194
	GLDCB_TLPM_TC2PFC,
#define TCDCB_TLPM_WAIT_DM(_i)	(0x000A0080 + ((_i) * 4))
	TCDCB_TLPM_WAIT_DM(0),
	TCDCB_TLPM_WAIT_DM(1),
	TCDCB_TLPM_WAIT_DM(2),
	TCDCB_TLPM_WAIT_DM(3),
	TCDCB_TLPM_WAIT_DM(4),
	TCDCB_TLPM_WAIT_DM(5),
	TCDCB_TLPM_WAIT_DM(6),
	TCDCB_TLPM_WAIT_DM(7),
	TCDCB_TLPM_WAIT_DM(8),
	TCDCB_TLPM_WAIT_DM(9),
	TCDCB_TLPM_WAIT_DM(10),
	TCDCB_TLPM_WAIT_DM(11),
	TCDCB_TLPM_WAIT_DM(12),
	TCDCB_TLPM_WAIT_DM(13),
	TCDCB_TLPM_WAIT_DM(14),
	TCDCB_TLPM_WAIT_DM(15),
	TCDCB_TLPM_WAIT_DM(16),
	TCDCB_TLPM_WAIT_DM(17),
	TCDCB_TLPM_WAIT_DM(18),
	TCDCB_TLPM_WAIT_DM(19),
	TCDCB_TLPM_WAIT_DM(20),
	TCDCB_TLPM_WAIT_DM(21),
	TCDCB_TLPM_WAIT_DM(22),
	TCDCB_TLPM_WAIT_DM(23),
	TCDCB_TLPM_WAIT_DM(24),
	TCDCB_TLPM_WAIT_DM(25),
	TCDCB_TLPM_WAIT_DM(26),
	TCDCB_TLPM_WAIT_DM(27),
	TCDCB_TLPM_WAIT_DM(28),
	TCDCB_TLPM_WAIT_DM(29),
	TCDCB_TLPM_WAIT_DM(30),
	TCDCB_TLPM_WAIT_DM(31),
#define GLPCI_WATMK_CLNT_PIPEMON	0x000BFD90
	GLPCI_WATMK_CLNT_PIPEMON,
#define GLPCI_CUR_CLNT_COMMON	0x000BFD84
	GLPCI_CUR_CLNT_COMMON,
#define GLPCI_CUR_CLNT_PIPEMON	0x000BFD88
	GLPCI_CUR_CLNT_PIPEMON,
#define GLPCI_PCIERR	0x0009DEB0
	GLPCI_PCIERR,
#define GLPSM_DEBUG_CTL_STATUS	0x000B0600
	GLPSM_DEBUG_CTL_STATUS,
#define GLPSM0_DEBUG_FIFO_OVERFLOW_DETECT	0x000B0680
	GLPSM0_DEBUG_FIFO_OVERFLOW_DETECT,
#define GLPSM0_DEBUG_FIFO_UNDERFLOW_DETECT	0x000B0684
	GLPSM0_DEBUG_FIFO_UNDERFLOW_DETECT,
#define GLPSM0_DEBUG_DT_OUT_OF_WINDOW	0x000B0688
	GLPSM0_DEBUG_DT_OUT_OF_WINDOW,
#define GLPSM0_DEBUG_INTF_HW_ERROR_DETECT	0x000B069C
	GLPSM0_DEBUG_INTF_HW_ERROR_DETECT,
#define GLPSM0_DEBUG_MISC_HW_ERROR_DETECT	0x000B06A0
	GLPSM0_DEBUG_MISC_HW_ERROR_DETECT,
#define GLPSM1_DEBUG_FIFO_OVERFLOW_DETECT	0x000B0E80
	GLPSM1_DEBUG_FIFO_OVERFLOW_DETECT,
#define GLPSM1_DEBUG_FIFO_UNDERFLOW_DETECT	0x000B0E84
	GLPSM1_DEBUG_FIFO_UNDERFLOW_DETECT,
#define GLPSM1_DEBUG_SRL_FIFO_OVERFLOW_DETECT	0x000B0E88
	GLPSM1_DEBUG_SRL_FIFO_OVERFLOW_DETECT,
#define GLPSM1_DEBUG_SRL_FIFO_UNDERFLOW_DETECT	0x000B0E8C
	GLPSM1_DEBUG_SRL_FIFO_UNDERFLOW_DETECT,
#define GLPSM1_DEBUG_MISC_HW_ERROR_DETECT	0x000B0E90
	GLPSM1_DEBUG_MISC_HW_ERROR_DETECT,
#define GLPSM2_DEBUG_FIFO_OVERFLOW_DETECT	0x000B1680
	GLPSM2_DEBUG_FIFO_OVERFLOW_DETECT,
#define GLPSM2_DEBUG_FIFO_UNDERFLOW_DETECT	0x000B1684
	GLPSM2_DEBUG_FIFO_UNDERFLOW_DETECT,
#define GLPSM2_DEBUG_MISC_HW_ERROR_DETECT	0x000B1688
	GLPSM2_DEBUG_MISC_HW_ERROR_DETECT,
#define GLTDPU_TCLAN_COMP_BOB(_i)	(0x00049ADC + ((_i) * 4))
	GLTDPU_TCLAN_COMP_BOB(1),
	GLTDPU_TCLAN_COMP_BOB(2),
	GLTDPU_TCLAN_COMP_BOB(3),
	GLTDPU_TCLAN_COMP_BOB(4),
	GLTDPU_TCLAN_COMP_BOB(5),
	GLTDPU_TCLAN_COMP_BOB(6),
	GLTDPU_TCLAN_COMP_BOB(7),
	GLTDPU_TCLAN_COMP_BOB(8),
#define GLTDPU_TCB_CMD_BOB(_i)	(0x0004975C + ((_i) * 4))
	GLTDPU_TCB_CMD_BOB(1),
	GLTDPU_TCB_CMD_BOB(2),
	GLTDPU_TCB_CMD_BOB(3),
	GLTDPU_TCB_CMD_BOB(4),
	GLTDPU_TCB_CMD_BOB(5),
	GLTDPU_TCB_CMD_BOB(6),
	GLTDPU_TCB_CMD_BOB(7),
	GLTDPU_TCB_CMD_BOB(8),
#define GLTDPU_PSM_UPDATE_BOB(_i)	(0x00049B5C + ((_i) * 4))
	GLTDPU_PSM_UPDATE_BOB(1),
	GLTDPU_PSM_UPDATE_BOB(2),
	GLTDPU_PSM_UPDATE_BOB(3),
	GLTDPU_PSM_UPDATE_BOB(4),
	GLTDPU_PSM_UPDATE_BOB(5),
	GLTDPU_PSM_UPDATE_BOB(6),
	GLTDPU_PSM_UPDATE_BOB(7),
	GLTDPU_PSM_UPDATE_BOB(8),
#define GLTCB_CMD_IN_BOB(_i)	(0x000AE288 + ((_i) * 4))
	GLTCB_CMD_IN_BOB(1),
	GLTCB_CMD_IN_BOB(2),
	GLTCB_CMD_IN_BOB(3),
	GLTCB_CMD_IN_BOB(4),
	GLTCB_CMD_IN_BOB(5),
	GLTCB_CMD_IN_BOB(6),
	GLTCB_CMD_IN_BOB(7),
	GLTCB_CMD_IN_BOB(8),
#define GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(_i)	(0x000FC148 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(8),
#define GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(_i)	(0x000FC248 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(8),
#define GLLAN_TCLAN_CACHE_CTL_BOB_CTL(_i)	(0x000FC1C8 + ((_i) * 4))
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(1),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(2),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(3),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(4),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(5),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(6),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(7),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(8),
#define GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(_i)	(0x000FC188 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(8),
#define GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(_i)	(0x000FC288 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(8),
#define PRTDCB_TCUPM_REG_CM(_i)	(0x000BC360 + ((_i) * 4))
	PRTDCB_TCUPM_REG_CM(0),
	PRTDCB_TCUPM_REG_CM(1),
	PRTDCB_TCUPM_REG_CM(2),
	PRTDCB_TCUPM_REG_CM(3),
#define PRTDCB_TCUPM_REG_DM(_i)	(0x000BC3A0 + ((_i) * 4))
	PRTDCB_TCUPM_REG_DM(0),
	PRTDCB_TCUPM_REG_DM(1),
	PRTDCB_TCUPM_REG_DM(2),
	PRTDCB_TCUPM_REG_DM(3),
#define PRTDCB_TLPM_REG_DM(_i)	(0x000A0000 + ((_i) * 4))
	PRTDCB_TLPM_REG_DM(0),
	PRTDCB_TLPM_REG_DM(1),
	PRTDCB_TLPM_REG_DM(2),
	PRTDCB_TLPM_REG_DM(3),
};

struct ice_priv_flag {
	char name[ETH_GSTRING_LEN];
	u32 bitno;	/* bit position in pf->flags */
};

#define ICE_PRIV_FLAG(_name, _bitno) { \
	.name = _name, \
	.bitno = _bitno, \
}

static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
	ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
	ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
	ICE_PRIV_FLAG("vf-true-promisc-support",
		      ICE_FLAG_VF_TRUE_PROMISC_ENA),
	ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
	ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING),
};

#define ICE_PRIV_FLAG_ARRAY_SIZE	ARRAY_SIZE(ice_gstrings_priv_flags)

static const u32 ice_adv_lnk_speed_100[] __initconst = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

static const u32 ice_adv_lnk_speed_1000[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

static const u32 ice_adv_lnk_speed_2500[] __initconst = {
	ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};
static const u32 ice_adv_lnk_speed_5000[] __initconst = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

static const u32 ice_adv_lnk_speed_10000[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
};

static const u32 ice_adv_lnk_speed_25000[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
};

static const u32 ice_adv_lnk_speed_40000[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
};

static const u32 ice_adv_lnk_speed_50000[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 ice_adv_lnk_speed_100000[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
};

static const u32 ice_adv_lnk_speed_200000[] __initconst = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

static struct ethtool_forced_speed_map ice_adv_lnk_speed_maps[] __ro_after_init = {
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 1000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 2500),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 5000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 10000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 25000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 40000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 50000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 200000),
};

void __init ice_adv_lnk_speed_maps_init(void)
{
	ethtool_forced_speed_maps_init(ice_adv_lnk_speed_maps,
				       ARRAY_SIZE(ice_adv_lnk_speed_maps));
}
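/* A note on the tables above: the __initconst bit lists are consumed exactly
 * once, when ethtool_forced_speed_maps_init() expands each one into the
 * link-mode bitmap of the corresponding ice_adv_lnk_speed_maps entry at
 * module init; after that the maps are only read (hence __ro_after_init).
 * When a fixed speed is forced through ethtool, the matching map supplies
 * the set of link modes to advertise.
 */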
static void
__ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo,
		  struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_orom_info *orom;
	struct ice_nvm_info *nvm;

	nvm = &hw->flash.nvm;
	orom = &hw->flash.orom;

	strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));

	/* Display NVM version (from which the firmware version can be
	 * determined) which contains more pertinent information.
	 */
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
		 nvm->eetrack, orom->major, orom->build, orom->patch);

	strscpy(drvinfo->bus_info, pci_name(pf->pdev),
		sizeof(drvinfo->bus_info));
}

static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	__ice_get_drvinfo(netdev, drvinfo, np->vsi);
	drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}

static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
	return (sizeof(ice_regs_dump_list) +
		sizeof(struct ice_regdump_to_ethtool));
}

/**
 * ice_ethtool_get_maxspeed - Get the max speed for given lport
 * @hw: pointer to the HW struct
 * @lport: logical port for which max speed is requested
 * @max_speed: return max speed for input lport
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_ethtool_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed)
{
	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = {};
	bool active_valid = false, pending_valid = true;
	u8 option_count = ICE_AQC_PORT_OPT_MAX;
	u8 active_idx = 0, pending_idx = 0;
	int status;

	status = ice_aq_get_port_options(hw, options, &option_count, lport,
					 true, &active_idx, &active_valid,
					 &pending_idx, &pending_valid);
	if (status)
		return -EIO;
	if (!active_valid)
		return -EINVAL;

	*max_speed = options[active_idx].max_lane_speed & ICE_AQC_PORT_OPT_MAX_LANE_M;
	return 0;
}
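/* Note: max_lane_speed above is a per-lane speed code extracted with
 * ICE_AQC_PORT_OPT_MAX_LANE_M; ice_get_port_topology() below compares it
 * against ICE_AQC_PORT_OPT_MAX_LANE_{40G,50G,100G} to infer how many serdes
 * lanes back a QSFP port.
 */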
/**
 * ice_is_serdes_muxed - returns whether serdes is muxed in hardware
 * @hw: pointer to the HW struct
 *
 * Return: true when serdes is muxed, false when serdes is not muxed.
 */
static bool ice_is_serdes_muxed(struct ice_hw *hw)
{
	u32 reg_value = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);

	return FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, reg_value);
}

static int ice_map_port_topology_for_sfp(struct ice_port_topology *port_topology,
					 u8 lport, bool is_muxed)
{
	switch (lport) {
	case 0:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 0;
		port_topology->primary_serdes_lane = 0;
		break;
	case 1:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 0;
		if (is_muxed)
			port_topology->primary_serdes_lane = 2;
		else
			port_topology->primary_serdes_lane = 4;
		break;
	case 2:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 1;
		port_topology->primary_serdes_lane = 1;
		break;
	case 3:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 1;
		if (is_muxed)
			port_topology->primary_serdes_lane = 3;
		else
			port_topology->primary_serdes_lane = 5;
		break;
	case 4:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 2;
		break;
	case 5:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 6;
		break;
	case 6:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 3;
		break;
	case 7:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 7;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ice_map_port_topology_for_qsfp(struct ice_port_topology *port_topology,
					  u8 lport, bool is_muxed)
{
	switch (lport) {
	case 0:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 0;
		port_topology->primary_serdes_lane = 0;
		break;
	case 1:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 0;
		if (is_muxed)
			port_topology->primary_serdes_lane = 2;
		else
			port_topology->primary_serdes_lane = 4;
		break;
	case 2:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 1;
		port_topology->primary_serdes_lane = 1;
		break;
	case 3:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 1;
		if (is_muxed)
			port_topology->primary_serdes_lane = 3;
		else
			port_topology->primary_serdes_lane = 5;
		break;
	case 4:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 2;
		break;
	case 5:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 6;
		break;
	case 6:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 3;
		break;
	case 7:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 7;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
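/* Summary of the lport mapping implemented by the two functions above
 * (the SFP and QSFP mappings are currently identical):
 *
 *   lport | quad | pcs_port | primary lane (muxed / not muxed)
 *   ------+------+----------+---------------------------------
 *     0   |  0   |    0     |  0 / 0
 *     1   |  1   |    0     |  2 / 4
 *     2   |  0   |    1     |  1 / 1
 *     3   |  1   |    1     |  3 / 5
 *     4   |  0   |    2     |  2 / 2
 *     5   |  1   |    2     |  6 / 6
 *     6   |  0   |    3     |  3 / 3
 *     7   |  1   |    3     |  7 / 7
 */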
/**
 * ice_get_port_topology - returns physical topology (PCS quad, PCS port,
 *                         serdes number)
 * @hw: pointer to the HW struct
 * @lport: logical port for which physical info requested
 * @port_topology: buffer to hold port topology
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
				 struct ice_port_topology *port_topology)
{
	struct ice_aqc_get_link_topo cmd = {};
	u16 node_handle = 0;
	u8 cage_type = 0;
	bool is_muxed;
	int err;
	u8 ctx;

	ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
	ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
	cmd.addr.topo_params.node_type_ctx = ctx;

	err = ice_aq_get_netlist_node(hw, &cmd, &cage_type, &node_handle);
	if (err)
		return -EINVAL;

	is_muxed = ice_is_serdes_muxed(hw);

	if (cage_type == 0x11 ||	/* SFP+ */
	    cage_type == 0x12) {	/* SFP28 */
		port_topology->serdes_lane_count = 1;
		err = ice_map_port_topology_for_sfp(port_topology, lport, is_muxed);
		if (err)
			return err;
	} else if (cage_type == 0x13 ||	/* QSFP */
		   cage_type == 0x14) {	/* QSFP28 */
		u8 max_speed = 0;

		err = ice_ethtool_get_maxspeed(hw, lport, &max_speed);
		if (err)
			return err;

		if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
			port_topology->serdes_lane_count = 4;
		else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G ||
			 max_speed == ICE_AQC_PORT_OPT_MAX_LANE_40G)
			port_topology->serdes_lane_count = 2;
		else
			port_topology->serdes_lane_count = 1;

		err = ice_map_port_topology_for_qsfp(port_topology, lport, is_muxed);
		if (err)
			return err;
	} else {
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_get_tx_rx_equa - read serdes Tx/Rx equalizer parameters
 * @hw: pointer to the HW struct
 * @serdes_num: represents the serdes number
 * @ptr: structure to hold all equalization parameters for the given serdes
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
			      struct ice_serdes_equalization_to_ethtool *ptr)
{
	static const int tx = ICE_AQC_OP_CODE_TX_EQU;
	static const int rx = ICE_AQC_OP_CODE_RX_EQU;
	struct {
		int data_in;
		int opcode;
		int *out;
	} aq_params[] = {
		{ ICE_AQC_TX_EQU_PRE1, tx, &ptr->tx_equ_pre1 },
		{ ICE_AQC_TX_EQU_PRE3, tx, &ptr->tx_equ_pre3 },
		{ ICE_AQC_TX_EQU_ATTEN, tx, &ptr->tx_equ_atten },
		{ ICE_AQC_TX_EQU_POST1, tx, &ptr->tx_equ_post1 },
		{ ICE_AQC_TX_EQU_PRE2, tx, &ptr->tx_equ_pre2 },
		{ ICE_AQC_RX_EQU_PRE2, rx, &ptr->rx_equ_pre2 },
		{ ICE_AQC_RX_EQU_PRE1, rx, &ptr->rx_equ_pre1 },
		{ ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 },
		{ ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf },
		{ ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf },
		{ ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf },
		{ ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf },
		{ ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc },
		{ ICE_AQC_RX_EQU_CTLE_BW, rx, &ptr->rx_equ_ctle_bw },
		{ ICE_AQC_RX_EQU_DFE_GAIN, rx, &ptr->rx_equ_dfe_gain },
		{ ICE_AQC_RX_EQU_DFE_GAIN2, rx, &ptr->rx_equ_dfe_gain_2 },
		{ ICE_AQC_RX_EQU_DFE_2, rx, &ptr->rx_equ_dfe_2 },
		{ ICE_AQC_RX_EQU_DFE_3, rx, &ptr->rx_equ_dfe_3 },
		{ ICE_AQC_RX_EQU_DFE_4, rx, &ptr->rx_equ_dfe_4 },
		{ ICE_AQC_RX_EQU_DFE_5, rx, &ptr->rx_equ_dfe_5 },
		{ ICE_AQC_RX_EQU_DFE_6, rx, &ptr->rx_equ_dfe_6 },
		{ ICE_AQC_RX_EQU_DFE_7, rx, &ptr->rx_equ_dfe_7 },
		{ ICE_AQC_RX_EQU_DFE_8, rx, &ptr->rx_equ_dfe_8 },
		{ ICE_AQC_RX_EQU_DFE_9, rx, &ptr->rx_equ_dfe_9 },
		{ ICE_AQC_RX_EQU_DFE_10, rx, &ptr->rx_equ_dfe_10 },
		{ ICE_AQC_RX_EQU_DFE_11, rx, &ptr->rx_equ_dfe_11 },
		{ ICE_AQC_RX_EQU_DFE_12, rx, &ptr->rx_equ_dfe_12 },
	};
	int err;

	for (int i = 0; i < ARRAY_SIZE(aq_params); i++) {
		err = ice_aq_get_phy_equalization(hw, aq_params[i].data_in,
						  aq_params[i].opcode,
						  serdes_num, aq_params[i].out);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_get_extended_regs - dump serdes equalization parameters per serdes lane
 * @netdev: pointer to net device structure
 * @p: output buffer to fill requested register dump
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_get_extended_regs(struct net_device *netdev, void *p)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_regdump_to_ethtool *ice_prv_regs_buf;
	struct ice_port_topology port_topology = {};
	struct ice_port_info *pi;
	struct ice_pf *pf;
	struct ice_hw *hw;
	unsigned int i;
	int err;

	pf = np->vsi->back;
	hw = &pf->hw;
	pi = np->vsi->port_info;

	/* Serdes parameters are not supported if not the PF VSI */
	if (np->vsi->type != ICE_VSI_PF || !pi)
		return -EINVAL;

	err = ice_get_port_topology(hw, pi->lport, &port_topology);
	if (err)
		return -EINVAL;
	if (port_topology.serdes_lane_count > 4)
		return -EINVAL;

	ice_prv_regs_buf = p;

	/* Get serdes equalization parameter for available serdes */
	for (i = 0; i < port_topology.serdes_lane_count; i++) {
		u8 serdes_num = 0;

		serdes_num = port_topology.primary_serdes_lane + i;
		err = ice_get_tx_rx_equa(hw, serdes_num,
					 &ice_prv_regs_buf->equalization[i]);
		if (err)
			return -EINVAL;
	}

	return 0;
}

static void
ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_hw *hw = &pf->hw;
	u32 *regs_buf = (u32 *)p;
	unsigned int i;

	regs->version = 2;

	for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
		regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);

	ice_get_extended_regs(netdev, (void *)&regs_buf[i]);
}

static u32 ice_get_msglevel(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (pf->hw.debug_mask)
		netdev_info(netdev, "hw debug_mask: 0x%llX\n",
			    pf->hw.debug_mask);
#endif /* !CONFIG_DYNAMIC_DEBUG */

	return pf->msg_enable;
}

static void ice_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (ICE_DBG_USER & data)
		pf->hw.debug_mask = data;
	else
		pf->msg_enable = data;
#else
	pf->msg_enable = data;
#endif /* !CONFIG_DYNAMIC_DEBUG */
}

static void ice_get_link_ext_stats(struct net_device *netdev,
				   struct ethtool_link_ext_stats *stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	stats->link_down_events = pf->link_down_events;
}

static int ice_get_eeprom_len(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	return (int)pf->hw.flash.flash_size;
}

static int
ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
	       u8 *bytes)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int ret;
	u8 *buf;

	dev = ice_pf_to_dev(pf);
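	/* The magic cookie below packs the PCI device ID into the upper
	 * 16 bits and the vendor ID into the lower 16 bits; userspace
	 * tools can use it to match an EEPROM dump to its adapter.
	 */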
	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
	netdev_dbg(netdev, "GEEPROM cmd 0x%08x, offset 0x%08x, len 0x%08x\n",
		   eeprom->cmd, eeprom->offset, eeprom->len);

	buf = kzalloc(eeprom->len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret) {
		dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %s\n",
			ret, libie_aq_str(hw->adminq.sq_last_status));
		goto out;
	}

	ret = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
				false);
	if (ret) {
		dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %s\n",
			ret, libie_aq_str(hw->adminq.sq_last_status));
		goto release;
	}

	memcpy(bytes, buf, eeprom->len);
release:
	ice_release_nvm(hw);
out:
	kfree(buf);
	return ret;
}

/**
 * ice_active_vfs - check if there are any active VFs
 * @pf: board private structure
 *
 * Returns true if an active VF is found, otherwise returns false
 */
static bool ice_active_vfs(struct ice_pf *pf)
{
	bool active = false;
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
			active = true;
			break;
		}
	}
	rcu_read_unlock();

	return active;
}

/**
 * ice_link_test - perform a link test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_link_test(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	bool link_up = false;
	int status;

	netdev_info(netdev, "link test\n");
	status = ice_get_link_status(np->vsi->port_info, &link_up);
	if (status) {
		netdev_err(netdev, "link query error, status = %d\n",
			   status);
		return 1;
	}

	if (!link_up)
		return 2;

	return 0;
}
/**
 * ice_eeprom_test - perform an EEPROM test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_eeprom_test(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	netdev_info(netdev, "EEPROM test\n");
	return !!(ice_nvm_validate_checksum(&pf->hw));
}

/**
 * ice_reg_pattern_test
 * @hw: pointer to the HW struct
 * @reg: reg to be tested
 * @mask: bits to be touched
 */
static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
{
	struct ice_pf *pf = (struct ice_pf *)hw->back;
	struct device *dev = ice_pf_to_dev(pf);
	static const u32 patterns[] = {
		0x5A5A5A5A, 0xA5A5A5A5,
		0x00000000, 0xFFFFFFFF
	};
	u32 val, orig_val;
	unsigned int i;

	orig_val = rd32(hw, reg);
	for (i = 0; i < ARRAY_SIZE(patterns); ++i) {
		u32 pattern = patterns[i] & mask;

		wr32(hw, reg, pattern);
		val = rd32(hw, reg);
		if (val == pattern)
			continue;
		dev_err(dev, "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
			__func__, reg, pattern, val);
		return 1;
	}

	wr32(hw, reg, orig_val);
	val = rd32(hw, reg);
	if (val != orig_val) {
		dev_err(dev, "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n",
			__func__, reg, orig_val, val);
		return 1;
	}

	return 0;
}

/**
 * ice_reg_test - perform a register test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_reg_test(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_hw *hw = np->vsi->port_info->hw;
	u32 int_elements = hw->func_caps.common_cap.num_msix_vectors ?
		hw->func_caps.common_cap.num_msix_vectors - 1 : 1;
	struct ice_diag_reg_test_info {
		u32 address;
		u32 mask;
		u32 elem_num;
		u32 elem_size;
	} ice_reg_list[] = {
		{GLINT_ITR(0, 0), 0x00000fff, int_elements,
			GLINT_ITR(0, 1) - GLINT_ITR(0, 0)},
		{GLINT_ITR(1, 0), 0x00000fff, int_elements,
			GLINT_ITR(1, 1) - GLINT_ITR(1, 0)},
		{GLINT_ITR(2, 0), 0x00000fff, int_elements,
			GLINT_ITR(2, 1) - GLINT_ITR(2, 0)},
		{GLINT_CTL, 0xffff0001, 1, 0}
	};
	unsigned int i;

	netdev_dbg(netdev, "Register test\n");
	for (i = 0; i < ARRAY_SIZE(ice_reg_list); ++i) {
		u32 j;

		for (j = 0; j < ice_reg_list[i].elem_num; ++j) {
			u32 mask = ice_reg_list[i].mask;
			u32 reg = ice_reg_list[i].address +
				  (j * ice_reg_list[i].elem_size);

			/* bail on failure (non-zero return) */
			if (ice_reg_pattern_test(hw, reg, mask))
				return 1;
		}
	}

	return 0;
}
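/* In short: ice_reg_test() walks each register block above, and
 * ice_reg_pattern_test() writes four patterns (masked to the writable
 * bits), reads each back, and finally restores the original value; any
 * mismatch fails the whole register test.
 */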
/**
 * ice_lbtest_prepare_rings - configure Tx/Rx test rings
 * @vsi: pointer to the VSI structure
 *
 * Function configures rings of a VSI for loopback test without
 * enabling interrupts or informing the kernel about new queues.
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
{
	int status;

	status = ice_vsi_setup_tx_rings(vsi);
	if (status)
		goto err_setup_tx_ring;

	status = ice_vsi_setup_rx_rings(vsi);
	if (status)
		goto err_setup_rx_ring;

	status = ice_vsi_cfg_lan(vsi);
	if (status)
		goto err_setup_rx_ring;

	status = ice_vsi_start_all_rx_rings(vsi);
	if (status)
		goto err_start_rx_ring;

	return 0;

err_start_rx_ring:
	ice_vsi_free_rx_rings(vsi);
err_setup_rx_ring:
	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
err_setup_tx_ring:
	ice_vsi_free_tx_rings(vsi);

	return status;
}

/**
 * ice_lbtest_disable_rings - disable Tx/Rx test rings after loopback test
 * @vsi: pointer to the VSI structure
 *
 * Function stops and frees VSI rings after a loopback test.
 * Returns 0 on success, negative on failure.
 */
static int ice_lbtest_disable_rings(struct ice_vsi *vsi)
{
	int status;

	status = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
	if (status)
		netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
			   vsi->vsi_num, status);

	status = ice_vsi_stop_all_rx_rings(vsi);
	if (status)
		netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
			   vsi->vsi_num, status);

	ice_vsi_free_tx_rings(vsi);
	ice_vsi_free_rx_rings(vsi);

	return status;
}

/**
 * ice_lbtest_create_frame - create test packet
 * @pf: pointer to the PF structure
 * @ret_data: allocated frame buffer
 * @size: size of the packet data
 *
 * Function allocates a frame with a test pattern on specific offsets.
 * Returns 0 on success, non-zero on failure.
 */
static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
{
	u8 *data;

	if (!pf)
		return -EINVAL;

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Since the ethernet test frame should always be at least
	 * 64 bytes long, fill some octets in the payload with test data.
	 */
	memset(data, 0xFF, size);
	data[32] = 0xDE;
	data[42] = 0xAD;
	data[44] = 0xBE;
	data[46] = 0xEF;

	*ret_data = data;

	return 0;
}

/**
 * ice_lbtest_check_frame - verify received loopback frame
 * @frame: pointer to the raw packet data
 *
 * Function verifies received test frame with a pattern.
 * Returns true if frame matches the pattern, false otherwise.
 */
static bool ice_lbtest_check_frame(u8 *frame)
{
	/* Validate bytes of a frame under offsets chosen earlier */
	if (frame[32] == 0xDE &&
	    frame[42] == 0xAD &&
	    frame[44] == 0xBE &&
	    frame[46] == 0xEF &&
	    frame[48] == 0xFF)
		return true;

	return false;
}
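/* Layout of the test frame built by ice_lbtest_create_frame() (every other
 * octet is 0xFF):
 *
 *   offset:   32    42    44    46
 *   value:   0xDE  0xAD  0xBE  0xEF
 *
 * ice_lbtest_check_frame() additionally samples offset 48 to confirm the
 * 0xFF fill survived the round trip.
 */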
/**
 * ice_diag_send - send test frames to the test ring
 * @tx_ring: pointer to the transmit ring
 * @data: pointer to the raw packet data
 * @size: size of the packet to send
 *
 * Function sends loopback packets on a test Tx ring.
 */
static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size)
{
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;
	u64 td_cmd;

	tx_desc = ICE_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_buf = &tx_ring->tx_buf[tx_ring->next_to_use];

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		return -EINVAL;

	tx_desc->buf_addr = cpu_to_le64(dma);

	/* These flags are required for a descriptor to be pushed out */
	td_cmd = (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
	tx_desc->cmd_type_offset_bsz =
		cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			    (td_cmd << ICE_TXD_QW1_CMD_S) |
			    ((u64)0 << ICE_TXD_QW1_OFFSET_S) |
			    ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			    ((u64)0 << ICE_TXD_QW1_L2TAG1_S));

	tx_buf->next_to_watch = tx_desc;

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	tx_ring->next_to_use++;
	if (tx_ring->next_to_use >= tx_ring->count)
		tx_ring->next_to_use = 0;

	writel_relaxed(tx_ring->next_to_use, tx_ring->tail);

	/* Wait until the packets get transmitted to the receive queue. */
	usleep_range(1000, 2000);
	dma_unmap_single(tx_ring->dev, dma, size, DMA_TO_DEVICE);

	return 0;
}

#define ICE_LB_FRAME_SIZE 64
/**
 * ice_lbtest_receive_frames - receive and verify test frames
 * @rx_ring: pointer to the receive ring
 *
 * Function receives loopback packets and verifies their correctness.
 * Returns number of received valid frames.
 */
static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
{
	struct libeth_fqe *rx_buf;
	int valid_frames, i;
	struct page *page;
	u8 *received_buf;

	valid_frames = 0;

	for (i = 0; i < rx_ring->count; i++) {
		union ice_32b_rx_flex_desc *rx_desc;

		rx_desc = ICE_RX_DESC(rx_ring, i);

		if (!(rx_desc->wb.status_error0 &
		    (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
		     cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
			continue;

		rx_buf = &rx_ring->rx_fqes[i];
		page = __netmem_to_page(rx_buf->netmem);
		received_buf = page_address(page) + rx_buf->offset +
			       page->pp->p.offset;

		if (ice_lbtest_check_frame(received_buf))
			valid_frames++;
	}

	return valid_frames;
}
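/* In ice_diag_send() above, cmd_type_offset_bsz packs Tx descriptor
 * qword 1: the DTYPE in the low bits, then the command flags (EOP | RS),
 * data offset, buffer size, and L2TAG1, each shifted to its
 * ICE_TXD_QW1_*_S position.
 */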
/**
 * ice_loopback_test - perform a loopback test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_loopback_test(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *test_vsi;
	u8 *tx_frame __free(kfree) = NULL;
	u8 broadcast[ETH_ALEN], ret = 0;
	int num_frames, valid_frames;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	int i;

	netdev_info(netdev, "loopback test\n");

	test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
	if (!test_vsi) {
		netdev_err(netdev, "Failed to create a VSI for the loopback test\n");
		return 1;
	}

	test_vsi->netdev = netdev;
	tx_ring = test_vsi->tx_rings[0];
	rx_ring = test_vsi->rx_rings[0];

	if (ice_lbtest_prepare_rings(test_vsi)) {
		ret = 2;
		goto lbtest_vsi_close;
	}

	if (ice_alloc_rx_bufs(rx_ring, rx_ring->count)) {
		ret = 3;
		goto lbtest_rings_dis;
	}

	/* Enable MAC loopback in firmware */
	if (ice_aq_set_mac_loopback(&pf->hw, true, NULL)) {
		ret = 4;
		goto lbtest_mac_dis;
	}

	/* Test VSI needs to receive broadcast packets */
	eth_broadcast_addr(broadcast);
	if (ice_fltr_add_mac(test_vsi, broadcast, ICE_FWD_TO_VSI)) {
		ret = 5;
		goto lbtest_mac_dis;
	}

	if (ice_lbtest_create_frame(pf, &tx_frame, ICE_LB_FRAME_SIZE)) {
		ret = 7;
		goto remove_mac_filters;
	}

	num_frames = min_t(int, tx_ring->count, 32);
	for (i = 0; i < num_frames; i++) {
		if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) {
			ret = 8;
			goto remove_mac_filters;
		}
	}

	valid_frames = ice_lbtest_receive_frames(rx_ring);
	if (!valid_frames)
		ret = 9;
	else if (valid_frames != num_frames)
		ret = 10;

remove_mac_filters:
	if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI))
		netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
lbtest_mac_dis:
	/* Disable MAC loopback after the test is completed. */
	if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
		netdev_err(netdev, "Could not disable MAC loopback\n");
lbtest_rings_dis:
	if (ice_lbtest_disable_rings(test_vsi))
		netdev_err(netdev, "Could not disable test rings\n");
lbtest_vsi_close:
	test_vsi->netdev = NULL;
	if (ice_vsi_release(test_vsi))
		netdev_err(netdev, "Failed to remove the test VSI\n");

	return ret;
}
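/* Loopback test failure codes, for quick triage: 1 VSI setup, 2 ring
 * config, 3 Rx buffer alloc, 4 enabling MAC loopback, 5 broadcast filter
 * add, 7 frame alloc, 8 send, 9 no valid frames received, 10 only some
 * frames received (6 is unused).
 */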
/**
 * ice_intr_test - perform an interrupt test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_intr_test(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u16 swic_old = pf->sw_int_count;

	netdev_info(netdev, "interrupt test\n");

	wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_irq.index),
	     GLINT_DYN_CTL_SW_ITR_INDX_M |
	     GLINT_DYN_CTL_INTENA_MSK_M |
	     GLINT_DYN_CTL_SWINT_TRIG_M);

	usleep_range(1000, 2000);
	return (swic_old == pf->sw_int_count);
}

/**
 * ice_self_test - handler function for performing a self-test by ethtool
 * @netdev: network interface device structure
 * @eth_test: ethtool_test structure
 * @data: required by ethtool.self_test
 *
 * This function is called after invoking 'ethtool -t devname' command where
 * devname is the name of the network device on which ethtool should operate.
 * It performs a set of self-tests to check if a device works properly.
 */
static void
ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
	      u64 *data)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	bool if_running = netif_running(netdev);
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		netdev_info(netdev, "offline testing starting\n");

		set_bit(ICE_TESTING, pf->state);

		if (ice_active_vfs(pf)) {
			dev_warn(dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
			data[ICE_ETH_TEST_REG] = 1;
			data[ICE_ETH_TEST_EEPROM] = 1;
			data[ICE_ETH_TEST_INTR] = 1;
			data[ICE_ETH_TEST_LOOP] = 1;
			data[ICE_ETH_TEST_LINK] = 1;
			eth_test->flags |= ETH_TEST_FL_FAILED;
			clear_bit(ICE_TESTING, pf->state);
			goto skip_ol_tests;
		}
		/* If the device is online then take it offline */
		if (if_running)
			/* indicate we're in test mode */
			ice_stop(netdev);

		data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
		data[ICE_ETH_TEST_EEPROM] = ice_eeprom_test(netdev);
		data[ICE_ETH_TEST_INTR] = ice_intr_test(netdev);
		data[ICE_ETH_TEST_LOOP] = ice_loopback_test(netdev);
		data[ICE_ETH_TEST_REG] = ice_reg_test(netdev);

		if (data[ICE_ETH_TEST_LINK] ||
		    data[ICE_ETH_TEST_EEPROM] ||
		    data[ICE_ETH_TEST_LOOP] ||
		    data[ICE_ETH_TEST_INTR] ||
		    data[ICE_ETH_TEST_REG])
			eth_test->flags |= ETH_TEST_FL_FAILED;

		clear_bit(ICE_TESTING, pf->state);

		if (if_running) {
			int status = ice_open(netdev);

			if (status) {
				dev_err(dev, "Could not open device %s, err %d\n",
					pf->int_name, status);
			}
		}
	} else {
		/* Online tests */
		netdev_info(netdev, "online testing starting\n");

		data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
		if (data[ICE_ETH_TEST_LINK])
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline only tests, not run in online; pass by default */
		data[ICE_ETH_TEST_REG] = 0;
		data[ICE_ETH_TEST_EEPROM] = 0;
		data[ICE_ETH_TEST_INTR] = 0;
		data[ICE_ETH_TEST_LOOP] = 0;
	}

skip_ol_tests:
	netdev_info(netdev, "testing finished\n");
}
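/* For illustration only (hypothetical interface name), the self-test is
 * driven from userspace with:
 *
 *   $ ethtool -t eth0 offline    # runs all five tests above
 *   $ ethtool -t eth0 online     # runs only the link test
 *
 * A non-zero value in any result slot marks the run as FAILED.
 */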
static void
__ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
		  struct ice_vsi *vsi)
{
	unsigned int i;
	u8 *p = data;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ICE_VSI_STATS_LEN; i++)
			ethtool_puts(&p, ice_gstrings_vsi_stats[i].stat_string);

		if (ice_is_port_repr_netdev(netdev))
			return;

		ice_for_each_alloc_txq(vsi, i) {
			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
		}

		ice_for_each_alloc_rxq(vsi, i) {
			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
		}

		if (vsi->type != ICE_VSI_PF)
			return;

		for (i = 0; i < ICE_PF_STATS_LEN; i++)
			ethtool_puts(&p, ice_gstrings_pf_stats[i].stat_string);

		for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
			ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i);
			ethtool_sprintf(&p, "tx_priority_%u_xoff.nic", i);
		}
		for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
			ethtool_sprintf(&p, "rx_priority_%u_xon.nic", i);
			ethtool_sprintf(&p, "rx_priority_%u_xoff.nic", i);
		}
		break;
	case ETH_SS_TEST:
		memcpy(data, ice_gstrings_test, ICE_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++)
			ethtool_puts(&p, ice_gstrings_priv_flags[i].name);
		break;
	default:
		break;
	}
}

static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	__ice_get_strings(netdev, stringset, data, np->vsi);
}

static int
ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	bool led_active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		led_active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		led_active = false;
		break;
	default:
		return -EINVAL;
	}

	if (ice_aq_set_port_id_led(np->vsi->port_info, !led_active, NULL))
		return -EIO;

	return 0;
}
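/* For illustration only (hypothetical interface name): the FEC handlers
 * below back the standard ethtool FEC commands, e.g.
 *
 *   $ ethtool --set-fec eth0 encoding rs    # maps to ICE_FEC_RS
 *   $ ethtool --show-fec eth0               # served by ice_get_fecparam()
 */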
/**
 * ice_set_fec_cfg - Set link FEC options
 * @netdev: network interface device structure
 * @req_fec: FEC mode to configure
 */
static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_aqc_set_phy_cfg_data config = { 0 };
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;

	pi = vsi->port_info;
	if (!pi)
		return -EOPNOTSUPP;

	/* Changing the FEC parameters is not supported if not the PF VSI */
	if (vsi->type != ICE_VSI_PF) {
		netdev_info(netdev, "Changing FEC parameters only supported for PF VSI\n");
		return -EOPNOTSUPP;
	}

	/* Proceed only if requesting different FEC mode */
	if (pi->phy.curr_user_fec_req == req_fec)
		return 0;

	/* Copy the current user PHY configuration. The current user PHY
	 * configuration is initialized during probe from PHY capabilities
	 * software mode, and updated on set PHY configuration.
	 */
	memcpy(&config, &pi->phy.curr_user_phy_cfg, sizeof(config));

	ice_cfg_phy_fec(pi, &config, req_fec);
	config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (ice_aq_set_phy_cfg(pi->hw, pi, &config, NULL))
		return -EAGAIN;

	/* Save requested FEC config */
	pi->phy.curr_user_fec_req = req_fec;

	return 0;
}

/**
 * ice_set_fecparam - Set FEC link options
 * @netdev: network interface device structure
 * @fecparam: Ethtool structure to retrieve FEC parameters
 */
static int
ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	enum ice_fec_mode fec;

	switch (fecparam->fec) {
	case ETHTOOL_FEC_AUTO:
		fec = ICE_FEC_AUTO;
		break;
	case ETHTOOL_FEC_RS:
		fec = ICE_FEC_RS;
		break;
	case ETHTOOL_FEC_BASER:
		fec = ICE_FEC_BASER;
		break;
	case ETHTOOL_FEC_OFF:
	case ETHTOOL_FEC_NONE:
		fec = ICE_FEC_NONE;
		break;
	default:
		dev_warn(ice_pf_to_dev(vsi->back), "Unsupported FEC mode: %d\n",
			 fecparam->fec);
		return -EINVAL;
	}

	return ice_set_fec_cfg(netdev, fec);
}

/**
 * ice_get_fecparam - Get link FEC options
 * @netdev: network interface device structure
 * @fecparam: Ethtool structure to retrieve FEC parameters
 */
static int
ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_aqc_get_phy_caps_data *caps;
	struct ice_link_status *link_info;
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;
	int err;

	pi = vsi->port_info;

	if (!pi)
		return -EOPNOTSUPP;
	link_info = &pi->phy.link_info;

	/* Set FEC mode based on negotiated link info */
	switch (link_info->fec_info) {
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fecparam->active_fec = ETHTOOL_FEC_BASER;
		break;
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fecparam->active_fec = ETHTOOL_FEC_RS;
		break;
	default:
		fecparam->active_fec = ETHTOOL_FEC_OFF;
		break;
	}

	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps)
		return -ENOMEM;

	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
				  caps, NULL);
	if (err)
		goto done;

	/* Set supported/configured FEC modes based on PHY capability */
	if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC)
		fecparam->fec |= ETHTOOL_FEC_AUTO;
	if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fecparam->fec |= ETHTOOL_FEC_BASER;
	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
		fecparam->fec |= ETHTOOL_FEC_RS;
	if (caps->link_fec_options == 0)
		fecparam->fec |= ETHTOOL_FEC_OFF;

done:
	kfree(caps);
	return err;
}
/**
 * ice_nway_reset - restart autonegotiation
 * @netdev: network interface device structure
 */
static int ice_nway_reset(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int err;

	/* If VSI state is up, then restart autoneg with link up */
	if (!test_bit(ICE_DOWN, vsi->back->state))
		err = ice_set_link(vsi, true);
	else
		err = ice_set_link(vsi, false);

	return err;
}

/**
 * ice_get_priv_flags - report device private flags
 * @netdev: network interface device structure
 *
 * The get string set count and the string set should be matched for each
 * flag returned. Add new strings for each flag to the ice_gstrings_priv_flags
 * array.
 *
 * Returns a u32 bitmap of flags.
 */
static u32 ice_get_priv_flags(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u32 i, ret_flags = 0;

	for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
		const struct ice_priv_flag *priv_flag;

		priv_flag = &ice_gstrings_priv_flags[i];

		if (test_bit(priv_flag->bitno, pf->flags))
			ret_flags |= BIT(i);
	}

	return ret_flags;
}

/**
 * ice_set_priv_flags - set private flags
 * @netdev: network interface device structure
 * @flags: bit flags to be set
 */
static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	DECLARE_BITMAP(change_flags, ICE_PF_FLAGS_NBITS);
	DECLARE_BITMAP(orig_flags, ICE_PF_FLAGS_NBITS);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int ret = 0;
	u32 i;

	if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE))
		return -EINVAL;

	dev = ice_pf_to_dev(pf);
	set_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);

	bitmap_copy(orig_flags, pf->flags, ICE_PF_FLAGS_NBITS);
	for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
		const struct ice_priv_flag *priv_flag;

		priv_flag = &ice_gstrings_priv_flags[i];

		if (flags & BIT(i))
			set_bit(priv_flag->bitno, pf->flags);
		else
			clear_bit(priv_flag->bitno, pf->flags);
	}

	bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);

	/* Do not allow change to link-down-on-close when Total Port Shutdown
	 * is enabled.
	 */
	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, change_flags) &&
	    test_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) {
		dev_err(dev, "Setting link-down-on-close not supported on this port\n");
		set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
		ret = -EINVAL;
		goto ethtool_exit;
	}

	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) {
		if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) {
			int status;

			/* Disable FW LLDP engine */
			status = ice_cfg_lldp_mib_change(&pf->hw, false);

			/* If unregistering for LLDP events fails, this is
			 * not an error state, as there shouldn't be any
			 * events to respond to.
			 */
			if (status)
				dev_info(dev, "Failed to unreg for LLDP events\n");

			/* The AQ call to stop the FW LLDP agent will generate
			 * an error if the agent is already stopped.
			 */
			status = ice_aq_stop_lldp(&pf->hw, true, true, NULL);
			if (status)
				dev_warn(dev, "Fail to stop LLDP agent\n");
			/* Use case for having the FW LLDP agent stopped
			 * will likely not need DCB, so failure to init is
			 * not a concern of ethtool
			 */
			status = ice_init_pf_dcb(pf, true);
			if (status)
				dev_warn(dev, "Fail to init DCB\n");

			pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED;
			pf->dcbx_cap |= DCB_CAP_DCBX_HOST;
		} else {
			bool dcbx_agent_status;
			int status;

			if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) {
				clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
				dev_err(dev, "QoS in L3 DSCP mode, FW Agent not allowed to start\n");
				ret = -EOPNOTSUPP;
				goto ethtool_exit;
			}

			/* Remove rule to direct LLDP packets to default VSI.
			 * The FW LLDP engine will now be consuming them.
			 */
			ice_cfg_sw_rx_lldp(vsi->back, false);

			/* AQ command to start FW LLDP agent will return an
			 * error if the agent is already started
			 */
			status = ice_aq_start_lldp(&pf->hw, true, NULL);
			if (status)
				dev_warn(dev, "Fail to start LLDP Agent\n");

			/* AQ command to start FW DCBX agent will fail if
			 * the agent is already started
			 */
			status = ice_aq_start_stop_dcbx(&pf->hw, true,
							&dcbx_agent_status,
							NULL);
			if (status)
				dev_dbg(dev, "Failed to start FW DCBX\n");

			dev_info(dev, "FW DCBX agent is %s\n",
				 dcbx_agent_status ? "ACTIVE" : "DISABLED");

			/* Failure to configure MIB change or init DCB is not
			 * relevant to ethtool. Print notification that
			 * registration/init failed but do not return error
			 * state to ethtool
			 */
			status = ice_init_pf_dcb(pf, true);
			if (status)
				dev_dbg(dev, "Fail to init DCB\n");

			/* Register for MIB change events */
			status = ice_cfg_lldp_mib_change(&pf->hw, true);
			if (status)
				dev_dbg(dev, "Fail to enable MIB change events\n");

			pf->dcbx_cap &= ~DCB_CAP_DCBX_HOST;
			pf->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;

			ice_nway_reset(netdev);
		}
	}
	/* don't allow modification of this flag when a single VF is in
	 * promiscuous mode because it's not supported
	 */
	if (test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, change_flags) &&
	    ice_is_any_vf_in_unicast_promisc(pf)) {
		dev_err(dev, "Changing vf-true-promisc-support flag while VF(s) are in promiscuous mode not supported\n");
		/* toggle bit back to previous state */
		change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
		ret = -EAGAIN;
	}

	if (test_bit(ICE_FLAG_VF_VLAN_PRUNING, change_flags) &&
	    ice_has_vfs(pf)) {
		dev_err(dev, "vf-vlan-pruning: VLAN pruning cannot be changed while VFs are active.\n");
		/* toggle bit back to previous state */
		change_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags);
		ret = -EOPNOTSUPP;
	}
ethtool_exit:
	clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
	return ret;
}
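/* For illustration only: the constraint documented in ice_get_sset_count()
 * below exists because userspace fetches stats in three separate requests,
 * roughly:
 *
 *   count = get_sset_count(dev, ETH_SS_STATS);   // sizing
 *   get_strings(dev, ETH_SS_STATS, names);       // count * ETH_GSTRING_LEN
 *   get_ethtool_stats(dev, NULL, values);        // count u64 values
 *
 * If the count changed between calls, names and values would misalign.
 */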
static int ice_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		/* The number (and order) of strings reported *must* remain
		 * constant for a given netdevice. This function must not
		 * report a different number based on run time parameters
		 * (such as the number of queues in use, or the setting of
		 * a private ethtool flag). This is due to the nature of the
		 * ethtool stats API.
		 *
		 * Userspace programs such as ethtool must make 3 separate
		 * ioctl requests, one for size, one for the strings, and
		 * finally one for the stats. Since these cross into
		 * userspace, changes to the number or size could result in
		 * undefined memory access or incorrect string<->value
		 * correlations for statistics.
		 *
		 * Even if it appears to be safe, changes to the size or
		 * order of strings will suffer from race conditions and are
		 * not safe.
		 */
		return ICE_ALL_STATS_LEN(netdev);
	case ETH_SS_TEST:
		return ICE_TEST_LEN;
	case ETH_SS_PRIV_FLAGS:
		return ICE_PRIV_FLAG_ARRAY_SIZE;
	default:
		return -EOPNOTSUPP;
	}
}
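
/**
 * __ice_get_ethtool_stats - gather statistics for a given VSI
 * @netdev: network interface device structure
 * @stats: ethtool stats structure (unused)
 * @data: array to fill with the statistics values
 * @vsi: VSI whose statistics are reported
 *
 * Fills @data with the VSI statistics, the per-queue packet/byte counters
 * and, for the PF VSI only, the PF and per-priority flow control counters.
 * The values must be written in exactly the order the corresponding
 * strings are reported.
 */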
static void
__ice_get_ethtool_stats(struct net_device *netdev,
			struct ethtool_stats __always_unused *stats, u64 *data,
			struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	unsigned int j;
	int i = 0;
	char *p;

	ice_update_pf_stats(pf);
	ice_update_vsi_stats(vsi);

	for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
		p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
		data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	if (ice_is_port_repr_netdev(netdev))
		return;

	/* populate per queue stats */
	rcu_read_lock();

	ice_for_each_alloc_txq(vsi, j) {
		u64 pkts, bytes;

		tx_ring = READ_ONCE(vsi->tx_rings[j]);
		if (!tx_ring || !tx_ring->ring_stats) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		ice_fetch_tx_ring_stats(tx_ring, &pkts, &bytes);

		data[i++] = pkts;
		data[i++] = bytes;
	}

	ice_for_each_alloc_rxq(vsi, j) {
		u64 pkts, bytes;

		rx_ring = READ_ONCE(vsi->rx_rings[j]);
		if (!rx_ring || !rx_ring->ring_stats) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		ice_fetch_rx_ring_stats(rx_ring, &pkts, &bytes);

		data[i++] = pkts;
		data[i++] = bytes;
	}

	rcu_read_unlock();

	if (vsi->type != ICE_VSI_PF)
		return;

	for (j = 0; j < ICE_PF_STATS_LEN; j++) {
		p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset;
		data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
		data[i++] = pf->stats.priority_xon_tx[j];
		data[i++] = pf->stats.priority_xoff_tx[j];
	}

	for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
		data[i++] = pf->stats.priority_xon_rx[j];
		data[i++] = pf->stats.priority_xoff_rx[j];
	}
}
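
/**
 * ice_get_ethtool_stats - report statistics for the netdev's own VSI
 * @netdev: network interface device structure
 * @stats: ethtool stats structure (unused)
 * @data: array to fill with the statistics values
 *
 * Thin wrapper around __ice_get_ethtool_stats() for the VSI bound to
 * @netdev. Userspace reads these values with, e.g., "ethtool -S eth0"
 * (interface name hypothetical).
 */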
static void
ice_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats __always_unused *stats, u64 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	__ice_get_ethtool_stats(netdev, stats, data, np->vsi);
}

#define ICE_PHY_TYPE_LOW_MASK_MIN_1G	(ICE_PHY_TYPE_LOW_100BASE_TX | \
					 ICE_PHY_TYPE_LOW_100M_SGMII)

#define ICE_PHY_TYPE_LOW_MASK_MIN_25G	(ICE_PHY_TYPE_LOW_MASK_MIN_1G | \
					 ICE_PHY_TYPE_LOW_1000BASE_T | \
					 ICE_PHY_TYPE_LOW_1000BASE_SX | \
					 ICE_PHY_TYPE_LOW_1000BASE_LX | \
					 ICE_PHY_TYPE_LOW_1000BASE_KX | \
					 ICE_PHY_TYPE_LOW_1G_SGMII | \
					 ICE_PHY_TYPE_LOW_2500BASE_T | \
					 ICE_PHY_TYPE_LOW_2500BASE_X | \
					 ICE_PHY_TYPE_LOW_2500BASE_KX | \
					 ICE_PHY_TYPE_LOW_5GBASE_T | \
					 ICE_PHY_TYPE_LOW_5GBASE_KR | \
					 ICE_PHY_TYPE_LOW_10GBASE_T | \
					 ICE_PHY_TYPE_LOW_10G_SFI_DA | \
					 ICE_PHY_TYPE_LOW_10GBASE_SR | \
					 ICE_PHY_TYPE_LOW_10GBASE_LR | \
					 ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \
					 ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \
					 ICE_PHY_TYPE_LOW_10G_SFI_C2C)

#define ICE_PHY_TYPE_LOW_MASK_100G	(ICE_PHY_TYPE_LOW_100GBASE_CR4 | \
					 ICE_PHY_TYPE_LOW_100GBASE_SR4 | \
					 ICE_PHY_TYPE_LOW_100GBASE_LR4 | \
					 ICE_PHY_TYPE_LOW_100GBASE_KR4 | \
					 ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \
					 ICE_PHY_TYPE_LOW_100G_CAUI4 | \
					 ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \
					 ICE_PHY_TYPE_LOW_100G_AUI4 | \
					 ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \
					 ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \
					 ICE_PHY_TYPE_LOW_100GBASE_CP2 | \
					 ICE_PHY_TYPE_LOW_100GBASE_SR2 | \
					 ICE_PHY_TYPE_LOW_100GBASE_DR)

#define ICE_PHY_TYPE_HIGH_MASK_100G	(ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \
					 ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | \
					 ICE_PHY_TYPE_HIGH_100G_CAUI2 | \
					 ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \
					 ICE_PHY_TYPE_HIGH_100G_AUI2)

#define ICE_PHY_TYPE_HIGH_MASK_200G	(ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 | \
					 ICE_PHY_TYPE_HIGH_200G_SR4 | \
					 ICE_PHY_TYPE_HIGH_200G_FR4 | \
					 ICE_PHY_TYPE_HIGH_200G_LR4 | \
					 ICE_PHY_TYPE_HIGH_200G_DR4 | \
					 ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 | \
					 ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC | \
					 ICE_PHY_TYPE_HIGH_200G_AUI4)

/**
 * ice_mask_min_supported_speeds - mask off speeds below the supported minimum
 * @hw: pointer to the HW structure
 * @phy_types_high: PHY type high
 * @phy_types_low: PHY type low to apply minimum supported speeds mask
 *
 * Apply minimum supported speeds mask to PHY type low. These are the speeds
 * for the ethtool supported link modes.
 */
static void
ice_mask_min_supported_speeds(struct ice_hw *hw,
			      u64 phy_types_high, u64 *phy_types_low)
{
	/* if QSFP connection with 100G speed, minimum supported speed is 25G */
	if ((*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G) ||
	    (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G) ||
	    (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_200G))
		*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G;
	else if (!ice_is_100m_speed_supported(hw))
		*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G;
}

/**
 * ice_linkmode_set_bit - set link mode bit
 * @phy_to_ethtool: PHY type to ethtool link mode struct to set
 * @ks: ethtool link ksettings struct to fill out
 * @req_speeds: speed requested by user
 * @advert_phy_type: advertised PHY type
 * @phy_type: PHY type
 */
static void
ice_linkmode_set_bit(const struct ice_phy_type_to_ethtool *phy_to_ethtool,
		     struct ethtool_link_ksettings *ks, u32 req_speeds,
		     u64 advert_phy_type, u32 phy_type)
{
	linkmode_set_bit(phy_to_ethtool->link_mode, ks->link_modes.supported);

	if (req_speeds & phy_to_ethtool->aq_link_speed ||
	    (!req_speeds && advert_phy_type & BIT(phy_type)))
		linkmode_set_bit(phy_to_ethtool->link_mode,
				 ks->link_modes.advertising);
}

/**
 * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes
 * @netdev: network interface device structure
 * @ks: ethtool link ksettings struct to fill out
 */
static void
ice_phy_type_to_ethtool(struct net_device *netdev,
			struct ethtool_link_ksettings *ks)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u64 advert_phy_type_lo = 0;
	u64 advert_phy_type_hi = 0;
	u64 phy_types_high = 0;
	u64 phy_types_low = 0;
	u32 req_speeds;
	u32 i;

	req_speeds = vsi->port_info->phy.link_info.req_speeds;

	/* Check if lenient mode is supported and enabled, or in strict mode.
	 *
	 * In lenient mode the Supported link modes are the PHY types without
	 * media. The Advertising link mode is either 1. the user requested
	 * speed, 2. the override PHY mask, or 3. the PHY types with media.
	 *
	 * In strict mode the Supported link modes are the PHY types with
	 * media, and the Advertising link modes are the media PHY type or
	 * the speed requested by the user.
	 */
	if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) {
		phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo);
		phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi);

		ice_mask_min_supported_speeds(&pf->hw, phy_types_high,
					      &phy_types_low);
		/* determine advertised modes based on link override only
		 * if it's supported and if the FW doesn't abstract the
		 * driver from having to account for link overrides
		 */
		if (ice_fw_supports_link_override(&pf->hw) &&
		    !ice_fw_supports_report_dflt_cfg(&pf->hw)) {
			struct ice_link_default_override_tlv *ldo;

			ldo = &pf->link_dflt_override;
			/* If override enabled and PHY mask set, then
			 * the Advertising link mode is the intersection of
			 * the PHY types without media and the override PHY
			 * mask.
			 */
			if (ldo->options & ICE_LINK_OVERRIDE_EN &&
			    (ldo->phy_type_low || ldo->phy_type_high)) {
				advert_phy_type_lo =
					le64_to_cpu(pf->nvm_phy_type_lo) &
					ldo->phy_type_low;
				advert_phy_type_hi =
					le64_to_cpu(pf->nvm_phy_type_hi) &
					ldo->phy_type_high;
			}
		}
	} else {
		/* strict mode */
		phy_types_low = vsi->port_info->phy.phy_type_low;
		phy_types_high = vsi->port_info->phy.phy_type_high;
	}

	/* If the Advertising link mode PHY type is not using the override
	 * PHY type, then use the PHY type with media.
	 */
	if (!advert_phy_type_lo && !advert_phy_type_hi) {
		advert_phy_type_lo = vsi->port_info->phy.phy_type_low;
		advert_phy_type_hi = vsi->port_info->phy.phy_type_high;
	}

	linkmode_zero(ks->link_modes.supported);
	linkmode_zero(ks->link_modes.advertising);

	for (i = 0; i < ARRAY_SIZE(phy_type_low_lkup); i++) {
		if (phy_types_low & BIT_ULL(i))
			ice_linkmode_set_bit(&phy_type_low_lkup[i], ks,
					     req_speeds, advert_phy_type_lo,
					     i);
	}

	for (i = 0; i < ARRAY_SIZE(phy_type_high_lkup); i++) {
		if (phy_types_high & BIT_ULL(i))
			ice_linkmode_set_bit(&phy_type_high_lkup[i], ks,
					     req_speeds, advert_phy_type_hi,
					     i);
	}
}

#define TEST_SET_BITS_TIMEOUT	50
#define TEST_SET_BITS_SLEEP_MAX	2000
#define TEST_SET_BITS_SLEEP_MIN	1000

/**
 * ice_get_settings_link_up - Get Link settings for when link is up
 * @ks: ethtool ksettings to fill in
 * @netdev: network interface device structure
 */
static void
ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
			 struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_port_info *pi = np->vsi->port_info;
	struct ice_link_status *link_info;
	struct ice_vsi *vsi = np->vsi;

	link_info = &vsi->port_info->phy.link_info;

	/* Get supported and advertised settings from PHY ability with media */
	ice_phy_type_to_ethtool(netdev, ks);

	switch (link_info->link_speed) {
	case ICE_AQ_LINK_SPEED_200GB:
		ks->base.speed = SPEED_200000;
		break;
	case ICE_AQ_LINK_SPEED_100GB:
		ks->base.speed = SPEED_100000;
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		ks->base.speed = SPEED_50000;
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		ks->base.speed = SPEED_40000;
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		ks->base.speed = SPEED_25000;
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		ks->base.speed = SPEED_20000;
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		ks->base.speed = SPEED_10000;
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		ks->base.speed = SPEED_5000;
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		ks->base.speed = SPEED_2500;
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		ks->base.speed = SPEED_1000;
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		ks->base.speed = SPEED_100;
		break;
	default:
		netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n",
			    link_info->link_speed);
		break;
	}
	ks->base.duplex = DUPLEX_FULL;

	if (link_info->an_info & ICE_AQ_AN_COMPLETED)
		ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
						     Autoneg);

	/* Set flow control negotiated Rx/Tx pause */
	switch (pi->fc.current_mode) {
	case ICE_FC_FULL:
		ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
		break;
	case ICE_FC_TX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
		ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
						     Asym_Pause);
		break;
	case ICE_FC_RX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
						     Asym_Pause);
		break;
	case ICE_FC_PFC:
	default:
		ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause);
		ethtool_link_ksettings_del_link_mode(ks, lp_advertising,
						     Asym_Pause);
		break;
	}
}

/**
 * ice_get_settings_link_down - Get the Link settings when link is down
 * @ks: ethtool ksettings to fill in
 * @netdev: network interface device structure
 *
 * Reports link settings that can be determined when link is down
 */
static void
ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
			   struct net_device *netdev)
{
	/* link is down and the driver needs to fall back on
	 * supported PHY types to figure out what info to display
	 */
	ice_phy_type_to_ethtool(netdev, ks);

	/* With no link, speed and duplex are unknown */
	ks->base.speed = SPEED_UNKNOWN;
	ks->base.duplex = DUPLEX_UNKNOWN;
}

/**
 * ice_get_link_ksettings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @ks: ethtool ksettings
 *
 * Reports speed/duplex settings based on media_type
 */
static int
ice_get_link_ksettings(struct net_device *netdev,
		       struct ethtool_link_ksettings *ks)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_aqc_get_phy_caps_data *caps;
	struct ice_link_status *hw_link_info;
	struct ice_vsi *vsi = np->vsi;
	int err;

	ethtool_link_ksettings_zero_link_mode(ks, supported);
	ethtool_link_ksettings_zero_link_mode(ks, advertising);
	ethtool_link_ksettings_zero_link_mode(ks, lp_advertising);
	hw_link_info = &vsi->port_info->phy.link_info;

	/* set speed and duplex */
	if (hw_link_info->link_info & ICE_AQ_LINK_UP)
		ice_get_settings_link_up(ks, netdev);
	else
		ice_get_settings_link_down(ks, netdev);

	/* set autoneg settings */
	ks->base.autoneg = (hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
		AUTONEG_ENABLE : AUTONEG_DISABLE;

	/* set media type settings */
	switch (vsi->port_info->phy.media_type) {
	case ICE_MEDIA_FIBER:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ks->base.port = PORT_FIBRE;
		break;
	case ICE_MEDIA_BASET:
		ethtool_link_ksettings_add_link_mode(ks, supported, TP);
		ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
		ks->base.port = PORT_TP;
		break;
	case ICE_MEDIA_BACKPLANE:
		ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Backplane);
		ks->base.port = PORT_NONE;
		break;
	case ICE_MEDIA_DA:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
		ks->base.port = PORT_DA;
		break;
	default:
		ks->base.port = PORT_OTHER;
		break;
	}

	/* flow control is symmetric and always supported */
	ethtool_link_ksettings_add_link_mode(ks, supported, Pause);

	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps)
		return -ENOMEM;

	err = ice_aq_get_phy_caps(vsi->port_info, false,
				  ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (err)
		goto done;

	/* Set the advertised flow control based on the PHY capability */
	if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) &&
	    (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) {
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
	} else if (caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) {
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
	} else if (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) {
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
	} else {
		ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_del_link_mode(ks, advertising,
						     Asym_Pause);
	}

	/* Set advertised FEC modes based on PHY capability */
	ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     FEC_BASER);
	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);

	err = ice_aq_get_phy_caps(vsi->port_info, false,
				  ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL);
	if (err)
		goto done;

	/* Set supported FEC modes based on PHY capability */
	ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
		ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
		ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);

	/* Set supported and advertised autoneg */
	if (ice_is_phy_caps_an_enabled(caps)) {
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
	}

done:
	kfree(caps);
	return err;
}
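
/* Reading the settings gathered above is plain "ethtool <dev>" from
 * userspace; the supported/advertised link modes printed there map 1:1 to
 * the masks filled in by ice_phy_type_to_ethtool() and the FEC/pause bits
 * set in ice_get_link_ksettings().
 */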

/**
 * ice_speed_to_aq_link - Get AQ link speed by Ethtool forced speed
 * @speed: ethtool forced speed
 */
static u16 ice_speed_to_aq_link(int speed)
{
	int aq_speed;

	switch (speed) {
	case SPEED_10:
		aq_speed = ICE_AQ_LINK_SPEED_10MB;
		break;
	case SPEED_100:
		aq_speed = ICE_AQ_LINK_SPEED_100MB;
		break;
	case SPEED_1000:
		aq_speed = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case SPEED_2500:
		aq_speed = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case SPEED_5000:
		aq_speed = ICE_AQ_LINK_SPEED_5GB;
		break;
	case SPEED_10000:
		aq_speed = ICE_AQ_LINK_SPEED_10GB;
		break;
	case SPEED_20000:
		aq_speed = ICE_AQ_LINK_SPEED_20GB;
		break;
	case SPEED_25000:
		aq_speed = ICE_AQ_LINK_SPEED_25GB;
		break;
	case SPEED_40000:
		aq_speed = ICE_AQ_LINK_SPEED_40GB;
		break;
	case SPEED_50000:
		aq_speed = ICE_AQ_LINK_SPEED_50GB;
		break;
	case SPEED_100000:
		aq_speed = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		aq_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}
	return aq_speed;
}

/**
 * ice_ksettings_find_adv_link_speed - Find advertising link speed
 * @ks: ethtool ksettings
 */
static u16
ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
{
	const struct ethtool_forced_speed_map *map;
	u16 adv_link_speed = 0;

	for (u32 i = 0; i < ARRAY_SIZE(ice_adv_lnk_speed_maps); i++) {
		map = ice_adv_lnk_speed_maps + i;
		if (linkmode_intersects(ks->link_modes.advertising, map->caps))
			adv_link_speed |= ice_speed_to_aq_link(map->speed);
	}

	return adv_link_speed;
}

/**
 * ice_setup_autoneg - setup the PHY autonegotiation feature
 * @p: port info
 * @ks: ethtool_link_ksettings
 * @config: configuration that will be sent down to FW
 * @autoneg_enabled: autonegotiation is enabled or not
 * @autoneg_changed: will there be a change in autonegotiation
 * @netdev: network interface device structure
 */
static int
ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
		  struct ice_aqc_set_phy_cfg_data *config,
		  u8 autoneg_enabled, u8 *autoneg_changed,
		  struct net_device *netdev)
{
	int err = 0;

	*autoneg_changed = 0;

	/* Check autoneg */
	if (autoneg_enabled == AUTONEG_ENABLE) {
		/* If autoneg was not already enabled */
		if (!(p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)) {
			/* If autoneg is not supported, return error */
			if (!ethtool_link_ksettings_test_link_mode(ks,
								   supported,
								   Autoneg)) {
				netdev_info(netdev, "Autoneg not supported on this phy.\n");
				err = -EINVAL;
			} else {
				/* Autoneg is allowed to change */
				config->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
				*autoneg_changed = 1;
			}
		}
	} else {
		/* If autoneg is currently enabled */
		if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) {
			/* If autoneg is supported, 10GBASE_T is the only PHY
			 * that can disable it, so otherwise return an error.
			 */
			if (ethtool_link_ksettings_test_link_mode(ks,
								  supported,
								  Autoneg)) {
				netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
				err = -EINVAL;
			} else {
				/* Autoneg is allowed to change */
				config->caps &= ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
				*autoneg_changed = 1;
			}
		}
	}

	return err;
}

/**
 * ice_set_phy_type_from_speed - set phy_types based on speeds
 * and advertised modes
 * @ks: ethtool link ksettings struct
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @adv_link_speed: targeted link speeds bitmap
 */
static void
ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks,
			    u64 *phy_type_low, u64 *phy_type_high,
			    u16 adv_link_speed)
{
	/* Handle 1000M speed in a special way because ice_update_phy_type
	 * enables all link modes, but having mixed copper and optical
	 * standards is not supported.
	 */
	adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB;

	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  1000baseT_Full))
		*phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T |
				 ICE_PHY_TYPE_LOW_1G_SGMII;

	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  1000baseKX_Full))
		*phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX;

	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  1000baseX_Full))
		*phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX |
				 ICE_PHY_TYPE_LOW_1000BASE_LX;

	ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed);
}

/**
 * ice_set_link_ksettings - Set Speed and Duplex
 * @netdev: network interface device structure
 * @ks: ethtool ksettings
 *
 * Set speed/duplex per media_types advertised/forced
 */
static int
ice_set_link_ksettings(struct net_device *netdev,
		       const struct ethtool_link_ksettings *ks)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT;
	struct ethtool_link_ksettings copy_ks = *ks;
	struct ethtool_link_ksettings safe_ks = {};
	struct ice_aqc_get_phy_caps_data *phy_caps;
	struct ice_aqc_set_phy_cfg_data config;
	u16 adv_link_speed, curr_link_speed;
	struct ice_pf *pf = np->vsi->back;
	struct ice_port_info *pi;
	u8 autoneg_changed = 0;
	u64 phy_type_high = 0;
	u64 phy_type_low = 0;
	bool linkup;
	int err;

	pi = np->vsi->port_info;

	if (!pi)
		return -EIO;

	if (pi->phy.media_type != ICE_MEDIA_BASET &&
	    pi->phy.media_type != ICE_MEDIA_FIBER &&
	    pi->phy.media_type != ICE_MEDIA_BACKPLANE &&
	    pi->phy.media_type != ICE_MEDIA_DA &&
	    pi->phy.link_info.link_info & ICE_AQ_LINK_UP)
		return -EOPNOTSUPP;

	phy_caps = kzalloc(sizeof(*phy_caps), GFP_KERNEL);
	if (!phy_caps)
		return -ENOMEM;

	/* Get the PHY capabilities based on media */
	if (ice_fw_supports_report_dflt_cfg(pi->hw))
		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
					  phy_caps, NULL);
	else
		err = ice_aq_get_phy_caps(pi, false,
					  ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					  phy_caps, NULL);
	if (err)
		goto done;

	/* save autoneg out of ksettings */
	autoneg = copy_ks.base.autoneg;

	/* Get link modes supported by hardware... */
	ice_phy_type_to_ethtool(netdev, &safe_ks);

	/* ...and check them against the modes requested by the user.
	 * Return an error if an unsupported mode was set.
	 */
	if (!bitmap_subset(copy_ks.link_modes.advertising,
			   safe_ks.link_modes.supported,
			   __ETHTOOL_LINK_MODE_MASK_NBITS)) {
		if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags))
			netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
		err = -EOPNOTSUPP;
		goto done;
	}

	/* get our own copy of the bits to check against */
	memset(&safe_ks, 0, sizeof(safe_ks));
	safe_ks.base.cmd = copy_ks.base.cmd;
	safe_ks.base.link_mode_masks_nwords =
		copy_ks.base.link_mode_masks_nwords;
	ice_get_link_ksettings(netdev, &safe_ks);

	/* set autoneg back to what it currently is */
	copy_ks.base.autoneg = safe_ks.base.autoneg;
	/* we don't compare the speed */
	copy_ks.base.speed = safe_ks.base.speed;

	/* If copy_ks.base and safe_ks.base are not the same now, then they are
	 * trying to set something that we do not support.
	 */
	if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(copy_ks.base))) {
		err = -EOPNOTSUPP;
		goto done;
	}

	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
		timeout--;
		if (!timeout) {
			err = -EBUSY;
			goto done;
		}
		usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX);
	}

	/* Copy the current user PHY configuration. The current user PHY
	 * configuration is initialized during probe from PHY capabilities
	 * software mode, and updated on set PHY configuration.
	 */
	config = pi->phy.curr_user_phy_cfg;

	config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	/* Check autoneg */
	err = ice_setup_autoneg(pi, &safe_ks, &config, autoneg,
				&autoneg_changed, netdev);

	if (err)
		goto done;

	/* Call to get the current link speed */
	pi->phy.get_link_info = true;
	err = ice_get_link_status(pi, &linkup);
	if (err)
		goto done;

	curr_link_speed = pi->phy.curr_user_speed_req;
	adv_link_speed = ice_ksettings_find_adv_link_speed(ks);

	/* If speed didn't get set, set it to what it currently is.
	 * This is needed because if advertise is 0 (as it is when autoneg
	 * is disabled) then speed won't get set.
	 */
	if (!adv_link_speed)
		adv_link_speed = curr_link_speed;

	/* Convert the advertised link speeds to their corresponding PHY_TYPE */
	ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high,
				    adv_link_speed);

	if (!autoneg_changed && adv_link_speed == curr_link_speed) {
		netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
		goto done;
	}

	/* save the requested speeds */
	pi->phy.link_info.req_speeds = adv_link_speed;

	/* set link and auto negotiation so changes take effect */
	config.caps |= ICE_AQ_PHY_ENA_LINK;

	/* check if there is a PHY type for the requested advertised speed */
	if (!(phy_type_low || phy_type_high)) {
		netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
		err = -EOPNOTSUPP;
		goto done;
	}

	/* intersect requested advertised speed PHY types with media PHY types
	 * for set PHY configuration
	 */
	config.phy_type_high = cpu_to_le64(phy_type_high) &
			       phy_caps->phy_type_high;
	config.phy_type_low = cpu_to_le64(phy_type_low) &
			      phy_caps->phy_type_low;

	if (!(config.phy_type_high || config.phy_type_low)) {
		/* If there is no intersection and lenient mode is enabled,
		 * then intersect the requested advertised speed with the NVM
		 * media type PHY types.
		 */
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) {
			config.phy_type_high = cpu_to_le64(phy_type_high) &
					       pf->nvm_phy_type_hi;
			config.phy_type_low = cpu_to_le64(phy_type_low) &
					      pf->nvm_phy_type_lo;
		} else {
			netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
			err = -EOPNOTSUPP;
			goto done;
		}
	}

	/* If link is up, put link down */
	if (pi->phy.link_info.link_info & ICE_AQ_LINK_UP) {
		/* Tell the OS link is going down; the link will come back up
		 * when fw says it is ready asynchronously.
		 */
		ice_print_link_msg(np->vsi, false);
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
	}

	/* make the aq call */
	err = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL);
	if (err) {
		netdev_info(netdev, "Set phy config failed\n");
		goto done;
	}

	/* Save speed request */
	pi->phy.curr_user_speed_req = adv_link_speed;
done:
	kfree(phy_caps);
	clear_bit(ICE_CFG_BUSY, pf->state);

	return err;
}
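
/* Forcing a speed through the path above from userspace looks like, e.g.
 * (interface name hypothetical):
 *
 *   ethtool -s eth0 speed 25000 autoneg off
 *
 * which ethtool translates into the advertised-mode bitmap handled above.
 */

/**
 * ice_parse_hdrs - map an ethtool flow type to protocol header flags
 * @nfc: RSS hash field configuration from ethtool
 *
 * Returns the ICE_FLOW_SEG_HDR_* bits describing the protocol headers of
 * the given flow type, or ICE_FLOW_SEG_HDR_NONE if the flow type is not
 * recognized.
 */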
static u32 ice_parse_hdrs(const struct ethtool_rxfh_fields *nfc)
{
	u32 hdrs = ICE_FLOW_SEG_HDR_NONE;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4;
		break;
	case UDP_V4_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4;
		break;
	case SCTP_V4_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4;
		break;
	case GTPU_V4_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4;
		break;
	case GTPC_V4_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4;
		break;
	case GTPC_TEID_V4_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4;
		break;
	case GTPU_EH_V4_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4;
		break;
	case GTPU_UL_V4_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4;
		break;
	case GTPU_DL_V4_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4;
		break;
	case TCP_V6_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6;
		break;
	case UDP_V6_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6;
		break;
	case SCTP_V6_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6;
		break;
	case GTPU_V6_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6;
		break;
	case GTPC_V6_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6;
		break;
	case GTPC_TEID_V6_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6;
		break;
	case GTPU_EH_V6_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6;
		break;
	case GTPU_UL_V6_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6;
		break;
	case GTPU_DL_V6_FLOW:
		hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6;
		break;
	default:
		break;
	}
	return hdrs;
}
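
/**
 * ice_parse_hash_flds - map ethtool RXH_* bits to ICE hash field flags
 * @nfc: RSS hash field configuration from ethtool
 * @symm: true when the VSI uses symmetric Toeplitz hashing (unused here)
 *
 * Returns the ICE_FLOW_HASH_FLD_* bits selected by @nfc->data for the
 * given flow type, or ICE_HASH_INVALID if nothing applicable is set.
 */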
static u64 ice_parse_hash_flds(const struct ethtool_rxfh_fields *nfc, bool symm)
{
	u64 hfld = ICE_HASH_INVALID;

	if (nfc->data & RXH_IP_SRC || nfc->data & RXH_IP_DST) {
		switch (nfc->flow_type) {
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case GTPU_V4_FLOW:
		case GTPC_V4_FLOW:
		case GTPC_TEID_V4_FLOW:
		case GTPU_EH_V4_FLOW:
		case GTPU_UL_V4_FLOW:
		case GTPU_DL_V4_FLOW:
			if (nfc->data & RXH_IP_SRC)
				hfld |= ICE_FLOW_HASH_FLD_IPV4_SA;
			if (nfc->data & RXH_IP_DST)
				hfld |= ICE_FLOW_HASH_FLD_IPV4_DA;
			break;
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
		case SCTP_V6_FLOW:
		case GTPU_V6_FLOW:
		case GTPC_V6_FLOW:
		case GTPC_TEID_V6_FLOW:
		case GTPU_EH_V6_FLOW:
		case GTPU_UL_V6_FLOW:
		case GTPU_DL_V6_FLOW:
			if (nfc->data & RXH_IP_SRC)
				hfld |= ICE_FLOW_HASH_FLD_IPV6_SA;
			if (nfc->data & RXH_IP_DST)
				hfld |= ICE_FLOW_HASH_FLD_IPV6_DA;
			break;
		default:
			break;
		}
	}

	if (nfc->data & RXH_L4_B_0_1 || nfc->data & RXH_L4_B_2_3) {
		switch (nfc->flow_type) {
		case TCP_V4_FLOW:
		case TCP_V6_FLOW:
			if (nfc->data & RXH_L4_B_0_1)
				hfld |= ICE_FLOW_HASH_FLD_TCP_SRC_PORT;
			if (nfc->data & RXH_L4_B_2_3)
				hfld |= ICE_FLOW_HASH_FLD_TCP_DST_PORT;
			break;
		case UDP_V4_FLOW:
		case UDP_V6_FLOW:
			if (nfc->data & RXH_L4_B_0_1)
				hfld |= ICE_FLOW_HASH_FLD_UDP_SRC_PORT;
			if (nfc->data & RXH_L4_B_2_3)
				hfld |= ICE_FLOW_HASH_FLD_UDP_DST_PORT;
			break;
		case SCTP_V4_FLOW:
		case SCTP_V6_FLOW:
			if (nfc->data & RXH_L4_B_0_1)
				hfld |= ICE_FLOW_HASH_FLD_SCTP_SRC_PORT;
			if (nfc->data & RXH_L4_B_2_3)
				hfld |= ICE_FLOW_HASH_FLD_SCTP_DST_PORT;
			break;
		default:
			break;
		}
	}

	if (nfc->data & RXH_GTP_TEID) {
		switch (nfc->flow_type) {
		case GTPC_TEID_V4_FLOW:
		case GTPC_TEID_V6_FLOW:
			hfld |= ICE_FLOW_HASH_FLD_GTPC_TEID;
			break;
		case GTPU_V4_FLOW:
		case GTPU_V6_FLOW:
			hfld |= ICE_FLOW_HASH_FLD_GTPU_IP_TEID;
			break;
		case GTPU_EH_V4_FLOW:
		case GTPU_EH_V6_FLOW:
			hfld |= ICE_FLOW_HASH_FLD_GTPU_EH_TEID;
			break;
		case GTPU_UL_V4_FLOW:
		case GTPU_UL_V6_FLOW:
			hfld |= ICE_FLOW_HASH_FLD_GTPU_UP_TEID;
			break;
		case GTPU_DL_V4_FLOW:
		case GTPU_DL_V6_FLOW:
			hfld |= ICE_FLOW_HASH_FLD_GTPU_DWN_TEID;
			break;
		default:
			break;
		}
	}

	return hfld;
}
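
/**
 * ice_set_rxfh_fields - set the Rx flow hash fields for a flow type
 * @netdev: network interface device structure
 * @nfc: requested hash field configuration
 * @extack: extended ACK from the Netlink message
 *
 * Programs an RSS configuration for the requested flow type. From
 * userspace this is driven by, e.g. (interface name hypothetical):
 *
 *   ethtool -N eth0 rx-flow-hash tcp4 sdfn
 *
 * where "sdfn" selects IP src/dst and L4 src/dst ports as hash inputs.
 */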
static int
ice_set_rxfh_fields(struct net_device *netdev,
		    const struct ethtool_rxfh_fields *nfc,
		    struct netlink_ext_ack *extack)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_rss_hash_cfg cfg;
	struct device *dev;
	u64 hashed_flds;
	int status;
	bool symm;
	u32 hdrs;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	symm = !!(vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
	hashed_flds = ice_parse_hash_flds(nfc, symm);
	if (hashed_flds == ICE_HASH_INVALID) {
		dev_dbg(dev, "Invalid hash fields, vsi num = %d\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	hdrs = ice_parse_hdrs(nfc);
	if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
		dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	cfg.hash_flds = hashed_flds;
	cfg.addl_hdrs = hdrs;
	cfg.hdr_type = ICE_RSS_ANY_HEADERS;
	cfg.symm = symm;

	status = ice_add_rss_cfg(&pf->hw, vsi, &cfg);
	if (status) {
		dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
			vsi->vsi_num, status);
		return status;
	}

	return 0;
}
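
/**
 * ice_get_rxfh_fields - report the Rx flow hash fields for a flow type
 * @netdev: network interface device structure
 * @nfc: hash field configuration to fill in
 *
 * Translates the currently programmed RSS configuration for the requested
 * flow type back into ethtool RXH_* bits in @nfc->data.
 */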
static int
ice_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *nfc)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u64 hash_flds;
	bool symm;
	u32 hdrs;

	dev = ice_pf_to_dev(pf);

	nfc->data = 0;
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi->vsi_num);
		return 0;
	}

	hdrs = ice_parse_hdrs(nfc);
	if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
		dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
			vsi->vsi_num);
		return 0;
	}

	hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm);
	if (hash_flds == ICE_HASH_INVALID) {
		dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
			vsi->vsi_num);
		return 0;
	}

	if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_SA ||
	    hash_flds & ICE_FLOW_HASH_FLD_IPV6_SA)
		nfc->data |= (u64)RXH_IP_SRC;

	if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_DA ||
	    hash_flds & ICE_FLOW_HASH_FLD_IPV6_DA)
		nfc->data |= (u64)RXH_IP_DST;

	if (hash_flds & ICE_FLOW_HASH_FLD_TCP_SRC_PORT ||
	    hash_flds & ICE_FLOW_HASH_FLD_UDP_SRC_PORT ||
	    hash_flds & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT)
		nfc->data |= (u64)RXH_L4_B_0_1;

	if (hash_flds & ICE_FLOW_HASH_FLD_TCP_DST_PORT ||
	    hash_flds & ICE_FLOW_HASH_FLD_UDP_DST_PORT ||
	    hash_flds & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)
		nfc->data |= (u64)RXH_L4_B_2_3;

	if (hash_flds & ICE_FLOW_HASH_FLD_GTPC_TEID ||
	    hash_flds & ICE_FLOW_HASH_FLD_GTPU_IP_TEID ||
	    hash_flds & ICE_FLOW_HASH_FLD_GTPU_EH_TEID ||
	    hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID ||
	    hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID)
		nfc->data |= (u64)RXH_GTP_TEID;

	return 0;
}

/**
 * ice_set_rxnfc - command to set Rx flow rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 *
 * Returns 0 for success and negative values for errors
 */
static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		return ice_add_fdir_ethtool(vsi, cmd);
	case ETHTOOL_SRXCLSRLDEL:
		return ice_del_fdir_ethtool(vsi, cmd);
	default:
		break;
	}
	return -EOPNOTSUPP;
}

/**
 * ice_get_rx_ring_count - get RX ring count
 * @netdev: network interface device structure
 *
 * Return: number of RX rings.
 */
static u32 ice_get_rx_ring_count(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	return vsi->rss_size;
}

/**
 * ice_get_rxnfc - command to get Rx flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: buffer to return Rx flow classification rules
 *
 * Returns 0 if the command is supported.
 */
static int
ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
	      u32 __always_unused *rule_locs)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int ret = -EOPNOTSUPP;
	struct ice_hw *hw;

	hw = &vsi->back->hw;

	switch (cmd->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = hw->fdir_active_fltr;
		/* report total rule count */
		cmd->data = ice_get_fdir_cnt_all(hw);
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ice_get_ethtool_fdir_entry(hw, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ice_get_fdir_fltr_ids(hw, cmd, (u32 *)rule_locs);
		break;
	default:
		break;
	}

	return ret;
}
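
/**
 * ice_get_ringparam - report the current and maximum ring sizes
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure to fill in
 * @kernel_ring: kernel-only ringparam fields (TCP data split state)
 * @extack: extended ACK from the Netlink message
 */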
static void
ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
		  struct kernel_ethtool_ringparam *kernel_ring,
		  struct netlink_ext_ack *extack)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_hw *hw;

	hw = &vsi->back->hw;
	ring->rx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
	ring->tx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
	if (vsi->tx_rings && vsi->rx_rings) {
		ring->rx_pending = vsi->rx_rings[0]->count;
		ring->tx_pending = vsi->tx_rings[0]->count;
	} else {
		ring->rx_pending = 0;
		ring->tx_pending = 0;
	}

	/* Rx mini and jumbo rings are not supported */
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;

	kernel_ring->tcp_data_split = vsi->hsplit ?
				      ETHTOOL_TCP_DATA_SPLIT_ENABLED :
				      ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
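
/**
 * ice_set_ringparam - change the Tx/Rx descriptor ring sizes
 * @netdev: network interface device structure
 * @ring: requested ring parameters
 * @kernel_ring: kernel-only ringparam fields (TCP data split state)
 * @extack: extended ACK from the Netlink message
 *
 * Descriptor counts are rounded up to ICE_REQ_DESC_MULTIPLE and applied to
 * clones of the rings before the live rings are swapped, so a failed
 * allocation leaves the running configuration untouched. From userspace
 * this path is exercised with, e.g. (interface name hypothetical):
 *
 *   ethtool -G eth0 rx 2048 tx 2048
 */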
static int
ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
		  struct kernel_ethtool_ringparam *kernel_ring,
		  struct netlink_ext_ack *extack)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_tx_ring *xdp_rings = NULL;
	struct ice_tx_ring *tx_rings = NULL;
	struct ice_rx_ring *rx_rings = NULL;
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int i, timeout = 50, err = 0;
	struct ice_hw *hw = &pf->hw;
	u16 new_rx_cnt, new_tx_cnt;
	bool hsplit;

	if (ring->tx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
	    ring->tx_pending < ICE_MIN_NUM_DESC ||
	    ring->rx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
	    ring->rx_pending < ICE_MIN_NUM_DESC) {
		netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
			   ring->tx_pending, ring->rx_pending,
			   ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC_BY_MAC(hw),
			   ICE_REQ_DESC_MULTIPLE);
		return -EINVAL;
	}

	/* Return if there are no rings (device is reloading) */
	if (!vsi->tx_rings || !vsi->rx_rings)
		return -EBUSY;

	new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
	if (new_tx_cnt != ring->tx_pending)
		netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
			    new_tx_cnt);
	new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
	if (new_rx_cnt != ring->rx_pending)
		netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
			    new_rx_cnt);

	hsplit = kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED;

	/* if nothing to do return success */
	if (new_tx_cnt == vsi->tx_rings[0]->count &&
	    new_rx_cnt == vsi->rx_rings[0]->count &&
	    hsplit == vsi->hsplit) {
		netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
		return 0;
	}

	/* If there is an AF_XDP UMEM attached to any of the Rx rings,
	 * disallow changing the number of descriptors -- regardless of
	 * whether the netdev is running or not.
	 */
	if (ice_xsk_any_rx_ring_ena(vsi))
		return -EBUSY;

	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	/* set for the next time the netdev is started */
	if (!netif_running(vsi->netdev)) {
		ice_for_each_alloc_txq(vsi, i)
			vsi->tx_rings[i]->count = new_tx_cnt;
		ice_for_each_alloc_rxq(vsi, i)
			vsi->rx_rings[i]->count = new_rx_cnt;
		if (ice_is_xdp_ena_vsi(vsi))
			ice_for_each_xdp_txq(vsi, i)
				vsi->xdp_rings[i]->count = new_tx_cnt;
		vsi->num_tx_desc = (u16)new_tx_cnt;
		vsi->num_rx_desc = (u16)new_rx_cnt;
		vsi->hsplit = hsplit;

		netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
		goto done;
	}

	if (new_tx_cnt == vsi->tx_rings[0]->count)
		goto process_rx;

	/* alloc updated Tx resources */
	netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
		    vsi->tx_rings[0]->count, new_tx_cnt);

	tx_rings = kcalloc(vsi->num_txq, sizeof(*tx_rings), GFP_KERNEL);
	if (!tx_rings) {
		err = -ENOMEM;
		goto done;
	}

	ice_for_each_txq(vsi, i) {
		/* clone ring and setup updated count */
		tx_rings[i] = *vsi->tx_rings[i];
		tx_rings[i].count = new_tx_cnt;
		tx_rings[i].desc = NULL;
		tx_rings[i].tx_buf = NULL;
		tx_rings[i].tstamp_ring = NULL;
		tx_rings[i].tx_tstamps = &pf->ptp.port.tx;
		err = ice_setup_tx_ring(&tx_rings[i]);
		if (err) {
			while (i--)
				ice_clean_tx_ring(&tx_rings[i]);
			kfree(tx_rings);
			goto done;
		}
	}

	if (!ice_is_xdp_ena_vsi(vsi))
		goto process_rx;

	/* alloc updated XDP resources */
	netdev_info(netdev, "Changing XDP descriptor count from %d to %d\n",
		    vsi->xdp_rings[0]->count, new_tx_cnt);

	xdp_rings = kcalloc(vsi->num_xdp_txq, sizeof(*xdp_rings), GFP_KERNEL);
	if (!xdp_rings) {
		err = -ENOMEM;
		goto free_tx;
	}

	ice_for_each_xdp_txq(vsi, i) {
		/* clone ring and setup updated count */
		xdp_rings[i] = *vsi->xdp_rings[i];
		xdp_rings[i].count = new_tx_cnt;
		xdp_rings[i].desc = NULL;
		xdp_rings[i].tx_buf = NULL;
		err = ice_setup_tx_ring(&xdp_rings[i]);
		if (err) {
			while (i--)
				ice_clean_tx_ring(&xdp_rings[i]);
			kfree(xdp_rings);
			goto free_tx;
		}
		ice_set_ring_xdp(&xdp_rings[i]);
	}

process_rx:
	if (new_rx_cnt == vsi->rx_rings[0]->count)
		goto process_link;

	/* alloc updated Rx resources */
	netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
		    vsi->rx_rings[0]->count, new_rx_cnt);

	rx_rings = kcalloc(vsi->num_rxq, sizeof(*rx_rings), GFP_KERNEL);
	if (!rx_rings) {
		err = -ENOMEM;
		goto done;
	}

	ice_for_each_rxq(vsi, i) {
		/* clone ring and setup updated count */
		rx_rings[i] = *vsi->rx_rings[i];
		rx_rings[i].count = new_rx_cnt;
		rx_rings[i].cached_phctime = pf->ptp.cached_phc_time;
		rx_rings[i].desc = NULL;
		rx_rings[i].xdp_buf = NULL;

		/* this is to allow wr32 to have something to write to
		 * during early allocation of Rx buffers
		 */
		rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS;

		err = ice_setup_rx_ring(&rx_rings[i]);
		if (err) {
			while (i) {
				i--;
				ice_free_rx_ring(&rx_rings[i]);
			}
			kfree(rx_rings);
			err = -ENOMEM;
			goto free_tx;
		}
	}

process_link:
	vsi->hsplit = hsplit;

	/* Bring interface down, copy in the new ring info, then restore the
	 * interface. If the VSI is up, bring it down and then back up.
	 */
	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
		ice_down(vsi);

		if (tx_rings) {
			ice_for_each_txq(vsi, i) {
				ice_free_tx_ring(vsi->tx_rings[i]);
				*vsi->tx_rings[i] = tx_rings[i];
			}
			kfree(tx_rings);
		}

		if (rx_rings) {
			ice_for_each_rxq(vsi, i) {
				ice_free_rx_ring(vsi->rx_rings[i]);
				/* copy the real tail offset */
				rx_rings[i].tail = vsi->rx_rings[i]->tail;
				/* this is to fake out the allocation routine
				 * into thinking it has to realloc everything
				 * but the recycling logic will let us re-use
				 * the buffers allocated above
				 */
				rx_rings[i].next_to_use = 0;
				rx_rings[i].next_to_clean = 0;
				*vsi->rx_rings[i] = rx_rings[i];
			}
			kfree(rx_rings);
		}

		if (xdp_rings) {
			ice_for_each_xdp_txq(vsi, i) {
				ice_free_tx_ring(vsi->xdp_rings[i]);
				*vsi->xdp_rings[i] = xdp_rings[i];
			}
			kfree(xdp_rings);
		}

		vsi->num_tx_desc = new_tx_cnt;
		vsi->num_rx_desc = new_rx_cnt;
		ice_up(vsi);
	}
	goto done;

free_tx:
	/* error cleanup if the Rx allocations failed after getting Tx */
	if (tx_rings) {
		ice_for_each_txq(vsi, i)
			ice_free_tx_ring(&tx_rings[i]);
		kfree(tx_rings);
	}

done:
	clear_bit(ICE_CFG_BUSY, pf->state);
	return err;
}

/**
 * ice_get_pauseparam - Get Flow Control status
 * @netdev: network interface device structure
 * @pause: ethernet pause (flow control) parameters
 *
 * Get requested flow control status from PHY capability.
 * If autoneg is true, then ethtool will send the ETHTOOL_GSET ioctl which
 * is handled by ice_get_link_ksettings. ice_get_link_ksettings will report
 * the negotiated Rx/Tx pause via lp_advertising.
 */
static void
ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_port_info *pi = np->vsi->port_info;
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_dcbx_cfg *dcbx_cfg;
	int status;

	/* Initialize pause params */
	pause->rx_pause = 0;
	pause->tx_pause = 0;

	dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return;

	/* Get current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status)
		goto out;

	pause->autoneg = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
							     AUTONEG_DISABLE;

	if (dcbx_cfg->pfc.pfcena)
		/* PFC enabled so report LFC as off */
		goto out;

	if (pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
		pause->tx_pause = 1;
	if (pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		pause->rx_pause = 1;

out:
	kfree(pcaps);
}

/**
 * ice_set_pauseparam - Set Flow Control parameter
 * @netdev: network interface device structure
 * @pause: requested Tx/Rx flow control parameters
 */
static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_link_status *hw_link_info;
	struct ice_pf *pf = np->vsi->back;
	struct ice_dcbx_cfg *dcbx_cfg;
	struct ice_vsi *vsi = np->vsi;
	struct ice_hw *hw = &pf->hw;
	struct ice_port_info *pi;
	u8 aq_failures;
	bool link_up;
	u32 is_an;
	int err;

	pi = vsi->port_info;
	hw_link_info = &pi->phy.link_info;
	dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	/* Changing the port's flow control is not supported if this isn't the
	 * PF VSI
	 */
	if (vsi->type != ICE_VSI_PF) {
		netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n");
		return -EOPNOTSUPP;
	}

	/* Get pause param reports configured and negotiated flow control pause
	 * when ETHTOOL_GLINKSETTINGS is defined. Since ETHTOOL_GLINKSETTINGS
	 * is defined, get pause param pause->autoneg reports the SW configured
	 * setting, so compare pause->autoneg with the SW configured value to
	 * prevent the user from using set pause param to change autoneg.
	 */
	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Get current PHY config */
	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
				  NULL);
	if (err) {
		kfree(pcaps);
		return err;
	}

	is_an = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
						    AUTONEG_DISABLE;

	kfree(pcaps);

	if (pause->autoneg != is_an) {
		netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	/* If we have link and don't have autoneg */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
		/* Send message that it might not necessarily work */
		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
	}

	if (dcbx_cfg->pfc.pfcena) {
		netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}
	if (pause->rx_pause && pause->tx_pause)
		pi->fc.req_mode = ICE_FC_FULL;
	else if (pause->rx_pause && !pause->tx_pause)
		pi->fc.req_mode = ICE_FC_RX_PAUSE;
	else if (!pause->rx_pause && pause->tx_pause)
		pi->fc.req_mode = ICE_FC_TX_PAUSE;
	else if (!pause->rx_pause && !pause->tx_pause)
		pi->fc.req_mode = ICE_FC_NONE;
	else
		return -EINVAL;

	/* Set the FC mode and only restart AN if link is up */
	err = ice_set_fc(pi, &aq_failures, link_up);

	if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
			    err, libie_aq_str(hw->adminq.sq_last_status));
		err = -EAGAIN;
	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
			    err, libie_aq_str(hw->adminq.sq_last_status));
		err = -EAGAIN;
	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
		netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
			    err, libie_aq_str(hw->adminq.sq_last_status));
		err = -EAGAIN;
	}

	return err;
}
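
/* The pause handling above corresponds to the standard ethtool flow control
 * interface, e.g. (interface name hypothetical):
 *
 *   ethtool -a eth0              # query via ice_get_pauseparam
 *   ethtool -A eth0 rx on tx off # change via ice_set_pauseparam
 */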

/**
 * ice_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Returns the key size.
 */
static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
{
	return ICE_VSIQF_HKEY_ARRAY_SIZE;
}

/**
 * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Returns the table size.
 */
static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->vsi->rss_table_size;
}

/**
 * ice_get_rxfh - get the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @rxfh: pointer to param struct (indir, key, hfunc)
 *
 * Reads the indirection table directly from the hardware.
 */
static int
ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 qcount, offset;
	int err, i;
	u8 *lut;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		netdev_warn(netdev, "RSS is not supported on this VSI!\n");
		return -EOPNOTSUPP;
	}

	qcount = vsi->mqprio_qopt.qopt.count[0];
	offset = vsi->mqprio_qopt.qopt.offset[0];

	rxfh->hfunc = ETH_RSS_HASH_TOP;
	if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
		rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;

	if (!rxfh->indir)
		return 0;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	err = ice_get_rss(vsi, rxfh->key, lut, vsi->rss_table_size);
	if (err)
		goto out;

	if (ice_is_adq_active(pf)) {
		for (i = 0; i < vsi->rss_table_size; i++)
			rxfh->indir[i] = offset + lut[i] % qcount;
		goto out;
	}

	for (i = 0; i < vsi->rss_table_size; i++)
		rxfh->indir[i] = lut[i];

out:
	kfree(lut);
	return err;
}

/**
 * ice_set_rxfh - set the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @rxfh: pointer to param struct (indir, key, hfunc)
 * @extack: extended ACK from the Netlink message
 *
 * Returns -EINVAL if the table specifies an invalid queue ID, otherwise
 * returns 0 after programming the table.
 */
static int
ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
	     struct netlink_ext_ack *extack)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);
	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		/* RSS is not supported, so return an error */
		netdev_warn(netdev, "RSS is not configured on this VSI!\n");
		return -EIO;
	}

	if (ice_is_adq_active(pf)) {
		netdev_err(netdev, "Cannot change RSS params with ADQ configured.\n");
		return -EOPNOTSUPP;
	}

	/* Update the VSI's hash function */
	if (rxfh->input_xfrm & RXH_XFRM_SYM_XOR)
		hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;

	err = ice_set_rss_hfunc(vsi, hfunc);
	if (err)
		return err;

	if (rxfh->key) {
		if (!vsi->rss_hkey_user) {
			vsi->rss_hkey_user =
				devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE,
					     GFP_KERNEL);
			if (!vsi->rss_hkey_user)
				return -ENOMEM;
		}
		memcpy(vsi->rss_hkey_user, rxfh->key,
		       ICE_VSIQF_HKEY_ARRAY_SIZE);

		err = ice_set_rss_key(vsi, vsi->rss_hkey_user);
		if (err)
			return err;
	}

	if (!vsi->rss_lut_user) {
		vsi->rss_lut_user = devm_kzalloc(dev, vsi->rss_table_size,
						 GFP_KERNEL);
		if (!vsi->rss_lut_user)
			return -ENOMEM;
	}

	/* Each 32-bit entry pointed to by 'indir' is stored as one LUT entry */
	if (rxfh->indir) {
		int i;

		for (i = 0; i < vsi->rss_table_size; i++)
			vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]);
	} else {
		ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
				 vsi->rss_size);
	}

	err = ice_set_rss_lut(vsi, vsi->rss_lut_user, vsi->rss_table_size);
	if (err)
		return err;

	return 0;
}
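
/**
 * ice_get_ts_info - report PHC and timestamping capabilities
 * @dev: network interface device structure
 * @info: kernel ethtool timestamping info structure to fill in
 *
 * Falls back to the stock ethtool_op_get_ts_info() (software timestamping
 * only) unless the PTP subsystem is up and running.
 */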
ice_set_rss_lut(vsi, vsi->rss_lut_user, vsi->rss_table_size);
3735 if (err)
3736 return err;
3737
3738 return 0;
3739 }
3740
3741 static int
3742 ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
3743 {
3744 struct ice_pf *pf = ice_netdev_to_pf(dev);
3745
3746 /* only report timestamping if PTP is enabled */
3747 if (pf->ptp.state != ICE_PTP_READY)
3748 return ethtool_op_get_ts_info(dev, info);
3749
3750 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
3751 SOF_TIMESTAMPING_TX_HARDWARE |
3752 SOF_TIMESTAMPING_RX_HARDWARE |
3753 SOF_TIMESTAMPING_RAW_HARDWARE;
3754
3755 info->phc_index = ice_ptp_clock_index(pf);
3756
3757 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
3758
3759 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
3760
3761 return 0;
3762 }
3763
3764 /**
3765 * ice_get_max_txq - return the maximum number of Tx queues in a PF
3766 * @pf: PF structure
3767 */
3768 static int ice_get_max_txq(struct ice_pf *pf)
3769 {
3770 return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_txq);
3771 }
3772
3773 /**
3774 * ice_get_max_rxq - return the maximum number of Rx queues in a PF
3775 * @pf: PF structure
3776 */
3777 static int ice_get_max_rxq(struct ice_pf *pf)
3778 {
3779 return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_rxq);
3780 }
3781
3782 /**
3783 * ice_get_combined_cnt - return the current number of combined channels
3784 * @vsi: PF VSI pointer
3785 *
3786 * Go through all queue vectors and count the ones that have both an Rx and a
3787 * Tx ring attached.
3788 */
3789 static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
3790 {
3791 u32 combined = 0;
3792 int q_idx;
3793
3794 ice_for_each_q_vector(vsi, q_idx) {
3795 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
3796
3797 combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx);
3798 }
3799
3800 return combined;
3801 }
3802
3803 /**
3804 * ice_get_channels - get the current and max supported channels
3805 * @dev: network interface device structure
3806 * @ch: ethtool channel data structure
3807 */
3808 static void
3809 ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
3810 {
3811 struct ice_netdev_priv *np = netdev_priv(dev);
3812 struct ice_vsi *vsi = np->vsi;
3813 struct ice_pf *pf = vsi->back;
3814
3815 /* report maximum channels */
3816 ch->max_rx = ice_get_max_rxq(pf);
3817 ch->max_tx = ice_get_max_txq(pf);
3818 ch->max_combined = min_t(int, ch->max_rx, ch->max_tx);
3819
3820 /* report current channels */
3821 ch->combined_count = ice_get_combined_cnt(vsi);
3822 ch->rx_count = vsi->num_rxq - ch->combined_count;
3823 ch->tx_count = vsi->num_txq - ch->combined_count;
3824
3825 /* report other queues */
3826 ch->other_count = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
3827 ch->max_other = ch->other_count;
3828 }
3829
3830 /**
3831 * ice_get_valid_rss_size - return valid number of RSS queues
3832 * @hw: pointer to the HW structure
3833 * @new_size: requested RSS queues
3834 */
3835 static int ice_get_valid_rss_size(struct ice_hw *hw, int new_size)
3836 {
3837 struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;
3838
3839 return min_t(int, new_size, BIT(caps->rss_table_entry_width));
3840 }
3841
3842 /**
3843 * ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size
3844 * @vsi: VSI to reconfigure RSS LUT on
3845 * @req_rss_size: requested range of queue numbers for hashing
3846 *
3847 * Set the VSI's RSS parameters, configure the RSS LUT based on these.
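 *
 * The default LUT produced below by ice_fill_rss_lut() spreads entries over
 * the first vsi->rss_size queues in a round-robin pattern; e.g. with an RSS
 * size of 4 the table is filled as 0, 1, 2, 3, 0, 1, 2, 3, ...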
3848 */
3849 static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
3850 {
3851 struct ice_pf *pf = vsi->back;
3852 struct device *dev;
3853 struct ice_hw *hw;
3854 int err;
3855 u8 *lut;
3856
3857 dev = ice_pf_to_dev(pf);
3858 hw = &pf->hw;
3859
3860 if (!req_rss_size)
3861 return -EINVAL;
3862
3863 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
3864 if (!lut)
3865 return -ENOMEM;
3866
3867 /* set RSS LUT parameters */
3868 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
3869 vsi->rss_size = 1;
3870 else
3871 vsi->rss_size = ice_get_valid_rss_size(hw, req_rss_size);
3872
3873 /* create/set RSS LUT */
3874 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
3875 err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
3876 if (err)
3877 dev_err(dev, "Cannot set RSS lut, err %d aq_err %s\n", err,
3878 libie_aq_str(hw->adminq.sq_last_status));
3879
3880 kfree(lut);
3881 return err;
3882 }
3883
3884 /**
3885 * ice_set_channels - set the number of channels
3886 * @dev: network interface device structure
3887 * @ch: ethtool channel data structure
3888 */
3889 static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
3890 {
3891 struct ice_netdev_priv *np = netdev_priv(dev);
3892 struct ice_vsi *vsi = np->vsi;
3893 struct ice_pf *pf = vsi->back;
3894 int new_rx = 0, new_tx = 0;
3895 bool locked = false;
3896 int ret = 0;
3897
3898 /* do not support changing channels in Safe Mode */
3899 if (ice_is_safe_mode(pf)) {
3900 netdev_err(dev, "Changing channel in Safe Mode is not supported\n");
3901 return -EOPNOTSUPP;
3902 }
3903 /* do not support changing other_count */
3904 if (ch->other_count != (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1U : 0U))
3905 return -EINVAL;
3906
3907 if (ice_is_adq_active(pf)) {
3908 netdev_err(dev, "Cannot set channels with ADQ configured.\n");
3909 return -EOPNOTSUPP;
3910 }
3911
3912 if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) {
3913 netdev_err(dev, "Cannot set channels when Flow Director filters are active\n");
3914 return -EOPNOTSUPP;
3915 }
3916
3917 if (ch->rx_count && ch->tx_count) {
3918 netdev_err(dev, "Dedicated RX and TX channels cannot be used simultaneously\n");
3919 return -EINVAL;
3920 }
3921
3922 new_rx = ch->combined_count + ch->rx_count;
3923 new_tx = ch->combined_count + ch->tx_count;
3924
3925 if (new_rx < vsi->tc_cfg.numtc) {
3926 netdev_err(dev, "Cannot set fewer Rx channels than Traffic Classes (%u)\n",
3927 vsi->tc_cfg.numtc);
3928 return -EINVAL;
3929 }
3930 if (new_tx < vsi->tc_cfg.numtc) {
3931 netdev_err(dev, "Cannot set fewer Tx channels than Traffic Classes (%u)\n",
3932 vsi->tc_cfg.numtc);
3933 return -EINVAL;
3934 }
3935 if (new_rx > ice_get_max_rxq(pf)) {
3936 netdev_err(dev, "Maximum allowed Rx channels is %d\n",
3937 ice_get_max_rxq(pf));
3938 return -EINVAL;
3939 }
3940 if (new_tx > ice_get_max_txq(pf)) {
3941 netdev_err(dev, "Maximum allowed Tx channels is %d\n",
3942 ice_get_max_txq(pf));
3943 return -EINVAL;
3944 }
3945
3946 if (pf->cdev_info && pf->cdev_info->adev) {
3947 mutex_lock(&pf->adev_mutex);
3948 device_lock(&pf->cdev_info->adev->dev);
3949 locked = true;
3950 if (pf->cdev_info->adev->dev.driver) {
3951 netdev_err(dev, "Cannot change channels when RDMA is active\n");
3952 ret = -EBUSY;
3953 goto adev_unlock;
3954 }
3955 }
3956
3957 ice_vsi_recfg_qs(vsi, new_rx, new_tx, locked);
3958
3959 if (!netif_is_rxfh_configured(dev)) {
3960 ret = ice_vsi_set_dflt_rss_lut(vsi, new_rx);
3961 goto adev_unlock;
3962 }
3963
3964 /* Update rss_size due to
change in Rx queues */ 3965 vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx); 3966 3967 adev_unlock: 3968 if (locked) { 3969 device_unlock(&pf->cdev_info->adev->dev); 3970 mutex_unlock(&pf->adev_mutex); 3971 } 3972 return ret; 3973 } 3974 3975 /** 3976 * ice_get_wol - get current Wake on LAN configuration 3977 * @netdev: network interface device structure 3978 * @wol: Ethtool structure to retrieve WoL settings 3979 */ 3980 static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 3981 { 3982 struct ice_netdev_priv *np = netdev_priv(netdev); 3983 struct ice_pf *pf = np->vsi->back; 3984 3985 if (np->vsi->type != ICE_VSI_PF) 3986 netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n"); 3987 3988 /* Get WoL settings based on the HW capability */ 3989 if (ice_is_wol_supported(&pf->hw)) { 3990 wol->supported = WAKE_MAGIC; 3991 wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0; 3992 } else { 3993 wol->supported = 0; 3994 wol->wolopts = 0; 3995 } 3996 } 3997 3998 /** 3999 * ice_set_wol - set Wake on LAN on supported device 4000 * @netdev: network interface device structure 4001 * @wol: Ethtool structure to set WoL 4002 */ 4003 static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 4004 { 4005 struct ice_netdev_priv *np = netdev_priv(netdev); 4006 struct ice_vsi *vsi = np->vsi; 4007 struct ice_pf *pf = vsi->back; 4008 4009 if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(&pf->hw)) 4010 return -EOPNOTSUPP; 4011 4012 /* only magic packet is supported */ 4013 if (wol->wolopts && wol->wolopts != WAKE_MAGIC) 4014 return -EOPNOTSUPP; 4015 4016 /* Set WoL only if there is a new value */ 4017 if (pf->wol_ena != !!wol->wolopts) { 4018 pf->wol_ena = !!wol->wolopts; 4019 device_set_wakeup_enable(ice_pf_to_dev(pf), pf->wol_ena); 4020 netdev_dbg(netdev, "WoL magic packet %sabled\n", 4021 pf->wol_ena ? "en" : "dis"); 4022 } 4023 4024 return 0; 4025 } 4026 4027 /** 4028 * ice_get_rc_coalesce - get ITR values for specific ring container 4029 * @ec: ethtool structure to fill with driver's coalesce settings 4030 * @rc: ring container that the ITR values will come from 4031 * 4032 * Query the device for ice_ring_container specific ITR values. This is 4033 * done per ice_ring_container because each q_vector can have 1 or more rings 4034 * and all of said ring(s) will have the same ITR values. 4035 * 4036 * Returns 0 on success, negative otherwise. 4037 */ 4038 static int 4039 ice_get_rc_coalesce(struct ethtool_coalesce *ec, struct ice_ring_container *rc) 4040 { 4041 if (!rc->rx_ring) 4042 return -EINVAL; 4043 4044 switch (rc->type) { 4045 case ICE_RX_CONTAINER: 4046 ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc); 4047 ec->rx_coalesce_usecs = rc->itr_setting; 4048 ec->rx_coalesce_usecs_high = rc->rx_ring->q_vector->intrl; 4049 break; 4050 case ICE_TX_CONTAINER: 4051 ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc); 4052 ec->tx_coalesce_usecs = rc->itr_setting; 4053 break; 4054 default: 4055 dev_dbg(ice_pf_to_dev(rc->rx_ring->vsi->back), "Invalid c_type %d\n", rc->type); 4056 return -EINVAL; 4057 } 4058 4059 return 0; 4060 } 4061 4062 /** 4063 * ice_get_q_coalesce - get a queue's ITR/INTRL (coalesce) settings 4064 * @vsi: VSI associated to the queue for getting ITR/INTRL (coalesce) settings 4065 * @ec: coalesce settings to program the device with 4066 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index 4067 * 4068 * Return 0 on success, and negative under the following conditions: 4069 * 1. 
Getting Tx or Rx ITR/INTRL (coalesce) settings failed.
4070 * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
4071 */
4072 static int
4073 ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
4074 {
4075 if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
4076 if (ice_get_rc_coalesce(ec,
4077 &vsi->rx_rings[q_num]->q_vector->rx))
4078 return -EINVAL;
4079 if (ice_get_rc_coalesce(ec,
4080 &vsi->tx_rings[q_num]->q_vector->tx))
4081 return -EINVAL;
4082 } else if (q_num < vsi->num_rxq) {
4083 if (ice_get_rc_coalesce(ec,
4084 &vsi->rx_rings[q_num]->q_vector->rx))
4085 return -EINVAL;
4086 } else if (q_num < vsi->num_txq) {
4087 if (ice_get_rc_coalesce(ec,
4088 &vsi->tx_rings[q_num]->q_vector->tx))
4089 return -EINVAL;
4090 } else {
4091 return -EINVAL;
4092 }
4093
4094 return 0;
4095 }
4096
4097 /**
4098 * __ice_get_coalesce - get ITR/INTRL values for the device
4099 * @netdev: pointer to the netdev associated with this query
4100 * @ec: ethtool structure to fill with driver's coalesce settings
4101 * @q_num: queue number to get the coalesce settings for
4102 *
4103 * If the caller passes in a negative q_num then we return coalesce settings
4104 * based on queue number 0, else use the actual q_num passed in.
4105 */
4106 static int
4107 __ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
4108 int q_num)
4109 {
4110 struct ice_netdev_priv *np = netdev_priv(netdev);
4111 struct ice_vsi *vsi = np->vsi;
4112
4113 if (q_num < 0)
4114 q_num = 0;
4115
4116 if (ice_get_q_coalesce(vsi, ec, q_num))
4117 return -EINVAL;
4118
4119 return 0;
4120 }
4121
4122 static int ice_get_coalesce(struct net_device *netdev,
4123 struct ethtool_coalesce *ec,
4124 struct kernel_ethtool_coalesce *kernel_coal,
4125 struct netlink_ext_ack *extack)
4126 {
4127 return __ice_get_coalesce(netdev, ec, -1);
4128 }
4129
4130 static int
4131 ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
4132 struct ethtool_coalesce *ec)
4133 {
4134 return __ice_get_coalesce(netdev, ec, q_num);
4135 }
4136
4137 /**
4138 * ice_set_rc_coalesce - set ITR values for specific ring container
4139 * @ec: ethtool structure from user to update ITR settings
4140 * @rc: ring container that the ITR values will be set on
4141 * @vsi: VSI associated to the ring container
4142 *
4143 * Set specific ITR values. This is done per ice_ring_container because each
4144 * q_vector can have 1 or more rings and all of said ring(s) will have the same
4145 * ITR values.
4146 *
4147 * Returns 0 on success, negative otherwise.
4148 */
4149 static int
4150 ice_set_rc_coalesce(struct ethtool_coalesce *ec,
4151 struct ice_ring_container *rc, struct ice_vsi *vsi)
4152 {
4153 const char *c_type_str = (rc->type == ICE_RX_CONTAINER) ?
"rx" : "tx"; 4154 u32 use_adaptive_coalesce, coalesce_usecs; 4155 struct ice_pf *pf = vsi->back; 4156 u16 itr_setting; 4157 4158 if (!rc->rx_ring) 4159 return -EINVAL; 4160 4161 switch (rc->type) { 4162 case ICE_RX_CONTAINER: 4163 { 4164 struct ice_q_vector *q_vector = rc->rx_ring->q_vector; 4165 4166 if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL || 4167 (ec->rx_coalesce_usecs_high && 4168 ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) { 4169 netdev_info(vsi->netdev, "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n", 4170 c_type_str, pf->hw.intrl_gran, 4171 ICE_MAX_INTRL); 4172 return -EINVAL; 4173 } 4174 if (ec->rx_coalesce_usecs_high != q_vector->intrl && 4175 (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce)) { 4176 netdev_info(vsi->netdev, "Invalid value, %s-usecs-high cannot be changed if adaptive-tx or adaptive-rx is enabled\n", 4177 c_type_str); 4178 return -EINVAL; 4179 } 4180 if (ec->rx_coalesce_usecs_high != q_vector->intrl) 4181 q_vector->intrl = ec->rx_coalesce_usecs_high; 4182 4183 use_adaptive_coalesce = ec->use_adaptive_rx_coalesce; 4184 coalesce_usecs = ec->rx_coalesce_usecs; 4185 4186 break; 4187 } 4188 case ICE_TX_CONTAINER: 4189 use_adaptive_coalesce = ec->use_adaptive_tx_coalesce; 4190 coalesce_usecs = ec->tx_coalesce_usecs; 4191 4192 break; 4193 default: 4194 dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n", 4195 rc->type); 4196 return -EINVAL; 4197 } 4198 4199 itr_setting = rc->itr_setting; 4200 if (coalesce_usecs != itr_setting && use_adaptive_coalesce) { 4201 netdev_info(vsi->netdev, "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n", 4202 c_type_str, c_type_str); 4203 return -EINVAL; 4204 } 4205 4206 if (coalesce_usecs > ICE_ITR_MAX) { 4207 netdev_info(vsi->netdev, "Invalid value, %s-usecs range is 0-%d\n", 4208 c_type_str, ICE_ITR_MAX); 4209 return -EINVAL; 4210 } 4211 4212 if (use_adaptive_coalesce) { 4213 rc->itr_mode = ITR_DYNAMIC; 4214 } else { 4215 rc->itr_mode = ITR_STATIC; 4216 /* store user facing value how it was set */ 4217 rc->itr_setting = coalesce_usecs; 4218 /* write the change to the register */ 4219 ice_write_itr(rc, coalesce_usecs); 4220 /* force writes to take effect immediately, the flush shouldn't 4221 * be done in the functions above because the intent is for 4222 * them to do lazy writes. 4223 */ 4224 ice_flush(&pf->hw); 4225 } 4226 4227 return 0; 4228 } 4229 4230 /** 4231 * ice_set_q_coalesce - set a queue's ITR/INTRL (coalesce) settings 4232 * @vsi: VSI associated to the queue that need updating 4233 * @ec: coalesce settings to program the device with 4234 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index 4235 * 4236 * Return 0 on success, and negative under the following conditions: 4237 * 1. Setting Tx or Rx ITR/INTRL (coalesce) settings failed. 4238 * 2. The q_num passed in is not a valid number/index for Tx and Rx rings. 
4239 */
4240 static int
4241 ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
4242 {
4243 if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
4244 if (ice_set_rc_coalesce(ec,
4245 &vsi->rx_rings[q_num]->q_vector->rx,
4246 vsi))
4247 return -EINVAL;
4248
4249 if (ice_set_rc_coalesce(ec,
4250 &vsi->tx_rings[q_num]->q_vector->tx,
4251 vsi))
4252 return -EINVAL;
4253 } else if (q_num < vsi->num_rxq) {
4254 if (ice_set_rc_coalesce(ec,
4255 &vsi->rx_rings[q_num]->q_vector->rx,
4256 vsi))
4257 return -EINVAL;
4258 } else if (q_num < vsi->num_txq) {
4259 if (ice_set_rc_coalesce(ec,
4260 &vsi->tx_rings[q_num]->q_vector->tx,
4261 vsi))
4262 return -EINVAL;
4263 } else {
4264 return -EINVAL;
4265 }
4266
4267 return 0;
4268 }
4269
4270 /**
4271 * ice_print_if_odd_usecs - print message if user tries to set odd [tx|rx]-usecs
4272 * @netdev: netdev used for print
4273 * @itr_setting: previous user setting
4274 * @use_adaptive_coalesce: if adaptive coalesce is enabled or being enabled
4275 * @coalesce_usecs: requested value of [tx|rx]-usecs
4276 * @c_type_str: either "rx" or "tx" to match user set field of [tx|rx]-usecs
4277 */
4278 static void
4279 ice_print_if_odd_usecs(struct net_device *netdev, u16 itr_setting,
4280 u32 use_adaptive_coalesce, u32 coalesce_usecs,
4281 const char *c_type_str)
4282 {
4283 if (use_adaptive_coalesce)
4284 return;
4285
4286 if (itr_setting != coalesce_usecs && (coalesce_usecs % 2))
4287 netdev_info(netdev, "User set %s-usecs to %d, device only supports even values. Rounding down and attempting to set %s-usecs to %d\n",
4288 c_type_str, coalesce_usecs, c_type_str,
4289 ITR_REG_ALIGN(coalesce_usecs));
4290 }
4291
4292 /**
4293 * __ice_set_coalesce - set ITR/INTRL values for the device
4294 * @netdev: pointer to the netdev associated with this request
4295 * @ec: ethtool structure containing the user's coalesce settings
4296 * @q_num: queue number to set the coalesce settings for
4297 *
4298 * If the caller passes in a negative q_num then we set the coalesce settings
4299 * for all Tx/Rx queues, else use the actual q_num passed in.
4300 */
4301 static int
4302 __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
4303 int q_num)
4304 {
4305 struct ice_netdev_priv *np = netdev_priv(netdev);
4306 struct ice_vsi *vsi = np->vsi;
4307
4308 if (q_num < 0) {
4309 struct ice_q_vector *q_vector = vsi->q_vectors[0];
4310 int v_idx;
4311
4312 if (q_vector) {
4313 ice_print_if_odd_usecs(netdev, q_vector->rx.itr_setting,
4314 ec->use_adaptive_rx_coalesce,
4315 ec->rx_coalesce_usecs, "rx");
4316
4317 ice_print_if_odd_usecs(netdev, q_vector->tx.itr_setting,
4318 ec->use_adaptive_tx_coalesce,
4319 ec->tx_coalesce_usecs, "tx");
4320 }
4321
4322 ice_for_each_q_vector(vsi, v_idx) {
4323 /* In some cases if DCB is configured the num_[rx|tx]q
4324 * can be less than vsi->num_q_vectors.
This check 4325 * accounts for that so we don't report a false failure 4326 */ 4327 if (v_idx >= vsi->num_rxq && v_idx >= vsi->num_txq) 4328 goto set_complete; 4329 4330 if (ice_set_q_coalesce(vsi, ec, v_idx)) 4331 return -EINVAL; 4332 4333 ice_set_q_vector_intrl(vsi->q_vectors[v_idx]); 4334 } 4335 goto set_complete; 4336 } 4337 4338 if (ice_set_q_coalesce(vsi, ec, q_num)) 4339 return -EINVAL; 4340 4341 ice_set_q_vector_intrl(vsi->q_vectors[q_num]); 4342 4343 set_complete: 4344 return 0; 4345 } 4346 4347 static int ice_set_coalesce(struct net_device *netdev, 4348 struct ethtool_coalesce *ec, 4349 struct kernel_ethtool_coalesce *kernel_coal, 4350 struct netlink_ext_ack *extack) 4351 { 4352 return __ice_set_coalesce(netdev, ec, -1); 4353 } 4354 4355 static int 4356 ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num, 4357 struct ethtool_coalesce *ec) 4358 { 4359 return __ice_set_coalesce(netdev, ec, q_num); 4360 } 4361 4362 static void 4363 ice_repr_get_drvinfo(struct net_device *netdev, 4364 struct ethtool_drvinfo *drvinfo) 4365 { 4366 struct ice_repr *repr = ice_netdev_to_repr(netdev); 4367 4368 if (repr->ops.ready(repr)) 4369 return; 4370 4371 __ice_get_drvinfo(netdev, drvinfo, repr->src_vsi); 4372 } 4373 4374 static void 4375 ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 4376 { 4377 struct ice_repr *repr = ice_netdev_to_repr(netdev); 4378 4379 /* for port representors only ETH_SS_STATS is supported */ 4380 if (repr->ops.ready(repr) || stringset != ETH_SS_STATS) 4381 return; 4382 4383 __ice_get_strings(netdev, stringset, data, repr->src_vsi); 4384 } 4385 4386 static void 4387 ice_repr_get_ethtool_stats(struct net_device *netdev, 4388 struct ethtool_stats __always_unused *stats, 4389 u64 *data) 4390 { 4391 struct ice_repr *repr = ice_netdev_to_repr(netdev); 4392 4393 if (repr->ops.ready(repr)) 4394 return; 4395 4396 __ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi); 4397 } 4398 4399 static int ice_repr_get_sset_count(struct net_device *netdev, int sset) 4400 { 4401 switch (sset) { 4402 case ETH_SS_STATS: 4403 return ICE_VSI_STATS_LEN; 4404 default: 4405 return -EOPNOTSUPP; 4406 } 4407 } 4408 4409 #define ICE_I2C_EEPROM_DEV_ADDR 0xA0 4410 #define ICE_I2C_EEPROM_DEV_ADDR2 0xA2 4411 #define ICE_MODULE_TYPE_SFP 0x03 4412 #define ICE_MODULE_TYPE_QSFP_PLUS 0x0D 4413 #define ICE_MODULE_TYPE_QSFP28 0x11 4414 #define ICE_MODULE_SFF_ADDR_MODE 0x04 4415 #define ICE_MODULE_SFF_DIAG_CAPAB 0x40 4416 #define ICE_MODULE_REVISION_ADDR 0x01 4417 #define ICE_MODULE_SFF_8472_COMP 0x5E 4418 #define ICE_MODULE_SFF_8472_SWAP 0x5C 4419 #define ICE_MODULE_QSFP_MAX_LEN 640 4420 4421 /** 4422 * ice_get_module_info - get SFF module type and revision information 4423 * @netdev: network interface device structure 4424 * @modinfo: module EEPROM size and layout information structure 4425 */ 4426 static int 4427 ice_get_module_info(struct net_device *netdev, 4428 struct ethtool_modinfo *modinfo) 4429 { 4430 struct ice_pf *pf = ice_netdev_to_pf(netdev); 4431 struct ice_hw *hw = &pf->hw; 4432 u8 sff8472_comp = 0; 4433 u8 sff8472_swap = 0; 4434 u8 sff8636_rev = 0; 4435 u8 value = 0; 4436 int status; 4437 4438 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00, 4439 0, &value, 1, 0, NULL); 4440 if (status) 4441 return status; 4442 4443 switch (value) { 4444 case ICE_MODULE_TYPE_SFP: 4445 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 4446 ICE_MODULE_SFF_8472_COMP, 0x00, 0, 4447 &sff8472_comp, 1, 0, NULL); 4448 if (status) 4449 return status; 4450 
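/* The address-mode/diagnostic byte read next determines whether the
 * module needs SFF-8079 paged addressing or exposes the full SFF-8472
 * diagnostic map.
 */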
status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 4451 ICE_MODULE_SFF_8472_SWAP, 0x00, 0, 4452 &sff8472_swap, 1, 0, NULL); 4453 if (status) 4454 return status; 4455 4456 if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) { 4457 modinfo->type = ETH_MODULE_SFF_8079; 4458 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; 4459 } else if (sff8472_comp && 4460 (sff8472_swap & ICE_MODULE_SFF_DIAG_CAPAB)) { 4461 modinfo->type = ETH_MODULE_SFF_8472; 4462 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 4463 } else { 4464 modinfo->type = ETH_MODULE_SFF_8079; 4465 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; 4466 } 4467 break; 4468 case ICE_MODULE_TYPE_QSFP_PLUS: 4469 case ICE_MODULE_TYPE_QSFP28: 4470 status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 4471 ICE_MODULE_REVISION_ADDR, 0x00, 0, 4472 &sff8636_rev, 1, 0, NULL); 4473 if (status) 4474 return status; 4475 /* Check revision compliance */ 4476 if (sff8636_rev > 0x02) { 4477 /* Module is SFF-8636 compliant */ 4478 modinfo->type = ETH_MODULE_SFF_8636; 4479 modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN; 4480 } else { 4481 modinfo->type = ETH_MODULE_SFF_8436; 4482 modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN; 4483 } 4484 break; 4485 default: 4486 netdev_warn(netdev, "SFF Module Type not recognized.\n"); 4487 return -EINVAL; 4488 } 4489 return 0; 4490 } 4491 4492 /** 4493 * ice_get_module_eeprom - fill buffer with SFF EEPROM contents 4494 * @netdev: network interface device structure 4495 * @ee: EEPROM dump request structure 4496 * @data: buffer to be filled with EEPROM contents 4497 */ 4498 static int 4499 ice_get_module_eeprom(struct net_device *netdev, 4500 struct ethtool_eeprom *ee, u8 *data) 4501 { 4502 struct ice_pf *pf = ice_netdev_to_pf(netdev); 4503 #define SFF_READ_BLOCK_SIZE 8 4504 u8 value[SFF_READ_BLOCK_SIZE] = { 0 }; 4505 u8 addr = ICE_I2C_EEPROM_DEV_ADDR; 4506 struct ice_hw *hw = &pf->hw; 4507 bool is_sfp = false; 4508 unsigned int i, j; 4509 u16 offset = 0; 4510 u8 page = 0; 4511 int status; 4512 4513 if (!ee || !ee->len || !data) 4514 return -EINVAL; 4515 4516 status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0, 4517 NULL); 4518 if (status) 4519 return status; 4520 4521 if (value[0] == ICE_MODULE_TYPE_SFP) 4522 is_sfp = true; 4523 4524 memset(data, 0, ee->len); 4525 for (i = 0; i < ee->len; i += SFF_READ_BLOCK_SIZE) { 4526 offset = i + ee->offset; 4527 page = 0; 4528 4529 /* Check if we need to access the other memory page */ 4530 if (is_sfp) { 4531 if (offset >= ETH_MODULE_SFF_8079_LEN) { 4532 offset -= ETH_MODULE_SFF_8079_LEN; 4533 addr = ICE_I2C_EEPROM_DEV_ADDR2; 4534 } 4535 } else { 4536 while (offset >= ETH_MODULE_SFF_8436_LEN) { 4537 /* Compute memory page number and offset. */ 4538 offset -= ETH_MODULE_SFF_8436_LEN / 2; 4539 page++; 4540 } 4541 } 4542 4543 /* Bit 2 of EEPROM address 0x02 declares upper 4544 * pages are disabled on QSFP modules. 4545 * SFP modules only ever use page 0. 4546 */ 4547 if (page == 0 || !(data[0x2] & 0x4)) { 4548 u32 copy_len; 4549 4550 /* If i2c bus is busy due to slow page change or 4551 * link management access, call can fail. This is normal. 4552 * So we retry this a few times. 
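 * Up to four attempts are made, sleeping 1.5-2.5 ms after each failed
 * read before trying again.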
4553 */
4554 for (j = 0; j < 4; j++) {
4555 status = ice_aq_sff_eeprom(hw, 0, addr, offset, page,
4556 !is_sfp, value,
4557 SFF_READ_BLOCK_SIZE,
4558 0, NULL);
4559 netdev_dbg(netdev, "SFF %02X %02X %02X %X = %02X%02X%02X%02X.%02X%02X%02X%02X (%X)\n",
4560 addr, offset, page, is_sfp,
4561 value[0], value[1], value[2], value[3],
4562 value[4], value[5], value[6], value[7],
4563 status);
4564 if (status) {
4565 usleep_range(1500, 2500);
4566 memset(value, 0, SFF_READ_BLOCK_SIZE);
4567 continue;
4568 }
4569 break;
4570 }
4571
4572 /* Make sure we have enough room for the new block */
4573 copy_len = min_t(u32, SFF_READ_BLOCK_SIZE, ee->len - i);
4574 memcpy(data + i, value, copy_len);
4575 }
4576 }
4577 return 0;
4578 }
4579
4580 /**
4581 * ice_get_port_fec_stats - returns FEC correctable, uncorrectable stats per
4582 * pcsquad, pcsport
4583 * @hw: pointer to the HW struct
4584 * @pcs_quad: pcsquad for input port
4585 * @pcs_port: pcsport for input port
4586 * @fec_stats: buffer to hold FEC statistics for given port
4587 *
4588 * Return: 0 on success, negative on failure.
4589 */
4590 static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
4591 struct ethtool_fec_stats *fec_stats)
4592 {
4593 u32 fec_uncorr_low_val = 0, fec_uncorr_high_val = 0;
4594 u32 fec_corr_low_val = 0, fec_corr_high_val = 0;
4595 int err;
4596
4597 if (pcs_quad > 1 || pcs_port > 3)
4598 return -EINVAL;
4599
4600 err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_LOW,
4601 &fec_corr_low_val);
4602 if (err)
4603 return err;
4604
4605 err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_HIGH,
4606 &fec_corr_high_val);
4607 if (err)
4608 return err;
4609
4610 err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
4611 ICE_FEC_UNCORR_LOW,
4612 &fec_uncorr_low_val);
4613 if (err)
4614 return err;
4615
4616 err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
4617 ICE_FEC_UNCORR_HIGH,
4618 &fec_uncorr_high_val);
4619 if (err)
4620 return err;
4621
4622 fec_stats->corrected_blocks.total = (fec_corr_high_val << 16) +
4623 fec_corr_low_val;
4624 fec_stats->uncorrectable_blocks.total = (fec_uncorr_high_val << 16) +
4625 fec_uncorr_low_val;
4626 return 0;
4627 }
4628
4629 /**
4630 * ice_get_fec_stats - returns FEC correctable, uncorrectable stats per netdev
4631 * @netdev: network interface device structure
4632 * @fec_stats: buffer to hold FEC statistics for given port
4633 * @hist: buffer to put FEC histogram statistics for given port
4634 *
4635 */
4636 static void ice_get_fec_stats(struct net_device *netdev,
4637 struct ethtool_fec_stats *fec_stats,
4638 struct ethtool_fec_hist *hist)
4639 {
4640 struct ice_netdev_priv *np = netdev_priv(netdev);
4641 struct ice_port_topology port_topology;
4642 struct ice_port_info *pi;
4643 struct ice_pf *pf;
4644 struct ice_hw *hw;
4645 int err;
4646
4647 pf = np->vsi->back;
4648 hw = &pf->hw;
4649 pi = np->vsi->port_info;
4650
4651 /* FEC stats are not supported if not the PF VSI */
4652 if (np->vsi->type != ICE_VSI_PF || !pi)
4653 return;
4654
4655 err = ice_get_port_topology(hw, pi->lport, &port_topology);
4656 if (err) {
4657 netdev_info(netdev, "Failed to get port topology for Lport %d\n",
4658 pi->lport);
4659 return;
4660 }
4661
4662 /* Get FEC correctable, uncorrectable counter */
4663 err = ice_get_port_fec_stats(hw, port_topology.pcs_quad_select,
4664 port_topology.pcs_port, fec_stats);
4665 if (err)
4666 netdev_info(netdev, "FEC stats get failed Lport %d Err %d\n",
4667 pi->lport, err);
4668 }
4669
4670 static void
ice_get_eth_mac_stats(struct net_device *netdev,
4671 struct ethtool_eth_mac_stats *mac_stats)
4672 {
4673 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4674 struct ice_hw_port_stats *ps = &pf->stats;
4675
4676 mac_stats->FramesTransmittedOK = ps->eth.tx_unicast +
4677 ps->eth.tx_multicast +
4678 ps->eth.tx_broadcast;
4679 mac_stats->FramesReceivedOK = ps->eth.rx_unicast +
4680 ps->eth.rx_multicast +
4681 ps->eth.rx_broadcast;
4682 mac_stats->FrameCheckSequenceErrors = ps->crc_errors;
4683 mac_stats->OctetsTransmittedOK = ps->eth.tx_bytes;
4684 mac_stats->OctetsReceivedOK = ps->eth.rx_bytes;
4685 mac_stats->MulticastFramesXmittedOK = ps->eth.tx_multicast;
4686 mac_stats->BroadcastFramesXmittedOK = ps->eth.tx_broadcast;
4687 mac_stats->MulticastFramesReceivedOK = ps->eth.rx_multicast;
4688 mac_stats->BroadcastFramesReceivedOK = ps->eth.rx_broadcast;
4689 mac_stats->InRangeLengthErrors = ps->rx_len_errors;
4690 mac_stats->FrameTooLongErrors = ps->rx_oversize;
4691 }
4692
4693 static void ice_get_pause_stats(struct net_device *netdev,
4694 struct ethtool_pause_stats *pause_stats)
4695 {
4696 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4697 struct ice_hw_port_stats *ps = &pf->stats;
4698
4699 pause_stats->tx_pause_frames = ps->link_xon_tx + ps->link_xoff_tx;
4700 pause_stats->rx_pause_frames = ps->link_xon_rx + ps->link_xoff_rx;
4701 }
4702
4703 static const struct ethtool_rmon_hist_range ice_rmon_ranges[] = {
4704 { 0, 64 },
4705 { 65, 127 },
4706 { 128, 255 },
4707 { 256, 511 },
4708 { 512, 1023 },
4709 { 1024, 1522 },
4710 { 1523, 9522 },
4711 {}
4712 };
4713
4714 static void ice_get_rmon_stats(struct net_device *netdev,
4715 struct ethtool_rmon_stats *rmon,
4716 const struct ethtool_rmon_hist_range **ranges)
4717 {
4718 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4719 struct ice_hw_port_stats *ps = &pf->stats;
4720
4721 rmon->undersize_pkts = ps->rx_undersize;
4722 rmon->oversize_pkts = ps->rx_oversize;
4723 rmon->fragments = ps->rx_fragments;
4724 rmon->jabbers = ps->rx_jabber;
4725
4726 rmon->hist[0] = ps->rx_size_64;
4727 rmon->hist[1] = ps->rx_size_127;
4728 rmon->hist[2] = ps->rx_size_255;
4729 rmon->hist[3] = ps->rx_size_511;
4730 rmon->hist[4] = ps->rx_size_1023;
4731 rmon->hist[5] = ps->rx_size_1522;
4732 rmon->hist[6] = ps->rx_size_big;
4733
4734 rmon->hist_tx[0] = ps->tx_size_64;
4735 rmon->hist_tx[1] = ps->tx_size_127;
4736 rmon->hist_tx[2] = ps->tx_size_255;
4737 rmon->hist_tx[3] = ps->tx_size_511;
4738 rmon->hist_tx[4] = ps->tx_size_1023;
4739 rmon->hist_tx[5] = ps->tx_size_1522;
4740 rmon->hist_tx[6] = ps->tx_size_big;
4741
4742 *ranges = ice_rmon_ranges;
4743 }
4744
4745 /**
 * ice_get_ts_stats - provide timestamping stats
4746 * @netdev: the netdevice pointer from ethtool
4747 * @ts_stats: the ethtool data structure to fill in
4748 */
4749 static void ice_get_ts_stats(struct net_device *netdev,
4750 struct ethtool_ts_stats *ts_stats)
4751 {
4752 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4753 struct ice_ptp *ptp = &pf->ptp;
4754
4755 ts_stats->pkts = ptp->tx_hwtstamp_good;
4756 ts_stats->err = ptp->tx_hwtstamp_skipped +
4757 ptp->tx_hwtstamp_flushed +
4758 ptp->tx_hwtstamp_discarded;
4759 ts_stats->lost = ptp->tx_hwtstamp_timeouts;
4760 }
4761
4762 #define ICE_ETHTOOL_PFR (ETH_RESET_IRQ | ETH_RESET_DMA | \
4763 ETH_RESET_FILTER | ETH_RESET_OFFLOAD)
4764
4765 #define ICE_ETHTOOL_CORER ((ICE_ETHTOOL_PFR | ETH_RESET_RAM) << \
4766 ETH_RESET_SHARED_SHIFT)
4767
4768 #define ICE_ETHTOOL_GLOBR (ICE_ETHTOOL_CORER | \
4769 (ETH_RESET_MAC << ETH_RESET_SHARED_SHIFT) | \
4770
(ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))
4771
4772 #define ICE_ETHTOOL_VFR ICE_ETHTOOL_PFR
4773
4774 /**
4775 * ice_ethtool_reset - triggers a given type of reset
4776 * @dev: network interface device structure
4777 * @flags: set of reset flags
4778 *
4779 * Return: 0 on success, -EOPNOTSUPP when using unsupported set of flags.
4780 */
4781 static int ice_ethtool_reset(struct net_device *dev, u32 *flags)
4782 {
4783 struct ice_pf *pf = ice_netdev_to_pf(dev);
4784 enum ice_reset_req reset;
4785
4786 switch (*flags) {
4787 case ICE_ETHTOOL_CORER:
4788 reset = ICE_RESET_CORER;
4789 break;
4790 case ICE_ETHTOOL_GLOBR:
4791 reset = ICE_RESET_GLOBR;
4792 break;
4793 case ICE_ETHTOOL_PFR:
4794 reset = ICE_RESET_PFR;
4795 break;
4796 default:
4797 netdev_info(dev, "Unsupported set of ethtool flags\n");
4798 return -EOPNOTSUPP;
4799 }
4800
4801 ice_schedule_reset(pf, reset);
4802
4803 *flags = 0;
4804
4805 return 0;
4806 }
4807
4808 /**
4809 * ice_repr_ethtool_reset - triggers a VF reset
4810 * @dev: network interface device structure
4811 * @flags: set of reset flags
4812 *
4813 * Return: 0 on success,
4814 * -EOPNOTSUPP when using unsupported set of flags,
4815 * -EBUSY when VF is not ready for reset.
4816 */
4817 static int ice_repr_ethtool_reset(struct net_device *dev, u32 *flags)
4818 {
4819 struct ice_repr *repr = ice_netdev_to_repr(dev);
4820 struct ice_vf *vf;
4821
4822 if (repr->type != ICE_REPR_TYPE_VF ||
4823 *flags != ICE_ETHTOOL_VFR)
4824 return -EOPNOTSUPP;
4825
4826 vf = repr->vf;
4827
4828 if (ice_check_vf_ready_for_cfg(vf))
4829 return -EBUSY;
4830
4831 *flags = 0;
4832
4833 return ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
4834 }
4835
4836 static const struct ethtool_ops ice_ethtool_ops = {
4837 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
4838 ETHTOOL_COALESCE_USE_ADAPTIVE |
4839 ETHTOOL_COALESCE_RX_USECS_HIGH,
4840 .supported_input_xfrm = RXH_XFRM_SYM_XOR,
4841 .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
4842 .get_link_ksettings = ice_get_link_ksettings,
4843 .set_link_ksettings = ice_set_link_ksettings,
4844 .get_fec_stats = ice_get_fec_stats,
4845 .get_eth_mac_stats = ice_get_eth_mac_stats,
4846 .get_pause_stats = ice_get_pause_stats,
4847 .get_rmon_stats = ice_get_rmon_stats,
4848 .get_ts_stats = ice_get_ts_stats,
4849 .get_drvinfo = ice_get_drvinfo,
4850 .get_regs_len = ice_get_regs_len,
4851 .get_regs = ice_get_regs,
4852 .get_wol = ice_get_wol,
4853 .set_wol = ice_set_wol,
4854 .get_msglevel = ice_get_msglevel,
4855 .set_msglevel = ice_set_msglevel,
4856 .self_test = ice_self_test,
4857 .get_link = ethtool_op_get_link,
4858 .get_link_ext_stats = ice_get_link_ext_stats,
4859 .get_eeprom_len = ice_get_eeprom_len,
4860 .get_eeprom = ice_get_eeprom,
4861 .get_coalesce = ice_get_coalesce,
4862 .set_coalesce = ice_set_coalesce,
4863 .get_strings = ice_get_strings,
4864 .set_phys_id = ice_set_phys_id,
4865 .get_ethtool_stats = ice_get_ethtool_stats,
4866 .get_priv_flags = ice_get_priv_flags,
4867 .set_priv_flags = ice_set_priv_flags,
4868 .get_sset_count = ice_get_sset_count,
4869 .get_rxnfc = ice_get_rxnfc,
4870 .set_rxnfc = ice_set_rxnfc,
4871 .get_rx_ring_count = ice_get_rx_ring_count,
4872 .get_ringparam = ice_get_ringparam,
4873 .set_ringparam = ice_set_ringparam,
4874 .nway_reset = ice_nway_reset,
4875 .get_pauseparam = ice_get_pauseparam,
4876 .set_pauseparam = ice_set_pauseparam,
4877 .reset = ice_ethtool_reset,
4878 .get_rxfh_key_size = ice_get_rxfh_key_size,
4879 .get_rxfh_indir_size = ice_get_rxfh_indir_size,
4880 .get_rxfh =
ice_get_rxfh,
4881 .set_rxfh = ice_set_rxfh,
4882 .get_rxfh_fields = ice_get_rxfh_fields,
4883 .set_rxfh_fields = ice_set_rxfh_fields,
4884 .get_channels = ice_get_channels,
4885 .set_channels = ice_set_channels,
4886 .get_ts_info = ice_get_ts_info,
4887 .get_per_queue_coalesce = ice_get_per_q_coalesce,
4888 .set_per_queue_coalesce = ice_set_per_q_coalesce,
4889 .get_fecparam = ice_get_fecparam,
4890 .set_fecparam = ice_set_fecparam,
4891 .get_module_info = ice_get_module_info,
4892 .get_module_eeprom = ice_get_module_eeprom,
4893 };
4894
4895 static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
4896 .get_link_ksettings = ice_get_link_ksettings,
4897 .set_link_ksettings = ice_set_link_ksettings,
4898 .get_drvinfo = ice_get_drvinfo,
4899 .get_regs_len = ice_get_regs_len,
4900 .get_regs = ice_get_regs,
4901 .get_wol = ice_get_wol,
4902 .set_wol = ice_set_wol,
4903 .get_msglevel = ice_get_msglevel,
4904 .set_msglevel = ice_set_msglevel,
4905 .get_link = ethtool_op_get_link,
4906 .get_eeprom_len = ice_get_eeprom_len,
4907 .get_eeprom = ice_get_eeprom,
4908 .get_strings = ice_get_strings,
4909 .get_ethtool_stats = ice_get_ethtool_stats,
4910 .get_sset_count = ice_get_sset_count,
4911 .get_ringparam = ice_get_ringparam,
4912 .set_ringparam = ice_set_ringparam,
4913 .nway_reset = ice_nway_reset,
4914 .get_channels = ice_get_channels,
4915 };
4916
4917 /**
4918 * ice_set_ethtool_safe_mode_ops - setup safe mode ethtool ops
4919 * @netdev: network interface device structure
4920 */
4921 void ice_set_ethtool_safe_mode_ops(struct net_device *netdev)
4922 {
4923 netdev->ethtool_ops = &ice_ethtool_safe_mode_ops;
4924 }
4925
4926 static const struct ethtool_ops ice_ethtool_repr_ops = {
4927 .get_drvinfo = ice_repr_get_drvinfo,
4928 .get_link = ethtool_op_get_link,
4929 .get_strings = ice_repr_get_strings,
4930 .get_ethtool_stats = ice_repr_get_ethtool_stats,
4931 .get_sset_count = ice_repr_get_sset_count,
4932 .reset = ice_repr_ethtool_reset,
4933 };
4934
4935 /**
4936 * ice_set_ethtool_repr_ops - setup VF's port representor ethtool ops
4937 * @netdev: network interface device structure
4938 */
4939 void ice_set_ethtool_repr_ops(struct net_device *netdev)
4940 {
4941 netdev->ethtool_ops = &ice_ethtool_repr_ops;
4942 }
4943
4944 /**
4945 * ice_set_ethtool_ops - setup netdev ethtool ops
4946 * @netdev: network interface device structure
4947 *
4948 * Set up netdev ethtool ops with ice-specific ops.
4949 */
4950 void ice_set_ethtool_ops(struct net_device *netdev)
4951 {
4952 netdev->ethtool_ops = &ice_ethtool_ops;
4953 }
4954
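/* A minimal usage sketch, assuming a freshly allocated ice netdev (names and
 * the call site are illustrative, not the driver's actual init path):
 *
 *	struct net_device *netdev =
 *		alloc_etherdev(sizeof(struct ice_netdev_priv));
 *
 *	if (ice_is_safe_mode(pf))
 *		ice_set_ethtool_safe_mode_ops(netdev);
 *	else
 *		ice_set_ethtool_ops(netdev);
 *
 * Once the netdev is registered, userspace requests such as
 * "ethtool -L <ifname> combined 8" or "ethtool -x <ifname>" are dispatched
 * to the handlers wired up above.
 */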