// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* ethtool support for ice */

#include "ice.h"
#include "ice_ethtool.h"
#include "ice_flow.h"
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include <net/dcbnl.h>
#include <net/libeth/rx.h>

struct ice_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define ICE_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

#define ICE_VSI_STAT(_name, _stat) \
	ICE_STAT(struct ice_vsi, _name, _stat)
#define ICE_PF_STAT(_name, _stat) \
	ICE_STAT(struct ice_pf, _name, _stat)

static int ice_q_stats_len(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
		(sizeof(struct ice_q_stats) / sizeof(u64)));
}

#define ICE_PF_STATS_LEN	ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN	ARRAY_SIZE(ice_gstrings_vsi_stats)

#define ICE_PFC_STATS_LEN ( \
		(sizeof_field(struct ice_pf, stats.priority_xoff_rx) + \
		 sizeof_field(struct ice_pf, stats.priority_xon_rx) + \
		 sizeof_field(struct ice_pf, stats.priority_xoff_tx) + \
		 sizeof_field(struct ice_pf, stats.priority_xon_tx)) \
		 / sizeof(u64))
#define ICE_ALL_STATS_LEN(n)	(ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \
				 ICE_VSI_STATS_LEN + ice_q_stats_len(n))
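
/* Illustrative only: a worked example of how the counts above compose.
 * Assuming ICE_MAX_USER_PRIORITY is 8 (so each priority_x* counter is a
 * u64[8] array) and struct ice_q_stats holds one packet and one byte
 * counter, a PF netdev with 16 Tx and 16 Rx allocated queues reports:
 *
 *	ICE_PFC_STATS_LEN  = (4 * 8 * sizeof(u64)) / sizeof(u64) = 32
 *	ice_q_stats_len(n) = (16 + 16) * 2                       = 64
 *	ICE_ALL_STATS_LEN(n) = ICE_PF_STATS_LEN + 32 + ICE_VSI_STATS_LEN + 64
 *
 * This is the ETH_SS_STATS count returned by ice_get_sset_count() below,
 * and it must match the number of strings and u64 values emitted.
 */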

static const struct ice_stats ice_gstrings_vsi_stats[] = {
	ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
	ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
	ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
	ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
	ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
	ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
	ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
	ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
	ICE_VSI_STAT("rx_dropped", eth_stats.rx_discards),
	ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
	ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
	ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
	ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
	ICE_VSI_STAT("tx_linearize", tx_linearize),
	ICE_VSI_STAT("tx_busy", tx_busy),
	ICE_VSI_STAT("tx_restart", tx_restart),
};

enum ice_ethtool_test_id {
	ICE_ETH_TEST_REG = 0,
	ICE_ETH_TEST_EEPROM,
	ICE_ETH_TEST_INTR,
	ICE_ETH_TEST_LOOP,
	ICE_ETH_TEST_LINK,
};

static const char ice_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)",
	"EEPROM test (offline)",
	"Interrupt test (offline)",
	"Loopback test (offline)",
	"Link test (on/offline)",
};

#define ICE_TEST_LEN (sizeof(ice_gstrings_test) / ETH_GSTRING_LEN)

/* These PF_STATs might look like duplicates of some NETDEV_STATs,
 * but they aren't. This device is capable of supporting multiple
 * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
 * netdevs whereas the PF_STATs are for the physical function that's
 * hosting these netdevs.
 *
 * The PF_STATs are appended to the netdev stats only when ethtool -S
 * is queried on the base PF netdev.
 */
static const struct ice_stats ice_gstrings_pf_stats[] = {
	ICE_PF_STAT("rx_bytes.nic", stats.eth.rx_bytes),
	ICE_PF_STAT("tx_bytes.nic", stats.eth.tx_bytes),
	ICE_PF_STAT("rx_unicast.nic", stats.eth.rx_unicast),
	ICE_PF_STAT("tx_unicast.nic", stats.eth.tx_unicast),
	ICE_PF_STAT("rx_multicast.nic", stats.eth.rx_multicast),
	ICE_PF_STAT("tx_multicast.nic", stats.eth.tx_multicast),
	ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast),
	ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast),
	ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors),
	ICE_PF_STAT("tx_timeout.nic", tx_timeout_count),
	ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64),
	ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64),
	ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127),
	ICE_PF_STAT("tx_size_127.nic", stats.tx_size_127),
	ICE_PF_STAT("rx_size_255.nic", stats.rx_size_255),
	ICE_PF_STAT("tx_size_255.nic", stats.tx_size_255),
	ICE_PF_STAT("rx_size_511.nic", stats.rx_size_511),
	ICE_PF_STAT("tx_size_511.nic", stats.tx_size_511),
	ICE_PF_STAT("rx_size_1023.nic", stats.rx_size_1023),
	ICE_PF_STAT("tx_size_1023.nic", stats.tx_size_1023),
	ICE_PF_STAT("rx_size_1522.nic", stats.rx_size_1522),
	ICE_PF_STAT("tx_size_1522.nic", stats.tx_size_1522),
	ICE_PF_STAT("rx_size_big.nic", stats.rx_size_big),
	ICE_PF_STAT("tx_size_big.nic", stats.tx_size_big),
	ICE_PF_STAT("link_xon_rx.nic", stats.link_xon_rx),
	ICE_PF_STAT("link_xon_tx.nic", stats.link_xon_tx),
	ICE_PF_STAT("link_xoff_rx.nic", stats.link_xoff_rx),
	ICE_PF_STAT("link_xoff_tx.nic", stats.link_xoff_tx),
	ICE_PF_STAT("tx_dropped_link_down.nic", stats.tx_dropped_link_down),
	ICE_PF_STAT("rx_undersize.nic", stats.rx_undersize),
	ICE_PF_STAT("rx_fragments.nic", stats.rx_fragments),
	ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
	ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
	ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
	ICE_PF_STAT("rx_eipe_error.nic", hw_rx_eipe_error),
	ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
	ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
	ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
	ICE_PF_STAT("mac_local_faults.nic", stats.mac_local_faults),
	ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
	ICE_PF_STAT("fdir_sb_match.nic", stats.fd_sb_match),
	ICE_PF_STAT("fdir_sb_status.nic", stats.fd_sb_status),
	ICE_PF_STAT("tx_hwtstamp_skipped", ptp.tx_hwtstamp_skipped),
	ICE_PF_STAT("tx_hwtstamp_timeouts", ptp.tx_hwtstamp_timeouts),
	ICE_PF_STAT("tx_hwtstamp_flushed", ptp.tx_hwtstamp_flushed),
	ICE_PF_STAT("tx_hwtstamp_discarded", ptp.tx_hwtstamp_discarded),
	ICE_PF_STAT("late_cached_phc_updates", ptp.late_cached_phc_updates),
};
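
/* Illustrative only: given the arrays above, "ethtool -S <pf-netdev>"
 * prints the VSI stats first, then the per-queue counters, then the
 * PF-wide ".nic" counters (values below are hypothetical):
 *
 *	rx_bytes: 123456
 *	tx_queue_0_packets: 42
 *	rx_bytes.nic: 789012
 *
 * The ".nic" counters cover the whole physical port and can therefore
 * exceed what any single VSI/netdev observed.
 */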

static const u32 ice_regs_dump_list[] = {
	PFGEN_STATE,
	PRTGEN_STATUS,
	QRX_CTRL(0),
	QINT_TQCTL(0),
	QINT_RQCTL(0),
	PFINT_OICR_ENA,
	QRX_ITR(0),
#define GLDCB_TLPM_PCI_DM 0x000A0180
	GLDCB_TLPM_PCI_DM,
#define GLDCB_TLPM_TC2PFC 0x000A0194
	GLDCB_TLPM_TC2PFC,
#define TCDCB_TLPM_WAIT_DM(_i) (0x000A0080 + ((_i) * 4))
	TCDCB_TLPM_WAIT_DM(0),
	TCDCB_TLPM_WAIT_DM(1),
	TCDCB_TLPM_WAIT_DM(2),
	TCDCB_TLPM_WAIT_DM(3),
	TCDCB_TLPM_WAIT_DM(4),
	TCDCB_TLPM_WAIT_DM(5),
	TCDCB_TLPM_WAIT_DM(6),
	TCDCB_TLPM_WAIT_DM(7),
	TCDCB_TLPM_WAIT_DM(8),
	TCDCB_TLPM_WAIT_DM(9),
	TCDCB_TLPM_WAIT_DM(10),
	TCDCB_TLPM_WAIT_DM(11),
	TCDCB_TLPM_WAIT_DM(12),
	TCDCB_TLPM_WAIT_DM(13),
	TCDCB_TLPM_WAIT_DM(14),
	TCDCB_TLPM_WAIT_DM(15),
	TCDCB_TLPM_WAIT_DM(16),
	TCDCB_TLPM_WAIT_DM(17),
	TCDCB_TLPM_WAIT_DM(18),
	TCDCB_TLPM_WAIT_DM(19),
	TCDCB_TLPM_WAIT_DM(20),
	TCDCB_TLPM_WAIT_DM(21),
	TCDCB_TLPM_WAIT_DM(22),
	TCDCB_TLPM_WAIT_DM(23),
	TCDCB_TLPM_WAIT_DM(24),
	TCDCB_TLPM_WAIT_DM(25),
	TCDCB_TLPM_WAIT_DM(26),
	TCDCB_TLPM_WAIT_DM(27),
	TCDCB_TLPM_WAIT_DM(28),
	TCDCB_TLPM_WAIT_DM(29),
	TCDCB_TLPM_WAIT_DM(30),
	TCDCB_TLPM_WAIT_DM(31),
#define GLPCI_WATMK_CLNT_PIPEMON 0x000BFD90
	GLPCI_WATMK_CLNT_PIPEMON,
#define GLPCI_CUR_CLNT_COMMON 0x000BFD84
	GLPCI_CUR_CLNT_COMMON,
#define GLPCI_CUR_CLNT_PIPEMON 0x000BFD88
	GLPCI_CUR_CLNT_PIPEMON,
#define GLPCI_PCIERR 0x0009DEB0
	GLPCI_PCIERR,
#define GLPSM_DEBUG_CTL_STATUS 0x000B0600
	GLPSM_DEBUG_CTL_STATUS,
#define GLPSM0_DEBUG_FIFO_OVERFLOW_DETECT 0x000B0680
	GLPSM0_DEBUG_FIFO_OVERFLOW_DETECT,
#define GLPSM0_DEBUG_FIFO_UNDERFLOW_DETECT 0x000B0684
	GLPSM0_DEBUG_FIFO_UNDERFLOW_DETECT,
#define GLPSM0_DEBUG_DT_OUT_OF_WINDOW 0x000B0688
	GLPSM0_DEBUG_DT_OUT_OF_WINDOW,
#define GLPSM0_DEBUG_INTF_HW_ERROR_DETECT 0x000B069C
	GLPSM0_DEBUG_INTF_HW_ERROR_DETECT,
#define GLPSM0_DEBUG_MISC_HW_ERROR_DETECT 0x000B06A0
	GLPSM0_DEBUG_MISC_HW_ERROR_DETECT,
#define GLPSM1_DEBUG_FIFO_OVERFLOW_DETECT 0x000B0E80
	GLPSM1_DEBUG_FIFO_OVERFLOW_DETECT,
#define GLPSM1_DEBUG_FIFO_UNDERFLOW_DETECT 0x000B0E84
	GLPSM1_DEBUG_FIFO_UNDERFLOW_DETECT,
#define GLPSM1_DEBUG_SRL_FIFO_OVERFLOW_DETECT 0x000B0E88
	GLPSM1_DEBUG_SRL_FIFO_OVERFLOW_DETECT,
#define GLPSM1_DEBUG_SRL_FIFO_UNDERFLOW_DETECT 0x000B0E8C
	GLPSM1_DEBUG_SRL_FIFO_UNDERFLOW_DETECT,
#define GLPSM1_DEBUG_MISC_HW_ERROR_DETECT 0x000B0E90
	GLPSM1_DEBUG_MISC_HW_ERROR_DETECT,
#define GLPSM2_DEBUG_FIFO_OVERFLOW_DETECT 0x000B1680
	GLPSM2_DEBUG_FIFO_OVERFLOW_DETECT,
#define GLPSM2_DEBUG_FIFO_UNDERFLOW_DETECT 0x000B1684
	GLPSM2_DEBUG_FIFO_UNDERFLOW_DETECT,
#define GLPSM2_DEBUG_MISC_HW_ERROR_DETECT 0x000B1688
	GLPSM2_DEBUG_MISC_HW_ERROR_DETECT,
#define GLTDPU_TCLAN_COMP_BOB(_i) (0x00049ADC + ((_i) * 4))
	GLTDPU_TCLAN_COMP_BOB(1),
	GLTDPU_TCLAN_COMP_BOB(2),
	GLTDPU_TCLAN_COMP_BOB(3),
	GLTDPU_TCLAN_COMP_BOB(4),
	GLTDPU_TCLAN_COMP_BOB(5),
	GLTDPU_TCLAN_COMP_BOB(6),
	GLTDPU_TCLAN_COMP_BOB(7),
	GLTDPU_TCLAN_COMP_BOB(8),
#define GLTDPU_TCB_CMD_BOB(_i) (0x0004975C + ((_i) * 4))
	GLTDPU_TCB_CMD_BOB(1),
	GLTDPU_TCB_CMD_BOB(2),
	GLTDPU_TCB_CMD_BOB(3),
	GLTDPU_TCB_CMD_BOB(4),
	GLTDPU_TCB_CMD_BOB(5),
	GLTDPU_TCB_CMD_BOB(6),
	GLTDPU_TCB_CMD_BOB(7),
	GLTDPU_TCB_CMD_BOB(8),
#define GLTDPU_PSM_UPDATE_BOB(_i) (0x00049B5C + ((_i) * 4))
	GLTDPU_PSM_UPDATE_BOB(1),
	GLTDPU_PSM_UPDATE_BOB(2),
	GLTDPU_PSM_UPDATE_BOB(3),
	GLTDPU_PSM_UPDATE_BOB(4),
	GLTDPU_PSM_UPDATE_BOB(5),
	GLTDPU_PSM_UPDATE_BOB(6),
	GLTDPU_PSM_UPDATE_BOB(7),
	GLTDPU_PSM_UPDATE_BOB(8),
#define GLTCB_CMD_IN_BOB(_i) (0x000AE288 + ((_i) * 4))
	GLTCB_CMD_IN_BOB(1),
	GLTCB_CMD_IN_BOB(2),
	GLTCB_CMD_IN_BOB(3),
	GLTCB_CMD_IN_BOB(4),
	GLTCB_CMD_IN_BOB(5),
	GLTCB_CMD_IN_BOB(6),
	GLTCB_CMD_IN_BOB(7),
	GLTCB_CMD_IN_BOB(8),
#define GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(_i) (0x000FC148 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(8),
#define GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(_i) (0x000FC248 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(8),
#define GLLAN_TCLAN_CACHE_CTL_BOB_CTL(_i) (0x000FC1C8 + ((_i) * 4))
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(1),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(2),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(3),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(4),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(5),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(6),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(7),
	GLLAN_TCLAN_CACHE_CTL_BOB_CTL(8),
#define GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(_i) (0x000FC188 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(8),
#define GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(_i) (0x000FC288 + ((_i) * 4))
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(1),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(2),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(3),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(4),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(5),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(6),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(7),
	GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(8),
#define PRTDCB_TCUPM_REG_CM(_i) (0x000BC360 + ((_i) * 4))
	PRTDCB_TCUPM_REG_CM(0),
	PRTDCB_TCUPM_REG_CM(1),
	PRTDCB_TCUPM_REG_CM(2),
	PRTDCB_TCUPM_REG_CM(3),
#define PRTDCB_TCUPM_REG_DM(_i) (0x000BC3A0 + ((_i) * 4))
	PRTDCB_TCUPM_REG_DM(0),
	PRTDCB_TCUPM_REG_DM(1),
	PRTDCB_TCUPM_REG_DM(2),
	PRTDCB_TCUPM_REG_DM(3),
#define PRTDCB_TLPM_REG_DM(_i) (0x000A0000 + ((_i) * 4))
	PRTDCB_TLPM_REG_DM(0),
	PRTDCB_TLPM_REG_DM(1),
	PRTDCB_TLPM_REG_DM(2),
	PRTDCB_TLPM_REG_DM(3),
};

struct ice_priv_flag {
	char name[ETH_GSTRING_LEN];
	u32 bitno;	/* bit position in pf->flags */
};

#define ICE_PRIV_FLAG(_name, _bitno) { \
	.name = _name, \
	.bitno = _bitno, \
}

static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
	ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
	ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
	ICE_PRIV_FLAG("vf-true-promisc-support",
		      ICE_FLAG_VF_TRUE_PROMISC_ENA),
	ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
	ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING),
};

#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
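
/* Illustrative only: the strings above are what userspace sees, e.g.
 *
 *	# ethtool --show-priv-flags <pf-netdev>
 *	link-down-on-close : off
 *	fw-lldp-agent      : on
 *	# ethtool --set-priv-flags <pf-netdev> link-down-on-close on
 *
 * The bitno member maps each string to its bit in pf->flags; see
 * ice_get_priv_flags()/ice_set_priv_flags() below.
 */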

static const u32 ice_adv_lnk_speed_100[] __initconst = {
	ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};

static const u32 ice_adv_lnk_speed_1000[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
};

static const u32 ice_adv_lnk_speed_2500[] __initconst = {
	ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
	ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
};

static const u32 ice_adv_lnk_speed_5000[] __initconst = {
	ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};

static const u32 ice_adv_lnk_speed_10000[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
};

static const u32 ice_adv_lnk_speed_25000[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
};

static const u32 ice_adv_lnk_speed_40000[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
};

static const u32 ice_adv_lnk_speed_50000[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 ice_adv_lnk_speed_100000[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
};

static const u32 ice_adv_lnk_speed_200000[] __initconst = {
	ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
	ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
};

static struct ethtool_forced_speed_map ice_adv_lnk_speed_maps[] __ro_after_init = {
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 1000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 2500),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 5000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 10000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 25000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 40000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 50000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100000),
	ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 200000),
};

void __init ice_adv_lnk_speed_maps_init(void)
{
	ethtool_forced_speed_maps_init(ice_adv_lnk_speed_maps,
				       ARRAY_SIZE(ice_adv_lnk_speed_maps));
}
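
/* Illustrative only: after ice_adv_lnk_speed_maps_init() runs, each map
 * entry pairs a speed with a link-mode bitmap built from the arrays
 * above (assuming struct ethtool_forced_speed_map exposes the filled
 * bitmap as ->caps). Handling a forced-speed request then reduces to a
 * lookup roughly like:
 *
 *	for (i = 0; i < ARRAY_SIZE(ice_adv_lnk_speed_maps); i++)
 *		if (ice_adv_lnk_speed_maps[i].speed == SPEED_25000)
 *			linkmode_copy(advertising,
 *				      ice_adv_lnk_speed_maps[i].caps);
 */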

static void
__ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo,
		  struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_orom_info *orom;
	struct ice_nvm_info *nvm;

	nvm = &hw->flash.nvm;
	orom = &hw->flash.orom;

	strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));

	/* Display NVM version (from which the firmware version can be
	 * determined) which contains more pertinent information.
	 */
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
		 nvm->eetrack, orom->major, orom->build, orom->patch);

	strscpy(drvinfo->bus_info, pci_name(pf->pdev),
		sizeof(drvinfo->bus_info));
}

static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	__ice_get_drvinfo(netdev, drvinfo, np->vsi);
	drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}
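
/* Illustrative only: with the "%x.%02x 0x%x %d.%d.%d" format above, a
 * hypothetical NVM with major 4, minor 0x20, eetrack 0x8001778b and
 * option ROM 1.3236.0 shows up in "ethtool -i <pf-netdev>" as
 *
 *	firmware-version: 4.20 0x8001778b 1.3236.0
 *
 * i.e. NVM major.minor, the EETRACK id, then orom major.build.patch.
 */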

static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
	return (sizeof(ice_regs_dump_list) +
		sizeof(struct ice_regdump_to_ethtool));
}

/**
 * ice_ethtool_get_maxspeed - Get the max speed for given lport
 * @hw: pointer to the HW struct
 * @lport: logical port for which max speed is requested
 * @max_speed: return max speed for input lport
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_ethtool_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed)
{
	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = {};
	bool active_valid = false, pending_valid = true;
	u8 option_count = ICE_AQC_PORT_OPT_MAX;
	u8 active_idx = 0, pending_idx = 0;
	int status;

	status = ice_aq_get_port_options(hw, options, &option_count, lport,
					 true, &active_idx, &active_valid,
					 &pending_idx, &pending_valid);
	if (status)
		return -EIO;
	if (!active_valid)
		return -EINVAL;

	*max_speed = options[active_idx].max_lane_speed & ICE_AQC_PORT_OPT_MAX_LANE_M;
	return 0;
}

/**
 * ice_is_serdes_muxed - returns whether serdes is muxed in hardware
 * @hw: pointer to the HW struct
 *
 * Return: true when serdes is muxed, false when serdes is not muxed.
 */
static bool ice_is_serdes_muxed(struct ice_hw *hw)
{
	u32 reg_value = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);

	return FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, reg_value);
}

static int ice_map_port_topology_for_sfp(struct ice_port_topology *port_topology,
					 u8 lport, bool is_muxed)
{
	switch (lport) {
	case 0:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 0;
		port_topology->primary_serdes_lane = 0;
		break;
	case 1:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 0;
		if (is_muxed)
			port_topology->primary_serdes_lane = 2;
		else
			port_topology->primary_serdes_lane = 4;
		break;
	case 2:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 1;
		port_topology->primary_serdes_lane = 1;
		break;
	case 3:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 1;
		if (is_muxed)
			port_topology->primary_serdes_lane = 3;
		else
			port_topology->primary_serdes_lane = 5;
		break;
	case 4:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 2;
		break;
	case 5:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 6;
		break;
	case 6:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 3;
		break;
	case 7:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 7;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ice_map_port_topology_for_qsfp(struct ice_port_topology *port_topology,
					  u8 lport, bool is_muxed)
{
	switch (lport) {
	case 0:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 0;
		port_topology->primary_serdes_lane = 0;
		break;
	case 1:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 0;
		if (is_muxed)
			port_topology->primary_serdes_lane = 2;
		else
			port_topology->primary_serdes_lane = 4;
		break;
	case 2:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 1;
		port_topology->primary_serdes_lane = 1;
		break;
	case 3:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 1;
		if (is_muxed)
			port_topology->primary_serdes_lane = 3;
		else
			port_topology->primary_serdes_lane = 5;
		break;
	case 4:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 2;
		break;
	case 5:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 2;
		port_topology->primary_serdes_lane = 6;
		break;
	case 6:
		port_topology->pcs_quad_select = 0;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 3;
		break;
	case 7:
		port_topology->pcs_quad_select = 1;
		port_topology->pcs_port = 3;
		port_topology->primary_serdes_lane = 7;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_get_port_topology - returns physical topology like pcsquad, pcsport,
 *                         serdes number
 * @hw: pointer to the HW struct
 * @lport: logical port for which physical info requested
 * @port_topology: buffer to hold port topology
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
				 struct ice_port_topology *port_topology)
{
	struct ice_aqc_get_link_topo cmd = {};
	u16 node_handle = 0;
	u8 cage_type = 0;
	bool is_muxed;
	int err;
	u8 ctx;

	ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
	ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
	cmd.addr.topo_params.node_type_ctx = ctx;

	err = ice_aq_get_netlist_node(hw, &cmd, &cage_type, &node_handle);
	if (err)
		return -EINVAL;

	is_muxed = ice_is_serdes_muxed(hw);

	if (cage_type == 0x11 ||	/* SFP+ */
	    cage_type == 0x12) {	/* SFP28 */
		port_topology->serdes_lane_count = 1;
		err = ice_map_port_topology_for_sfp(port_topology, lport, is_muxed);
		if (err)
			return err;
	} else if (cage_type == 0x13 ||	/* QSFP */
		   cage_type == 0x14) {	/* QSFP28 */
		u8 max_speed = 0;

		err = ice_ethtool_get_maxspeed(hw, lport, &max_speed);
		if (err)
			return err;

		if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
			port_topology->serdes_lane_count = 4;
		else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G ||
			 max_speed == ICE_AQC_PORT_OPT_MAX_LANE_40G)
			port_topology->serdes_lane_count = 2;
		else
			port_topology->serdes_lane_count = 1;

		err = ice_map_port_topology_for_qsfp(port_topology, lport, is_muxed);
		if (err)
			return err;
	} else {
		return -EINVAL;
	}

	return 0;
}
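
/* Illustrative only: a sample walk through the tables above. An SFP
 * cage on lport 1 with the serdes muxed yields pcs_quad_select = 1,
 * pcs_port = 0, primary_serdes_lane = 2 and serdes_lane_count = 1; the
 * same lport unmuxed maps to lane 4. For QSFP cages the lane count is
 * scaled first (4 lanes at 100G, 2 at 40G/50G, otherwise 1), and the
 * per-lane equalization reads below then cover lanes
 * primary_serdes_lane .. primary_serdes_lane + count - 1.
 */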

/**
 * ice_get_tx_rx_equa - read serdes tx rx equaliser param
 * @hw: pointer to the HW struct
 * @serdes_num: represents the serdes number
 * @ptr: structure to read all serdes parameter for given serdes
 *
 * Return: all serdes equalization parameter supported per serdes number
 */
static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
			      struct ice_serdes_equalization_to_ethtool *ptr)
{
	static const int tx = ICE_AQC_OP_CODE_TX_EQU;
	static const int rx = ICE_AQC_OP_CODE_RX_EQU;
	struct {
		int data_in;
		int opcode;
		int *out;
	} aq_params[] = {
		{ ICE_AQC_TX_EQU_PRE1, tx, &ptr->tx_equ_pre1 },
		{ ICE_AQC_TX_EQU_PRE3, tx, &ptr->tx_equ_pre3 },
		{ ICE_AQC_TX_EQU_ATTEN, tx, &ptr->tx_equ_atten },
		{ ICE_AQC_TX_EQU_POST1, tx, &ptr->tx_equ_post1 },
		{ ICE_AQC_TX_EQU_PRE2, tx, &ptr->tx_equ_pre2 },
		{ ICE_AQC_RX_EQU_PRE2, rx, &ptr->rx_equ_pre2 },
		{ ICE_AQC_RX_EQU_PRE1, rx, &ptr->rx_equ_pre1 },
		{ ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 },
		{ ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf },
		{ ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf },
		{ ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf },
		{ ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf },
		{ ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc },
		{ ICE_AQC_RX_EQU_CTLE_BW, rx, &ptr->rx_equ_ctle_bw },
		{ ICE_AQC_RX_EQU_DFE_GAIN, rx, &ptr->rx_equ_dfe_gain },
		{ ICE_AQC_RX_EQU_DFE_GAIN2, rx, &ptr->rx_equ_dfe_gain_2 },
		{ ICE_AQC_RX_EQU_DFE_2, rx, &ptr->rx_equ_dfe_2 },
		{ ICE_AQC_RX_EQU_DFE_3, rx, &ptr->rx_equ_dfe_3 },
		{ ICE_AQC_RX_EQU_DFE_4, rx, &ptr->rx_equ_dfe_4 },
		{ ICE_AQC_RX_EQU_DFE_5, rx, &ptr->rx_equ_dfe_5 },
		{ ICE_AQC_RX_EQU_DFE_6, rx, &ptr->rx_equ_dfe_6 },
		{ ICE_AQC_RX_EQU_DFE_7, rx, &ptr->rx_equ_dfe_7 },
		{ ICE_AQC_RX_EQU_DFE_8, rx, &ptr->rx_equ_dfe_8 },
		{ ICE_AQC_RX_EQU_DFE_9, rx, &ptr->rx_equ_dfe_9 },
		{ ICE_AQC_RX_EQU_DFE_10, rx, &ptr->rx_equ_dfe_10 },
		{ ICE_AQC_RX_EQU_DFE_11, rx, &ptr->rx_equ_dfe_11 },
		{ ICE_AQC_RX_EQU_DFE_12, rx, &ptr->rx_equ_dfe_12 },
	};
	int err;

	for (int i = 0; i < ARRAY_SIZE(aq_params); i++) {
		err = ice_aq_get_phy_equalization(hw, aq_params[i].data_in,
						  aq_params[i].opcode,
						  serdes_num, aq_params[i].out);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_get_extended_regs - returns FEC correctable, uncorrectable stats per
 *                         pcsquad, pcsport
 * @netdev: pointer to net device structure
 * @p: output buffer to fill requested register dump
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_get_extended_regs(struct net_device *netdev, void *p)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_regdump_to_ethtool *ice_prv_regs_buf;
	struct ice_port_topology port_topology = {};
	struct ice_port_info *pi;
	struct ice_pf *pf;
	struct ice_hw *hw;
	unsigned int i;
	int err;

	pf = np->vsi->back;
	hw = &pf->hw;
	pi = np->vsi->port_info;

	/* Serdes parameters are not supported if not the PF VSI */
	if (np->vsi->type != ICE_VSI_PF || !pi)
		return -EINVAL;

	err = ice_get_port_topology(hw, pi->lport, &port_topology);
	if (err)
		return -EINVAL;
	if (port_topology.serdes_lane_count > 4)
		return -EINVAL;

	ice_prv_regs_buf = p;

	/* Get serdes equalization parameter for available serdes */
	for (i = 0; i < port_topology.serdes_lane_count; i++) {
		u8 serdes_num = 0;

		serdes_num = port_topology.primary_serdes_lane + i;
		err = ice_get_tx_rx_equa(hw, serdes_num,
					 &ice_prv_regs_buf->equalization[i]);
		if (err)
			return -EINVAL;
	}

	return 0;
}

static void
ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_hw *hw = &pf->hw;
	u32 *regs_buf = (u32 *)p;
	unsigned int i;

	regs->version = 2;

	for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
		regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);

	ice_get_extended_regs(netdev, (void *)&regs_buf[i]);
}
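
/* Illustrative only: the "ethtool -d" buffer produced above is laid out
 * as the raw register values followed by the serdes equalization block,
 *
 *	u32 raw[ARRAY_SIZE(ice_regs_dump_list)];   <- ice_get_regs()
 *	struct ice_regdump_to_ethtool ext;         <- ice_get_extended_regs()
 *
 * which matches the size reported by ice_get_regs_len() above.
 */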

static u32 ice_get_msglevel(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (pf->hw.debug_mask)
		netdev_info(netdev, "hw debug_mask: 0x%llX\n",
			    pf->hw.debug_mask);
#endif /* !CONFIG_DYNAMIC_DEBUG */

	return pf->msg_enable;
}

static void ice_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

#ifndef CONFIG_DYNAMIC_DEBUG
	if (ICE_DBG_USER & data)
		pf->hw.debug_mask = data;
	else
		pf->msg_enable = data;
#else
	pf->msg_enable = data;
#endif /* !CONFIG_DYNAMIC_DEBUG */
}

static void ice_get_link_ext_stats(struct net_device *netdev,
				   struct ethtool_link_ext_stats *stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	stats->link_down_events = pf->link_down_events;
}

static int ice_get_eeprom_len(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	return (int)pf->hw.flash.flash_size;
}

static int
ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
	       u8 *bytes)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int ret;
	u8 *buf;

	dev = ice_pf_to_dev(pf);

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
	netdev_dbg(netdev, "GEEPROM cmd 0x%08x, offset 0x%08x, len 0x%08x\n",
		   eeprom->cmd, eeprom->offset, eeprom->len);

	buf = kzalloc(eeprom->len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret) {
		dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %s\n",
			ret, libie_aq_str(hw->adminq.sq_last_status));
		goto out;
	}

	ret = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
				false);
	if (ret) {
		dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %s\n",
			ret, libie_aq_str(hw->adminq.sq_last_status));
		goto release;
	}

	memcpy(bytes, buf, eeprom->len);
release:
	ice_release_nvm(hw);
out:
	kfree(buf);
	return ret;
}
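
/* Illustrative only: eeprom->magic above packs the PCI IDs as
 * vendor_id | (device_id << 16). For a hypothetical part with vendor
 * 0x8086 and device 0x1592, userspace would see magic 0x15928086 in
 * the "ethtool -e <pf-netdev>" ioctl.
 */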

/**
 * ice_active_vfs - check if there are any active VFs
 * @pf: board private structure
 *
 * Returns true if an active VF is found, otherwise returns false
 */
static bool ice_active_vfs(struct ice_pf *pf)
{
	bool active = false;
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
			active = true;
			break;
		}
	}
	rcu_read_unlock();

	return active;
}

/**
 * ice_link_test - perform a link test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_link_test(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	bool link_up = false;
	int status;

	netdev_info(netdev, "link test\n");
	status = ice_get_link_status(np->vsi->port_info, &link_up);
	if (status) {
		netdev_err(netdev, "link query error, status = %d\n",
			   status);
		return 1;
	}

	if (!link_up)
		return 2;

	return 0;
}

/**
 * ice_eeprom_test - perform an EEPROM test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_eeprom_test(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	netdev_info(netdev, "EEPROM test\n");
	return !!(ice_nvm_validate_checksum(&pf->hw));
}

/**
 * ice_reg_pattern_test
 * @hw: pointer to the HW struct
 * @reg: reg to be tested
 * @mask: bits to be touched
 */
static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
{
	struct ice_pf *pf = (struct ice_pf *)hw->back;
	struct device *dev = ice_pf_to_dev(pf);
	static const u32 patterns[] = {
		0x5A5A5A5A, 0xA5A5A5A5,
		0x00000000, 0xFFFFFFFF
	};
	u32 val, orig_val;
	unsigned int i;

	orig_val = rd32(hw, reg);
	for (i = 0; i < ARRAY_SIZE(patterns); ++i) {
		u32 pattern = patterns[i] & mask;

		wr32(hw, reg, pattern);
		val = rd32(hw, reg);
		if (val == pattern)
			continue;
		dev_err(dev, "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
			__func__, reg, pattern, val);
		return 1;
	}

	wr32(hw, reg, orig_val);
	val = rd32(hw, reg);
	if (val != orig_val) {
		dev_err(dev, "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n",
			__func__, reg, orig_val, val);
		return 1;
	}

	return 0;
}
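
/* Illustrative only: a worked pass through ice_reg_pattern_test() with
 * mask 0x00000fff. Pattern 0x5A5A5A5A is masked to 0x00000A5A before
 * the write, so the register must read back exactly 0x00000A5A or the
 * test fails; the original value is then restored and verified the
 * same way.
 */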

/**
 * ice_reg_test - perform a register test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_reg_test(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_hw *hw = np->vsi->port_info->hw;
	u32 int_elements = hw->func_caps.common_cap.num_msix_vectors ?
		hw->func_caps.common_cap.num_msix_vectors - 1 : 1;
	struct ice_diag_reg_test_info {
		u32 address;
		u32 mask;
		u32 elem_num;
		u32 elem_size;
	} ice_reg_list[] = {
		{GLINT_ITR(0, 0), 0x00000fff, int_elements,
			GLINT_ITR(0, 1) - GLINT_ITR(0, 0)},
		{GLINT_ITR(1, 0), 0x00000fff, int_elements,
			GLINT_ITR(1, 1) - GLINT_ITR(1, 0)},
		{GLINT_ITR(2, 0), 0x00000fff, int_elements,
			GLINT_ITR(2, 1) - GLINT_ITR(2, 0)},
		{GLINT_CTL, 0xffff0001, 1, 0}
	};
	unsigned int i;

	netdev_dbg(netdev, "Register test\n");
	for (i = 0; i < ARRAY_SIZE(ice_reg_list); ++i) {
		u32 j;

		for (j = 0; j < ice_reg_list[i].elem_num; ++j) {
			u32 mask = ice_reg_list[i].mask;
			u32 reg = ice_reg_list[i].address +
				  (j * ice_reg_list[i].elem_size);

			/* bail on failure (non-zero return) */
			if (ice_reg_pattern_test(hw, reg, mask))
				return 1;
		}
	}

	return 0;
}

/**
 * ice_lbtest_prepare_rings - configure Tx/Rx test rings
 * @vsi: pointer to the VSI structure
 *
 * Function configures rings of a VSI for loopback test without
 * enabling interrupts or informing the kernel about new queues.
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
{
	int status;

	status = ice_vsi_setup_tx_rings(vsi);
	if (status)
		goto err_setup_tx_ring;

	status = ice_vsi_setup_rx_rings(vsi);
	if (status)
		goto err_setup_rx_ring;

	status = ice_vsi_cfg_lan(vsi);
	if (status)
		goto err_setup_rx_ring;

	status = ice_vsi_start_all_rx_rings(vsi);
	if (status)
		goto err_start_rx_ring;

	return 0;

err_start_rx_ring:
	ice_vsi_free_rx_rings(vsi);
err_setup_rx_ring:
	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
err_setup_tx_ring:
	ice_vsi_free_tx_rings(vsi);

	return status;
}

/**
 * ice_lbtest_disable_rings - disable Tx/Rx test rings after loopback test
 * @vsi: pointer to the VSI structure
 *
 * Function stops and frees VSI rings after a loopback test.
 * Returns 0 on success, negative on failure.
 */
static int ice_lbtest_disable_rings(struct ice_vsi *vsi)
{
	int status;

	status = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
	if (status)
		netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
			   vsi->vsi_num, status);

	status = ice_vsi_stop_all_rx_rings(vsi);
	if (status)
		netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
			   vsi->vsi_num, status);

	ice_vsi_free_tx_rings(vsi);
	ice_vsi_free_rx_rings(vsi);

	return status;
}

/**
 * ice_lbtest_create_frame - create test packet
 * @pf: pointer to the PF structure
 * @ret_data: allocated frame buffer
 * @size: size of the packet data
 *
 * Function allocates a frame with a test pattern on specific offsets.
 * Returns 0 on success, non-zero on failure.
 */
static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
{
	u8 *data;

	if (!pf)
		return -EINVAL;

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Since the ethernet test frame should always be at least
	 * 64 bytes long, fill some octets in the payload with test data.
	 */
	memset(data, 0xFF, size);
	data[32] = 0xDE;
	data[42] = 0xAD;
	data[44] = 0xBE;
	data[46] = 0xEF;

	*ret_data = data;

	return 0;
}

/**
 * ice_lbtest_check_frame - verify received loopback frame
 * @frame: pointer to the raw packet data
 *
 * Function verifies received test frame with a pattern.
 * Returns true if frame matches the pattern, false otherwise.
 */
static bool ice_lbtest_check_frame(u8 *frame)
{
	/* Validate bytes of a frame under offsets chosen earlier */
	if (frame[32] == 0xDE &&
	    frame[42] == 0xAD &&
	    frame[44] == 0xBE &&
	    frame[46] == 0xEF &&
	    frame[48] == 0xFF)
		return true;

	return false;
}
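
/* Illustrative only: the resulting 64-byte test frame is all 0xFF
 * except for the marker bytes planted above:
 *
 *	offset:  32   42   44   46   48
 *	value:  0xDE 0xAD 0xBE 0xEF 0xFF
 *
 * ice_lbtest_check_frame() matches the four planted bytes plus byte 48,
 * which must still hold the 0xFF fill value.
 */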

/**
 * ice_diag_send - send test frames to the test ring
 * @tx_ring: pointer to the transmit ring
 * @data: pointer to the raw packet data
 * @size: size of the packet to send
 *
 * Function sends loopback packets on a test Tx ring.
 */
static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size)
{
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;
	u64 td_cmd;

	tx_desc = ICE_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_buf = &tx_ring->tx_buf[tx_ring->next_to_use];

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		return -EINVAL;

	tx_desc->buf_addr = cpu_to_le64(dma);

	/* These flags are required for a descriptor to be pushed out */
	td_cmd = (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
	tx_desc->cmd_type_offset_bsz =
		cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			    (td_cmd << ICE_TXD_QW1_CMD_S) |
			    ((u64)0 << ICE_TXD_QW1_OFFSET_S) |
			    ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			    ((u64)0 << ICE_TXD_QW1_L2TAG1_S));

	tx_buf->next_to_watch = tx_desc;

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	tx_ring->next_to_use++;
	if (tx_ring->next_to_use >= tx_ring->count)
		tx_ring->next_to_use = 0;

	writel_relaxed(tx_ring->next_to_use, tx_ring->tail);

	/* Wait until the packets get transmitted to the receive queue. */
	usleep_range(1000, 2000);
	dma_unmap_single(tx_ring->dev, dma, size, DMA_TO_DEVICE);

	return 0;
}

#define ICE_LB_FRAME_SIZE 64
/**
 * ice_lbtest_receive_frames - receive and verify test frames
 * @rx_ring: pointer to the receive ring
 *
 * Function receives loopback packets and verify their correctness.
 * Returns number of received valid frames.
 */
static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
{
	struct libeth_fqe *rx_buf;
	int valid_frames, i;
	struct page *page;
	u8 *received_buf;

	valid_frames = 0;

	for (i = 0; i < rx_ring->count; i++) {
		union ice_32b_rx_flex_desc *rx_desc;

		rx_desc = ICE_RX_DESC(rx_ring, i);

		if (!(rx_desc->wb.status_error0 &
		      (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
		       cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
			continue;

		rx_buf = &rx_ring->rx_fqes[i];
		page = __netmem_to_page(rx_buf->netmem);
		received_buf = page_address(page) + rx_buf->offset +
			       page->pp->p.offset;

		if (ice_lbtest_check_frame(received_buf))
			valid_frames++;
	}

	return valid_frames;
}

/**
 * ice_loopback_test - perform a loopback test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_loopback_test(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *test_vsi;
	u8 *tx_frame __free(kfree) = NULL;
	u8 broadcast[ETH_ALEN], ret = 0;
	int num_frames, valid_frames;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	int i;

	netdev_info(netdev, "loopback test\n");

	test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
	if (!test_vsi) {
		netdev_err(netdev, "Failed to create a VSI for the loopback test\n");
		return 1;
	}

	test_vsi->netdev = netdev;
	tx_ring = test_vsi->tx_rings[0];
	rx_ring = test_vsi->rx_rings[0];

	if (ice_lbtest_prepare_rings(test_vsi)) {
		ret = 2;
		goto lbtest_vsi_close;
	}

	if (ice_alloc_rx_bufs(rx_ring, rx_ring->count)) {
		ret = 3;
		goto lbtest_rings_dis;
	}

	/* Enable MAC loopback in firmware */
	if (ice_aq_set_mac_loopback(&pf->hw, true, NULL)) {
		ret = 4;
		goto lbtest_mac_dis;
	}

	/* Test VSI needs to receive broadcast packets */
	eth_broadcast_addr(broadcast);
	if (ice_fltr_add_mac(test_vsi, broadcast, ICE_FWD_TO_VSI)) {
		ret = 5;
		goto lbtest_mac_dis;
	}

	if (ice_lbtest_create_frame(pf, &tx_frame, ICE_LB_FRAME_SIZE)) {
		ret = 7;
		goto remove_mac_filters;
	}

	num_frames = min_t(int, tx_ring->count, 32);
	for (i = 0; i < num_frames; i++) {
		if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) {
			ret = 8;
			goto remove_mac_filters;
		}
	}

	valid_frames = ice_lbtest_receive_frames(rx_ring);
	if (!valid_frames)
		ret = 9;
	else if (valid_frames != num_frames)
		ret = 10;

remove_mac_filters:
	if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI))
		netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
lbtest_mac_dis:
	/* Disable MAC loopback after the test is completed. */
	if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
		netdev_err(netdev, "Could not disable MAC loopback\n");
lbtest_rings_dis:
	if (ice_lbtest_disable_rings(test_vsi))
		netdev_err(netdev, "Could not disable test rings\n");
lbtest_vsi_close:
	test_vsi->netdev = NULL;
	if (ice_vsi_release(test_vsi))
		netdev_err(netdev, "Failed to remove the test VSI\n");

	return ret;
}
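
/* Illustrative only: the non-zero returns above identify the failing
 * stage of the loopback test: 1 VSI creation, 2 ring setup, 3 Rx buffer
 * allocation, 4 enabling MAC loopback, 5 broadcast filter, 7 frame
 * allocation, 8 transmit, 9 nothing received, 10 only some frames came
 * back (6 is unused).
 */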

/**
 * ice_intr_test - perform an interrupt test on a given net_device
 * @netdev: network interface device structure
 *
 * This function performs one of the self-tests required by ethtool.
 * Returns 0 on success, non-zero on failure.
 */
static u64 ice_intr_test(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u16 swic_old = pf->sw_int_count;

	netdev_info(netdev, "interrupt test\n");

	wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_irq.index),
	     GLINT_DYN_CTL_SW_ITR_INDX_M |
	     GLINT_DYN_CTL_INTENA_MSK_M |
	     GLINT_DYN_CTL_SWINT_TRIG_M);

	usleep_range(1000, 2000);
	return (swic_old == pf->sw_int_count);
}

/**
 * ice_self_test - handler function for performing a self-test by ethtool
 * @netdev: network interface device structure
 * @eth_test: ethtool_test structure
 * @data: required by ethtool.self_test
 *
 * This function is called after invoking 'ethtool -t devname' command where
 * devname is the name of the network device on which ethtool should operate.
 * It performs a set of self-tests to check if a device works properly.
 */
static void
ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
	      u64 *data)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	bool if_running = netif_running(netdev);
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		netdev_info(netdev, "offline testing starting\n");

		set_bit(ICE_TESTING, pf->state);

		if (ice_active_vfs(pf)) {
			dev_warn(dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
			data[ICE_ETH_TEST_REG] = 1;
			data[ICE_ETH_TEST_EEPROM] = 1;
			data[ICE_ETH_TEST_INTR] = 1;
			data[ICE_ETH_TEST_LOOP] = 1;
			data[ICE_ETH_TEST_LINK] = 1;
			eth_test->flags |= ETH_TEST_FL_FAILED;
			clear_bit(ICE_TESTING, pf->state);
			goto skip_ol_tests;
		}
		/* If the device is online then take it offline */
		if (if_running)
			/* indicate we're in test mode */
			ice_stop(netdev);

		data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
		data[ICE_ETH_TEST_EEPROM] = ice_eeprom_test(netdev);
		data[ICE_ETH_TEST_INTR] = ice_intr_test(netdev);
		data[ICE_ETH_TEST_LOOP] = ice_loopback_test(netdev);
		data[ICE_ETH_TEST_REG] = ice_reg_test(netdev);

		if (data[ICE_ETH_TEST_LINK] ||
		    data[ICE_ETH_TEST_EEPROM] ||
		    data[ICE_ETH_TEST_LOOP] ||
		    data[ICE_ETH_TEST_INTR] ||
		    data[ICE_ETH_TEST_REG])
			eth_test->flags |= ETH_TEST_FL_FAILED;

		clear_bit(ICE_TESTING, pf->state);

		if (if_running) {
			int status = ice_open(netdev);

			if (status) {
				dev_err(dev, "Could not open device %s, err %d\n",
					pf->int_name, status);
			}
		}
	} else {
		/* Online tests */
		netdev_info(netdev, "online testing starting\n");

		data[ICE_ETH_TEST_LINK] = ice_link_test(netdev);
		if (data[ICE_ETH_TEST_LINK])
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline only tests, not run in online; pass by default */
		data[ICE_ETH_TEST_REG] = 0;
		data[ICE_ETH_TEST_EEPROM] = 0;
		data[ICE_ETH_TEST_INTR] = 0;
		data[ICE_ETH_TEST_LOOP] = 0;
	}

skip_ol_tests:
	netdev_info(netdev, "testing finished\n");
}
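
/* Illustrative only: a hypothetical offline run reports one u64 per
 * entry of ice_gstrings_test, 0 meaning pass:
 *
 *	# ethtool -t <pf-netdev> offline
 *	The test result is PASS
 *	Register test (offline)     0
 *	...
 *	Link test (on/offline)      0
 *
 * An online run ("ethtool -t <pf-netdev> online") executes only the
 * link test and reports 0 for the offline-only entries.
 */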

static void
__ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
		  struct ice_vsi *vsi)
{
	unsigned int i;
	u8 *p = data;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ICE_VSI_STATS_LEN; i++)
			ethtool_puts(&p, ice_gstrings_vsi_stats[i].stat_string);

		if (ice_is_port_repr_netdev(netdev))
			return;

		ice_for_each_alloc_txq(vsi, i) {
			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
		}

		ice_for_each_alloc_rxq(vsi, i) {
			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
		}

		if (vsi->type != ICE_VSI_PF)
			return;

		for (i = 0; i < ICE_PF_STATS_LEN; i++)
			ethtool_puts(&p, ice_gstrings_pf_stats[i].stat_string);

		for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
			ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i);
			ethtool_sprintf(&p, "tx_priority_%u_xoff.nic", i);
		}
		for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
			ethtool_sprintf(&p, "rx_priority_%u_xon.nic", i);
			ethtool_sprintf(&p, "rx_priority_%u_xoff.nic", i);
		}
		break;
	case ETH_SS_TEST:
		memcpy(data, ice_gstrings_test, ICE_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++)
			ethtool_puts(&p, ice_gstrings_priv_flags[i].name);
		break;
	default:
		break;
	}
}

static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	__ice_get_strings(netdev, stringset, data, np->vsi);
}

static int
ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	bool led_active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		led_active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		led_active = false;
		break;
	default:
		return -EINVAL;
	}

	if (ice_aq_set_port_id_led(np->vsi->port_info, !led_active, NULL))
		return -EIO;

	return 0;
}

/**
 * ice_set_fec_cfg - Set link FEC options
 * @netdev: network interface device structure
 * @req_fec: FEC mode to configure
 */
static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_aqc_set_phy_cfg_data config = { 0 };
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;

	pi = vsi->port_info;
	if (!pi)
		return -EOPNOTSUPP;

	/* Changing the FEC parameters is not supported if not the PF VSI */
	if (vsi->type != ICE_VSI_PF) {
		netdev_info(netdev, "Changing FEC parameters only supported for PF VSI\n");
		return -EOPNOTSUPP;
	}

	/* Proceed only if requesting different FEC mode */
	if (pi->phy.curr_user_fec_req == req_fec)
		return 0;

	/* Copy the current user PHY configuration. The current user PHY
	 * configuration is initialized during probe from PHY capabilities
	 * software mode, and updated on set PHY configuration.
	 */
	memcpy(&config, &pi->phy.curr_user_phy_cfg, sizeof(config));

	ice_cfg_phy_fec(pi, &config, req_fec);
	config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (ice_aq_set_phy_cfg(pi->hw, pi, &config, NULL))
		return -EAGAIN;

	/* Save requested FEC config */
	pi->phy.curr_user_fec_req = req_fec;

	return 0;
}

/**
 * ice_set_fecparam - Set FEC link options
 * @netdev: network interface device structure
 * @fecparam: Ethtool structure to retrieve FEC parameters
 */
static int
ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	enum ice_fec_mode fec;

	switch (fecparam->fec) {
	case ETHTOOL_FEC_AUTO:
		fec = ICE_FEC_AUTO;
		break;
	case ETHTOOL_FEC_RS:
		fec = ICE_FEC_RS;
		break;
	case ETHTOOL_FEC_BASER:
		fec = ICE_FEC_BASER;
		break;
	case ETHTOOL_FEC_OFF:
	case ETHTOOL_FEC_NONE:
		fec = ICE_FEC_NONE;
		break;
	default:
		dev_warn(ice_pf_to_dev(vsi->back), "Unsupported FEC mode: %d\n",
			 fecparam->fec);
		return -EINVAL;
	}

	return ice_set_fec_cfg(netdev, fec);
}

/**
 * ice_get_fecparam - Get link FEC options
 * @netdev: network interface device structure
 * @fecparam: Ethtool structure to retrieve FEC parameters
 */
static int
ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_aqc_get_phy_caps_data *caps;
	struct ice_link_status *link_info;
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;
	int err;

	pi = vsi->port_info;

	if (!pi)
		return -EOPNOTSUPP;
	link_info = &pi->phy.link_info;

	/* Set FEC mode based on negotiated link info */
	switch (link_info->fec_info) {
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fecparam->active_fec = ETHTOOL_FEC_BASER;
		break;
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fecparam->active_fec = ETHTOOL_FEC_RS;
		break;
	default:
		fecparam->active_fec = ETHTOOL_FEC_OFF;
		break;
	}

	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps)
		return -ENOMEM;

	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
				  caps, NULL);
	if (err)
		goto done;

	/* Set supported/configured FEC modes based on PHY capability */
	if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC)
		fecparam->fec |= ETHTOOL_FEC_AUTO;
	if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fecparam->fec |= ETHTOOL_FEC_BASER;
	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
		fecparam->fec |= ETHTOOL_FEC_RS;
	if (caps->link_fec_options == 0)
		fecparam->fec |= ETHTOOL_FEC_OFF;

done:
	kfree(caps);
	return err;
}
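
/* Illustrative only: the handlers above back the ethtool FEC commands,
 * e.g.
 *
 *	# ethtool --set-fec <pf-netdev> encoding rs
 *	# ethtool --show-fec <pf-netdev>
 *	Configured FEC encodings: RS
 *	Active FEC encoding: RS
 *
 * "encoding auto" maps to ICE_FEC_AUTO and leaves the final choice to
 * link negotiation.
 */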
1895 * 1896 * Userspace programs such as ethtool must make 3 separate 1897 * ioctl requests, one for size, one for the strings, and 1898 * finally one for the stats. Since these cross into 1899 * userspace, changes to the number or size could result in 1900 * undefined memory access or incorrect string<->value 1901 * correlations for statistics. 1902 * 1903 * Even if it appears to be safe, changes to the size or 1904 * order of strings will suffer from race conditions and are 1905 * not safe. 1906 */ 1907 return ICE_ALL_STATS_LEN(netdev); 1908 case ETH_SS_TEST: 1909 return ICE_TEST_LEN; 1910 case ETH_SS_PRIV_FLAGS: 1911 return ICE_PRIV_FLAG_ARRAY_SIZE; 1912 default: 1913 return -EOPNOTSUPP; 1914 } 1915 } 1916 1917 static void 1918 __ice_get_ethtool_stats(struct net_device *netdev, 1919 struct ethtool_stats __always_unused *stats, u64 *data, 1920 struct ice_vsi *vsi) 1921 { 1922 struct ice_pf *pf = vsi->back; 1923 struct ice_tx_ring *tx_ring; 1924 struct ice_rx_ring *rx_ring; 1925 unsigned int j; 1926 int i = 0; 1927 char *p; 1928 1929 ice_update_pf_stats(pf); 1930 ice_update_vsi_stats(vsi); 1931 1932 for (j = 0; j < ICE_VSI_STATS_LEN; j++) { 1933 p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset; 1934 data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat == 1935 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1936 } 1937 1938 if (ice_is_port_repr_netdev(netdev)) 1939 return; 1940 1941 /* populate per queue stats */ 1942 rcu_read_lock(); 1943 1944 ice_for_each_alloc_txq(vsi, j) { 1945 tx_ring = READ_ONCE(vsi->tx_rings[j]); 1946 if (tx_ring && tx_ring->ring_stats) { 1947 data[i++] = tx_ring->ring_stats->stats.pkts; 1948 data[i++] = tx_ring->ring_stats->stats.bytes; 1949 } else { 1950 data[i++] = 0; 1951 data[i++] = 0; 1952 } 1953 } 1954 1955 ice_for_each_alloc_rxq(vsi, j) { 1956 rx_ring = READ_ONCE(vsi->rx_rings[j]); 1957 if (rx_ring && rx_ring->ring_stats) { 1958 data[i++] = rx_ring->ring_stats->stats.pkts; 1959 data[i++] = rx_ring->ring_stats->stats.bytes; 1960 } else { 1961 data[i++] = 0; 1962 data[i++] = 0; 1963 } 1964 } 1965 1966 rcu_read_unlock(); 1967 1968 if (vsi->type != ICE_VSI_PF) 1969 return; 1970 1971 for (j = 0; j < ICE_PF_STATS_LEN; j++) { 1972 p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset; 1973 data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat == 1974 sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; 1975 } 1976 1977 for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) { 1978 data[i++] = pf->stats.priority_xon_tx[j]; 1979 data[i++] = pf->stats.priority_xoff_tx[j]; 1980 } 1981 1982 for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) { 1983 data[i++] = pf->stats.priority_xon_rx[j]; 1984 data[i++] = pf->stats.priority_xoff_rx[j]; 1985 } 1986 } 1987 1988 static void 1989 ice_get_ethtool_stats(struct net_device *netdev, 1990 struct ethtool_stats __always_unused *stats, u64 *data) 1991 { 1992 struct ice_netdev_priv *np = netdev_priv(netdev); 1993 1994 __ice_get_ethtool_stats(netdev, stats, data, np->vsi); 1995 } 1996 1997 #define ICE_PHY_TYPE_LOW_MASK_MIN_1G (ICE_PHY_TYPE_LOW_100BASE_TX | \ 1998 ICE_PHY_TYPE_LOW_100M_SGMII) 1999 2000 #define ICE_PHY_TYPE_LOW_MASK_MIN_25G (ICE_PHY_TYPE_LOW_MASK_MIN_1G | \ 2001 ICE_PHY_TYPE_LOW_1000BASE_T | \ 2002 ICE_PHY_TYPE_LOW_1000BASE_SX | \ 2003 ICE_PHY_TYPE_LOW_1000BASE_LX | \ 2004 ICE_PHY_TYPE_LOW_1000BASE_KX | \ 2005 ICE_PHY_TYPE_LOW_1G_SGMII | \ 2006 ICE_PHY_TYPE_LOW_2500BASE_T | \ 2007 ICE_PHY_TYPE_LOW_2500BASE_X | \ 2008 ICE_PHY_TYPE_LOW_2500BASE_KX | \ 2009 ICE_PHY_TYPE_LOW_5GBASE_T | \ 2010 ICE_PHY_TYPE_LOW_5GBASE_KR | \ 2011 ICE_PHY_TYPE_LOW_10GBASE_T | \ 2012 ICE_PHY_TYPE_LOW_10G_SFI_DA | \ 2013 ICE_PHY_TYPE_LOW_10GBASE_SR | \ 2014 ICE_PHY_TYPE_LOW_10GBASE_LR | \ 2015 ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \ 2016 ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \ 2017 ICE_PHY_TYPE_LOW_10G_SFI_C2C) 2018 2019 #define ICE_PHY_TYPE_LOW_MASK_100G (ICE_PHY_TYPE_LOW_100GBASE_CR4 | \ 2020 ICE_PHY_TYPE_LOW_100GBASE_SR4 | \ 2021 ICE_PHY_TYPE_LOW_100GBASE_LR4 | \ 2022 ICE_PHY_TYPE_LOW_100GBASE_KR4 | \ 2023 ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \ 2024 ICE_PHY_TYPE_LOW_100G_CAUI4 | \ 2025 ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \ 2026 ICE_PHY_TYPE_LOW_100G_AUI4 | \ 2027 ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \ 2028 ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \ 2029 ICE_PHY_TYPE_LOW_100GBASE_CP2 | \ 2030 ICE_PHY_TYPE_LOW_100GBASE_SR2 | \ 2031 ICE_PHY_TYPE_LOW_100GBASE_DR) 2032 2033 #define ICE_PHY_TYPE_HIGH_MASK_100G (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \ 2034 ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |\ 2035 ICE_PHY_TYPE_HIGH_100G_CAUI2 | \ 2036 ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \ 2037 ICE_PHY_TYPE_HIGH_100G_AUI2) 2038 2039 #define ICE_PHY_TYPE_HIGH_MASK_200G (ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 | \ 2040 ICE_PHY_TYPE_HIGH_200G_SR4 | \ 2041 ICE_PHY_TYPE_HIGH_200G_FR4 | \ 2042 ICE_PHY_TYPE_HIGH_200G_LR4 | \ 2043 ICE_PHY_TYPE_HIGH_200G_DR4 | \ 2044 ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 | \ 2045 ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC | \ 2046 ICE_PHY_TYPE_HIGH_200G_AUI4) 2047 2048 /** 2049 * ice_mask_min_supported_speeds 2050 * @hw: pointer to the HW structure 2051 * @phy_types_high: PHY type high 2052 * @phy_types_low: PHY type low to apply minimum supported speeds mask 2053 * 2054 * Apply minimum supported speeds mask to PHY type low. These are the speeds 2055 * for ethtool supported link mode. 
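 * On 100G- and 200G-capable ports everything below 25G is masked out; the
 * 100M modes are masked out when the PHY does not support 100M.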
2056 */ 2057 static void 2058 ice_mask_min_supported_speeds(struct ice_hw *hw, 2059 u64 phy_types_high, u64 *phy_types_low) 2060 { 2061 /* if the port supports 100G or 200G PHY types, the minimum supported speed is 25G */ 2062 if ((*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G) || 2063 (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G) || 2064 (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_200G)) 2065 *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G; 2066 else if (!ice_is_100m_speed_supported(hw)) 2067 *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G; 2068 } 2069 2070 /** 2071 * ice_linkmode_set_bit - set link mode bit 2072 * @phy_to_ethtool: PHY type to ethtool link mode struct to set 2073 * @ks: ethtool link ksettings struct to fill out 2074 * @req_speeds: speed requested by user 2075 * @advert_phy_type: advertised PHY type 2076 * @phy_type: PHY type 2077 */ 2078 static void 2079 ice_linkmode_set_bit(const struct ice_phy_type_to_ethtool *phy_to_ethtool, 2080 struct ethtool_link_ksettings *ks, u32 req_speeds, 2081 u64 advert_phy_type, u32 phy_type) 2082 { 2083 linkmode_set_bit(phy_to_ethtool->link_mode, ks->link_modes.supported); 2084 2085 if (req_speeds & phy_to_ethtool->aq_link_speed || 2086 (!req_speeds && advert_phy_type & BIT(phy_type))) 2087 linkmode_set_bit(phy_to_ethtool->link_mode, 2088 ks->link_modes.advertising); 2089 } 2090 2091 /** 2092 * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes 2093 * @netdev: network interface device structure 2094 * @ks: ethtool link ksettings struct to fill out 2095 */ 2096 static void 2097 ice_phy_type_to_ethtool(struct net_device *netdev, 2098 struct ethtool_link_ksettings *ks) 2099 { 2100 struct ice_netdev_priv *np = netdev_priv(netdev); 2101 struct ice_vsi *vsi = np->vsi; 2102 struct ice_pf *pf = vsi->back; 2103 u64 advert_phy_type_lo = 0; 2104 u64 advert_phy_type_hi = 0; 2105 u64 phy_types_high = 0; 2106 u64 phy_types_low = 0; 2107 u32 req_speeds; 2108 u32 i; 2109 2110 req_speeds = vsi->port_info->phy.link_info.req_speeds; 2111 2112 /* Check if lenient mode is supported and enabled, or in strict mode. 2113 * 2114 * In lenient mode the Supported link modes are the PHY types without 2115 * media. The Advertising link mode is either 1. the user requested 2116 * speed, 2. the override PHY mask, or 3. the PHY types with media. 2117 * 2118 * In strict mode the Supported link modes are the PHY types with media, 2119 * and the Advertising link modes are the media PHY types or the speed 2120 * requested by the user. 2121 */ 2122 if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) { 2123 phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo); 2124 phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi); 2125 2126 ice_mask_min_supported_speeds(&pf->hw, phy_types_high, 2127 &phy_types_low); 2128 /* determine advertised modes based on link override only 2129 * if it's supported and if the FW doesn't abstract the 2130 * driver from having to account for link overrides 2131 */ 2132 if (ice_fw_supports_link_override(&pf->hw) && 2133 !ice_fw_supports_report_dflt_cfg(&pf->hw)) { 2134 struct ice_link_default_override_tlv *ldo; 2135 2136 ldo = &pf->link_dflt_override; 2137 /* If override enabled and PHY mask set, then 2138 * Advertising link mode is the intersection of the PHY 2139 * types without media and the override PHY mask.
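 * Otherwise the advertised modes fall back to the PHY types with media.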
2140 */ 2141 if (ldo->options & ICE_LINK_OVERRIDE_EN && 2142 (ldo->phy_type_low || ldo->phy_type_high)) { 2143 advert_phy_type_lo = 2144 le64_to_cpu(pf->nvm_phy_type_lo) & 2145 ldo->phy_type_low; 2146 advert_phy_type_hi = 2147 le64_to_cpu(pf->nvm_phy_type_hi) & 2148 ldo->phy_type_high; 2149 } 2150 } 2151 } else { 2152 /* strict mode */ 2153 phy_types_low = vsi->port_info->phy.phy_type_low; 2154 phy_types_high = vsi->port_info->phy.phy_type_high; 2155 } 2156 2157 /* If Advertising link mode PHY type is not using override PHY type, 2158 * then use PHY type with media. 2159 */ 2160 if (!advert_phy_type_lo && !advert_phy_type_hi) { 2161 advert_phy_type_lo = vsi->port_info->phy.phy_type_low; 2162 advert_phy_type_hi = vsi->port_info->phy.phy_type_high; 2163 } 2164 2165 linkmode_zero(ks->link_modes.supported); 2166 linkmode_zero(ks->link_modes.advertising); 2167 2168 for (i = 0; i < ARRAY_SIZE(phy_type_low_lkup); i++) { 2169 if (phy_types_low & BIT_ULL(i)) 2170 ice_linkmode_set_bit(&phy_type_low_lkup[i], ks, 2171 req_speeds, advert_phy_type_lo, 2172 i); 2173 } 2174 2175 for (i = 0; i < ARRAY_SIZE(phy_type_high_lkup); i++) { 2176 if (phy_types_high & BIT_ULL(i)) 2177 ice_linkmode_set_bit(&phy_type_high_lkup[i], ks, 2178 req_speeds, advert_phy_type_hi, 2179 i); 2180 } 2181 } 2182 2183 #define TEST_SET_BITS_TIMEOUT 50 2184 #define TEST_SET_BITS_SLEEP_MAX 2000 2185 #define TEST_SET_BITS_SLEEP_MIN 1000 2186 2187 /** 2188 * ice_get_settings_link_up - Get Link settings for when link is up 2189 * @ks: ethtool ksettings to fill in 2190 * @netdev: network interface device structure 2191 */ 2192 static void 2193 ice_get_settings_link_up(struct ethtool_link_ksettings *ks, 2194 struct net_device *netdev) 2195 { 2196 struct ice_netdev_priv *np = netdev_priv(netdev); 2197 struct ice_port_info *pi = np->vsi->port_info; 2198 struct ice_link_status *link_info; 2199 struct ice_vsi *vsi = np->vsi; 2200 2201 link_info = &vsi->port_info->phy.link_info; 2202 2203 /* Get supported and advertised settings from PHY ability with media */ 2204 ice_phy_type_to_ethtool(netdev, ks); 2205 2206 switch (link_info->link_speed) { 2207 case ICE_AQ_LINK_SPEED_200GB: 2208 ks->base.speed = SPEED_200000; 2209 break; 2210 case ICE_AQ_LINK_SPEED_100GB: 2211 ks->base.speed = SPEED_100000; 2212 break; 2213 case ICE_AQ_LINK_SPEED_50GB: 2214 ks->base.speed = SPEED_50000; 2215 break; 2216 case ICE_AQ_LINK_SPEED_40GB: 2217 ks->base.speed = SPEED_40000; 2218 break; 2219 case ICE_AQ_LINK_SPEED_25GB: 2220 ks->base.speed = SPEED_25000; 2221 break; 2222 case ICE_AQ_LINK_SPEED_20GB: 2223 ks->base.speed = SPEED_20000; 2224 break; 2225 case ICE_AQ_LINK_SPEED_10GB: 2226 ks->base.speed = SPEED_10000; 2227 break; 2228 case ICE_AQ_LINK_SPEED_5GB: 2229 ks->base.speed = SPEED_5000; 2230 break; 2231 case ICE_AQ_LINK_SPEED_2500MB: 2232 ks->base.speed = SPEED_2500; 2233 break; 2234 case ICE_AQ_LINK_SPEED_1000MB: 2235 ks->base.speed = SPEED_1000; 2236 break; 2237 case ICE_AQ_LINK_SPEED_100MB: 2238 ks->base.speed = SPEED_100; 2239 break; 2240 default: 2241 netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n", 2242 link_info->link_speed); 2243 break; 2244 } 2245 ks->base.duplex = DUPLEX_FULL; 2246 2247 if (link_info->an_info & ICE_AQ_AN_COMPLETED) 2248 ethtool_link_ksettings_add_link_mode(ks, lp_advertising, 2249 Autoneg); 2250 2251 /* Set flow control negotiated Rx/Tx pause */ 2252 switch (pi->fc.current_mode) { 2253 case ICE_FC_FULL: 2254 ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause); 2255 break; 2256 case ICE_FC_TX_PAUSE: 2257 
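/* Tx-only pause is reported to the stack as Pause + Asym_Pause */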
ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause); 2258 ethtool_link_ksettings_add_link_mode(ks, lp_advertising, 2259 Asym_Pause); 2260 break; 2261 case ICE_FC_RX_PAUSE: 2262 ethtool_link_ksettings_add_link_mode(ks, lp_advertising, 2263 Asym_Pause); 2264 break; 2265 case ICE_FC_PFC: 2266 default: 2267 ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause); 2268 ethtool_link_ksettings_del_link_mode(ks, lp_advertising, 2269 Asym_Pause); 2270 break; 2271 } 2272 } 2273 2274 /** 2275 * ice_get_settings_link_down - Get the Link settings when link is down 2276 * @ks: ethtool ksettings to fill in 2277 * @netdev: network interface device structure 2278 * 2279 * Reports link settings that can be determined when link is down 2280 */ 2281 static void 2282 ice_get_settings_link_down(struct ethtool_link_ksettings *ks, 2283 struct net_device *netdev) 2284 { 2285 /* link is down and the driver needs to fall back on 2286 * supported PHY types to figure out what info to display 2287 */ 2288 ice_phy_type_to_ethtool(netdev, ks); 2289 2290 /* With no link, speed and duplex are unknown */ 2291 ks->base.speed = SPEED_UNKNOWN; 2292 ks->base.duplex = DUPLEX_UNKNOWN; 2293 } 2294 2295 /** 2296 * ice_get_link_ksettings - Get Link Speed and Duplex settings 2297 * @netdev: network interface device structure 2298 * @ks: ethtool ksettings 2299 * 2300 * Reports speed/duplex settings based on media_type 2301 */ 2302 static int 2303 ice_get_link_ksettings(struct net_device *netdev, 2304 struct ethtool_link_ksettings *ks) 2305 { 2306 struct ice_netdev_priv *np = netdev_priv(netdev); 2307 struct ice_aqc_get_phy_caps_data *caps; 2308 struct ice_link_status *hw_link_info; 2309 struct ice_vsi *vsi = np->vsi; 2310 int err; 2311 2312 ethtool_link_ksettings_zero_link_mode(ks, supported); 2313 ethtool_link_ksettings_zero_link_mode(ks, advertising); 2314 ethtool_link_ksettings_zero_link_mode(ks, lp_advertising); 2315 hw_link_info = &vsi->port_info->phy.link_info; 2316 2317 /* set speed and duplex */ 2318 if (hw_link_info->link_info & ICE_AQ_LINK_UP) 2319 ice_get_settings_link_up(ks, netdev); 2320 else 2321 ice_get_settings_link_down(ks, netdev); 2322 2323 /* set autoneg settings */ 2324 ks->base.autoneg = (hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ? 
2325 AUTONEG_ENABLE : AUTONEG_DISABLE; 2326 2327 /* set media type settings */ 2328 switch (vsi->port_info->phy.media_type) { 2329 case ICE_MEDIA_FIBER: 2330 ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); 2331 ks->base.port = PORT_FIBRE; 2332 break; 2333 case ICE_MEDIA_BASET: 2334 ethtool_link_ksettings_add_link_mode(ks, supported, TP); 2335 ethtool_link_ksettings_add_link_mode(ks, advertising, TP); 2336 ks->base.port = PORT_TP; 2337 break; 2338 case ICE_MEDIA_BACKPLANE: 2339 ethtool_link_ksettings_add_link_mode(ks, supported, Backplane); 2340 ethtool_link_ksettings_add_link_mode(ks, advertising, 2341 Backplane); 2342 ks->base.port = PORT_NONE; 2343 break; 2344 case ICE_MEDIA_DA: 2345 ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); 2346 ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); 2347 ks->base.port = PORT_DA; 2348 break; 2349 default: 2350 ks->base.port = PORT_OTHER; 2351 break; 2352 } 2353 2354 /* flow control is symmetric and always supported */ 2355 ethtool_link_ksettings_add_link_mode(ks, supported, Pause); 2356 2357 caps = kzalloc(sizeof(*caps), GFP_KERNEL); 2358 if (!caps) 2359 return -ENOMEM; 2360 2361 err = ice_aq_get_phy_caps(vsi->port_info, false, 2362 ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); 2363 if (err) 2364 goto done; 2365 2366 /* Set the advertised flow control based on the PHY capability */ 2367 if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) && 2368 (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) { 2369 ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); 2370 ethtool_link_ksettings_add_link_mode(ks, advertising, 2371 Asym_Pause); 2372 } else if (caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) { 2373 ethtool_link_ksettings_add_link_mode(ks, advertising, 2374 Asym_Pause); 2375 } else if (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) { 2376 ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); 2377 ethtool_link_ksettings_add_link_mode(ks, advertising, 2378 Asym_Pause); 2379 } else { 2380 ethtool_link_ksettings_del_link_mode(ks, advertising, Pause); 2381 ethtool_link_ksettings_del_link_mode(ks, advertising, 2382 Asym_Pause); 2383 } 2384 2385 /* Set advertised FEC modes based on PHY capability */ 2386 ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); 2387 2388 if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ || 2389 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ) 2390 ethtool_link_ksettings_add_link_mode(ks, advertising, 2391 FEC_BASER); 2392 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ || 2393 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) 2394 ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); 2395 2396 err = ice_aq_get_phy_caps(vsi->port_info, false, 2397 ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); 2398 if (err) 2399 goto done; 2400 2401 /* Set supported FEC modes based on PHY capability */ 2402 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); 2403 2404 if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN || 2405 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN) 2406 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); 2407 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN) 2408 ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); 2409 2410 /* Set supported and advertised autoneg */ 2411 if (ice_is_phy_caps_an_enabled(caps)) { 2412 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); 2413 ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); 2414 } 2415 2416 done: 2417 
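/* caps was used by both PHY capability queries above */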
kfree(caps); 2418 return err; 2419 } 2420 2421 /** 2422 * ice_speed_to_aq_link - Get the AQ link speed for an ethtool forced speed 2423 * @speed: ethtool forced speed 2424 */ 2425 static u16 ice_speed_to_aq_link(int speed) 2426 { 2427 int aq_speed; 2428 2429 switch (speed) { 2430 case SPEED_10: 2431 aq_speed = ICE_AQ_LINK_SPEED_10MB; 2432 break; 2433 case SPEED_100: 2434 aq_speed = ICE_AQ_LINK_SPEED_100MB; 2435 break; 2436 case SPEED_1000: 2437 aq_speed = ICE_AQ_LINK_SPEED_1000MB; 2438 break; 2439 case SPEED_2500: 2440 aq_speed = ICE_AQ_LINK_SPEED_2500MB; 2441 break; 2442 case SPEED_5000: 2443 aq_speed = ICE_AQ_LINK_SPEED_5GB; 2444 break; 2445 case SPEED_10000: 2446 aq_speed = ICE_AQ_LINK_SPEED_10GB; 2447 break; 2448 case SPEED_20000: 2449 aq_speed = ICE_AQ_LINK_SPEED_20GB; 2450 break; 2451 case SPEED_25000: 2452 aq_speed = ICE_AQ_LINK_SPEED_25GB; 2453 break; 2454 case SPEED_40000: 2455 aq_speed = ICE_AQ_LINK_SPEED_40GB; 2456 break; 2457 case SPEED_50000: 2458 aq_speed = ICE_AQ_LINK_SPEED_50GB; 2459 break; 2460 case SPEED_100000: 2461 aq_speed = ICE_AQ_LINK_SPEED_100GB; 2462 break; 2463 default: 2464 aq_speed = ICE_AQ_LINK_SPEED_UNKNOWN; 2465 break; 2466 } 2467 return aq_speed; 2468 } 2469 2470 /** 2471 * ice_ksettings_find_adv_link_speed - Find advertising link speed 2472 * @ks: ethtool ksettings 2473 */ 2474 static u16 2475 ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) 2476 { 2477 const struct ethtool_forced_speed_map *map; 2478 u16 adv_link_speed = 0; 2479 2480 for (u32 i = 0; i < ARRAY_SIZE(ice_adv_lnk_speed_maps); i++) { 2481 map = ice_adv_lnk_speed_maps + i; 2482 if (linkmode_intersects(ks->link_modes.advertising, map->caps)) 2483 adv_link_speed |= ice_speed_to_aq_link(map->speed); 2484 } 2485 2486 return adv_link_speed; 2487 } 2488 2489 /** 2490 * ice_setup_autoneg - configure PHY autonegotiation 2491 * @p: port info 2492 * @ks: ethtool_link_ksettings 2493 * @config: configuration that will be sent down to FW 2494 * @autoneg_enabled: autonegotiation is enabled or not 2495 * @autoneg_changed: whether autonegotiation will change 2496 * @netdev: network interface device structure 2497 * 2498 * Set up the PHY autonegotiation feature 2499 */ 2500 static int 2501 ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks, 2502 struct ice_aqc_set_phy_cfg_data *config, 2503 u8 autoneg_enabled, u8 *autoneg_changed, 2504 struct net_device *netdev) 2505 { 2506 int err = 0; 2507 2508 *autoneg_changed = 0; 2509 2510 /* Check autoneg */ 2511 if (autoneg_enabled == AUTONEG_ENABLE) { 2512 /* If autoneg was not already enabled */ 2513 if (!(p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)) { 2514 /* If autoneg is not supported, return error */ 2515 if (!ethtool_link_ksettings_test_link_mode(ks, 2516 supported, 2517 Autoneg)) { 2518 netdev_info(netdev, "Autoneg not supported on this phy.\n"); 2519 err = -EINVAL; 2520 } else { 2521 /* Autoneg is allowed to change */ 2522 config->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 2523 *autoneg_changed = 1; 2524 } 2525 } 2526 } else { 2527 /* If autoneg is currently enabled */ 2528 if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) { 2529 /* If the PHY reports Autoneg in its supported modes it 2530 * cannot be disabled (10GBASE_T is the only PHY that can), 2531 * so return an error 2532 */ 2533 if (ethtool_link_ksettings_test_link_mode(ks, 2534 supported, 2535 Autoneg)) { 2536 netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); 2537 err = -EINVAL; 2538 } else { 2539 /* Autoneg is allowed to change */ 2540 config->caps &= ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 2541 *autoneg_changed = 1; 2542 }
2542 } 2543 } 2544 2545 return err; 2546 } 2547 2548 /** 2549 * ice_set_phy_type_from_speed - set phy_types based on speeds 2550 * and advertised modes 2551 * @ks: ethtool link ksettings struct 2552 * @phy_type_low: pointer to the lower part of phy_type 2553 * @phy_type_high: pointer to the higher part of phy_type 2554 * @adv_link_speed: targeted link speeds bitmap 2555 */ 2556 static void 2557 ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks, 2558 u64 *phy_type_low, u64 *phy_type_high, 2559 u16 adv_link_speed) 2560 { 2561 /* Handle 1000M speed in a special way because ice_update_phy_type 2562 * enables all link modes, but having mixed copper and optical 2563 * standards is not supported. 2564 */ 2565 adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB; 2566 2567 if (ethtool_link_ksettings_test_link_mode(ks, advertising, 2568 1000baseT_Full)) 2569 *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T | 2570 ICE_PHY_TYPE_LOW_1G_SGMII; 2571 2572 if (ethtool_link_ksettings_test_link_mode(ks, advertising, 2573 1000baseKX_Full)) 2574 *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX; 2575 2576 if (ethtool_link_ksettings_test_link_mode(ks, advertising, 2577 1000baseX_Full)) 2578 *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX | 2579 ICE_PHY_TYPE_LOW_1000BASE_LX; 2580 2581 ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed); 2582 } 2583 2584 /** 2585 * ice_set_link_ksettings - Set Speed and Duplex 2586 * @netdev: network interface device structure 2587 * @ks: ethtool ksettings 2588 * 2589 * Set speed/duplex per media_types advertised/forced 2590 */ 2591 static int 2592 ice_set_link_ksettings(struct net_device *netdev, 2593 const struct ethtool_link_ksettings *ks) 2594 { 2595 struct ice_netdev_priv *np = netdev_priv(netdev); 2596 u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT; 2597 struct ethtool_link_ksettings copy_ks = *ks; 2598 struct ethtool_link_ksettings safe_ks = {}; 2599 struct ice_aqc_get_phy_caps_data *phy_caps; 2600 struct ice_aqc_set_phy_cfg_data config; 2601 u16 adv_link_speed, curr_link_speed; 2602 struct ice_pf *pf = np->vsi->back; 2603 struct ice_port_info *pi; 2604 u8 autoneg_changed = 0; 2605 u64 phy_type_high = 0; 2606 u64 phy_type_low = 0; 2607 bool linkup; 2608 int err; 2609 2610 pi = np->vsi->port_info; 2611 2612 if (!pi) 2613 return -EIO; 2614 2615 if (pi->phy.media_type != ICE_MEDIA_BASET && 2616 pi->phy.media_type != ICE_MEDIA_FIBER && 2617 pi->phy.media_type != ICE_MEDIA_BACKPLANE && 2618 pi->phy.media_type != ICE_MEDIA_DA && 2619 pi->phy.link_info.link_info & ICE_AQ_LINK_UP) 2620 return -EOPNOTSUPP; 2621 2622 phy_caps = kzalloc(sizeof(*phy_caps), GFP_KERNEL); 2623 if (!phy_caps) 2624 return -ENOMEM; 2625 2626 /* Get the PHY capabilities based on media */ 2627 if (ice_fw_supports_report_dflt_cfg(pi->hw)) 2628 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, 2629 phy_caps, NULL); 2630 else 2631 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 2632 phy_caps, NULL); 2633 if (err) 2634 goto done; 2635 2636 /* save autoneg out of ksettings */ 2637 autoneg = copy_ks.base.autoneg; 2638 2639 /* Get link modes supported by hardware.*/ 2640 ice_phy_type_to_ethtool(netdev, &safe_ks); 2641 2642 /* and check against modes requested by user. 2643 * Return an error if unsupported mode was set. 
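 * The message below is only logged in strict mode; the error is returned
 * either way.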
2644 */ 2645 if (!bitmap_subset(copy_ks.link_modes.advertising, 2646 safe_ks.link_modes.supported, 2647 __ETHTOOL_LINK_MODE_MASK_NBITS)) { 2648 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) 2649 netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n"); 2650 err = -EOPNOTSUPP; 2651 goto done; 2652 } 2653 2654 /* get our own copy of the bits to check against */ 2655 memset(&safe_ks, 0, sizeof(safe_ks)); 2656 safe_ks.base.cmd = copy_ks.base.cmd; 2657 safe_ks.base.link_mode_masks_nwords = 2658 copy_ks.base.link_mode_masks_nwords; 2659 ice_get_link_ksettings(netdev, &safe_ks); 2660 2661 /* set autoneg back to what it currently is */ 2662 copy_ks.base.autoneg = safe_ks.base.autoneg; 2663 /* we don't compare the speed */ 2664 copy_ks.base.speed = safe_ks.base.speed; 2665 2666 /* If copy_ks.base and safe_ks.base are not the same now, then they are 2667 * trying to set something that we do not support. 2668 */ 2669 if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(copy_ks.base))) { 2670 err = -EOPNOTSUPP; 2671 goto done; 2672 } 2673 2674 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { 2675 timeout--; 2676 if (!timeout) { 2677 err = -EBUSY; 2678 goto done; 2679 } 2680 usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX); 2681 } 2682 2683 /* Copy the current user PHY configuration. The current user PHY 2684 * configuration is initialized during probe from PHY capabilities 2685 * software mode, and updated on set PHY configuration. 2686 */ 2687 config = pi->phy.curr_user_phy_cfg; 2688 2689 config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 2690 2691 /* Check autoneg */ 2692 err = ice_setup_autoneg(pi, &safe_ks, &config, autoneg, &autoneg_changed, 2693 netdev); 2694 2695 if (err) 2696 goto done; 2697 2698 /* Call to get the current link speed */ 2699 pi->phy.get_link_info = true; 2700 err = ice_get_link_status(pi, &linkup); 2701 if (err) 2702 goto done; 2703 2704 curr_link_speed = pi->phy.curr_user_speed_req; 2705 adv_link_speed = ice_ksettings_find_adv_link_speed(ks); 2706 2707 /* If speed didn't get set, set it to what it currently is. 2708 * This is needed because if advertise is 0 (as it is when autoneg 2709 * is disabled) then speed won't get set. 2710 */ 2711 if (!adv_link_speed) 2712 adv_link_speed = curr_link_speed; 2713 2714 /* Convert the advertised link speeds to their corresponding PHY_TYPEs */ 2715 ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high, 2716 adv_link_speed); 2717 2718 if (!autoneg_changed && adv_link_speed == curr_link_speed) { 2719 netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); 2720 goto done; 2721 } 2722 2723 /* save the requested speeds */ 2724 pi->phy.link_info.req_speeds = adv_link_speed; 2725 2726 /* set link and auto negotiation so changes take effect */ 2727 config.caps |= ICE_AQ_PHY_ENA_LINK; 2728 2729 /* check if there is a PHY type for the requested advertised speed */ 2730 if (!(phy_type_low || phy_type_high)) { 2731 netdev_info(netdev, "The selected speed is not supported by the current media.
Please select a link speed that is supported by the current media.\n"); 2732 err = -EOPNOTSUPP; 2733 goto done; 2734 } 2735 2736 /* intersect requested advertised speed PHY types with media PHY types 2737 * for set PHY configuration 2738 */ 2739 config.phy_type_high = cpu_to_le64(phy_type_high) & 2740 phy_caps->phy_type_high; 2741 config.phy_type_low = cpu_to_le64(phy_type_low) & 2742 phy_caps->phy_type_low; 2743 2744 if (!(config.phy_type_high || config.phy_type_low)) { 2745 /* If there is no intersection and lenient mode is enabled, then 2746 * intersect the requested advertised speed with NVM media type 2747 * PHY types. 2748 */ 2749 if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) { 2750 config.phy_type_high = cpu_to_le64(phy_type_high) & 2751 pf->nvm_phy_type_hi; 2752 config.phy_type_low = cpu_to_le64(phy_type_low) & 2753 pf->nvm_phy_type_lo; 2754 } else { 2755 netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n"); 2756 err = -EOPNOTSUPP; 2757 goto done; 2758 } 2759 } 2760 2761 /* If link is up put link down */ 2762 if (pi->phy.link_info.link_info & ICE_AQ_LINK_UP) { 2763 /* Tell the OS the link is going down; it will come back 2764 * up asynchronously when FW says it is ready 2765 */ 2766 ice_print_link_msg(np->vsi, false); 2767 netif_carrier_off(netdev); 2768 netif_tx_stop_all_queues(netdev); 2769 } 2770 2771 /* make the aq call */ 2772 err = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL); 2773 if (err) { 2774 netdev_info(netdev, "Set phy config failed\n"); 2775 goto done; 2776 } 2777 2778 /* Save speed request */ 2779 pi->phy.curr_user_speed_req = adv_link_speed; 2780 done: 2781 kfree(phy_caps); 2782 clear_bit(ICE_CFG_BUSY, pf->state); 2783 2784 return err; 2785 } 2786 2787 static u32 ice_parse_hdrs(const struct ethtool_rxfh_fields *nfc) 2788 { 2789 u32 hdrs = ICE_FLOW_SEG_HDR_NONE; 2790 2791 switch (nfc->flow_type) { 2792 case TCP_V4_FLOW: 2793 hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4; 2794 break; 2795 case UDP_V4_FLOW: 2796 hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4; 2797 break; 2798 case SCTP_V4_FLOW: 2799 hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4; 2800 break; 2801 case GTPU_V4_FLOW: 2802 hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4; 2803 break; 2804 case GTPC_V4_FLOW: 2805 hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4; 2806 break; 2807 case GTPC_TEID_V4_FLOW: 2808 hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4; 2809 break; 2810 case GTPU_EH_V4_FLOW: 2811 hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4; 2812 break; 2813 case GTPU_UL_V4_FLOW: 2814 hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4; 2815 break; 2816 case GTPU_DL_V4_FLOW: 2817 hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4; 2818 break; 2819 case TCP_V6_FLOW: 2820 hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6; 2821 break; 2822 case UDP_V6_FLOW: 2823 hdrs |= ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6; 2824 break; 2825 case SCTP_V6_FLOW: 2826 hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6; 2827 break; 2828 case GTPU_V6_FLOW: 2829 hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6; 2830 break; 2831 case GTPC_V6_FLOW: 2832 hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6; 2833 break; 2834 case GTPC_TEID_V6_FLOW: 2835 hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6; 2836 break; 2837 case GTPU_EH_V6_FLOW: 2838 hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6; 2839 break; 2840 case
GTPU_UL_V6_FLOW: 2841 hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6; 2842 break; 2843 case GTPU_DL_V6_FLOW: 2844 hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6; 2845 break; 2846 default: 2847 break; 2848 } 2849 return hdrs; 2850 } 2851 2852 static u64 ice_parse_hash_flds(const struct ethtool_rxfh_fields *nfc, bool symm) 2853 { 2854 u64 hfld = ICE_HASH_INVALID; 2855 2856 if (nfc->data & RXH_IP_SRC || nfc->data & RXH_IP_DST) { 2857 switch (nfc->flow_type) { 2858 case TCP_V4_FLOW: 2859 case UDP_V4_FLOW: 2860 case SCTP_V4_FLOW: 2861 case GTPU_V4_FLOW: 2862 case GTPC_V4_FLOW: 2863 case GTPC_TEID_V4_FLOW: 2864 case GTPU_EH_V4_FLOW: 2865 case GTPU_UL_V4_FLOW: 2866 case GTPU_DL_V4_FLOW: 2867 if (nfc->data & RXH_IP_SRC) 2868 hfld |= ICE_FLOW_HASH_FLD_IPV4_SA; 2869 if (nfc->data & RXH_IP_DST) 2870 hfld |= ICE_FLOW_HASH_FLD_IPV4_DA; 2871 break; 2872 case TCP_V6_FLOW: 2873 case UDP_V6_FLOW: 2874 case SCTP_V6_FLOW: 2875 case GTPU_V6_FLOW: 2876 case GTPC_V6_FLOW: 2877 case GTPC_TEID_V6_FLOW: 2878 case GTPU_EH_V6_FLOW: 2879 case GTPU_UL_V6_FLOW: 2880 case GTPU_DL_V6_FLOW: 2881 if (nfc->data & RXH_IP_SRC) 2882 hfld |= ICE_FLOW_HASH_FLD_IPV6_SA; 2883 if (nfc->data & RXH_IP_DST) 2884 hfld |= ICE_FLOW_HASH_FLD_IPV6_DA; 2885 break; 2886 default: 2887 break; 2888 } 2889 } 2890 2891 if (nfc->data & RXH_L4_B_0_1 || nfc->data & RXH_L4_B_2_3) { 2892 switch (nfc->flow_type) { 2893 case TCP_V4_FLOW: 2894 case TCP_V6_FLOW: 2895 if (nfc->data & RXH_L4_B_0_1) 2896 hfld |= ICE_FLOW_HASH_FLD_TCP_SRC_PORT; 2897 if (nfc->data & RXH_L4_B_2_3) 2898 hfld |= ICE_FLOW_HASH_FLD_TCP_DST_PORT; 2899 break; 2900 case UDP_V4_FLOW: 2901 case UDP_V6_FLOW: 2902 if (nfc->data & RXH_L4_B_0_1) 2903 hfld |= ICE_FLOW_HASH_FLD_UDP_SRC_PORT; 2904 if (nfc->data & RXH_L4_B_2_3) 2905 hfld |= ICE_FLOW_HASH_FLD_UDP_DST_PORT; 2906 break; 2907 case SCTP_V4_FLOW: 2908 case SCTP_V6_FLOW: 2909 if (nfc->data & RXH_L4_B_0_1) 2910 hfld |= ICE_FLOW_HASH_FLD_SCTP_SRC_PORT; 2911 if (nfc->data & RXH_L4_B_2_3) 2912 hfld |= ICE_FLOW_HASH_FLD_SCTP_DST_PORT; 2913 break; 2914 default: 2915 break; 2916 } 2917 } 2918 2919 if (nfc->data & RXH_GTP_TEID) { 2920 switch (nfc->flow_type) { 2921 case GTPC_TEID_V4_FLOW: 2922 case GTPC_TEID_V6_FLOW: 2923 hfld |= ICE_FLOW_HASH_FLD_GTPC_TEID; 2924 break; 2925 case GTPU_V4_FLOW: 2926 case GTPU_V6_FLOW: 2927 hfld |= ICE_FLOW_HASH_FLD_GTPU_IP_TEID; 2928 break; 2929 case GTPU_EH_V4_FLOW: 2930 case GTPU_EH_V6_FLOW: 2931 hfld |= ICE_FLOW_HASH_FLD_GTPU_EH_TEID; 2932 break; 2933 case GTPU_UL_V4_FLOW: 2934 case GTPU_UL_V6_FLOW: 2935 hfld |= ICE_FLOW_HASH_FLD_GTPU_UP_TEID; 2936 break; 2937 case GTPU_DL_V4_FLOW: 2938 case GTPU_DL_V6_FLOW: 2939 hfld |= ICE_FLOW_HASH_FLD_GTPU_DWN_TEID; 2940 break; 2941 default: 2942 break; 2943 } 2944 } 2945 2946 return hfld; 2947 } 2948 2949 static int 2950 ice_set_rxfh_fields(struct net_device *netdev, 2951 const struct ethtool_rxfh_fields *nfc, 2952 struct netlink_ext_ack *extack) 2953 { 2954 struct ice_netdev_priv *np = netdev_priv(netdev); 2955 struct ice_vsi *vsi = np->vsi; 2956 struct ice_pf *pf = vsi->back; 2957 struct ice_rss_hash_cfg cfg; 2958 struct device *dev; 2959 u64 hashed_flds; 2960 int status; 2961 bool symm; 2962 u32 hdrs; 2963 2964 dev = ice_pf_to_dev(pf); 2965 if (ice_is_safe_mode(pf)) { 2966 dev_dbg(dev, "Advanced RSS disabled. 
Package download failed, vsi num = %d\n", 2967 vsi->vsi_num); 2968 return -EINVAL; 2969 } 2970 2971 symm = !!(vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ); 2972 hashed_flds = ice_parse_hash_flds(nfc, symm); 2973 if (hashed_flds == ICE_HASH_INVALID) { 2974 dev_dbg(dev, "Invalid hash fields, vsi num = %d\n", 2975 vsi->vsi_num); 2976 return -EINVAL; 2977 } 2978 2979 hdrs = ice_parse_hdrs(nfc); 2980 if (hdrs == ICE_FLOW_SEG_HDR_NONE) { 2981 dev_dbg(dev, "Header type is not valid, vsi num = %d\n", 2982 vsi->vsi_num); 2983 return -EINVAL; 2984 } 2985 2986 cfg.hash_flds = hashed_flds; 2987 cfg.addl_hdrs = hdrs; 2988 cfg.hdr_type = ICE_RSS_ANY_HEADERS; 2989 cfg.symm = symm; 2990 2991 status = ice_add_rss_cfg(&pf->hw, vsi, &cfg); 2992 if (status) { 2993 dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n", 2994 vsi->vsi_num, status); 2995 return status; 2996 } 2997 2998 return 0; 2999 } 3000 3001 static int 3002 ice_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *nfc) 3003 { 3004 struct ice_netdev_priv *np = netdev_priv(netdev); 3005 struct ice_vsi *vsi = np->vsi; 3006 struct ice_pf *pf = vsi->back; 3007 struct device *dev; 3008 u64 hash_flds; 3009 bool symm; 3010 u32 hdrs; 3011 3012 dev = ice_pf_to_dev(pf); 3013 3014 nfc->data = 0; 3015 if (ice_is_safe_mode(pf)) { 3016 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n", 3017 vsi->vsi_num); 3018 return 0; 3019 } 3020 3021 hdrs = ice_parse_hdrs(nfc); 3022 if (hdrs == ICE_FLOW_SEG_HDR_NONE) { 3023 dev_dbg(dev, "Header type is not valid, vsi num = %d\n", 3024 vsi->vsi_num); 3025 return 0; 3026 } 3027 3028 hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm); 3029 if (hash_flds == ICE_HASH_INVALID) { 3030 dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n", 3031 vsi->vsi_num); 3032 return 0; 3033 } 3034 3035 if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_SA || 3036 hash_flds & ICE_FLOW_HASH_FLD_IPV6_SA) 3037 nfc->data |= (u64)RXH_IP_SRC; 3038 3039 if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_DA || 3040 hash_flds & ICE_FLOW_HASH_FLD_IPV6_DA) 3041 nfc->data |= (u64)RXH_IP_DST; 3042 3043 if (hash_flds & ICE_FLOW_HASH_FLD_TCP_SRC_PORT || 3044 hash_flds & ICE_FLOW_HASH_FLD_UDP_SRC_PORT || 3045 hash_flds & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT) 3046 nfc->data |= (u64)RXH_L4_B_0_1; 3047 3048 if (hash_flds & ICE_FLOW_HASH_FLD_TCP_DST_PORT || 3049 hash_flds & ICE_FLOW_HASH_FLD_UDP_DST_PORT || 3050 hash_flds & ICE_FLOW_HASH_FLD_SCTP_DST_PORT) 3051 nfc->data |= (u64)RXH_L4_B_2_3; 3052 3053 if (hash_flds & ICE_FLOW_HASH_FLD_GTPC_TEID || 3054 hash_flds & ICE_FLOW_HASH_FLD_GTPU_IP_TEID || 3055 hash_flds & ICE_FLOW_HASH_FLD_GTPU_EH_TEID || 3056 hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID || 3057 hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID) 3058 nfc->data |= (u64)RXH_GTP_TEID; 3059 3060 return 0; 3061 } 3062 3063 /** 3064 * ice_set_rxnfc - command to set Rx flow rules. 
3065 * @netdev: network interface device structure 3066 * @cmd: ethtool rxnfc command 3067 * 3068 * Returns 0 for success and negative values for errors 3069 */ 3070 static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) 3071 { 3072 struct ice_netdev_priv *np = netdev_priv(netdev); 3073 struct ice_vsi *vsi = np->vsi; 3074 3075 switch (cmd->cmd) { 3076 case ETHTOOL_SRXCLSRLINS: 3077 return ice_add_fdir_ethtool(vsi, cmd); 3078 case ETHTOOL_SRXCLSRLDEL: 3079 return ice_del_fdir_ethtool(vsi, cmd); 3080 default: 3081 break; 3082 } 3083 return -EOPNOTSUPP; 3084 } 3085 3086 /** 3087 * ice_get_rxnfc - command to get Rx flow classification rules 3088 * @netdev: network interface device structure 3089 * @cmd: ethtool rxnfc command 3090 * @rule_locs: buffer to return Rx flow classification rules 3091 * 3092 * Returns 0 on success and a negative value on failure. 3093 */ 3094 static int 3095 ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, 3096 u32 *rule_locs) 3097 { 3098 struct ice_netdev_priv *np = netdev_priv(netdev); 3099 struct ice_vsi *vsi = np->vsi; 3100 int ret = -EOPNOTSUPP; 3101 struct ice_hw *hw; 3102 3103 hw = &vsi->back->hw; 3104 3105 switch (cmd->cmd) { 3106 case ETHTOOL_GRXRINGS: 3107 cmd->data = vsi->rss_size; 3108 ret = 0; 3109 break; 3110 case ETHTOOL_GRXCLSRLCNT: 3111 cmd->rule_cnt = hw->fdir_active_fltr; 3112 /* report total rule count */ 3113 cmd->data = ice_get_fdir_cnt_all(hw); 3114 ret = 0; 3115 break; 3116 case ETHTOOL_GRXCLSRULE: 3117 ret = ice_get_ethtool_fdir_entry(hw, cmd); 3118 break; 3119 case ETHTOOL_GRXCLSRLALL: 3120 ret = ice_get_fdir_fltr_ids(hw, cmd, rule_locs); 3121 break; 3122 default: 3123 break; 3124 } 3125 3126 return ret; 3127 } 3128 3129 static void 3130 ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, 3131 struct kernel_ethtool_ringparam *kernel_ring, 3132 struct netlink_ext_ack *extack) 3133 { 3134 struct ice_netdev_priv *np = netdev_priv(netdev); 3135 struct ice_vsi *vsi = np->vsi; 3136 struct ice_hw *hw; 3137 3138 hw = &vsi->back->hw; 3139 ring->rx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw); 3140 ring->tx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw); 3141 if (vsi->tx_rings && vsi->rx_rings) { 3142 ring->rx_pending = vsi->rx_rings[0]->count; 3143 ring->tx_pending = vsi->tx_rings[0]->count; 3144 } else { 3145 ring->rx_pending = 0; 3146 ring->tx_pending = 0; 3147 } 3148 3149 /* Rx mini and jumbo rings are not supported */ 3150 ring->rx_mini_max_pending = 0; 3151 ring->rx_jumbo_max_pending = 0; 3152 ring->rx_mini_pending = 0; 3153 ring->rx_jumbo_pending = 0; 3154 3155 kernel_ring->tcp_data_split = vsi->hsplit ?
3156 ETHTOOL_TCP_DATA_SPLIT_ENABLED : 3157 ETHTOOL_TCP_DATA_SPLIT_DISABLED; 3158 } 3159 3160 static int 3161 ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, 3162 struct kernel_ethtool_ringparam *kernel_ring, 3163 struct netlink_ext_ack *extack) 3164 { 3165 struct ice_netdev_priv *np = netdev_priv(netdev); 3166 struct ice_tx_ring *xdp_rings = NULL; 3167 struct ice_tx_ring *tx_rings = NULL; 3168 struct ice_rx_ring *rx_rings = NULL; 3169 struct ice_vsi *vsi = np->vsi; 3170 struct ice_pf *pf = vsi->back; 3171 int i, timeout = 50, err = 0; 3172 struct ice_hw *hw = &pf->hw; 3173 u16 new_rx_cnt, new_tx_cnt; 3174 bool hsplit; 3175 3176 if (ring->tx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) || 3177 ring->tx_pending < ICE_MIN_NUM_DESC || 3178 ring->rx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) || 3179 ring->rx_pending < ICE_MIN_NUM_DESC) { 3180 netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n", 3181 ring->tx_pending, ring->rx_pending, 3182 ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC_BY_MAC(hw), 3183 ICE_REQ_DESC_MULTIPLE); 3184 return -EINVAL; 3185 } 3186 3187 /* Return if there are no rings (device is reloading) */ 3188 if (!vsi->tx_rings || !vsi->rx_rings) 3189 return -EBUSY; 3190 3191 new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE); 3192 if (new_tx_cnt != ring->tx_pending) 3193 netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n", 3194 new_tx_cnt); 3195 new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE); 3196 if (new_rx_cnt != ring->rx_pending) 3197 netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n", 3198 new_rx_cnt); 3199 3200 hsplit = kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED; 3201 3202 /* if nothing to do return success */ 3203 if (new_tx_cnt == vsi->tx_rings[0]->count && 3204 new_rx_cnt == vsi->rx_rings[0]->count && 3205 hsplit == vsi->hsplit) { 3206 netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n"); 3207 return 0; 3208 } 3209 3210 /* If there is an AF_XDP UMEM attached to any of the Rx rings, 3211 * disallow changing the number of descriptors -- regardless 3212 * of whether the netdev is running or not.
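 * The UMEM must be detached first before the descriptor counts can change.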
3213 */ 3214 if (ice_xsk_any_rx_ring_ena(vsi)) 3215 return -EBUSY; 3216 3217 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { 3218 timeout--; 3219 if (!timeout) 3220 return -EBUSY; 3221 usleep_range(1000, 2000); 3222 } 3223 3224 /* set for the next time the netdev is started */ 3225 if (!netif_running(vsi->netdev)) { 3226 ice_for_each_alloc_txq(vsi, i) 3227 vsi->tx_rings[i]->count = new_tx_cnt; 3228 ice_for_each_alloc_rxq(vsi, i) 3229 vsi->rx_rings[i]->count = new_rx_cnt; 3230 if (ice_is_xdp_ena_vsi(vsi)) 3231 ice_for_each_xdp_txq(vsi, i) 3232 vsi->xdp_rings[i]->count = new_tx_cnt; 3233 vsi->num_tx_desc = (u16)new_tx_cnt; 3234 vsi->num_rx_desc = (u16)new_rx_cnt; 3235 vsi->hsplit = hsplit; 3236 3237 netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n"); 3238 goto done; 3239 } 3240 3241 if (new_tx_cnt == vsi->tx_rings[0]->count) 3242 goto process_rx; 3243 3244 /* alloc updated Tx resources */ 3245 netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n", 3246 vsi->tx_rings[0]->count, new_tx_cnt); 3247 3248 tx_rings = kcalloc(vsi->num_txq, sizeof(*tx_rings), GFP_KERNEL); 3249 if (!tx_rings) { 3250 err = -ENOMEM; 3251 goto done; 3252 } 3253 3254 ice_for_each_txq(vsi, i) { 3255 /* clone ring and setup updated count */ 3256 tx_rings[i] = *vsi->tx_rings[i]; 3257 tx_rings[i].count = new_tx_cnt; 3258 tx_rings[i].desc = NULL; 3259 tx_rings[i].tx_buf = NULL; 3260 tx_rings[i].tstamp_ring = NULL; 3261 tx_rings[i].tx_tstamps = &pf->ptp.port.tx; 3262 err = ice_setup_tx_ring(&tx_rings[i]); 3263 if (err) { 3264 while (i--) 3265 ice_clean_tx_ring(&tx_rings[i]); 3266 kfree(tx_rings); 3267 goto done; 3268 } 3269 } 3270 3271 if (!ice_is_xdp_ena_vsi(vsi)) 3272 goto process_rx; 3273 3274 /* alloc updated XDP resources */ 3275 netdev_info(netdev, "Changing XDP descriptor count from %d to %d\n", 3276 vsi->xdp_rings[0]->count, new_tx_cnt); 3277 3278 xdp_rings = kcalloc(vsi->num_xdp_txq, sizeof(*xdp_rings), GFP_KERNEL); 3279 if (!xdp_rings) { 3280 err = -ENOMEM; 3281 goto free_tx; 3282 } 3283 3284 ice_for_each_xdp_txq(vsi, i) { 3285 /* clone ring and setup updated count */ 3286 xdp_rings[i] = *vsi->xdp_rings[i]; 3287 xdp_rings[i].count = new_tx_cnt; 3288 xdp_rings[i].desc = NULL; 3289 xdp_rings[i].tx_buf = NULL; 3290 err = ice_setup_tx_ring(&xdp_rings[i]); 3291 if (err) { 3292 while (i--) 3293 ice_clean_tx_ring(&xdp_rings[i]); 3294 kfree(xdp_rings); 3295 goto free_tx; 3296 } 3297 ice_set_ring_xdp(&xdp_rings[i]); 3298 } 3299 3300 process_rx: 3301 if (new_rx_cnt == vsi->rx_rings[0]->count) 3302 goto process_link; 3303 3304 /* alloc updated Rx resources */ 3305 netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n", 3306 vsi->rx_rings[0]->count, new_rx_cnt); 3307 3308 rx_rings = kcalloc(vsi->num_rxq, sizeof(*rx_rings), GFP_KERNEL); 3309 if (!rx_rings) { 3310 err = -ENOMEM; 3311 goto free_tx; 3312 } 3313 3314 ice_for_each_rxq(vsi, i) { 3315 /* clone ring and setup updated count */ 3316 rx_rings[i] = *vsi->rx_rings[i]; 3317 rx_rings[i].count = new_rx_cnt; 3318 rx_rings[i].cached_phctime = pf->ptp.cached_phc_time; 3319 rx_rings[i].desc = NULL; 3320 rx_rings[i].xdp_buf = NULL; 3321 3322 /* this is to allow wr32 to have something to write to 3323 * during early allocation of Rx buffers 3324 */ 3325 rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS; 3326 3327 err = ice_setup_rx_ring(&rx_rings[i]); 3328 if (err) 3329 goto rx_unwind; 3330 rx_unwind: 3331 if (err) { 3332 while (i) { 3333 i--; 3334 ice_free_rx_ring(&rx_rings[i]); 3335 } 3336 kfree(rx_rings);
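/* report the failure and let free_tx release the cloned Tx rings */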
3337 err = -ENOMEM; 3338 goto free_tx; 3339 } 3340 } 3341 3342 process_link: 3343 vsi->hsplit = hsplit; 3344 3345 /* Bring interface down, copy in the new ring info, then restore the 3346 * interface. if VSI is up, bring it down and then back up 3347 */ 3348 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { 3349 ice_down(vsi); 3350 3351 if (tx_rings) { 3352 ice_for_each_txq(vsi, i) { 3353 ice_free_tx_ring(vsi->tx_rings[i]); 3354 *vsi->tx_rings[i] = tx_rings[i]; 3355 } 3356 kfree(tx_rings); 3357 } 3358 3359 if (rx_rings) { 3360 ice_for_each_rxq(vsi, i) { 3361 ice_free_rx_ring(vsi->rx_rings[i]); 3362 /* copy the real tail offset */ 3363 rx_rings[i].tail = vsi->rx_rings[i]->tail; 3364 /* this is to fake out the allocation routine 3365 * into thinking it has to realloc everything 3366 * but the recycling logic will let us re-use 3367 * the buffers allocated above 3368 */ 3369 rx_rings[i].next_to_use = 0; 3370 rx_rings[i].next_to_clean = 0; 3371 rx_rings[i].next_to_alloc = 0; 3372 *vsi->rx_rings[i] = rx_rings[i]; 3373 } 3374 kfree(rx_rings); 3375 } 3376 3377 if (xdp_rings) { 3378 ice_for_each_xdp_txq(vsi, i) { 3379 ice_free_tx_ring(vsi->xdp_rings[i]); 3380 *vsi->xdp_rings[i] = xdp_rings[i]; 3381 } 3382 kfree(xdp_rings); 3383 } 3384 3385 vsi->num_tx_desc = new_tx_cnt; 3386 vsi->num_rx_desc = new_rx_cnt; 3387 ice_up(vsi); 3388 } 3389 goto done; 3390 3391 free_tx: 3392 /* error cleanup if the Rx allocations failed after getting Tx */ 3393 if (tx_rings) { 3394 ice_for_each_txq(vsi, i) 3395 ice_free_tx_ring(&tx_rings[i]); 3396 kfree(tx_rings); 3397 } 3398 3399 done: 3400 clear_bit(ICE_CFG_BUSY, pf->state); 3401 return err; 3402 } 3403 3404 /** 3405 * ice_get_pauseparam - Get Flow Control status 3406 * @netdev: network interface device structure 3407 * @pause: ethernet pause (flow control) parameters 3408 * 3409 * Get requested flow control status from PHY capability. 3410 * If autoneg is true, then ethtool will send the ETHTOOL_GSET ioctl which 3411 * is handled by ice_get_link_ksettings. ice_get_link_ksettings will report 3412 * the negotiated Rx/Tx pause via lp_advertising. 3413 */ 3414 static void 3415 ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) 3416 { 3417 struct ice_netdev_priv *np = netdev_priv(netdev); 3418 struct ice_port_info *pi = np->vsi->port_info; 3419 struct ice_aqc_get_phy_caps_data *pcaps; 3420 struct ice_dcbx_cfg *dcbx_cfg; 3421 int status; 3422 3423 /* Initialize pause params */ 3424 pause->rx_pause = 0; 3425 pause->tx_pause = 0; 3426 3427 dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; 3428 3429 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3430 if (!pcaps) 3431 return; 3432 3433 /* Get current PHY config */ 3434 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, 3435 NULL); 3436 if (status) 3437 goto out; 3438 3439 pause->autoneg = ice_is_phy_caps_an_enabled(pcaps) ? 
AUTONEG_ENABLE : 3440 AUTONEG_DISABLE; 3441 3442 if (dcbx_cfg->pfc.pfcena) 3443 /* PFC enabled so report LFC as off */ 3444 goto out; 3445 3446 if (pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3447 pause->tx_pause = 1; 3448 if (pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3449 pause->rx_pause = 1; 3450 3451 out: 3452 kfree(pcaps); 3453 } 3454 3455 /** 3456 * ice_set_pauseparam - Set Flow Control parameter 3457 * @netdev: network interface device structure 3458 * @pause: requested Tx/Rx flow control parameters 3459 */ 3460 static int 3461 ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) 3462 { 3463 struct ice_netdev_priv *np = netdev_priv(netdev); 3464 struct ice_aqc_get_phy_caps_data *pcaps; 3465 struct ice_link_status *hw_link_info; 3466 struct ice_pf *pf = np->vsi->back; 3467 struct ice_dcbx_cfg *dcbx_cfg; 3468 struct ice_vsi *vsi = np->vsi; 3469 struct ice_hw *hw = &pf->hw; 3470 struct ice_port_info *pi; 3471 u8 aq_failures; 3472 bool link_up; 3473 u32 is_an; 3474 int err; 3475 3476 pi = vsi->port_info; 3477 hw_link_info = &pi->phy.link_info; 3478 dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; 3479 link_up = hw_link_info->link_info & ICE_AQ_LINK_UP; 3480 3481 /* Changing the port's flow control is not supported if this isn't the 3482 * PF VSI 3483 */ 3484 if (vsi->type != ICE_VSI_PF) { 3485 netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n"); 3486 return -EOPNOTSUPP; 3487 } 3488 3489 /* With ETHTOOL_GLINKSETTINGS defined, get pause param reports the 3490 * configured and negotiated flow control pause, and pause->autoneg 3491 * reports the SW-configured setting. Compare pause->autoneg with the 3492 * SW-configured value to prevent the user from using set pause param 3493 * to change autoneg. 3494 */ 3495 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3496 if (!pcaps) 3497 return -ENOMEM; 3498 3499 /* Get current PHY config */ 3500 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, 3501 NULL); 3502 if (err) { 3503 kfree(pcaps); 3504 return err; 3505 } 3506 3507 is_an = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE : 3508 AUTONEG_DISABLE; 3509 3510 kfree(pcaps); 3511 3512 if (pause->autoneg != is_an) { 3513 netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); 3514 return -EOPNOTSUPP; 3515 } 3516 3517 /* If we have link and don't have autoneg */ 3518 if (!test_bit(ICE_DOWN, pf->state) && 3519 !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) { 3520 /* Send a message that it might not necessarily work */ 3521 netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n"); 3522 } 3523 3524 if (dcbx_cfg->pfc.pfcena) { 3525 netdev_info(netdev, "Priority flow control enabled.
Cannot set link flow control.\n"); 3526 return -EOPNOTSUPP; 3527 } 3528 if (pause->rx_pause && pause->tx_pause) 3529 pi->fc.req_mode = ICE_FC_FULL; 3530 else if (pause->rx_pause && !pause->tx_pause) 3531 pi->fc.req_mode = ICE_FC_RX_PAUSE; 3532 else if (!pause->rx_pause && pause->tx_pause) 3533 pi->fc.req_mode = ICE_FC_TX_PAUSE; 3534 else if (!pause->rx_pause && !pause->tx_pause) 3535 pi->fc.req_mode = ICE_FC_NONE; 3536 else 3537 return -EINVAL; 3538 3539 /* Set the FC mode and only restart AN if link is up */ 3540 err = ice_set_fc(pi, &aq_failures, link_up); 3541 3542 if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) { 3543 netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n", 3544 err, libie_aq_str(hw->adminq.sq_last_status)); 3545 err = -EAGAIN; 3546 } else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) { 3547 netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n", 3548 err, libie_aq_str(hw->adminq.sq_last_status)); 3549 err = -EAGAIN; 3550 } else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) { 3551 netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n", 3552 err, libie_aq_str(hw->adminq.sq_last_status)); 3553 err = -EAGAIN; 3554 } 3555 3556 return err; 3557 } 3558 3559 /** 3560 * ice_get_rxfh_key_size - get the RSS hash key size 3561 * @netdev: network interface device structure 3562 * 3563 * Returns the hash key size. 3564 */ 3565 static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev) 3566 { 3567 return ICE_VSIQF_HKEY_ARRAY_SIZE; 3568 } 3569 3570 /** 3571 * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size 3572 * @netdev: network interface device structure 3573 * 3574 * Returns the table size. 3575 */ 3576 static u32 ice_get_rxfh_indir_size(struct net_device *netdev) 3577 { 3578 struct ice_netdev_priv *np = netdev_priv(netdev); 3579 3580 return np->vsi->rss_table_size; 3581 } 3582 3583 /** 3584 * ice_get_rxfh - get the Rx flow hash indirection table 3585 * @netdev: network interface device structure 3586 * @rxfh: pointer to param struct (indir, key, hfunc) 3587 * 3588 * Reads the indirection table directly from the hardware.
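 * Returns 0 on success or a negative error code on failure.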
3589 */ 3590 static int 3591 ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh) 3592 { 3593 struct ice_netdev_priv *np = netdev_priv(netdev); 3594 struct ice_vsi *vsi = np->vsi; 3595 struct ice_pf *pf = vsi->back; 3596 u16 qcount, offset; 3597 int err, i; 3598 u8 *lut; 3599 3600 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 3601 netdev_warn(netdev, "RSS is not supported on this VSI!\n"); 3602 return -EOPNOTSUPP; 3603 } 3604 3605 qcount = vsi->mqprio_qopt.qopt.count[0]; 3606 offset = vsi->mqprio_qopt.qopt.offset[0]; 3607 3608 rxfh->hfunc = ETH_RSS_HASH_TOP; 3609 if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ) 3610 rxfh->input_xfrm |= RXH_XFRM_SYM_XOR; 3611 3612 if (!rxfh->indir) 3613 return 0; 3614 3615 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); 3616 if (!lut) 3617 return -ENOMEM; 3618 3619 err = ice_get_rss_key(vsi, rxfh->key); 3620 if (err) 3621 goto out; 3622 3623 err = ice_get_rss_lut(vsi, lut, vsi->rss_table_size); 3624 if (err) 3625 goto out; 3626 3627 if (ice_is_adq_active(pf)) { 3628 for (i = 0; i < vsi->rss_table_size; i++) 3629 rxfh->indir[i] = offset + lut[i] % qcount; 3630 goto out; 3631 } 3632 3633 for (i = 0; i < vsi->rss_table_size; i++) 3634 rxfh->indir[i] = lut[i]; 3635 3636 out: 3637 kfree(lut); 3638 return err; 3639 } 3640 3641 /** 3642 * ice_set_rxfh - set the Rx flow hash indirection table 3643 * @netdev: network interface device structure 3644 * @rxfh: pointer to param struct (indir, key, hfunc) 3645 * @extack: extended ACK from the Netlink message 3646 * 3647 * Returns -EINVAL if the table specifies an invalid queue ID, otherwise 3648 * returns 0 after programming the table. 3649 */ 3650 static int 3651 ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh, 3652 struct netlink_ext_ack *extack) 3653 { 3654 struct ice_netdev_priv *np = netdev_priv(netdev); 3655 u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; 3656 struct ice_vsi *vsi = np->vsi; 3657 struct ice_pf *pf = vsi->back; 3658 struct device *dev; 3659 int err; 3660 3661 dev = ice_pf_to_dev(pf); 3662 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && 3663 rxfh->hfunc != ETH_RSS_HASH_TOP) 3664 return -EOPNOTSUPP; 3665 3666 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 3667 /* RSS is not supported; return an error */ 3668 netdev_warn(netdev, "RSS is not configured on this VSI!\n"); 3669 return -EIO; 3670 } 3671 3672 if (ice_is_adq_active(pf)) { 3673 netdev_err(netdev, "Cannot change RSS params with ADQ configured.\n"); 3674 return -EOPNOTSUPP; 3675 } 3676 3677 /* Update the VSI's hash function */ 3678 if (rxfh->input_xfrm & RXH_XFRM_SYM_XOR) 3679 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ; 3680 3681 err = ice_set_rss_hfunc(vsi, hfunc); 3682 if (err) 3683 return err; 3684 3685 if (rxfh->key) { 3686 if (!vsi->rss_hkey_user) { 3687 vsi->rss_hkey_user = 3688 devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE, 3689 GFP_KERNEL); 3690 if (!vsi->rss_hkey_user) 3691 return -ENOMEM; 3692 } 3693 memcpy(vsi->rss_hkey_user, rxfh->key, 3694 ICE_VSIQF_HKEY_ARRAY_SIZE); 3695 3696 err = ice_set_rss_key(vsi, vsi->rss_hkey_user); 3697 if (err) 3698 return err; 3699 } 3700 3701 if (!vsi->rss_lut_user) { 3702 vsi->rss_lut_user = devm_kzalloc(dev, vsi->rss_table_size, 3703 GFP_KERNEL); 3704 if (!vsi->rss_lut_user) 3705 return -ENOMEM; 3706 } 3707 3708 /* Each 32-bit value in 'indir' is stored as an 8-bit LUT entry */ 3709 if (rxfh->indir) { 3710 int i; 3711 3712 for (i = 0; i < vsi->rss_table_size; i++) 3713 vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]); 3714 } else { 3715
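/* no user LUT provided: fill it with the default queue distribution */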
		ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
				 vsi->rss_size);
	}

	err = ice_set_rss_lut(vsi, vsi->rss_lut_user, vsi->rss_table_size);
	if (err)
		return err;

	return 0;
}

static int
ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
{
	struct ice_pf *pf = ice_netdev_to_pf(dev);

	/* only report timestamping if PTP is enabled */
	if (pf->ptp.state != ICE_PTP_READY)
		return ethtool_op_get_ts_info(dev, info);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->phc_index = ice_ptp_clock_index(pf);

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

/**
 * ice_get_max_txq - return the maximum number of Tx queues in a PF
 * @pf: PF structure
 */
static int ice_get_max_txq(struct ice_pf *pf)
{
	return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_txq);
}

/**
 * ice_get_max_rxq - return the maximum number of Rx queues in a PF
 * @pf: PF structure
 */
static int ice_get_max_rxq(struct ice_pf *pf)
{
	return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_rxq);
}

/**
 * ice_get_combined_cnt - return the current number of combined channels
 * @vsi: PF VSI pointer
 *
 * Go through all queue vectors and count the ones that have both an Rx and
 * a Tx ring attached.
 */
static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
{
	u32 combined = 0;
	int q_idx;

	ice_for_each_q_vector(vsi, q_idx) {
		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];

		combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx);
	}

	return combined;
}

/**
 * ice_get_channels - get the current and max supported channels
 * @dev: network interface device structure
 * @ch: ethtool channel data structure
 */
static void
ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	/* report maximum channels */
	ch->max_rx = ice_get_max_rxq(pf);
	ch->max_tx = ice_get_max_txq(pf);
	ch->max_combined = min_t(int, ch->max_rx, ch->max_tx);

	/* report current channels */
	ch->combined_count = ice_get_combined_cnt(vsi);
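	/* Queues beyond the combined Rx/Tx pairs are reported as dedicated
	 * Rx-only or Tx-only channels.
	 */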
	ch->rx_count = vsi->num_rxq - ch->combined_count;
	ch->tx_count = vsi->num_txq - ch->combined_count;

	/* report other queues */
	ch->other_count = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
	ch->max_other = ch->other_count;
}

/**
 * ice_get_valid_rss_size - return valid number of RSS queues
 * @hw: pointer to the HW structure
 * @new_size: requested number of RSS queues
 */
static int ice_get_valid_rss_size(struct ice_hw *hw, int new_size)
{
	struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;

	return min_t(int, new_size, BIT(caps->rss_table_entry_width));
}

/**
 * ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size
 * @vsi: VSI to reconfigure RSS LUT on
 * @req_rss_size: requested number of queues to use for hashing
 *
 * Set the VSI's RSS parameters, then configure the RSS LUT based on these.
 */
static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	struct ice_hw *hw;
	int err;
	u8 *lut;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;

	if (!req_rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* set RSS LUT parameters */
	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		vsi->rss_size = 1;
	else
		vsi->rss_size = ice_get_valid_rss_size(hw, req_rss_size);
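
	/* With RSS disabled, a single-entry LUT steers everything to queue 0;
	 * otherwise the requested size is clamped to what the LUT width can
	 * address.
	 */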

	/* create/set RSS LUT */
	ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	if (err)
		dev_err(dev, "Cannot set RSS lut, err %d aq_err %s\n", err,
			libie_aq_str(hw->adminq.sq_last_status));

	kfree(lut);
	return err;
}

/**
 * ice_set_channels - set the number of channels
 * @dev: network interface device structure
 * @ch: ethtool channel data structure
 */
static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int new_rx = 0, new_tx = 0;
	bool locked = false;
	int ret = 0;

	/* do not support changing channels in Safe Mode */
	if (ice_is_safe_mode(pf)) {
		netdev_err(dev, "Changing channels in Safe Mode is not supported\n");
		return -EOPNOTSUPP;
	}
	/* do not support changing other_count */
	if (ch->other_count != (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1U : 0U))
		return -EINVAL;

	if (ice_is_adq_active(pf)) {
		netdev_err(dev, "Cannot set channels with ADQ configured.\n");
		return -EOPNOTSUPP;
	}

	if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) {
		netdev_err(dev, "Cannot set channels when Flow Director filters are active\n");
		return -EOPNOTSUPP;
	}

	if (ch->rx_count && ch->tx_count) {
		netdev_err(dev, "Dedicated RX and TX channels cannot be used simultaneously\n");
		return -EINVAL;
	}

	new_rx = ch->combined_count + ch->rx_count;
	new_tx = ch->combined_count + ch->tx_count;

	if (new_rx < vsi->tc_cfg.numtc) {
		netdev_err(dev, "Cannot set fewer Rx channels than the number of Traffic Classes (%u)\n",
			   vsi->tc_cfg.numtc);
		return -EINVAL;
	}
	if (new_tx < vsi->tc_cfg.numtc) {
		netdev_err(dev, "Cannot set fewer Tx channels than the number of Traffic Classes (%u)\n",
			   vsi->tc_cfg.numtc);
		return -EINVAL;
	}
	if (new_rx > ice_get_max_rxq(pf)) {
		netdev_err(dev, "Maximum allowed Rx channels is %d\n",
			   ice_get_max_rxq(pf));
		return -EINVAL;
	}
	if (new_tx > ice_get_max_txq(pf)) {
		netdev_err(dev, "Maximum allowed Tx channels is %d\n",
			   ice_get_max_txq(pf));
		return -EINVAL;
	}

	if (pf->cdev_info && pf->cdev_info->adev) {
		mutex_lock(&pf->adev_mutex);
		device_lock(&pf->cdev_info->adev->dev);
		locked = true;
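		/* Queue reconfiguration would pull interrupt vectors out
		 * from under a bound auxiliary driver (e.g. RDMA), so
		 * refuse the change while the auxiliary device is claimed.
		 */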
		if (pf->cdev_info->adev->dev.driver) {
			netdev_err(dev, "Cannot change channels when RDMA is active\n");
			ret = -EBUSY;
			goto adev_unlock;
		}
	}

	ice_vsi_recfg_qs(vsi, new_rx, new_tx, locked);

	if (!netif_is_rxfh_configured(dev)) {
		ret = ice_vsi_set_dflt_rss_lut(vsi, new_rx);
		goto adev_unlock;
	}

	/* Update rss_size due to change in Rx queues */
	vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx);

adev_unlock:
	if (locked) {
		device_unlock(&pf->cdev_info->adev->dev);
		mutex_unlock(&pf->adev_mutex);
	}
	return ret;
}

/**
 * ice_get_wol - get current Wake on LAN configuration
 * @netdev: network interface device structure
 * @wol: Ethtool structure to retrieve WoL settings
 */
static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	if (np->vsi->type != ICE_VSI_PF)
		netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n");

	/* Get WoL settings based on the HW capability */
	if (ice_is_wol_supported(&pf->hw)) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0;
	} else {
		wol->supported = 0;
		wol->wolopts = 0;
	}
}

/**
 * ice_set_wol - set Wake on LAN on supported device
 * @netdev: network interface device structure
 * @wol: Ethtool structure to set WoL
 */
static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(&pf->hw))
		return -EOPNOTSUPP;

	/* only magic packet is supported */
	if (wol->wolopts && wol->wolopts != WAKE_MAGIC)
		return -EOPNOTSUPP;

	/* Set WoL only if there is a new value */
	if (pf->wol_ena != !!wol->wolopts) {
		pf->wol_ena = !!wol->wolopts;
		device_set_wakeup_enable(ice_pf_to_dev(pf), pf->wol_ena);
		netdev_dbg(netdev, "WoL magic packet %sabled\n",
			   pf->wol_ena ? "en" : "dis");
	}

	return 0;
}

/**
 * ice_get_rc_coalesce - get ITR values for specific ring container
 * @ec: ethtool structure to fill with driver's coalesce settings
 * @rc: ring container that the ITR values will come from
 *
 * Query the device for ice_ring_container specific ITR values. This is done
 * per ice_ring_container because each q_vector can have one or more rings,
 * and all rings of a container share the same ITR values.
 *
 * Returns 0 on success, negative otherwise.
 */
static int
ice_get_rc_coalesce(struct ethtool_coalesce *ec, struct ice_ring_container *rc)
{
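	/* rx_ring and tx_ring share storage in the ring container, so a
	 * single rx_ring check is sufficient for both container types.
	 */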
	if (!rc->rx_ring)
		return -EINVAL;

	switch (rc->type) {
	case ICE_RX_CONTAINER:
		ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc);
		ec->rx_coalesce_usecs = rc->itr_setting;
		ec->rx_coalesce_usecs_high = rc->rx_ring->q_vector->intrl;
		break;
	case ICE_TX_CONTAINER:
		ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc);
		ec->tx_coalesce_usecs = rc->itr_setting;
		break;
	default:
		dev_dbg(ice_pf_to_dev(rc->rx_ring->vsi->back), "Invalid c_type %d\n", rc->type);
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_get_q_coalesce - get a queue's ITR/INTRL (coalesce) settings
 * @vsi: VSI associated to the queue for getting ITR/INTRL (coalesce) settings
 * @ec: ethtool structure to fill with the queue's coalesce settings
 * @q_num: get ITR/INTRL (coalesce) settings for this queue number/index
 *
 * Return 0 on success, and negative under the following conditions:
 * 1. Getting Tx or Rx ITR/INTRL (coalesce) settings failed.
 * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
 */
static int
ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
	if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
		if (ice_get_rc_coalesce(ec,
					&vsi->rx_rings[q_num]->q_vector->rx))
			return -EINVAL;
		if (ice_get_rc_coalesce(ec,
					&vsi->tx_rings[q_num]->q_vector->tx))
			return -EINVAL;
	} else if (q_num < vsi->num_rxq) {
		if (ice_get_rc_coalesce(ec,
					&vsi->rx_rings[q_num]->q_vector->rx))
			return -EINVAL;
	} else if (q_num < vsi->num_txq) {
		if (ice_get_rc_coalesce(ec,
					&vsi->tx_rings[q_num]->q_vector->tx))
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	return 0;
}

/**
 * __ice_get_coalesce - get ITR/INTRL values for the device
 * @netdev: pointer to the netdev associated with this query
 * @ec: ethtool structure to fill with driver's coalesce settings
 * @q_num: queue number to get the coalesce settings for
 *
 * If the caller passes in a negative q_num then we return coalesce settings
 * based on queue number 0, else use the actual q_num passed in.
 */
static int
__ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
		   int q_num)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (q_num < 0)
		q_num = 0;

	if (ice_get_q_coalesce(vsi, ec, q_num))
		return -EINVAL;

	return 0;
}

static int ice_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	return __ice_get_coalesce(netdev, ec, -1);
}

static int
ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
		       struct ethtool_coalesce *ec)
{
	return __ice_get_coalesce(netdev, ec, q_num);
}
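
/* The per-queue handlers above are reached through ethtool's per-queue
 * interface, e.g. "ethtool --per-queue <dev> queue_mask 0x1 --show-coalesce"
 * (device name and mask are illustrative).
 */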
"rx" : "tx"; 4139 u32 use_adaptive_coalesce, coalesce_usecs; 4140 struct ice_pf *pf = vsi->back; 4141 u16 itr_setting; 4142 4143 if (!rc->rx_ring) 4144 return -EINVAL; 4145 4146 switch (rc->type) { 4147 case ICE_RX_CONTAINER: 4148 { 4149 struct ice_q_vector *q_vector = rc->rx_ring->q_vector; 4150 4151 if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL || 4152 (ec->rx_coalesce_usecs_high && 4153 ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) { 4154 netdev_info(vsi->netdev, "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n", 4155 c_type_str, pf->hw.intrl_gran, 4156 ICE_MAX_INTRL); 4157 return -EINVAL; 4158 } 4159 if (ec->rx_coalesce_usecs_high != q_vector->intrl && 4160 (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce)) { 4161 netdev_info(vsi->netdev, "Invalid value, %s-usecs-high cannot be changed if adaptive-tx or adaptive-rx is enabled\n", 4162 c_type_str); 4163 return -EINVAL; 4164 } 4165 if (ec->rx_coalesce_usecs_high != q_vector->intrl) 4166 q_vector->intrl = ec->rx_coalesce_usecs_high; 4167 4168 use_adaptive_coalesce = ec->use_adaptive_rx_coalesce; 4169 coalesce_usecs = ec->rx_coalesce_usecs; 4170 4171 break; 4172 } 4173 case ICE_TX_CONTAINER: 4174 use_adaptive_coalesce = ec->use_adaptive_tx_coalesce; 4175 coalesce_usecs = ec->tx_coalesce_usecs; 4176 4177 break; 4178 default: 4179 dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n", 4180 rc->type); 4181 return -EINVAL; 4182 } 4183 4184 itr_setting = rc->itr_setting; 4185 if (coalesce_usecs != itr_setting && use_adaptive_coalesce) { 4186 netdev_info(vsi->netdev, "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n", 4187 c_type_str, c_type_str); 4188 return -EINVAL; 4189 } 4190 4191 if (coalesce_usecs > ICE_ITR_MAX) { 4192 netdev_info(vsi->netdev, "Invalid value, %s-usecs range is 0-%d\n", 4193 c_type_str, ICE_ITR_MAX); 4194 return -EINVAL; 4195 } 4196 4197 if (use_adaptive_coalesce) { 4198 rc->itr_mode = ITR_DYNAMIC; 4199 } else { 4200 rc->itr_mode = ITR_STATIC; 4201 /* store user facing value how it was set */ 4202 rc->itr_setting = coalesce_usecs; 4203 /* write the change to the register */ 4204 ice_write_itr(rc, coalesce_usecs); 4205 /* force writes to take effect immediately, the flush shouldn't 4206 * be done in the functions above because the intent is for 4207 * them to do lazy writes. 4208 */ 4209 ice_flush(&pf->hw); 4210 } 4211 4212 return 0; 4213 } 4214 4215 /** 4216 * ice_set_q_coalesce - set a queue's ITR/INTRL (coalesce) settings 4217 * @vsi: VSI associated to the queue that need updating 4218 * @ec: coalesce settings to program the device with 4219 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index 4220 * 4221 * Return 0 on success, and negative under the following conditions: 4222 * 1. Setting Tx or Rx ITR/INTRL (coalesce) settings failed. 4223 * 2. The q_num passed in is not a valid number/index for Tx and Rx rings. 

		if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
		    (ec->rx_coalesce_usecs_high &&
		     ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
			netdev_info(vsi->netdev, "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n",
				    c_type_str, pf->hw.intrl_gran,
				    ICE_MAX_INTRL);
			return -EINVAL;
		}
		if (ec->rx_coalesce_usecs_high != q_vector->intrl &&
		    (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce)) {
			netdev_info(vsi->netdev, "Invalid value, %s-usecs-high cannot be changed if adaptive-tx or adaptive-rx is enabled\n",
				    c_type_str);
			return -EINVAL;
		}
		if (ec->rx_coalesce_usecs_high != q_vector->intrl)
			q_vector->intrl = ec->rx_coalesce_usecs_high;

		use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
		coalesce_usecs = ec->rx_coalesce_usecs;

		break;
	}
	case ICE_TX_CONTAINER:
		use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
		coalesce_usecs = ec->tx_coalesce_usecs;

		break;
	default:
		dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n",
			rc->type);
		return -EINVAL;
	}

	itr_setting = rc->itr_setting;
	if (coalesce_usecs != itr_setting && use_adaptive_coalesce) {
		netdev_info(vsi->netdev, "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n",
			    c_type_str, c_type_str);
		return -EINVAL;
	}

	if (coalesce_usecs > ICE_ITR_MAX) {
		netdev_info(vsi->netdev, "Invalid value, %s-usecs range is 0-%d\n",
			    c_type_str, ICE_ITR_MAX);
		return -EINVAL;
	}

	if (use_adaptive_coalesce) {
		rc->itr_mode = ITR_DYNAMIC;
	} else {
		rc->itr_mode = ITR_STATIC;
		/* store the user-facing value the way it was set */
		rc->itr_setting = coalesce_usecs;
		/* write the change to the register */
		ice_write_itr(rc, coalesce_usecs);
		/* force writes to take effect immediately; the flush shouldn't
		 * be done in the functions above because the intent is for
		 * them to do lazy writes.
		 */
		ice_flush(&pf->hw);
	}

	return 0;
}

/**
 * ice_set_q_coalesce - set a queue's ITR/INTRL (coalesce) settings
 * @vsi: VSI associated to the queue that needs updating
 * @ec: coalesce settings to program the device with
 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
 *
 * Return 0 on success, and negative under the following conditions:
 * 1. Setting Tx or Rx ITR/INTRL (coalesce) settings failed.
 * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
 */
static int
ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
	if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
		if (ice_set_rc_coalesce(ec,
					&vsi->rx_rings[q_num]->q_vector->rx,
					vsi))
			return -EINVAL;

		if (ice_set_rc_coalesce(ec,
					&vsi->tx_rings[q_num]->q_vector->tx,
					vsi))
			return -EINVAL;
	} else if (q_num < vsi->num_rxq) {
		if (ice_set_rc_coalesce(ec,
					&vsi->rx_rings[q_num]->q_vector->rx,
					vsi))
			return -EINVAL;
	} else if (q_num < vsi->num_txq) {
		if (ice_set_rc_coalesce(ec,
					&vsi->tx_rings[q_num]->q_vector->tx,
					vsi))
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	return 0;
}

/**
 * ice_print_if_odd_usecs - print message if user tries to set odd [tx|rx]-usecs
 * @netdev: netdev used for print
 * @itr_setting: previous user setting
 * @use_adaptive_coalesce: if adaptive coalesce is enabled or being enabled
 * @coalesce_usecs: requested value of [tx|rx]-usecs
 * @c_type_str: either "rx" or "tx" to match user set field of [tx|rx]-usecs
 */
static void
ice_print_if_odd_usecs(struct net_device *netdev, u16 itr_setting,
		       u32 use_adaptive_coalesce, u32 coalesce_usecs,
		       const char *c_type_str)
{
	if (use_adaptive_coalesce)
		return;

	if (itr_setting != coalesce_usecs && (coalesce_usecs % 2))
		netdev_info(netdev, "User set %s-usecs to %d, device only supports even values. Rounding down and attempting to set %s-usecs to %d\n",
			    c_type_str, coalesce_usecs, c_type_str,
			    ITR_REG_ALIGN(coalesce_usecs));
}

/**
 * __ice_set_coalesce - set ITR/INTRL values for the device
 * @netdev: pointer to the netdev associated with this request
 * @ec: ethtool structure containing the user's coalesce settings
 * @q_num: queue number to set the coalesce settings for
 *
 * If the caller passes in a negative q_num then we set the coalesce settings
 * for all Tx/Rx queues, else use the actual q_num passed in.
 */
static int
__ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
		   int q_num)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (q_num < 0) {
		struct ice_q_vector *q_vector = vsi->q_vectors[0];
		int v_idx;

		if (q_vector) {
			ice_print_if_odd_usecs(netdev, q_vector->rx.itr_setting,
					       ec->use_adaptive_rx_coalesce,
					       ec->rx_coalesce_usecs, "rx");

			ice_print_if_odd_usecs(netdev, q_vector->tx.itr_setting,
					       ec->use_adaptive_tx_coalesce,
					       ec->tx_coalesce_usecs, "tx");
		}

		ice_for_each_q_vector(vsi, v_idx) {
			/* In some cases, if DCB is configured, num_[rx|tx]q
			 * can be less than vsi->num_q_vectors.
			 * This check accounts for that, so we don't report
			 * a false failure.
			 */
			if (v_idx >= vsi->num_rxq && v_idx >= vsi->num_txq)
				goto set_complete;

			if (ice_set_q_coalesce(vsi, ec, v_idx))
				return -EINVAL;

			ice_set_q_vector_intrl(vsi->q_vectors[v_idx]);
		}
		goto set_complete;
	}

	if (ice_set_q_coalesce(vsi, ec, q_num))
		return -EINVAL;

	ice_set_q_vector_intrl(vsi->q_vectors[q_num]);

set_complete:
	return 0;
}

static int ice_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	return __ice_set_coalesce(netdev, ec, -1);
}

static int
ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
		       struct ethtool_coalesce *ec)
{
	return __ice_set_coalesce(netdev, ec, q_num);
}

static void
ice_repr_get_drvinfo(struct net_device *netdev,
		     struct ethtool_drvinfo *drvinfo)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);

	if (repr->ops.ready(repr))
		return;

	__ice_get_drvinfo(netdev, drvinfo, repr->src_vsi);
}

static void
ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);

	/* for port representors only ETH_SS_STATS is supported */
	if (repr->ops.ready(repr) || stringset != ETH_SS_STATS)
		return;

	__ice_get_strings(netdev, stringset, data, repr->src_vsi);
}

static void
ice_repr_get_ethtool_stats(struct net_device *netdev,
			   struct ethtool_stats __always_unused *stats,
			   u64 *data)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);

	if (repr->ops.ready(repr))
		return;

	__ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi);
}

static int ice_repr_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ICE_VSI_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

#define ICE_I2C_EEPROM_DEV_ADDR		0xA0
#define ICE_I2C_EEPROM_DEV_ADDR2	0xA2
#define ICE_MODULE_TYPE_SFP		0x03
#define ICE_MODULE_TYPE_QSFP_PLUS	0x0D
#define ICE_MODULE_TYPE_QSFP28		0x11
#define ICE_MODULE_SFF_ADDR_MODE	0x04
#define ICE_MODULE_SFF_DIAG_CAPAB	0x40
#define ICE_MODULE_REVISION_ADDR	0x01
#define ICE_MODULE_SFF_8472_COMP	0x5E
#define ICE_MODULE_SFF_8472_SWAP	0x5C
#define ICE_MODULE_QSFP_MAX_LEN		640

/**
 * ice_get_module_info - get SFF module type and revision information
 * @netdev: network interface device structure
 * @modinfo: module EEPROM size and layout information structure
 */
static int
ice_get_module_info(struct net_device *netdev,
		    struct ethtool_modinfo *modinfo)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_hw *hw = &pf->hw;
	u8 sff8472_comp = 0;
	u8 sff8472_swap = 0;
	u8 sff8636_rev = 0;
	u8 value = 0;
	int status;

	status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00,
				   0, &value, 1, 0, NULL);
	if (status)
		return status;

	switch (value) {
	case ICE_MODULE_TYPE_SFP:
		status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
					   ICE_MODULE_SFF_8472_COMP, 0x00, 0,
					   &sff8472_comp, 1, 0, NULL);
		if (status)
			return status;
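
		/* The compliance and address-swap bytes indicate whether the
		 * module exposes the full SFF-8472 diagnostic map or only
		 * the SFF-8079 lower page.
		 */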
		status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
					   ICE_MODULE_SFF_8472_SWAP, 0x00, 0,
					   &sff8472_swap, 1, 0, NULL);
		if (status)
			return status;

		if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) {
			modinfo->type = ETH_MODULE_SFF_8079;
			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
		} else if (sff8472_comp &&
			   (sff8472_swap & ICE_MODULE_SFF_DIAG_CAPAB)) {
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8079;
			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
		}
		break;
	case ICE_MODULE_TYPE_QSFP_PLUS:
	case ICE_MODULE_TYPE_QSFP28:
		status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
					   ICE_MODULE_REVISION_ADDR, 0x00, 0,
					   &sff8636_rev, 1, 0, NULL);
		if (status)
			return status;
		/* Check revision compliance */
		if (sff8636_rev > 0x02) {
			/* Module is SFF-8636 compliant */
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
		}
		break;
	default:
		netdev_warn(netdev, "SFF Module Type not recognized.\n");
		return -EINVAL;
	}
	return 0;
}

/**
 * ice_get_module_eeprom - fill buffer with SFF EEPROM contents
 * @netdev: network interface device structure
 * @ee: EEPROM dump request structure
 * @data: buffer to be filled with EEPROM contents
 */
static int
ice_get_module_eeprom(struct net_device *netdev,
		      struct ethtool_eeprom *ee, u8 *data)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
#define SFF_READ_BLOCK_SIZE 8
	u8 value[SFF_READ_BLOCK_SIZE] = { 0 };
	u8 addr = ICE_I2C_EEPROM_DEV_ADDR;
	struct ice_hw *hw = &pf->hw;
	bool is_sfp = false;
	unsigned int i, j;
	u16 offset = 0;
	u8 page = 0;
	int status;

	if (!ee || !ee->len || !data)
		return -EINVAL;

	status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0,
				   NULL);
	if (status)
		return status;

	if (value[0] == ICE_MODULE_TYPE_SFP)
		is_sfp = true;

	memset(data, 0, ee->len);
	for (i = 0; i < ee->len; i += SFF_READ_BLOCK_SIZE) {
		offset = i + ee->offset;
		page = 0;

		/* Check if we need to access the other memory page */
		if (is_sfp) {
			if (offset >= ETH_MODULE_SFF_8079_LEN) {
				offset -= ETH_MODULE_SFF_8079_LEN;
				addr = ICE_I2C_EEPROM_DEV_ADDR2;
			}
		} else {
			while (offset >= ETH_MODULE_SFF_8436_LEN) {
				/* Compute memory page number and offset. */
				offset -= ETH_MODULE_SFF_8436_LEN / 2;
				page++;
			}
		}

		/* Bit 2 of EEPROM address 0x02 declares that upper pages
		 * are disabled on QSFP modules; SFP modules only ever use
		 * page 0.
		 */
		if (page == 0 || !(data[0x2] & 0x4)) {
			u32 copy_len;

			/* If the I2C bus is busy due to a slow page change
			 * or link management access, the call can fail
			 * transiently. This is normal, so retry a few times.
			 */
			for (j = 0; j < 4; j++) {
				status = ice_aq_sff_eeprom(hw, 0, addr, offset, page,
							   !is_sfp, value,
							   SFF_READ_BLOCK_SIZE,
							   0, NULL);
				netdev_dbg(netdev, "SFF %02X %02X %02X %X = %02X%02X%02X%02X.%02X%02X%02X%02X (%X)\n",
					   addr, offset, page, is_sfp,
					   value[0], value[1], value[2], value[3],
					   value[4], value[5], value[6], value[7],
					   status);
				if (status) {
					usleep_range(1500, 2500);
					memset(value, 0, SFF_READ_BLOCK_SIZE);
					continue;
				}
				break;
			}

			/* Make sure we have enough room for the new block */
			copy_len = min_t(u32, SFF_READ_BLOCK_SIZE, ee->len - i);
			memcpy(data + i, value, copy_len);
		}
	}
	return 0;
}

/**
 * ice_get_port_fec_stats - returns FEC correctable and uncorrectable stats
 * per pcsquad, pcsport
 * @hw: pointer to the HW struct
 * @pcs_quad: pcsquad for input port
 * @pcs_port: pcsport for input port
 * @fec_stats: buffer to hold FEC statistics for given port
 *
 * Return: 0 on success, negative on failure.
 */
static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
				  struct ethtool_fec_stats *fec_stats)
{
	u32 fec_uncorr_low_val = 0, fec_uncorr_high_val = 0;
	u32 fec_corr_low_val = 0, fec_corr_high_val = 0;
	int err;

	if (pcs_quad > 1 || pcs_port > 3)
		return -EINVAL;

	err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_LOW,
				   &fec_corr_low_val);
	if (err)
		return err;

	err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_HIGH,
				   &fec_corr_high_val);
	if (err)
		return err;

	err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
				   ICE_FEC_UNCORR_LOW,
				   &fec_uncorr_low_val);
	if (err)
		return err;

	err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
				   ICE_FEC_UNCORR_HIGH,
				   &fec_uncorr_high_val);
	if (err)
		return err;
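
	/* Each counter is read back as separate high and low 16-bit halves;
	 * fold them into a single 32-bit total.
	 */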
	fec_stats->corrected_blocks.total = (fec_corr_high_val << 16) +
					    fec_corr_low_val;
	fec_stats->uncorrectable_blocks.total = (fec_uncorr_high_val << 16) +
						fec_uncorr_low_val;
	return 0;
}

/**
 * ice_get_fec_stats - returns FEC correctable and uncorrectable stats per netdev
 * @netdev: network interface device structure
 * @fec_stats: buffer to hold FEC statistics for given port
 * @hist: buffer to put FEC histogram statistics for given port
 */
static void ice_get_fec_stats(struct net_device *netdev,
			      struct ethtool_fec_stats *fec_stats,
			      struct ethtool_fec_hist *hist)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_port_topology port_topology;
	struct ice_port_info *pi;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int err;

	pf = np->vsi->back;
	hw = &pf->hw;
	pi = np->vsi->port_info;

	/* Serdes parameters are not supported if not the PF VSI */
	if (np->vsi->type != ICE_VSI_PF || !pi)
		return;

	err = ice_get_port_topology(hw, pi->lport, &port_topology);
	if (err) {
		netdev_info(netdev, "Extended register dump failed Lport %d\n",
			    pi->lport);
		return;
	}

	/* Get FEC correctable, uncorrectable counter */
	err = ice_get_port_fec_stats(hw, port_topology.pcs_quad_select,
				     port_topology.pcs_port, fec_stats);
	if (err)
		netdev_info(netdev, "FEC stats get failed Lport %d Err %d\n",
			    pi->lport, err);
}

static void
ice_get_eth_mac_stats(struct net_device *netdev,
		      struct ethtool_eth_mac_stats *mac_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_hw_port_stats *ps = &pf->stats;

	mac_stats->FramesTransmittedOK = ps->eth.tx_unicast +
					 ps->eth.tx_multicast +
					 ps->eth.tx_broadcast;
	mac_stats->FramesReceivedOK = ps->eth.rx_unicast +
				      ps->eth.rx_multicast +
				      ps->eth.rx_broadcast;
	mac_stats->FrameCheckSequenceErrors = ps->crc_errors;
	mac_stats->OctetsTransmittedOK = ps->eth.tx_bytes;
	mac_stats->OctetsReceivedOK = ps->eth.rx_bytes;
	mac_stats->MulticastFramesXmittedOK = ps->eth.tx_multicast;
	mac_stats->BroadcastFramesXmittedOK = ps->eth.tx_broadcast;
	mac_stats->MulticastFramesReceivedOK = ps->eth.rx_multicast;
	mac_stats->BroadcastFramesReceivedOK = ps->eth.rx_broadcast;
	mac_stats->InRangeLengthErrors = ps->rx_len_errors;
	mac_stats->FrameTooLongErrors = ps->rx_oversize;
}

static void ice_get_pause_stats(struct net_device *netdev,
				struct ethtool_pause_stats *pause_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_hw_port_stats *ps = &pf->stats;

	pause_stats->tx_pause_frames = ps->link_xon_tx + ps->link_xoff_tx;
	pause_stats->rx_pause_frames = ps->link_xon_rx + ps->link_xoff_rx;
}

static const struct ethtool_rmon_hist_range ice_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1522 },
	{ 1523, 9522 },
	{}
};

static void ice_get_rmon_stats(struct net_device *netdev,
			       struct ethtool_rmon_stats *rmon,
			       const struct ethtool_rmon_hist_range **ranges)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_hw_port_stats *ps = &pf->stats;

	rmon->undersize_pkts = ps->rx_undersize;
	rmon->oversize_pkts = ps->rx_oversize;
	rmon->fragments = ps->rx_fragments;
	rmon->jabbers = ps->rx_jabber;

	rmon->hist[0] = ps->rx_size_64;
	rmon->hist[1] = ps->rx_size_127;
	rmon->hist[2] = ps->rx_size_255;
	rmon->hist[3] = ps->rx_size_511;
	rmon->hist[4] = ps->rx_size_1023;
	rmon->hist[5] = ps->rx_size_1522;
	rmon->hist[6] = ps->rx_size_big;

	rmon->hist_tx[0] = ps->tx_size_64;
	rmon->hist_tx[1] = ps->tx_size_127;
	rmon->hist_tx[2] = ps->tx_size_255;
	rmon->hist_tx[3] = ps->tx_size_511;
	rmon->hist_tx[4] = ps->tx_size_1023;
	rmon->hist_tx[5] = ps->tx_size_1522;
	rmon->hist_tx[6] = ps->tx_size_big;

	*ranges = ice_rmon_ranges;
}

/**
 * ice_get_ts_stats - provide timestamping stats
 * @netdev: the netdevice pointer from ethtool
 * @ts_stats: the ethtool data structure to fill in
 */
static void ice_get_ts_stats(struct net_device *netdev,
			     struct ethtool_ts_stats *ts_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_ptp *ptp = &pf->ptp;

	ts_stats->pkts = ptp->tx_hwtstamp_good;
	ts_stats->err = ptp->tx_hwtstamp_skipped +
			ptp->tx_hwtstamp_flushed +
			ptp->tx_hwtstamp_discarded;
	ts_stats->lost = ptp->tx_hwtstamp_timeouts;
}

#define ICE_ETHTOOL_PFR (ETH_RESET_IRQ | ETH_RESET_DMA | \
			 ETH_RESET_FILTER | ETH_RESET_OFFLOAD)

#define ICE_ETHTOOL_CORER ((ICE_ETHTOOL_PFR | ETH_RESET_RAM) << \
			   ETH_RESET_SHARED_SHIFT)

#define ICE_ETHTOOL_GLOBR (ICE_ETHTOOL_CORER | \
			   (ETH_RESET_MAC << ETH_RESET_SHARED_SHIFT) | \
			   (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))

#define ICE_ETHTOOL_VFR ICE_ETHTOOL_PFR
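
/* Flag-to-reset mapping: a PF reset stays within this function's own
 * resources, while the core and global variants live in the "shared"
 * flag space because they also touch resources shared with other PFs.
 */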

/**
 * ice_ethtool_reset - triggers a given type of reset
 * @dev: network interface device structure
 * @flags: set of reset flags
 *
 * Return: 0 on success, -EOPNOTSUPP when using an unsupported set of flags.
 */
static int ice_ethtool_reset(struct net_device *dev, u32 *flags)
{
	struct ice_pf *pf = ice_netdev_to_pf(dev);
	enum ice_reset_req reset;

	switch (*flags) {
	case ICE_ETHTOOL_CORER:
		reset = ICE_RESET_CORER;
		break;
	case ICE_ETHTOOL_GLOBR:
		reset = ICE_RESET_GLOBR;
		break;
	case ICE_ETHTOOL_PFR:
		reset = ICE_RESET_PFR;
		break;
	default:
		netdev_info(dev, "Unsupported set of ethtool flags\n");
		return -EOPNOTSUPP;
	}

	ice_schedule_reset(pf, reset);

	*flags = 0;

	return 0;
}

/**
 * ice_repr_ethtool_reset - triggers a VF reset
 * @dev: network interface device structure
 * @flags: set of reset flags
 *
 * Return: 0 on success,
 * -EOPNOTSUPP when using an unsupported set of flags,
 * -EBUSY when the VF is not ready for reset.
 */
static int ice_repr_ethtool_reset(struct net_device *dev, u32 *flags)
{
	struct ice_repr *repr = ice_netdev_to_repr(dev);
	struct ice_vf *vf;

	if (repr->type != ICE_REPR_TYPE_VF ||
	    *flags != ICE_ETHTOOL_VFR)
		return -EOPNOTSUPP;

	vf = repr->vf;

	if (ice_check_vf_ready_for_cfg(vf))
		return -EBUSY;

	*flags = 0;

	return ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
}

static const struct ethtool_ops ice_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE |
				     ETHTOOL_COALESCE_RX_USECS_HIGH,
	.supported_input_xfrm = RXH_XFRM_SYM_XOR,
	.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
	.get_link_ksettings = ice_get_link_ksettings,
	.set_link_ksettings = ice_set_link_ksettings,
	.get_fec_stats = ice_get_fec_stats,
	.get_eth_mac_stats = ice_get_eth_mac_stats,
	.get_pause_stats = ice_get_pause_stats,
	.get_rmon_stats = ice_get_rmon_stats,
	.get_ts_stats = ice_get_ts_stats,
	.get_drvinfo = ice_get_drvinfo,
	.get_regs_len = ice_get_regs_len,
	.get_regs = ice_get_regs,
	.get_wol = ice_get_wol,
	.set_wol = ice_set_wol,
	.get_msglevel = ice_get_msglevel,
	.set_msglevel = ice_set_msglevel,
	.self_test = ice_self_test,
	.get_link = ethtool_op_get_link,
	.get_link_ext_stats = ice_get_link_ext_stats,
	.get_eeprom_len = ice_get_eeprom_len,
	.get_eeprom = ice_get_eeprom,
	.get_coalesce = ice_get_coalesce,
	.set_coalesce = ice_set_coalesce,
	.get_strings = ice_get_strings,
	.set_phys_id = ice_set_phys_id,
	.get_ethtool_stats = ice_get_ethtool_stats,
	.get_priv_flags = ice_get_priv_flags,
	.set_priv_flags = ice_set_priv_flags,
	.get_sset_count = ice_get_sset_count,
	.get_rxnfc = ice_get_rxnfc,
	.set_rxnfc = ice_set_rxnfc,
	.get_ringparam = ice_get_ringparam,
	.set_ringparam = ice_set_ringparam,
	.nway_reset = ice_nway_reset,
	.get_pauseparam = ice_get_pauseparam,
	.set_pauseparam = ice_set_pauseparam,
	.reset = ice_ethtool_reset,
	.get_rxfh_key_size = ice_get_rxfh_key_size,
	.get_rxfh_indir_size = ice_get_rxfh_indir_size,
	.get_rxfh = ice_get_rxfh,
	.set_rxfh = ice_set_rxfh,
	.get_rxfh_fields = ice_get_rxfh_fields,
	.set_rxfh_fields = ice_set_rxfh_fields,
	.get_channels = ice_get_channels,
	.set_channels = ice_set_channels,
	.get_ts_info = ice_get_ts_info,
	.get_per_queue_coalesce = ice_get_per_q_coalesce,
	.set_per_queue_coalesce = ice_set_per_q_coalesce,
	.get_fecparam = ice_get_fecparam,
	.set_fecparam = ice_set_fecparam,
	.get_module_info = ice_get_module_info,
	.get_module_eeprom = ice_get_module_eeprom,
};

static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
	.get_link_ksettings = ice_get_link_ksettings,
	.set_link_ksettings = ice_set_link_ksettings,
	.get_drvinfo = ice_get_drvinfo,
	.get_regs_len = ice_get_regs_len,
	.get_regs = ice_get_regs,
	.get_wol = ice_get_wol,
	.set_wol = ice_set_wol,
	.get_msglevel = ice_get_msglevel,
	.set_msglevel = ice_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = ice_get_eeprom_len,
	.get_eeprom = ice_get_eeprom,
	.get_strings = ice_get_strings,
	.get_ethtool_stats = ice_get_ethtool_stats,
	.get_sset_count = ice_get_sset_count,
	.get_ringparam = ice_get_ringparam,
	.set_ringparam = ice_set_ringparam,
	.nway_reset = ice_nway_reset,
	.get_channels = ice_get_channels,
};

/**
 * ice_set_ethtool_safe_mode_ops - setup safe mode ethtool ops
 * @netdev: network interface device structure
 */
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ice_ethtool_safe_mode_ops;
}

static const struct ethtool_ops ice_ethtool_repr_ops = {
	.get_drvinfo = ice_repr_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = ice_repr_get_strings,
	.get_ethtool_stats = ice_repr_get_ethtool_stats,
	.get_sset_count = ice_repr_get_sset_count,
	.reset = ice_repr_ethtool_reset,
};

/**
 * ice_set_ethtool_repr_ops - setup VF's port representor ethtool ops
 * @netdev: network interface device structure
 */
void ice_set_ethtool_repr_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ice_ethtool_repr_ops;
}

/**
 * ice_set_ethtool_ops - setup netdev ethtool ops
 * @netdev: network interface device structure
 *
 * setup netdev ethtool ops with ice specific ops
 */
void ice_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ice_ethtool_ops;
}