/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/linkmode.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
#include <linux/utsname.h>
#include <linux/time.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
#include <net/netdev_queues.h>
#include <net/netlink.h>
#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"

#define BNXT_NVM_ERR_MSG(dev, extack, msg)			\
	do {							\
		if (extack)					\
			NL_SET_ERR_MSG_MOD(extack, msg);	\
		netdev_err(dev, "%s\n", msg);			\
	} while (0)

static u32 bnxt_get_msglevel(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnxt_set_msglevel(struct net_device *dev, u32 value)
{
	struct bnxt *bp = netdev_priv(dev);

	bp->msg_enable = value;
}

static int bnxt_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_coal *hw_coal;
	u16 mult;

	memset(coal, 0, sizeof(*coal));

	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
	coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
	if (hw_coal->flags &
	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
		kernel_coal->use_cqe_mode_rx = true;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	coal->tx_coalesce_usecs = hw_coal->coal_ticks;
	coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
	if (hw_coal->flags &
	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
		kernel_coal->use_cqe_mode_tx = true;

	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;

	return 0;
}

static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	bool update_stats = false;
	struct bnxt_coal *hw_coal;
	int rc = 0;
	u16 mult;

	if (coal->use_adaptive_rx_coalesce) {
		bp->flags |= BNXT_FLAG_DIM;
	} else {
		if (bp->flags & BNXT_FLAG_DIM) {
			bp->flags &= ~(BNXT_FLAG_DIM);
			goto reset_coalesce;
		}
	}
	if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
	    !(bp->coal_cap.cmpl_params &
	      RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
		return -EOPNOTSUPP;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
	hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_rx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->tx_coalesce_usecs;
	hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_tx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
		u32 stats_ticks = coal->stats_block_coalesce_usecs;

		/* Allow 0, which means disable. */
		if (stats_ticks)
			stats_ticks = clamp_t(u32, stats_ticks,
					      BNXT_MIN_STATS_COAL_TICKS,
					      BNXT_MAX_STATS_COAL_TICKS);
		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
		bp->stats_coal_ticks = stats_ticks;
		if (bp->stats_coal_ticks)
			bp->current_interval =
				bp->stats_coal_ticks * HZ / 1000000;
		else
			bp->current_interval = BNXT_TIMER_INTERVAL;
		update_stats = true;
	}

reset_coalesce:
	if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
		if (update_stats) {
			bnxt_close_nic(bp, true, false);
			rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_hwrm_set_coal(bp);
		}
	}

	return rc;
}

static const char * const bnxt_ring_rx_stats_str[] = {
	"rx_ucast_packets",
	"rx_mcast_packets",
	"rx_bcast_packets",
	"rx_discards",
	"rx_errors",
	"rx_ucast_bytes",
	"rx_mcast_bytes",
	"rx_bcast_bytes",
};

static const char * const bnxt_ring_tx_stats_str[] = {
	"tx_ucast_packets",
	"tx_mcast_packets",
	"tx_bcast_packets",
	"tx_errors",
	"tx_discards",
	"tx_ucast_bytes",
	"tx_mcast_bytes",
	"tx_bcast_bytes",
};

static const char * const bnxt_ring_tpa_stats_str[] = {
	"tpa_packets",
	"tpa_bytes",
	"tpa_events",
	"tpa_aborts",
};

static const char * const bnxt_ring_tpa2_stats_str[] = {
	"rx_tpa_eligible_pkt",
	"rx_tpa_eligible_bytes",
	"rx_tpa_pkt",
	"rx_tpa_bytes",
	"rx_tpa_errors",
	"rx_tpa_events",
};

static const char * const bnxt_rx_sw_stats_str[] = {
	"rx_l4_csum_errors",
	"rx_resets",
	"rx_buf_errors",
};

static const char * const bnxt_cmn_sw_stats_str[] = {
	"missed_irqs",
};

#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
#define BNXT_TX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),	\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)

#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),	\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)

#define BNXT_RX_STATS_EXT_PFC_ENTRIES	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(0),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(1),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(2),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(3),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(4),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(5),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(6),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_TX_STATS_EXT_PFC_ENTRIES	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(0),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(1),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(2),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(3),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(4),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(5),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(6),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_RX_STATS_EXT_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)

#define BNXT_TX_STATS_EXT_COS_ENTRY(n)			\
	BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),	\
	BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)

#define BNXT_RX_STATS_EXT_COS_ENTRIES	\
	BNXT_RX_STATS_EXT_COS_ENTRY(0),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(1),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(2),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(3),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(4),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(5),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(6),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(7)

#define BNXT_TX_STATS_EXT_COS_ENTRIES	\
	BNXT_TX_STATS_EXT_COS_ENTRY(0),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(1),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(2),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(3),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(4),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(5),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(6),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(7)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)

#define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_RX_STATS_PRI_ENTRIES(counter)	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 0),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 1),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 2),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 3),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 4),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 5),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 6),	\
	BNXT_RX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_TX_STATS_PRI_ENTRIES(counter)	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 0),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 1),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 2),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 3),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 4),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 5),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 6),	\
	BNXT_TX_STATS_PRI_ENTRY(counter, 7)

enum {
	RX_TOTAL_DISCARDS,
	TX_TOTAL_DISCARDS,
	RX_NETPOLL_DISCARDS,
};

static const char *const bnxt_ring_err_stats_arr[] = {
	"rx_total_l4_csum_errors",
	"rx_total_resets",
	"rx_total_buf_errors",
	"rx_total_oom_discards",
	"rx_total_netpoll_discards",
	"rx_total_ring_discards",
	"tx_total_resets",
	"tx_total_ring_discards",
	"total_missed_irqs",
};

#define NUM_RING_RX_SW_STATS	ARRAY_SIZE(bnxt_rx_sw_stats_str)
#define NUM_RING_CMN_SW_STATS	ARRAY_SIZE(bnxt_cmn_sw_stats_str)
#define NUM_RING_RX_HW_STATS	ARRAY_SIZE(bnxt_ring_rx_stats_str)
#define NUM_RING_TX_HW_STATS	ARRAY_SIZE(bnxt_ring_tx_stats_str)

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),
	BNXT_RX_STATS_ENTRY(rx_stat_discard),
	BNXT_RX_STATS_ENTRY(rx_stat_err),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
	BNXT_TX_STATS_ENTRY(tx_stat_discard),
	BNXT_TX_STATS_ENTRY(tx_stat_error),
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
	BNXT_RX_STATS_EXT_COS_ENTRIES,
	BNXT_RX_STATS_EXT_PFC_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_bits),
	BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
	BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
	BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss),
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_port_stats_ext_arr[] = {
	BNXT_TX_STATS_EXT_COS_ENTRIES,
	BNXT_TX_STATS_EXT_PFC_ENTRIES,
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_bytes_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_pkts_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_bytes_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_pkts_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};

#define BNXT_NUM_RING_ERR_STATS	ARRAY_SIZE(bnxt_ring_err_stats_arr)
#define BNXT_NUM_PORT_STATS	ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI			\
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))

static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
	if (BNXT_SUPPORTS_TPA(bp)) {
		if (bp->max_tpa_v2) {
			if (BNXT_CHIP_P5(bp))
				return BNXT_NUM_TPA_RING_STATS_P5;
			return BNXT_NUM_TPA_RING_STATS_P7;
		}
		return BNXT_NUM_TPA_RING_STATS;
	}
	return 0;
}

static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
	int rx, tx, cmn;

	rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
	     bnxt_get_num_tpa_ring_stats(bp);
	tx = NUM_RING_TX_HW_STATS;
	cmn = NUM_RING_CMN_SW_STATS;
	return rx * bp->rx_nr_rings +
	       tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) +
	       cmn * bp->cp_nr_rings;
}

static int bnxt_get_num_stats(struct bnxt *bp)
{
	int num_stats = bnxt_get_num_ring_stats(bp);
	int len;

	num_stats += BNXT_NUM_RING_ERR_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS)
		num_stats += BNXT_NUM_PORT_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		len = min_t(int, bp->fw_rx_stats_ext_size,
			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
		num_stats += len;
		len = min_t(int, bp->fw_tx_stats_ext_size,
			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
		num_stats += len;
		if (bp->pri2cos_valid)
			num_stats += BNXT_NUM_STATS_PRI;
	}

	return num_stats;
}

static int bnxt_get_sset_count(struct net_device *dev, int sset)
{
	struct bnxt *bp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return bnxt_get_num_stats(bp);
	case ETH_SS_TEST:
		if (!bp->num_tests)
			return -EOPNOTSUPP;
		return bp->num_tests;
	default:
		return -EOPNOTSUPP;
	}
}

static bool is_rx_ring(struct bnxt *bp, int ring_num)
{
	return ring_num < bp->rx_nr_rings;
}

static bool is_tx_ring(struct bnxt *bp, int ring_num)
{
	int tx_base = 0;

	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
		tx_base = bp->rx_nr_rings;

	if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
		return true;
	return false;
}

static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct bnxt_total_ring_err_stats ring_err_stats = {0};
	struct bnxt *bp = netdev_priv(dev);
	u64 *curr, *prev;
	u32 tpa_stats;
	u32 i, j = 0;

	if (!bp->bnapi) {
		j += bnxt_get_num_ring_stats(bp);
		goto skip_ring_stats;
	}

	tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		u64 *sw_stats = cpr->stats.sw_stats;
		u64 *sw;
		int k;

		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
				buf[j] = sw_stats[k];
		}
		if (is_tx_ring(bp, i)) {
			k = NUM_RING_RX_HW_STATS;
			for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
			     j++, k++)
				buf[j] = sw_stats[k];
		}
		if (!tpa_stats || !is_rx_ring(bp, i))
			goto skip_tpa_ring_stats;

		k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
		for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
			   tpa_stats; j++, k++)
			buf[j] = sw_stats[k];

skip_tpa_ring_stats:
		sw = (u64 *)&cpr->sw_stats->rx;
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
				buf[j] = sw[k];
		}

		sw = (u64 *)&cpr->sw_stats->cmn;
		for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
			buf[j] = sw[k];
	}

	bnxt_get_ring_err_stats(bp, &ring_err_stats);

skip_ring_stats:
	curr = &ring_err_stats.rx_total_l4_csum_errors;
	prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors;
	for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++)
		buf[j] = *curr + *prev;

	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		u64 *port_stats = bp->port_stats.sw_stats;

		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
			buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
		u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
		u32 len;

		len = min_t(u32, bp->fw_rx_stats_ext_size,
			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(rx_port_stats_ext +
				   bnxt_port_stats_ext_arr[i].offset);
		}
		len = min_t(u32, bp->fw_tx_stats_ext_size,
			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(tx_port_stats_ext +
				   bnxt_tx_port_stats_ext_arr[i].offset);
		}
		if (bp->pri2cos_valid) {
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
		}
	}
}

static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	u32 i, j, num_str;
	const char *str;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < bp->cp_nr_rings; i++) {
			if (is_rx_ring(bp, i))
				for (j = 0; j < NUM_RING_RX_HW_STATS; j++) {
					str = bnxt_ring_rx_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
			if (is_tx_ring(bp, i))
				for (j = 0; j < NUM_RING_TX_HW_STATS; j++) {
					str = bnxt_ring_tx_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
			num_str = bnxt_get_num_tpa_ring_stats(bp);
			if (!num_str || !is_rx_ring(bp, i))
				goto skip_tpa_stats;

			if (bp->max_tpa_v2)
				for (j = 0; j < num_str; j++) {
					str = bnxt_ring_tpa2_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
			else
				for (j = 0; j < num_str; j++) {
					str = bnxt_ring_tpa_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
skip_tpa_stats:
			if (is_rx_ring(bp, i))
				for (j = 0; j < NUM_RING_RX_SW_STATS; j++) {
					str = bnxt_rx_sw_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
			for (j = 0; j < NUM_RING_CMN_SW_STATS; j++) {
				str = bnxt_cmn_sw_stats_str[j];
				ethtool_sprintf(&buf, "[%d]: %s", i, str);
			}
		}
		for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++)
			ethtool_puts(&buf, bnxt_ring_err_stats_arr[i]);

		if (bp->flags & BNXT_FLAG_PORT_STATS)
			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
				str = bnxt_port_stats_arr[i].string;
				ethtool_puts(&buf, str);
			}

		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
			u32 len;

			len = min_t(u32, bp->fw_rx_stats_ext_size,
				    ARRAY_SIZE(bnxt_port_stats_ext_arr));
			for (i = 0; i < len; i++) {
				str = bnxt_port_stats_ext_arr[i].string;
				ethtool_puts(&buf, str);
			}

			len = min_t(u32, bp->fw_tx_stats_ext_size,
				    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
			for (i = 0; i < len; i++) {
				str = bnxt_tx_port_stats_ext_arr[i].string;
				ethtool_puts(&buf, str);
			}

			if (bp->pri2cos_valid) {
				for (i = 0; i < 8; i++) {
					str = bnxt_rx_bytes_pri_arr[i].string;
					ethtool_puts(&buf, str);
				}

				for (i = 0; i < 8; i++) {
					str = bnxt_rx_pkts_pri_arr[i].string;
					ethtool_puts(&buf, str);
				}

				for (i = 0; i < 8; i++) {
					str = bnxt_tx_bytes_pri_arr[i].string;
					ethtool_puts(&buf, str);
				}

				for (i = 0; i < 8; i++) {
					str = bnxt_tx_pkts_pri_arr[i].string;
					ethtool_puts(&buf, str);
				}
			}
		}
		break;
	case ETH_SS_TEST:
		if (bp->num_tests)
			for (i = 0; i < bp->num_tests; i++)
				ethtool_puts(&buf, bp->test_info->string[i]);
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}

static void bnxt_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
		ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
	} else {
		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
		ering->rx_jumbo_max_pending = 0;
		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
	}
	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
	ering->tx_pending = bp->tx_ring_size;

	kernel_ering->hds_thresh_max = BNXT_HDS_THRESHOLD_MAX;
}

static int bnxt_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering,
			      struct kernel_ethtool_ringparam *kernel_ering,
			      struct netlink_ext_ack *extack)
{
	u8 tcp_data_split = kernel_ering->tcp_data_split;
	struct bnxt *bp = netdev_priv(dev);
	u8 hds_config_mod;

	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
		return -EINVAL;

	hds_config_mod = tcp_data_split != dev->cfg->hds_config;
	if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && hds_config_mod)
		return -EINVAL;

	if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
	    hds_config_mod && BNXT_RX_PAGE_MODE(bp)) {
		NL_SET_ERR_MSG_MOD(extack, "tcp-data-split is disallowed when XDP is attached");
		return -EINVAL;
	}

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	if (hds_config_mod) {
		if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
			bp->flags |= BNXT_FLAG_HDS;
		else if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
			bp->flags &= ~BNXT_FLAG_HDS;
	}

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}

static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_rx_rings, max_tx_rings, tcs;
	int max_tx_sch_inputs, tx_grps;
	/* Get the most up-to-date max_tx_sch_inputs. */
	if (netif_running(dev) && BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);
	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;

	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	tcs = bp->num_tc;
	tx_grps = max(tcs, 1);
	if (bp->tx_nr_rings_xdp)
		tx_grps++;
	max_tx_rings /= tx_grps;
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
		max_rx_rings = 0;
		max_tx_rings = 0;
	}
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	if (tcs > 1)
		max_tx_rings /= tcs;

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
	} else {
		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			channel->rx_count = bp->rx_nr_rings;
			channel->tx_count = bp->tx_nr_rings_per_tc;
		}
	}
}

static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;
	int tx_cp;

	if (channel->other_count)
		return -EINVAL;

	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = bp->num_tc;

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		tx_xdp = req_rx_rings;
	}

	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
	    netif_is_rxfh_configured(dev)) {
		netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
		return -EINVAL;
	}

	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * before PF unload
			 */
		}
		bnxt_close_nic(bp, true, false);
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
	bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
			       tx_cp + bp->rx_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * to re-enable
			 */
		}
	} else {
		rc = bnxt_reserve_rings(bp, true);
	}

	return rc;
}

static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
				     int tbl_size, u32 *ids, u32 start,
				     u32 id_cnt)
{
	int i, j = start;

	if (j >= id_cnt)
		return j;
	for (i = 0; i < tbl_size; i++) {
		struct hlist_head *head;
		struct bnxt_filter_base *fltr;

		head = &tbl[i];
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (!fltr->flags ||
			    test_bit(BNXT_FLTR_FW_DELETED, &fltr->state))
				continue;
			ids[j++] = fltr->sw_id;
			if (j == id_cnt)
				return j;
		}
	}
	return j;
}

static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
						      struct hlist_head tbl[],
						      int tbl_size, u32 id)
{
	int i;

	for (i = 0; i < tbl_size; i++) {
		struct hlist_head *head;
		struct bnxt_filter_base *fltr;

		head = &tbl[i];
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (fltr->flags && fltr->sw_id == id)
				return fltr;
		}
	}
	return NULL;
}

static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	u32 count;

	cmd->data = bp->ntp_fltr_count;
	rcu_read_lock();
	count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0,
					  cmd->rule_cnt);
	cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
						  BNXT_NTP_FLTR_HASH_SIZE,
						  rule_locs, count,
						  cmd->rule_cnt);
	rcu_read_unlock();

	return 0;
}

static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_filter_base *fltr_base;
	struct bnxt_ntuple_filter *fltr;
	struct bnxt_flow_masks *fmasks;
	struct flow_keys *fkeys;
	int rc = -EINVAL;

	if (fs->location >= bp->max_fltr)
		return rc;

	rcu_read_lock();
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE,
					  fs->location);
	if (fltr_base) {
		struct ethhdr *h_ether = &fs->h_u.ether_spec;
		struct ethhdr *m_ether = &fs->m_u.ether_spec;
		struct bnxt_l2_filter *l2_fltr;
		struct bnxt_l2_key *l2_key;

		l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
		l2_key = &l2_fltr->l2_key;
		fs->flow_type = ETHER_FLOW;
		ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr);
		eth_broadcast_addr(m_ether->h_dest);
		if (l2_key->vlan) {
			struct ethtool_flow_ext *m_ext = &fs->m_ext;
			struct ethtool_flow_ext *h_ext = &fs->h_ext;

			fs->flow_type |= FLOW_EXT;
			m_ext->vlan_tci = htons(0xfff);
			h_ext->vlan_tci = htons(l2_key->vlan);
		}
		if (fltr_base->flags & BNXT_ACT_RING_DST)
			fs->ring_cookie = fltr_base->rxq;
		if (fltr_base->flags & BNXT_ACT_FUNC_DST)
			fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) <<
					  ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		rcu_read_unlock();
		return 0;
	}
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
					  BNXT_NTP_FLTR_HASH_SIZE,
					  fs->location);
	if (!fltr_base) {
		rcu_read_unlock();
		return rc;
	}
	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);

	fkeys = &fltr->fkeys;
	fmasks = &fltr->fmasks;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
			fs->flow_type = IP_USER_FLOW;
			fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
			fs->h_u.usr_ip4_spec.proto = BNXT_IP_PROTO_WILDCARD;
			fs->m_u.usr_ip4_spec.proto = 0;
		} else if (fkeys->basic.ip_proto == IPPROTO_ICMP) {
			fs->flow_type = IP_USER_FLOW;
			fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
			fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
			fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK;
		} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
			fs->flow_type = TCP_V4_FLOW;
		} else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
			fs->flow_type = UDP_V4_FLOW;
		} else {
			goto fltr_err;
		}

		fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
		fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src;
		fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
		fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst;
		if (fs->flow_type == TCP_V4_FLOW ||
		    fs->flow_type == UDP_V4_FLOW) {
			fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src;
			fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
		}
	} else {
		if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
			fs->flow_type = IPV6_USER_FLOW;
			fs->h_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_WILDCARD;
			fs->m_u.usr_ip6_spec.l4_proto = 0;
		} else if (fkeys->basic.ip_proto == IPPROTO_ICMPV6) {
			fs->flow_type = IPV6_USER_FLOW;
			fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
			fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK;
		} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
			fs->flow_type = TCP_V6_FLOW;
		} else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
			fs->flow_type = UDP_V6_FLOW;
		} else {
			goto fltr_err;
		}

		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
			fkeys->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] =
			fmasks->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
			fkeys->addrs.v6addrs.dst;
		*(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] =
			fmasks->addrs.v6addrs.dst;
		if (fs->flow_type == TCP_V6_FLOW ||
		    fs->flow_type == UDP_V6_FLOW) {
			fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src;
			fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst;
		}
	}

	if (fltr->base.flags & BNXT_ACT_DROP) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
		fs->flow_type |= FLOW_RSS;
		cmd->rss_context = fltr->base.fw_vnic_id;
	} else {
		fs->ring_cookie = fltr->base.rxq;
	}
	rc = 0;

fltr_err:
	rcu_read_unlock();

	return rc;
}

static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
							u32 index)
{
	struct ethtool_rxfh_context *ctx;

	ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
	if (!ctx)
		return NULL;
	return ethtool_rxfh_context_priv(ctx);
}
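
/* The RSS indirection table and hash key for a VNIC share a single
 * DMA-coherent allocation: the key is placed immediately after the
 * cache-line-aligned table, so one dma_alloc_coherent() call below
 * backs both and both are freed together.
 */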
static int bnxt_alloc_vnic_rss_table(struct bnxt *bp,
				     struct bnxt_vnic_info *vnic)
{
	int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);

	vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
	vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
					     vnic->rss_table_size,
					     &vnic->rss_table_dma_addr,
					     GFP_KERNEL);
	if (!vnic->rss_table)
		return -ENOMEM;

	vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
	vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	return 0;
}

static int bnxt_add_l2_cls_rule(struct bnxt *bp,
				struct ethtool_rx_flow_spec *fs)
{
	u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	struct ethhdr *h_ether = &fs->h_u.ether_spec;
	struct ethhdr *m_ether = &fs->m_u.ether_spec;
	struct bnxt_l2_filter *fltr;
	struct bnxt_l2_key key;
	u16 vnic_id;
	u8 flags;
	int rc;

	if (BNXT_CHIP_P5_PLUS(bp))
		return -EOPNOTSUPP;

	if (!is_broadcast_ether_addr(m_ether->h_dest))
		return -EINVAL;
	ether_addr_copy(key.dst_mac_addr, h_ether->h_dest);
	key.vlan = 0;
	if (fs->flow_type & FLOW_EXT) {
		struct ethtool_flow_ext *m_ext = &fs->m_ext;
		struct ethtool_flow_ext *h_ext = &fs->h_ext;

		if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci)
			return -EINVAL;
		key.vlan = ntohs(h_ext->vlan_tci);
	}

	if (vf) {
		flags = BNXT_ACT_FUNC_DST;
		vnic_id = 0xffff;
		vf--;
	} else {
		flags = BNXT_ACT_RING_DST;
		vnic_id = bp->vnic_info[ring + 1].fw_vnic_id;
	}
	fltr = bnxt_alloc_new_l2_filter(bp, &key, flags);
	if (IS_ERR(fltr))
		return PTR_ERR(fltr);

	fltr->base.fw_vnic_id = vnic_id;
	fltr->base.rxq = ring;
	fltr->base.vf_idx = vf;
	rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
	if (rc)
		bnxt_del_l2_filter(bp, fltr);
	else
		fs->location = fltr->base.sw_id;
	return rc;
}

static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
					struct ethtool_usrip4_spec *ip_mask)
{
	u8 mproto = ip_mask->proto;
	u8 sproto = ip_spec->proto;

	if (ip_mask->l4_4_bytes || ip_mask->tos ||
	    ip_spec->ip_ver != ETH_RX_NFC_IP4 ||
	    (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMP)))
		return false;
	return true;
}

static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
					struct ethtool_usrip6_spec *ip_mask)
{
	u8 mproto = ip_mask->l4_proto;
	u8 sproto = ip_spec->l4_proto;

	if (ip_mask->l4_4_bytes || ip_mask->tclass ||
	    (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMPV6)))
		return false;
	return true;
}

static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
				    struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	struct bnxt_ntuple_filter *new_fltr, *fltr;
	u32 flow_type = fs->flow_type & 0xff;
	struct bnxt_l2_filter *l2_fltr;
	struct bnxt_flow_masks *fmasks;
	struct flow_keys *fkeys;
	u32 idx, ring;
	int rc;
	u8 vf;

	if (!bp->vnic_info)
		return -EAGAIN;

	vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
		return -EOPNOTSUPP;

	if (flow_type == IP_USER_FLOW) {
		if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec,
						 &fs->m_u.usr_ip4_spec))
			return -EOPNOTSUPP;
	}

	if (flow_type == IPV6_USER_FLOW) {
		if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec,
						 &fs->m_u.usr_ip6_spec))
			return -EOPNOTSUPP;
	}

	new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
	if (!new_fltr)
		return -ENOMEM;

	l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
	atomic_inc(&l2_fltr->refcnt);
	new_fltr->l2_fltr = l2_fltr;
	fmasks = &new_fltr->fmasks;
	fkeys = &new_fltr->fkeys;

	rc = -EOPNOTSUPP;
	switch (flow_type) {
	case IP_USER_FLOW: {
		struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec;
		struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec;

		fkeys->basic.ip_proto = ip_mask->proto ? ip_spec->proto
						       : BNXT_IP_PROTO_WILDCARD;
		fkeys->basic.n_proto = htons(ETH_P_IP);
		fkeys->addrs.v4addrs.src = ip_spec->ip4src;
		fmasks->addrs.v4addrs.src = ip_mask->ip4src;
		fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
		fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
		break;
	}
	case TCP_V4_FLOW:
	case UDP_V4_FLOW: {
		struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
		struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec;

		fkeys->basic.ip_proto = IPPROTO_TCP;
		if (flow_type == UDP_V4_FLOW)
			fkeys->basic.ip_proto = IPPROTO_UDP;
		fkeys->basic.n_proto = htons(ETH_P_IP);
		fkeys->addrs.v4addrs.src = ip_spec->ip4src;
		fmasks->addrs.v4addrs.src = ip_mask->ip4src;
		fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
		fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
		fkeys->ports.src = ip_spec->psrc;
		fmasks->ports.src = ip_mask->psrc;
		fkeys->ports.dst = ip_spec->pdst;
		fmasks->ports.dst = ip_mask->pdst;
		break;
	}
	case IPV6_USER_FLOW: {
		struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec;
		struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec;

		fkeys->basic.ip_proto = ip_mask->l4_proto ? ip_spec->l4_proto
							  : BNXT_IP_PROTO_WILDCARD;
		fkeys->basic.n_proto = htons(ETH_P_IPV6);
		fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
		fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
		fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
		fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
		break;
	}
	case TCP_V6_FLOW:
	case UDP_V6_FLOW: {
		struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec;
		struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec;

		fkeys->basic.ip_proto = IPPROTO_TCP;
		if (flow_type == UDP_V6_FLOW)
			fkeys->basic.ip_proto = IPPROTO_UDP;
		fkeys->basic.n_proto = htons(ETH_P_IPV6);

		fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
		fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
		fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
		fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
		fkeys->ports.src = ip_spec->psrc;
		fmasks->ports.src = ip_mask->psrc;
		fkeys->ports.dst = ip_spec->pdst;
		fmasks->ports.dst = ip_mask->pdst;
		break;
	}
	default:
		rc = -EOPNOTSUPP;
		goto ntuple_err;
	}
	if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks)))
		goto ntuple_err;

	idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
	rcu_read_lock();
	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
	if (fltr) {
		rcu_read_unlock();
		rc = -EEXIST;
		goto ntuple_err;
	}
	rcu_read_unlock();

	new_fltr->base.flags = BNXT_ACT_NO_AGING;
	if (fs->flow_type & FLOW_RSS) {
		struct bnxt_rss_ctx *rss_ctx;

		new_fltr->base.fw_vnic_id = 0;
		new_fltr->base.flags |= BNXT_ACT_RSS_CTX;
		rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context);
		if (rss_ctx) {
			new_fltr->base.fw_vnic_id = rss_ctx->index;
		} else {
			rc = -EINVAL;
			goto ntuple_err;
		}
	}
	if (fs->ring_cookie == RX_CLS_FLOW_DISC)
		new_fltr->base.flags |= BNXT_ACT_DROP;
	else
		new_fltr->base.rxq = ring;
	__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
	if (!rc) {
		rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr);
		if (rc) {
			bnxt_del_ntp_filter(bp, new_fltr);
			return rc;
		}
		fs->location = new_fltr->base.sw_id;
		return 0;
	}

ntuple_err:
	atomic_dec(&l2_fltr->refcnt);
	kfree(new_fltr);
	return rc;
}

static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	u32 ring, flow_type;
	int rc;
	u8 vf;

	if (!netif_running(bp->dev))
		return -EAGAIN;
	if (!(bp->flags & BNXT_FLAG_RFS))
		return -EPERM;
	if (fs->location != RX_CLS_LOC_ANY)
		return -EINVAL;

	flow_type = fs->flow_type;
	if ((flow_type == IP_USER_FLOW ||
	     flow_type == IPV6_USER_FLOW) &&
	    !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
		return -EOPNOTSUPP;
	if (flow_type & FLOW_MAC_EXT)
		return -EINVAL;
	flow_type &= ~FLOW_EXT;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
		return bnxt_add_ntuple_cls_rule(bp, cmd);

	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	if (BNXT_VF(bp) && vf)
		return -EINVAL;
	if (BNXT_PF(bp) && vf > bp->pf.active_vfs)
		return -EINVAL;
	if (!vf && ring >= bp->rx_nr_rings)
		return -EINVAL;

	if (flow_type == ETHER_FLOW)
		rc = bnxt_add_l2_cls_rule(bp, fs);
	else
		rc = bnxt_add_ntuple_cls_rule(bp, cmd);
	return rc;
}

static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	struct bnxt_filter_base *fltr_base;
	struct bnxt_ntuple_filter *fltr;
	u32 id = fs->location;

	rcu_read_lock();
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE, id);
	if (fltr_base) {
		struct bnxt_l2_filter *l2_fltr;

		l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
		rcu_read_unlock();
		bnxt_hwrm_l2_filter_free(bp, l2_fltr);
		bnxt_del_l2_filter(bp, l2_fltr);
		return 0;
	}
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
					  BNXT_NTP_FLTR_HASH_SIZE, id);
	if (!fltr_base) {
		rcu_read_unlock();
		return -ENOENT;
	}

	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
	if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();
	bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr);
	bnxt_del_ntp_filter(bp, fltr);
	return 0;
}

static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
		return RXH_IP_SRC | RXH_IP_DST;
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL)
		return RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL;
	return 0;
}

static int bnxt_get_rxfh_fields(struct net_device *dev,
				struct ethtool_rxfh_fields *cmd)
{
	struct bnxt *bp = netdev_priv(dev);

	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case AH_ESP_V4_FLOW:
		if (bp->rss_hash_cfg &
		    (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
		     VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4))
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case AH_ESP_V6_FLOW:
		if (bp->rss_hash_cfg &
		    (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
		     VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6))
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}

#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)

static int bnxt_set_rxfh_fields(struct net_device *dev,
				const struct ethtool_rxfh_fields *cmd,
				struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	int tuple, rc = 0;
	u32 rss_hash_cfg;

	rss_hash_cfg = bp->rss_hash_cfg;

	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE ||
		 cmd->data == (RXH_2TUPLE | RXH_IP6_FL))
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	if (cmd->data & RXH_IP6_FL &&
	    !(bp->rss_cap & BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP))
		return -EINVAL;

	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (cmd->flow_type == AH_ESP_V4_FLOW) {
		if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) ||
				   !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP)))
			return -EINVAL;
		rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
				  VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4);
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
					VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4;
	} else if (cmd->flow_type == AH_ESP_V6_FLOW) {
		if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) ||
				   !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP)))
			return -EINVAL;
		rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
				  VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6);
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
					VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6;
	} else if (tuple == 4) {
		return -EINVAL;
	}

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
				  VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL);
		if (!tuple)
			break;
		if (cmd->data & RXH_IP6_FL)
			rss_hash_cfg |=
				VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL;
		else if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
		bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
	bp->rss_hash_cfg = rss_hash_cfg;
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}

static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->rx_nr_rings;
		break;

	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->ntp_fltr_count;
		cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL;
		break;

	case ETHTOOL_GRXCLSRLALL:
		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
		break;

	case ETHTOOL_GRXCLSRULE:
		rc = bnxt_grxclsrule(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}

static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		rc = bnxt_srxclsrlins(bp, cmd);
		break;

	case ETHTOOL_SRXCLSRLDEL:
		rc = bnxt_srxclsrldel(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}
	return rc;
}

u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
		       BNXT_RSS_TABLE_ENTRIES_P5;
	return HW_HASH_INDEX_SIZE;
}

static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
{
	return HW_HASH_KEY_SIZE;
}

static int bnxt_get_rxfh(struct net_device *dev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct bnxt_rss_ctx *rss_ctx = NULL;
	struct bnxt *bp = netdev_priv(dev);
	u32 *indir_tbl = bp->rss_indir_tbl;
	struct bnxt_vnic_info *vnic;
	u32 i, tbl_size;

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (!bp->vnic_info)
		return 0;

	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
	if (rxfh->rss_context) {
		struct ethtool_rxfh_context *ctx;

		ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context);
		if (!ctx)
			return -EINVAL;
		indir_tbl = ethtool_rxfh_context_indir(ctx);
		rss_ctx = ethtool_rxfh_context_priv(ctx);
		vnic = &rss_ctx->vnic;
	}

	if (rxfh->indir && indir_tbl) {
		tbl_size = bnxt_get_rxfh_indir_size(dev);
		for (i = 0; i < tbl_size; i++)
			rxfh->indir[i] = indir_tbl[i];
	}

	if (rxfh->key && vnic->rss_hash_key)
		memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);

	return 0;
}

static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx,
			    struct bnxt_rss_ctx *rss_ctx,
			    const struct ethtool_rxfh_param *rxfh)
{
	if (rxfh->key) {
		if (rss_ctx) {
			memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key,
			       HW_HASH_KEY_SIZE);
		} else {
			memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
			bp->rss_hash_key_updated = true;
		}
	}
	if (rxfh->indir) {
bnxt_get_rxfh_indir_size(bp->dev); 1888 u32 *indir_tbl = bp->rss_indir_tbl; 1889 1890 if (rss_ctx) 1891 indir_tbl = ethtool_rxfh_context_indir(ctx); 1892 for (i = 0; i < tbl_size; i++) 1893 indir_tbl[i] = rxfh->indir[i]; 1894 pad = bp->rss_indir_tbl_entries - tbl_size; 1895 if (pad) 1896 memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl)); 1897 } 1898 } 1899 1900 static int bnxt_rxfh_context_check(struct bnxt *bp, 1901 const struct ethtool_rxfh_param *rxfh, 1902 struct netlink_ext_ack *extack) 1903 { 1904 if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) { 1905 NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported"); 1906 return -EOPNOTSUPP; 1907 } 1908 1909 if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) { 1910 NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported"); 1911 return -EOPNOTSUPP; 1912 } 1913 1914 if (!netif_running(bp->dev)) { 1915 NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down"); 1916 return -EAGAIN; 1917 } 1918 1919 return 0; 1920 } 1921 1922 static int bnxt_create_rxfh_context(struct net_device *dev, 1923 struct ethtool_rxfh_context *ctx, 1924 const struct ethtool_rxfh_param *rxfh, 1925 struct netlink_ext_ack *extack) 1926 { 1927 struct bnxt *bp = netdev_priv(dev); 1928 struct bnxt_rss_ctx *rss_ctx; 1929 struct bnxt_vnic_info *vnic; 1930 int rc; 1931 1932 rc = bnxt_rxfh_context_check(bp, rxfh, extack); 1933 if (rc) 1934 return rc; 1935 1936 if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) { 1937 NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u", 1938 BNXT_MAX_ETH_RSS_CTX); 1939 return -EINVAL; 1940 } 1941 1942 if (!bnxt_rfs_capable(bp, true)) { 1943 NL_SET_ERR_MSG_MOD(extack, "Out of hardware resources"); 1944 return -ENOMEM; 1945 } 1946 1947 rss_ctx = ethtool_rxfh_context_priv(ctx); 1948 1949 bp->num_rss_ctx++; 1950 1951 vnic = &rss_ctx->vnic; 1952 vnic->rss_ctx = ctx; 1953 vnic->flags |= BNXT_VNIC_RSSCTX_FLAG; 1954 vnic->vnic_id = BNXT_VNIC_ID_INVALID; 1955 rc = bnxt_alloc_vnic_rss_table(bp, vnic); 1956 if (rc) 1957 goto out; 1958 1959 /* Populate defaults in the context */ 1960 bnxt_set_dflt_rss_indir_tbl(bp, ctx); 1961 ctx->hfunc = ETH_RSS_HASH_TOP; 1962 memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE); 1963 memcpy(ethtool_rxfh_context_key(ctx), 1964 bp->rss_hash_key, HW_HASH_KEY_SIZE); 1965 1966 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings); 1967 if (rc) { 1968 NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC"); 1969 goto out; 1970 } 1971 1972 rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA); 1973 if (rc) { 1974 NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); 1975 goto out; 1976 } 1977 bnxt_modify_rss(bp, ctx, rss_ctx, rxfh); 1978 1979 rc = __bnxt_setup_vnic_p5(bp, vnic); 1980 if (rc) { 1981 NL_SET_ERR_MSG_MOD(extack, "Unable to setup VNIC"); 1982 goto out; 1983 } 1984 1985 rss_ctx->index = rxfh->rss_context; 1986 return 0; 1987 out: 1988 bnxt_del_one_rss_ctx(bp, rss_ctx, true); 1989 return rc; 1990 } 1991 1992 static int bnxt_modify_rxfh_context(struct net_device *dev, 1993 struct ethtool_rxfh_context *ctx, 1994 const struct ethtool_rxfh_param *rxfh, 1995 struct netlink_ext_ack *extack) 1996 { 1997 struct bnxt *bp = netdev_priv(dev); 1998 struct bnxt_rss_ctx *rss_ctx; 1999 int rc; 2000 2001 rc = bnxt_rxfh_context_check(bp, rxfh, extack); 2002 if (rc) 2003 return rc; 2004 2005 rss_ctx = ethtool_rxfh_context_priv(ctx); 2006 2007 bnxt_modify_rss(bp, ctx, rss_ctx, rxfh); 2008 2009 return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic); 2010 } 2011 2012 static int
bnxt_remove_rxfh_context(struct net_device *dev, 2013 struct ethtool_rxfh_context *ctx, 2014 u32 rss_context, 2015 struct netlink_ext_ack *extack) 2016 { 2017 struct bnxt *bp = netdev_priv(dev); 2018 struct bnxt_rss_ctx *rss_ctx; 2019 2020 rss_ctx = ethtool_rxfh_context_priv(ctx); 2021 2022 bnxt_del_one_rss_ctx(bp, rss_ctx, true); 2023 return 0; 2024 } 2025 2026 static int bnxt_set_rxfh(struct net_device *dev, 2027 struct ethtool_rxfh_param *rxfh, 2028 struct netlink_ext_ack *extack) 2029 { 2030 struct bnxt *bp = netdev_priv(dev); 2031 int rc = 0; 2032 2033 if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) 2034 return -EOPNOTSUPP; 2035 2036 bnxt_modify_rss(bp, NULL, NULL, rxfh); 2037 2038 if (netif_running(bp->dev)) { 2039 bnxt_close_nic(bp, false, false); 2040 rc = bnxt_open_nic(bp, false, false); 2041 } 2042 return rc; 2043 } 2044 2045 static void bnxt_get_drvinfo(struct net_device *dev, 2046 struct ethtool_drvinfo *info) 2047 { 2048 struct bnxt *bp = netdev_priv(dev); 2049 2050 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 2051 strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); 2052 strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); 2053 info->n_stats = bnxt_get_num_stats(bp); 2054 info->testinfo_len = bp->num_tests; 2055 /* TODO CHIMP_FW: eeprom dump details */ 2056 info->eedump_len = 0; 2057 /* TODO CHIMP FW: reg dump details */ 2058 info->regdump_len = 0; 2059 } 2060 2061 static int bnxt_get_regs_len(struct net_device *dev) 2062 { 2063 struct bnxt *bp = netdev_priv(dev); 2064 2065 if (!BNXT_PF(bp)) 2066 return -EOPNOTSUPP; 2067 2068 return BNXT_PXP_REG_LEN + bp->pcie_stat_len; 2069 } 2070 2071 static void * 2072 __bnxt_hwrm_pcie_qstats(struct bnxt *bp, struct hwrm_pcie_qstats_input *req) 2073 { 2074 struct pcie_ctx_hw_stats_v2 *hw_pcie_stats; 2075 dma_addr_t hw_pcie_stats_addr; 2076 int rc; 2077 2078 hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats), 2079 &hw_pcie_stats_addr); 2080 if (!hw_pcie_stats) 2081 return NULL; 2082 2083 req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats)); 2084 req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); 2085 rc = hwrm_req_send(bp, req); 2086 2087 return rc ? 
NULL : hw_pcie_stats; 2088 } 2089 2090 #define BNXT_PCIE_32B_ENTRY(start, end) \ 2091 { offsetof(struct pcie_ctx_hw_stats_v2, start),\ 2092 offsetof(struct pcie_ctx_hw_stats_v2, end) } 2093 2094 static const struct { 2095 u16 start; 2096 u16 end; 2097 } bnxt_pcie_32b_entries[] = { 2098 BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]), 2099 BNXT_PCIE_32B_ENTRY(pcie_tl_credit_nph_histogram[0], unused_1), 2100 BNXT_PCIE_32B_ENTRY(pcie_rd_latency_histogram[0], unused_2), 2101 }; 2102 2103 static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, 2104 void *_p) 2105 { 2106 struct hwrm_pcie_qstats_output *resp; 2107 struct hwrm_pcie_qstats_input *req; 2108 struct bnxt *bp = netdev_priv(dev); 2109 u8 *src; 2110 2111 regs->version = 0; 2112 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED)) 2113 bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p); 2114 2115 if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) 2116 return; 2117 2118 if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS)) 2119 return; 2120 2121 resp = hwrm_req_hold(bp, req); 2122 src = __bnxt_hwrm_pcie_qstats(bp, req); 2123 if (src) { 2124 u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN); 2125 int i, j, len; 2126 2127 len = min(bp->pcie_stat_len, le16_to_cpu(resp->pcie_stat_size)); 2128 if (len <= sizeof(struct pcie_ctx_hw_stats)) 2129 regs->version = 1; 2130 else if (len < sizeof(struct pcie_ctx_hw_stats_v2)) 2131 regs->version = 2; 2132 else 2133 regs->version = 3; 2134 2135 for (i = 0, j = 0; i < len; ) { 2136 if (i >= bnxt_pcie_32b_entries[j].start && 2137 i <= bnxt_pcie_32b_entries[j].end) { 2138 u32 *dst32 = (u32 *)(dst + i); 2139 2140 *dst32 = le32_to_cpu(*(__le32 *)(src + i)); 2141 i += 4; 2142 if (i > bnxt_pcie_32b_entries[j].end && 2143 j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1) 2144 j++; 2145 } else { 2146 u64 *dst64 = (u64 *)(dst + i); 2147 2148 *dst64 = le64_to_cpu(*(__le64 *)(src + i)); 2149 i += 8; 2150 } 2151 } 2152 } 2153 hwrm_req_drop(bp, req); 2154 } 2155 2156 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2157 { 2158 struct bnxt *bp = netdev_priv(dev); 2159 2160 wol->supported = 0; 2161 wol->wolopts = 0; 2162 memset(&wol->sopass, 0, sizeof(wol->sopass)); 2163 if (bp->flags & BNXT_FLAG_WOL_CAP) { 2164 wol->supported = WAKE_MAGIC; 2165 if (bp->wol) 2166 wol->wolopts = WAKE_MAGIC; 2167 } 2168 } 2169 2170 static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2171 { 2172 struct bnxt *bp = netdev_priv(dev); 2173 2174 if (wol->wolopts & ~WAKE_MAGIC) 2175 return -EINVAL; 2176 2177 if (wol->wolopts & WAKE_MAGIC) { 2178 if (!(bp->flags & BNXT_FLAG_WOL_CAP)) 2179 return -EINVAL; 2180 if (!bp->wol) { 2181 if (bnxt_hwrm_alloc_wol_fltr(bp)) 2182 return -EBUSY; 2183 bp->wol = 1; 2184 } 2185 } else { 2186 if (bp->wol) { 2187 if (bnxt_hwrm_free_wol_fltr(bp)) 2188 return -EBUSY; 2189 bp->wol = 0; 2190 } 2191 } 2192 return 0; 2193 } 2194 2195 /* TODO: support 25GB, 40GB, 50GB with different cable type */ 2196 void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds) 2197 { 2198 linkmode_zero(mode); 2199 2200 if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB) 2201 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode); 2202 if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB) 2203 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode); 2204 if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB) 2205 linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode); 2206 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) 2207 
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode); 2208 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) 2209 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode); 2210 } 2211 2212 enum bnxt_media_type { 2213 BNXT_MEDIA_UNKNOWN = 0, 2214 BNXT_MEDIA_TP, 2215 BNXT_MEDIA_CR, 2216 BNXT_MEDIA_SR, 2217 BNXT_MEDIA_LR_ER_FR, 2218 BNXT_MEDIA_KR, 2219 BNXT_MEDIA_KX, 2220 BNXT_MEDIA_X, 2221 __BNXT_MEDIA_END, 2222 }; 2223 2224 static const enum bnxt_media_type bnxt_phy_types[] = { 2225 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR, 2226 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] = BNXT_MEDIA_KR, 2227 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR, 2228 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR, 2229 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR, 2230 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX, 2231 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR, 2232 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP, 2233 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP, 2234 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR, 2235 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR, 2236 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR, 2237 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR, 2238 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR, 2239 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR, 2240 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR, 2241 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR, 2242 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR, 2243 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR, 2244 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR, 2245 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR, 2246 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR, 2247 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR, 2248 [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP, 2249 [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X, 2250 [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X, 2251 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR, 2252 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR, 2253 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR, 2254 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR, 2255 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR, 2256 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR, 2257 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR, 2258 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR, 2259 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR, 2260 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR, 2261 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR, 2262 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR, 2263 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR, 2264 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR, 2265 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR, 2266 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR, 2267 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR, 2268 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR, 2269 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR, 2270 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR, 2271 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = 
BNXT_MEDIA_CR, 2272 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR, 2273 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR, 2274 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR, 2275 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR, 2276 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR, 2277 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR, 2278 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR, 2279 }; 2280 2281 static enum bnxt_media_type 2282 bnxt_get_media(struct bnxt_link_info *link_info) 2283 { 2284 switch (link_info->media_type) { 2285 case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP: 2286 return BNXT_MEDIA_TP; 2287 case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC: 2288 return BNXT_MEDIA_CR; 2289 default: 2290 if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types)) 2291 return bnxt_phy_types[link_info->phy_type]; 2292 return BNXT_MEDIA_UNKNOWN; 2293 } 2294 } 2295 2296 enum bnxt_link_speed_indices { 2297 BNXT_LINK_SPEED_UNKNOWN = 0, 2298 BNXT_LINK_SPEED_100MB_IDX, 2299 BNXT_LINK_SPEED_1GB_IDX, 2300 BNXT_LINK_SPEED_10GB_IDX, 2301 BNXT_LINK_SPEED_25GB_IDX, 2302 BNXT_LINK_SPEED_40GB_IDX, 2303 BNXT_LINK_SPEED_50GB_IDX, 2304 BNXT_LINK_SPEED_100GB_IDX, 2305 BNXT_LINK_SPEED_200GB_IDX, 2306 BNXT_LINK_SPEED_400GB_IDX, 2307 __BNXT_LINK_SPEED_END 2308 }; 2309 2310 static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed) 2311 { 2312 switch (speed) { 2313 case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX; 2314 case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX; 2315 case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX; 2316 case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX; 2317 case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX; 2318 case BNXT_LINK_SPEED_50GB: 2319 case BNXT_LINK_SPEED_50GB_PAM4: 2320 return BNXT_LINK_SPEED_50GB_IDX; 2321 case BNXT_LINK_SPEED_100GB: 2322 case BNXT_LINK_SPEED_100GB_PAM4: 2323 case BNXT_LINK_SPEED_100GB_PAM4_112: 2324 return BNXT_LINK_SPEED_100GB_IDX; 2325 case BNXT_LINK_SPEED_200GB: 2326 case BNXT_LINK_SPEED_200GB_PAM4: 2327 case BNXT_LINK_SPEED_200GB_PAM4_112: 2328 return BNXT_LINK_SPEED_200GB_IDX; 2329 case BNXT_LINK_SPEED_400GB: 2330 case BNXT_LINK_SPEED_400GB_PAM4: 2331 case BNXT_LINK_SPEED_400GB_PAM4_112: 2332 return BNXT_LINK_SPEED_400GB_IDX; 2333 default: return BNXT_LINK_SPEED_UNKNOWN; 2334 } 2335 } 2336 2337 static const enum ethtool_link_mode_bit_indices 2338 bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = { 2339 [BNXT_LINK_SPEED_100MB_IDX] = { 2340 { 2341 [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2342 }, 2343 }, 2344 [BNXT_LINK_SPEED_1GB_IDX] = { 2345 { 2346 [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 2347 /* historically baseT, but DAC is more correctly baseX */ 2348 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 2349 [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2350 [BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 2351 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2352 }, 2353 }, 2354 [BNXT_LINK_SPEED_10GB_IDX] = { 2355 { 2356 [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2357 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, 2358 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 2359 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 2360 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2361 [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2362 }, 2363 }, 2364 
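/* The non-designated inner initializers in the rows above populate
 * index 0 of the signalling dimension, i.e. BNXT_SIG_MODE_NRZ; explicit
 * [BNXT_SIG_MODE_PAM4] and [BNXT_SIG_MODE_PAM4_112] rows only appear for
 * the 50GB and faster entries further down.
 */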
[BNXT_LINK_SPEED_25GB_IDX] = { 2365 { 2366 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2367 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2368 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2369 }, 2370 }, 2371 [BNXT_LINK_SPEED_40GB_IDX] = { 2372 { 2373 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2374 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2375 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2376 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2377 }, 2378 }, 2379 [BNXT_LINK_SPEED_50GB_IDX] = { 2380 [BNXT_SIG_MODE_NRZ] = { 2381 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2382 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2383 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2384 }, 2385 [BNXT_SIG_MODE_PAM4] = { 2386 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, 2387 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, 2388 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, 2389 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, 2390 }, 2391 }, 2392 [BNXT_LINK_SPEED_100GB_IDX] = { 2393 [BNXT_SIG_MODE_NRZ] = { 2394 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2395 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2396 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2397 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2398 }, 2399 [BNXT_SIG_MODE_PAM4] = { 2400 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, 2401 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, 2402 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, 2403 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, 2404 }, 2405 [BNXT_SIG_MODE_PAM4_112] = { 2406 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, 2407 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, 2408 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, 2409 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT, 2410 }, 2411 }, 2412 [BNXT_LINK_SPEED_200GB_IDX] = { 2413 [BNXT_SIG_MODE_PAM4] = { 2414 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, 2415 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, 2416 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, 2417 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, 2418 }, 2419 [BNXT_SIG_MODE_PAM4_112] = { 2420 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, 2421 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, 2422 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, 2423 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT, 2424 }, 2425 }, 2426 [BNXT_LINK_SPEED_400GB_IDX] = { 2427 [BNXT_SIG_MODE_PAM4] = { 2428 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, 2429 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, 2430 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, 2431 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT, 2432 }, 2433 [BNXT_SIG_MODE_PAM4_112] = { 2434 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, 2435 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, 2436 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, 2437 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT, 2438 }, 2439 }, 2440 }; 2441 2442 #define BNXT_LINK_MODE_UNKNOWN -1 2443 2444 static enum 
ethtool_link_mode_bit_indices 2445 bnxt_get_link_mode(struct bnxt_link_info *link_info) 2446 { 2447 enum ethtool_link_mode_bit_indices link_mode; 2448 enum bnxt_link_speed_indices speed; 2449 enum bnxt_media_type media; 2450 u8 sig_mode; 2451 2452 if (link_info->phy_link_status != BNXT_LINK_LINK) 2453 return BNXT_LINK_MODE_UNKNOWN; 2454 2455 media = bnxt_get_media(link_info); 2456 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 2457 speed = bnxt_fw_speed_idx(link_info->link_speed); 2458 sig_mode = link_info->active_fec_sig_mode & 2459 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; 2460 } else { 2461 speed = bnxt_fw_speed_idx(link_info->req_link_speed); 2462 sig_mode = link_info->req_signal_mode; 2463 } 2464 if (sig_mode >= BNXT_SIG_MODE_MAX) 2465 return BNXT_LINK_MODE_UNKNOWN; 2466 2467 /* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux 2468 * link mode, but since no such devices exist, the zeroes in the 2469 * map can be conveniently used to represent unknown link modes. 2470 */ 2471 link_mode = bnxt_link_modes[speed][sig_mode][media]; 2472 if (!link_mode) 2473 return BNXT_LINK_MODE_UNKNOWN; 2474 2475 switch (link_mode) { 2476 case ETHTOOL_LINK_MODE_100baseT_Full_BIT: 2477 if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL) 2478 link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT; 2479 break; 2480 case ETHTOOL_LINK_MODE_1000baseT_Full_BIT: 2481 if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL) 2482 link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT; 2483 break; 2484 default: 2485 break; 2486 } 2487 2488 return link_mode; 2489 } 2490 2491 static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info, 2492 struct ethtool_link_ksettings *lk_ksettings) 2493 { 2494 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2495 2496 if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) { 2497 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 2498 lk_ksettings->link_modes.supported); 2499 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, 2500 lk_ksettings->link_modes.supported); 2501 } 2502 2503 if (link_info->support_auto_speeds || link_info->support_auto_speeds2 || 2504 link_info->support_pam4_auto_speeds) 2505 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 2506 lk_ksettings->link_modes.supported); 2507 2508 if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 2509 return; 2510 2511 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) 2512 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 2513 lk_ksettings->link_modes.advertising); 2514 if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1) 2515 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, 2516 lk_ksettings->link_modes.advertising); 2517 if (link_info->lp_pause & BNXT_LINK_PAUSE_RX) 2518 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 2519 lk_ksettings->link_modes.lp_advertising); 2520 if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1) 2521 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, 2522 lk_ksettings->link_modes.lp_advertising); 2523 } 2524 2525 static const u16 bnxt_nrz_speed_masks[] = { 2526 [BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB, 2527 [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB, 2528 [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB, 2529 [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB, 2530 [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB, 2531 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB, 2532 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB, 2533 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */ 2534 }; 2535 2536 static const u16 
bnxt_pam4_speed_masks[] = { 2537 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB, 2538 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB, 2539 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB, 2540 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */ 2541 }; 2542 2543 static const u16 bnxt_nrz_speeds2_masks[] = { 2544 [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB, 2545 [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB, 2546 [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB, 2547 [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB, 2548 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB, 2549 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB, 2550 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */ 2551 }; 2552 2553 static const u16 bnxt_pam4_speeds2_masks[] = { 2554 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4, 2555 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4, 2556 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4, 2557 [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4, 2558 }; 2559 2560 static const u16 bnxt_pam4_112_speeds2_masks[] = { 2561 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112, 2562 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112, 2563 [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112, 2564 }; 2565 2566 static enum bnxt_link_speed_indices 2567 bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk) 2568 { 2569 const u16 *speeds; 2570 int idx, len; 2571 2572 switch (sig_mode) { 2573 case BNXT_SIG_MODE_NRZ: 2574 if (phy_flags & BNXT_PHY_FL_SPEEDS2) { 2575 speeds = bnxt_nrz_speeds2_masks; 2576 len = ARRAY_SIZE(bnxt_nrz_speeds2_masks); 2577 } else { 2578 speeds = bnxt_nrz_speed_masks; 2579 len = ARRAY_SIZE(bnxt_nrz_speed_masks); 2580 } 2581 break; 2582 case BNXT_SIG_MODE_PAM4: 2583 if (phy_flags & BNXT_PHY_FL_SPEEDS2) { 2584 speeds = bnxt_pam4_speeds2_masks; 2585 len = ARRAY_SIZE(bnxt_pam4_speeds2_masks); 2586 } else { 2587 speeds = bnxt_pam4_speed_masks; 2588 len = ARRAY_SIZE(bnxt_pam4_speed_masks); 2589 } 2590 break; 2591 case BNXT_SIG_MODE_PAM4_112: 2592 speeds = bnxt_pam4_112_speeds2_masks; 2593 len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks); 2594 break; 2595 default: 2596 return BNXT_LINK_SPEED_UNKNOWN; 2597 } 2598 2599 for (idx = 0; idx < len; idx++) { 2600 if (speeds[idx] == speed_msk) 2601 return idx; 2602 } 2603 2604 return BNXT_LINK_SPEED_UNKNOWN; 2605 } 2606 2607 #define BNXT_FW_SPEED_MSK_BITS 16 2608 2609 static void 2610 __bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media, 2611 u8 sig_mode, u16 phy_flags, unsigned long *et_mask) 2612 { 2613 enum ethtool_link_mode_bit_indices link_mode; 2614 enum bnxt_link_speed_indices speed; 2615 u8 bit; 2616 2617 for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) { 2618 speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit); 2619 if (!speed) 2620 continue; 2621 2622 link_mode = bnxt_link_modes[speed][sig_mode][media]; 2623 if (!link_mode) 2624 continue; 2625 2626 linkmode_set_bit(link_mode, et_mask); 2627 } 2628 } 2629 2630 static void 2631 bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media, 2632 u8 sig_mode, u16 phy_flags, unsigned long *et_mask) 2633 { 2634 if (media) { 2635 __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags, 2636 et_mask); 2637 return; 2638 } 2639 2640 /* list speeds for all media if unknown */ 2641 for (media 
= 1; media < __BNXT_MEDIA_END; media++) 2642 __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags, 2643 et_mask); 2644 } 2645 2646 static void 2647 bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info, 2648 enum bnxt_media_type media, 2649 struct ethtool_link_ksettings *lk_ksettings) 2650 { 2651 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2652 u16 sp_nrz, sp_pam4, sp_pam4_112 = 0; 2653 u16 phy_flags = bp->phy_flags; 2654 2655 if (phy_flags & BNXT_PHY_FL_SPEEDS2) { 2656 sp_nrz = link_info->support_speeds2; 2657 sp_pam4 = link_info->support_speeds2; 2658 sp_pam4_112 = link_info->support_speeds2; 2659 } else { 2660 sp_nrz = link_info->support_speeds; 2661 sp_pam4 = link_info->support_pam4_speeds; 2662 } 2663 bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags, 2664 lk_ksettings->link_modes.supported); 2665 bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags, 2666 lk_ksettings->link_modes.supported); 2667 bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112, 2668 phy_flags, lk_ksettings->link_modes.supported); 2669 } 2670 2671 static void 2672 bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info, 2673 enum bnxt_media_type media, 2674 struct ethtool_link_ksettings *lk_ksettings) 2675 { 2676 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2677 u16 sp_nrz, sp_pam4, sp_pam4_112 = 0; 2678 u16 phy_flags = bp->phy_flags; 2679 2680 sp_nrz = link_info->advertising; 2681 if (phy_flags & BNXT_PHY_FL_SPEEDS2) { 2682 sp_pam4 = link_info->advertising; 2683 sp_pam4_112 = link_info->advertising; 2684 } else { 2685 sp_pam4 = link_info->advertising_pam4; 2686 } 2687 bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags, 2688 lk_ksettings->link_modes.advertising); 2689 bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags, 2690 lk_ksettings->link_modes.advertising); 2691 bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112, 2692 phy_flags, lk_ksettings->link_modes.advertising); 2693 } 2694 2695 static void 2696 bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info, 2697 enum bnxt_media_type media, 2698 struct ethtool_link_ksettings *lk_ksettings) 2699 { 2700 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2701 u16 phy_flags = bp->phy_flags; 2702 2703 bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media, 2704 BNXT_SIG_MODE_NRZ, phy_flags, 2705 lk_ksettings->link_modes.lp_advertising); 2706 bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media, 2707 BNXT_SIG_MODE_PAM4, phy_flags, 2708 lk_ksettings->link_modes.lp_advertising); 2709 } 2710 2711 static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds, 2712 u16 speed_msk, const unsigned long *et_mask, 2713 enum ethtool_link_mode_bit_indices mode) 2714 { 2715 bool mode_desired = linkmode_test_bit(mode, et_mask); 2716 2717 if (!mode) 2718 return; 2719 2720 /* enabled speeds for installed media should override */ 2721 if (installed_media && mode_desired) { 2722 *speeds |= speed_msk; 2723 *delta |= speed_msk; 2724 return; 2725 } 2726 2727 /* many to one mapping, only allow one change per fw_speed bit */ 2728 if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) { 2729 *speeds ^= speed_msk; 2730 *delta |= speed_msk; 2731 } 2732 } 2733 2734 static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info, 2735 const unsigned long *et_mask) 2736 { 2737 struct bnxt *bp = container_of(link_info, struct bnxt, 
link_info); 2738 u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks; 2739 enum bnxt_media_type media = bnxt_get_media(link_info); 2740 u16 *adv, *adv_pam4, *adv_pam4_112 = NULL; 2741 u32 delta_pam4_112 = 0; 2742 u32 delta_pam4 = 0; 2743 u32 delta_nrz = 0; 2744 int i, m; 2745 2746 adv = &link_info->advertising; 2747 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2748 adv_pam4 = &link_info->advertising; 2749 adv_pam4_112 = &link_info->advertising; 2750 sp_msks = bnxt_nrz_speeds2_masks; 2751 sp_pam4_msks = bnxt_pam4_speeds2_masks; 2752 sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks; 2753 } else { 2754 adv_pam4 = &link_info->advertising_pam4; 2755 sp_msks = bnxt_nrz_speed_masks; 2756 sp_pam4_msks = bnxt_pam4_speed_masks; 2757 } 2758 for (i = 1; i < __BNXT_LINK_SPEED_END; i++) { 2759 /* accept any legal media from user */ 2760 for (m = 1; m < __BNXT_MEDIA_END; m++) { 2761 bnxt_update_speed(&delta_nrz, m == media, 2762 adv, sp_msks[i], et_mask, 2763 bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]); 2764 bnxt_update_speed(&delta_pam4, m == media, 2765 adv_pam4, sp_pam4_msks[i], et_mask, 2766 bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]); 2767 if (!adv_pam4_112) 2768 continue; 2769 2770 bnxt_update_speed(&delta_pam4_112, m == media, 2771 adv_pam4_112, sp_pam4_112_msks[i], et_mask, 2772 bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]); 2773 } 2774 } 2775 } 2776 2777 static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info, 2778 struct ethtool_link_ksettings *lk_ksettings) 2779 { 2780 u16 fec_cfg = link_info->fec_cfg; 2781 2782 if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) { 2783 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 2784 lk_ksettings->link_modes.advertising); 2785 return; 2786 } 2787 if (fec_cfg & BNXT_FEC_ENC_BASE_R) 2788 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 2789 lk_ksettings->link_modes.advertising); 2790 if (fec_cfg & BNXT_FEC_ENC_RS) 2791 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 2792 lk_ksettings->link_modes.advertising); 2793 if (fec_cfg & BNXT_FEC_ENC_LLRS) 2794 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, 2795 lk_ksettings->link_modes.advertising); 2796 } 2797 2798 static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info, 2799 struct ethtool_link_ksettings *lk_ksettings) 2800 { 2801 u16 fec_cfg = link_info->fec_cfg; 2802 2803 if (fec_cfg & BNXT_FEC_NONE) { 2804 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 2805 lk_ksettings->link_modes.supported); 2806 return; 2807 } 2808 if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP) 2809 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 2810 lk_ksettings->link_modes.supported); 2811 if (fec_cfg & BNXT_FEC_ENC_RS_CAP) 2812 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 2813 lk_ksettings->link_modes.supported); 2814 if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP) 2815 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, 2816 lk_ksettings->link_modes.supported); 2817 } 2818 2819 u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) 2820 { 2821 switch (fw_link_speed) { 2822 case BNXT_LINK_SPEED_100MB: 2823 return SPEED_100; 2824 case BNXT_LINK_SPEED_1GB: 2825 return SPEED_1000; 2826 case BNXT_LINK_SPEED_2_5GB: 2827 return SPEED_2500; 2828 case BNXT_LINK_SPEED_10GB: 2829 return SPEED_10000; 2830 case BNXT_LINK_SPEED_20GB: 2831 return SPEED_20000; 2832 case BNXT_LINK_SPEED_25GB: 2833 return SPEED_25000; 2834 case BNXT_LINK_SPEED_40GB: 2835 return SPEED_40000; 2836 case BNXT_LINK_SPEED_50GB: 2837 case BNXT_LINK_SPEED_50GB_PAM4: 2838 return SPEED_50000; 2839 case BNXT_LINK_SPEED_100GB: 2840 case 
BNXT_LINK_SPEED_100GB_PAM4: 2841 case BNXT_LINK_SPEED_100GB_PAM4_112: 2842 return SPEED_100000; 2843 case BNXT_LINK_SPEED_200GB: 2844 case BNXT_LINK_SPEED_200GB_PAM4: 2845 case BNXT_LINK_SPEED_200GB_PAM4_112: 2846 return SPEED_200000; 2847 case BNXT_LINK_SPEED_400GB: 2848 case BNXT_LINK_SPEED_400GB_PAM4: 2849 case BNXT_LINK_SPEED_400GB_PAM4_112: 2850 return SPEED_400000; 2851 default: 2852 return SPEED_UNKNOWN; 2853 } 2854 } 2855 2856 static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings, 2857 struct bnxt_link_info *link_info) 2858 { 2859 struct ethtool_link_settings *base = &lk_ksettings->base; 2860 2861 if (link_info->link_state == BNXT_LINK_STATE_UP) { 2862 base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); 2863 base->duplex = DUPLEX_HALF; 2864 if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) 2865 base->duplex = DUPLEX_FULL; 2866 lk_ksettings->lanes = link_info->active_lanes; 2867 } else if (!link_info->autoneg) { 2868 base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed); 2869 base->duplex = DUPLEX_HALF; 2870 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) 2871 base->duplex = DUPLEX_FULL; 2872 } 2873 } 2874 2875 static int bnxt_get_link_ksettings(struct net_device *dev, 2876 struct ethtool_link_ksettings *lk_ksettings) 2877 { 2878 struct ethtool_link_settings *base = &lk_ksettings->base; 2879 enum ethtool_link_mode_bit_indices link_mode; 2880 struct bnxt *bp = netdev_priv(dev); 2881 struct bnxt_link_info *link_info; 2882 enum bnxt_media_type media; 2883 2884 ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising); 2885 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); 2886 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); 2887 base->duplex = DUPLEX_UNKNOWN; 2888 base->speed = SPEED_UNKNOWN; 2889 link_info = &bp->link_info; 2890 2891 mutex_lock(&bp->link_lock); 2892 bnxt_get_ethtool_modes(link_info, lk_ksettings); 2893 media = bnxt_get_media(link_info); 2894 bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings); 2895 bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings); 2896 link_mode = bnxt_get_link_mode(link_info); 2897 if (link_mode != BNXT_LINK_MODE_UNKNOWN) 2898 ethtool_params_from_link_mode(lk_ksettings, link_mode); 2899 else 2900 bnxt_get_default_speeds(lk_ksettings, link_info); 2901 2902 if (link_info->autoneg) { 2903 bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings); 2904 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 2905 lk_ksettings->link_modes.advertising); 2906 base->autoneg = AUTONEG_ENABLE; 2907 bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings); 2908 if (link_info->phy_link_status == BNXT_LINK_LINK) 2909 bnxt_get_all_ethtool_lp_speeds(link_info, media, 2910 lk_ksettings); 2911 } else { 2912 base->autoneg = AUTONEG_DISABLE; 2913 } 2914 2915 base->port = PORT_NONE; 2916 if (media == BNXT_MEDIA_TP) { 2917 base->port = PORT_TP; 2918 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, 2919 lk_ksettings->link_modes.supported); 2920 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, 2921 lk_ksettings->link_modes.advertising); 2922 } else if (media == BNXT_MEDIA_KR) { 2923 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, 2924 lk_ksettings->link_modes.supported); 2925 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, 2926 lk_ksettings->link_modes.advertising); 2927 } else { 2928 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, 2929 lk_ksettings->link_modes.supported); 2930 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, 2931 lk_ksettings->link_modes.advertising); 2932 
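/* Media that is neither twisted pair nor backplane is reported as
		 * fibre; direct-attach copper (CR) is additionally
		 * distinguished from optical modules via the port type
		 * selected below.
		 */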
2933 if (media == BNXT_MEDIA_CR) 2934 base->port = PORT_DA; 2935 else 2936 base->port = PORT_FIBRE; 2937 } 2938 base->phy_address = link_info->phy_addr; 2939 mutex_unlock(&bp->link_lock); 2940 2941 return 0; 2942 } 2943 2944 static int 2945 bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes) 2946 { 2947 struct bnxt *bp = netdev_priv(dev); 2948 struct bnxt_link_info *link_info = &bp->link_info; 2949 u16 support_pam4_spds = link_info->support_pam4_speeds; 2950 u16 support_spds2 = link_info->support_speeds2; 2951 u16 support_spds = link_info->support_speeds; 2952 u8 sig_mode = BNXT_SIG_MODE_NRZ; 2953 u32 lanes_needed = 1; 2954 u16 fw_speed = 0; 2955 2956 switch (ethtool_speed) { 2957 case SPEED_100: 2958 if (support_spds & BNXT_LINK_SPEED_MSK_100MB) 2959 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB; 2960 break; 2961 case SPEED_1000: 2962 if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) || 2963 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB)) 2964 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; 2965 break; 2966 case SPEED_2500: 2967 if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB) 2968 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB; 2969 break; 2970 case SPEED_10000: 2971 if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) || 2972 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB)) 2973 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; 2974 break; 2975 case SPEED_20000: 2976 if (support_spds & BNXT_LINK_SPEED_MSK_20GB) { 2977 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB; 2978 lanes_needed = 2; 2979 } 2980 break; 2981 case SPEED_25000: 2982 if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) || 2983 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB)) 2984 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; 2985 break; 2986 case SPEED_40000: 2987 if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) || 2988 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) { 2989 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; 2990 lanes_needed = 4; 2991 } 2992 break; 2993 case SPEED_50000: 2994 if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) || 2995 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) && 2996 lanes != 1) { 2997 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; 2998 lanes_needed = 2; 2999 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) { 3000 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB; 3001 sig_mode = BNXT_SIG_MODE_PAM4; 3002 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) { 3003 fw_speed = BNXT_LINK_SPEED_50GB_PAM4; 3004 sig_mode = BNXT_SIG_MODE_PAM4; 3005 } 3006 break; 3007 case SPEED_100000: 3008 if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) || 3009 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) && 3010 lanes != 2 && lanes != 1) { 3011 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB; 3012 lanes_needed = 4; 3013 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) { 3014 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB; 3015 sig_mode = BNXT_SIG_MODE_PAM4; 3016 lanes_needed = 2; 3017 } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) && 3018 lanes != 1) { 3019 fw_speed = BNXT_LINK_SPEED_100GB_PAM4; 3020 sig_mode = BNXT_SIG_MODE_PAM4; 3021 lanes_needed = 2; 3022 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) { 3023 fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112; 3024 sig_mode = BNXT_SIG_MODE_PAM4_112; 3025 } 3026 break; 3027 case SPEED_200000: 3028 if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) { 3029 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB; 3030 sig_mode = BNXT_SIG_MODE_PAM4; 3031 lanes_needed = 
4; 3032 } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) && 3033 lanes != 2) { 3034 fw_speed = BNXT_LINK_SPEED_200GB_PAM4; 3035 sig_mode = BNXT_SIG_MODE_PAM4; 3036 lanes_needed = 4; 3037 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) { 3038 fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112; 3039 sig_mode = BNXT_SIG_MODE_PAM4_112; 3040 lanes_needed = 2; 3041 } 3042 break; 3043 case SPEED_400000: 3044 if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) && 3045 lanes != 4) { 3046 fw_speed = BNXT_LINK_SPEED_400GB_PAM4; 3047 sig_mode = BNXT_SIG_MODE_PAM4; 3048 lanes_needed = 8; 3049 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) { 3050 fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112; 3051 sig_mode = BNXT_SIG_MODE_PAM4_112; 3052 lanes_needed = 4; 3053 } 3054 break; 3055 } 3056 3057 if (!fw_speed) { 3058 netdev_err(dev, "unsupported speed!\n"); 3059 return -EINVAL; 3060 } 3061 3062 if (lanes && lanes != lanes_needed) { 3063 netdev_err(dev, "unsupported number of lanes for speed\n"); 3064 return -EINVAL; 3065 } 3066 3067 if (link_info->req_link_speed == fw_speed && 3068 link_info->req_signal_mode == sig_mode && 3069 link_info->autoneg == 0) 3070 return -EALREADY; 3071 3072 link_info->req_link_speed = fw_speed; 3073 link_info->req_signal_mode = sig_mode; 3074 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; 3075 link_info->autoneg = 0; 3076 link_info->advertising = 0; 3077 link_info->advertising_pam4 = 0; 3078 3079 return 0; 3080 } 3081 3082 u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode) 3083 { 3084 u16 fw_speed_mask = 0; 3085 3086 if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) || 3087 linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode)) 3088 fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB; 3089 3090 if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) || 3091 linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode)) 3092 fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB; 3093 3094 if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode)) 3095 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB; 3096 3097 if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode)) 3098 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB; 3099 3100 return fw_speed_mask; 3101 } 3102 3103 static int bnxt_set_link_ksettings(struct net_device *dev, 3104 const struct ethtool_link_ksettings *lk_ksettings) 3105 { 3106 struct bnxt *bp = netdev_priv(dev); 3107 struct bnxt_link_info *link_info = &bp->link_info; 3108 const struct ethtool_link_settings *base = &lk_ksettings->base; 3109 bool set_pause = false; 3110 u32 speed, lanes = 0; 3111 int rc = 0; 3112 3113 if (!BNXT_PHY_CFG_ABLE(bp)) 3114 return -EOPNOTSUPP; 3115 3116 mutex_lock(&bp->link_lock); 3117 if (base->autoneg == AUTONEG_ENABLE) { 3118 bnxt_set_ethtool_speeds(link_info, 3119 lk_ksettings->link_modes.advertising); 3120 link_info->autoneg |= BNXT_AUTONEG_SPEED; 3121 if (!link_info->advertising && !link_info->advertising_pam4) { 3122 link_info->advertising = link_info->support_auto_speeds; 3123 link_info->advertising_pam4 = 3124 link_info->support_pam4_auto_speeds; 3125 } 3126 /* any change to autoneg will cause link change, therefore the 3127 * driver should put back the original pause setting in autoneg 3128 */ 3129 if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) 3130 set_pause = true; 3131 } else { 3132 u8 phy_type = link_info->phy_type; 3133 3134 if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET || 3135 phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE || 3136 link_info->media_type == 
PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { 3137 netdev_err(dev, "10GBase-T devices must autoneg\n"); 3138 rc = -EINVAL; 3139 goto set_setting_exit; 3140 } 3141 if (base->duplex == DUPLEX_HALF) { 3142 netdev_err(dev, "HALF DUPLEX is not supported!\n"); 3143 rc = -EINVAL; 3144 goto set_setting_exit; 3145 } 3146 speed = base->speed; 3147 lanes = lk_ksettings->lanes; 3148 rc = bnxt_force_link_speed(dev, speed, lanes); 3149 if (rc) { 3150 if (rc == -EALREADY) 3151 rc = 0; 3152 goto set_setting_exit; 3153 } 3154 } 3155 3156 if (netif_running(dev)) 3157 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); 3158 3159 set_setting_exit: 3160 mutex_unlock(&bp->link_lock); 3161 return rc; 3162 } 3163 3164 static int bnxt_get_fecparam(struct net_device *dev, 3165 struct ethtool_fecparam *fec) 3166 { 3167 struct bnxt *bp = netdev_priv(dev); 3168 struct bnxt_link_info *link_info; 3169 u8 active_fec; 3170 u16 fec_cfg; 3171 3172 link_info = &bp->link_info; 3173 fec_cfg = link_info->fec_cfg; 3174 active_fec = link_info->active_fec_sig_mode & 3175 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; 3176 if (fec_cfg & BNXT_FEC_NONE) { 3177 fec->fec = ETHTOOL_FEC_NONE; 3178 fec->active_fec = ETHTOOL_FEC_NONE; 3179 return 0; 3180 } 3181 if (fec_cfg & BNXT_FEC_AUTONEG) 3182 fec->fec |= ETHTOOL_FEC_AUTO; 3183 if (fec_cfg & BNXT_FEC_ENC_BASE_R) 3184 fec->fec |= ETHTOOL_FEC_BASER; 3185 if (fec_cfg & BNXT_FEC_ENC_RS) 3186 fec->fec |= ETHTOOL_FEC_RS; 3187 if (fec_cfg & BNXT_FEC_ENC_LLRS) 3188 fec->fec |= ETHTOOL_FEC_LLRS; 3189 3190 switch (active_fec) { 3191 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: 3192 fec->active_fec |= ETHTOOL_FEC_BASER; 3193 break; 3194 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: 3195 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: 3196 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: 3197 fec->active_fec |= ETHTOOL_FEC_RS; 3198 break; 3199 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: 3200 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: 3201 fec->active_fec |= ETHTOOL_FEC_LLRS; 3202 break; 3203 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: 3204 fec->active_fec |= ETHTOOL_FEC_OFF; 3205 break; 3206 } 3207 return 0; 3208 } 3209 3210 static void bnxt_get_fec_stats(struct net_device *dev, 3211 struct ethtool_fec_stats *fec_stats) 3212 { 3213 struct bnxt *bp = netdev_priv(dev); 3214 u64 *rx; 3215 3216 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 3217 return; 3218 3219 rx = bp->rx_port_stats_ext.sw_stats; 3220 fec_stats->corrected_bits.total = 3221 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits)); 3222 3223 if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY) 3224 return; 3225 3226 fec_stats->corrected_blocks.total = 3227 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks)); 3228 fec_stats->uncorrectable_blocks.total = 3229 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks)); 3230 } 3231 3232 static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info, 3233 u32 fec) 3234 { 3235 u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE; 3236 3237 if (fec & ETHTOOL_FEC_BASER) 3238 fw_fec |= BNXT_FEC_BASE_R_ON(link_info); 3239 else if (fec & ETHTOOL_FEC_RS) 3240 fw_fec |= BNXT_FEC_RS_ON(link_info); 3241 else if (fec & ETHTOOL_FEC_LLRS) 3242 fw_fec |= BNXT_FEC_LLRS_ON; 3243 return fw_fec; 3244 } 3245 3246 static int bnxt_set_fecparam(struct net_device *dev, 3247 struct ethtool_fecparam *fecparam) 3248 { 3249 struct hwrm_port_phy_cfg_input *req; 3250 struct bnxt *bp = netdev_priv(dev); 3251 struct 
bnxt_link_info *link_info; 3252 u32 new_cfg, fec = fecparam->fec; 3253 u16 fec_cfg; 3254 int rc; 3255 3256 link_info = &bp->link_info; 3257 fec_cfg = link_info->fec_cfg; 3258 if (fec_cfg & BNXT_FEC_NONE) 3259 return -EOPNOTSUPP; 3260 3261 if (fec & ETHTOOL_FEC_OFF) { 3262 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE | 3263 BNXT_FEC_ALL_OFF(link_info); 3264 goto apply_fec; 3265 } 3266 if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) || 3267 ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) || 3268 ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) || 3269 ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP))) 3270 return -EINVAL; 3271 3272 if (fec & ETHTOOL_FEC_AUTO) { 3273 if (!link_info->autoneg) 3274 return -EINVAL; 3275 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE; 3276 } else { 3277 new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec); 3278 } 3279 3280 apply_fec: 3281 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 3282 if (rc) 3283 return rc; 3284 req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 3285 rc = hwrm_req_send(bp, req); 3286 /* update current settings */ 3287 if (!rc) { 3288 mutex_lock(&bp->link_lock); 3289 bnxt_update_link(bp, false); 3290 mutex_unlock(&bp->link_lock); 3291 } 3292 return rc; 3293 } 3294 3295 static void bnxt_get_pauseparam(struct net_device *dev, 3296 struct ethtool_pauseparam *epause) 3297 { 3298 struct bnxt *bp = netdev_priv(dev); 3299 struct bnxt_link_info *link_info = &bp->link_info; 3300 3301 if (BNXT_VF(bp)) 3302 return; 3303 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL); 3304 epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX); 3305 epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX); 3306 } 3307 3308 static void bnxt_get_pause_stats(struct net_device *dev, 3309 struct ethtool_pause_stats *epstat) 3310 { 3311 struct bnxt *bp = netdev_priv(dev); 3312 u64 *rx, *tx; 3313 3314 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 3315 return; 3316 3317 rx = bp->port_stats.sw_stats; 3318 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 3319 3320 epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames); 3321 epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames); 3322 } 3323 3324 static int bnxt_set_pauseparam(struct net_device *dev, 3325 struct ethtool_pauseparam *epause) 3326 { 3327 int rc = 0; 3328 struct bnxt *bp = netdev_priv(dev); 3329 struct bnxt_link_info *link_info = &bp->link_info; 3330 3331 if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) 3332 return -EOPNOTSUPP; 3333 3334 mutex_lock(&bp->link_lock); 3335 if (epause->autoneg) { 3336 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 3337 rc = -EINVAL; 3338 goto pause_exit; 3339 } 3340 3341 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 3342 link_info->req_flow_ctrl = 0; 3343 } else { 3344 /* when transitioning from auto pause to forced pause, 3345 * force a link change 3346 */ 3347 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 3348 link_info->force_link_chng = true; 3349 link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL; 3350 link_info->req_flow_ctrl = 0; 3351 } 3352 if (epause->rx_pause) 3353 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX; 3354 3355 if (epause->tx_pause) 3356 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; 3357 3358 if (netif_running(dev)) 3359 rc = bnxt_hwrm_set_pause(bp); 3360 3361 pause_exit: 3362 mutex_unlock(&bp->link_lock); 3363 return rc; 3364 } 3365 3366
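/* Usage note for the pause handlers above (illustrative command, not part
 * of this file): a request such as
 *
 *	ethtool -A <iface> autoneg on rx on tx on
 *
 * lands in bnxt_set_pauseparam(), which latches the requested RX/TX bits
 * into req_flow_ctrl and, if the device is up, pushes the new setting to
 * firmware through bnxt_hwrm_set_pause().
 */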
static u32 bnxt_get_link(struct net_device *dev) 3367 { 3368 struct bnxt *bp = netdev_priv(dev); 3369 3370 /* TODO: handle MF, VF, driver close case */ 3371 return BNXT_LINK_IS_UP(bp); 3372 } 3373 3374 int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp, 3375 struct hwrm_nvm_get_dev_info_output *nvm_dev_info) 3376 { 3377 struct hwrm_nvm_get_dev_info_output *resp; 3378 struct hwrm_nvm_get_dev_info_input *req; 3379 int rc; 3380 3381 if (BNXT_VF(bp)) 3382 return -EOPNOTSUPP; 3383 3384 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO); 3385 if (rc) 3386 return rc; 3387 3388 resp = hwrm_req_hold(bp, req); 3389 rc = hwrm_req_send(bp, req); 3390 if (!rc) 3391 memcpy(nvm_dev_info, resp, sizeof(*resp)); 3392 hwrm_req_drop(bp, req); 3393 return rc; 3394 } 3395 3396 static void bnxt_print_admin_err(struct bnxt *bp) 3397 { 3398 netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n"); 3399 } 3400 3401 int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, 3402 u16 ext, u16 *index, u32 *item_length, 3403 u32 *data_length); 3404 3405 int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, 3406 u16 dir_ordinal, u16 dir_ext, u16 dir_attr, 3407 u32 dir_item_len, const u8 *data, 3408 size_t data_len) 3409 { 3410 struct bnxt *bp = netdev_priv(dev); 3411 struct hwrm_nvm_write_input *req; 3412 int rc; 3413 3414 rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE); 3415 if (rc) 3416 return rc; 3417 3418 if (data_len && data) { 3419 dma_addr_t dma_handle; 3420 u8 *kmem; 3421 3422 kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle); 3423 if (!kmem) { 3424 hwrm_req_drop(bp, req); 3425 return -ENOMEM; 3426 } 3427 3428 req->dir_data_length = cpu_to_le32(data_len); 3429 3430 memcpy(kmem, data, data_len); 3431 req->host_src_addr = cpu_to_le64(dma_handle); 3432 } 3433 3434 hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout); 3435 req->dir_type = cpu_to_le16(dir_type); 3436 req->dir_ordinal = cpu_to_le16(dir_ordinal); 3437 req->dir_ext = cpu_to_le16(dir_ext); 3438 req->dir_attr = cpu_to_le16(dir_attr); 3439 req->dir_item_length = cpu_to_le32(dir_item_len); 3440 rc = hwrm_req_send(bp, req); 3441 3442 if (rc == -EACCES) 3443 bnxt_print_admin_err(bp); 3444 return rc; 3445 } 3446 3447 int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, 3448 u8 self_reset, u8 flags) 3449 { 3450 struct bnxt *bp = netdev_priv(dev); 3451 struct hwrm_fw_reset_input *req; 3452 int rc; 3453 3454 if (!bnxt_hwrm_reset_permitted(bp)) { 3455 netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver"); 3456 return -EPERM; 3457 } 3458 3459 rc = hwrm_req_init(bp, req, HWRM_FW_RESET); 3460 if (rc) 3461 return rc; 3462 3463 req->embedded_proc_type = proc_type; 3464 req->selfrst_status = self_reset; 3465 req->flags = flags; 3466 3467 if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) { 3468 rc = hwrm_req_send_silent(bp, req); 3469 } else { 3470 rc = hwrm_req_send(bp, req); 3471 if (rc == -EACCES) 3472 bnxt_print_admin_err(bp); 3473 } 3474 return rc; 3475 } 3476 3477 static int bnxt_firmware_reset(struct net_device *dev, 3478 enum bnxt_nvm_directory_type dir_type) 3479 { 3480 u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE; 3481 u8 proc_type, flags = 0; 3482 3483 /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */ 3484 /* (e.g. 
when firmware isn't already running) */ 3485 switch (dir_type) { 3486 case BNX_DIR_TYPE_CHIMP_PATCH: 3487 case BNX_DIR_TYPE_BOOTCODE: 3488 case BNX_DIR_TYPE_BOOTCODE_2: 3489 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT; 3490 /* Self-reset ChiMP upon next PCIe reset: */ 3491 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; 3492 break; 3493 case BNX_DIR_TYPE_APE_FW: 3494 case BNX_DIR_TYPE_APE_PATCH: 3495 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; 3496 /* Self-reset APE upon next PCIe reset: */ 3497 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; 3498 break; 3499 case BNX_DIR_TYPE_KONG_FW: 3500 case BNX_DIR_TYPE_KONG_PATCH: 3501 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL; 3502 break; 3503 case BNX_DIR_TYPE_BONO_FW: 3504 case BNX_DIR_TYPE_BONO_PATCH: 3505 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE; 3506 break; 3507 default: 3508 return -EINVAL; 3509 } 3510 3511 return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags); 3512 } 3513 3514 static int bnxt_firmware_reset_chip(struct net_device *dev) 3515 { 3516 struct bnxt *bp = netdev_priv(dev); 3517 u8 flags = 0; 3518 3519 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 3520 flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 3521 3522 return bnxt_hwrm_firmware_reset(dev, 3523 FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP, 3524 FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP, 3525 flags); 3526 } 3527 3528 static int bnxt_firmware_reset_ap(struct net_device *dev) 3529 { 3530 return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP, 3531 FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE, 3532 0); 3533 } 3534 3535 static int bnxt_flash_firmware(struct net_device *dev, 3536 u16 dir_type, 3537 const u8 *fw_data, 3538 size_t fw_size) 3539 { 3540 int rc = 0; 3541 u16 code_type; 3542 u32 stored_crc; 3543 u32 calculated_crc; 3544 struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data; 3545 3546 switch (dir_type) { 3547 case BNX_DIR_TYPE_BOOTCODE: 3548 case BNX_DIR_TYPE_BOOTCODE_2: 3549 code_type = CODE_BOOT; 3550 break; 3551 case BNX_DIR_TYPE_CHIMP_PATCH: 3552 code_type = CODE_CHIMP_PATCH; 3553 break; 3554 case BNX_DIR_TYPE_APE_FW: 3555 code_type = CODE_MCTP_PASSTHRU; 3556 break; 3557 case BNX_DIR_TYPE_APE_PATCH: 3558 code_type = CODE_APE_PATCH; 3559 break; 3560 case BNX_DIR_TYPE_KONG_FW: 3561 code_type = CODE_KONG_FW; 3562 break; 3563 case BNX_DIR_TYPE_KONG_PATCH: 3564 code_type = CODE_KONG_PATCH; 3565 break; 3566 case BNX_DIR_TYPE_BONO_FW: 3567 code_type = CODE_BONO_FW; 3568 break; 3569 case BNX_DIR_TYPE_BONO_PATCH: 3570 code_type = CODE_BONO_PATCH; 3571 break; 3572 default: 3573 netdev_err(dev, "Unsupported directory entry type: %u\n", 3574 dir_type); 3575 return -EINVAL; 3576 } 3577 if (fw_size < sizeof(struct bnxt_fw_header)) { 3578 netdev_err(dev, "Invalid firmware file size: %u\n", 3579 (unsigned int)fw_size); 3580 return -EINVAL; 3581 } 3582 if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) { 3583 netdev_err(dev, "Invalid firmware signature: %08X\n", 3584 le32_to_cpu(header->signature)); 3585 return -EINVAL; 3586 } 3587 if (header->code_type != code_type) { 3588 netdev_err(dev, "Expected firmware type: %d, read: %d\n", 3589 code_type, header->code_type); 3590 return -EINVAL; 3591 } 3592 if (header->device != DEVICE_CUMULUS_FAMILY) { 3593 netdev_err(dev, "Expected firmware device family %d, read: %d\n", 3594 DEVICE_CUMULUS_FAMILY, header->device); 3595 return -EINVAL; 3596 } 3597 /* Confirm the CRC32 checksum of the file: */ 3598 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size - 3599 
static int bnxt_flash_firmware(struct net_device *dev,
			       u16 dir_type,
			       const u8 *fw_data,
			       size_t fw_size)
{
	int rc = 0;
	u16 code_type;
	u32 stored_crc;
	u32 calculated_crc;
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	switch (dir_type) {
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_CHIMP_PATCH:
		code_type = CODE_CHIMP_PATCH;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	case BNX_DIR_TYPE_APE_PATCH:
		code_type = CODE_APE_PATCH;
		break;
	case BNX_DIR_TYPE_KONG_FW:
		code_type = CODE_KONG_FW;
		break;
	case BNX_DIR_TYPE_KONG_PATCH:
		code_type = CODE_KONG_PATCH;
		break;
	case BNX_DIR_TYPE_BONO_FW:
		code_type = CODE_BONO_FW;
		break;
	case BNX_DIR_TYPE_BONO_PATCH:
		code_type = CODE_BONO_PATCH;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
		return -EINVAL;
	}
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
		return -EINVAL;
	}
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
		return -EINVAL;
	}
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
		return -EINVAL;
	}
	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);
	if (rc == 0)	/* Firmware update successful */
		rc = bnxt_firmware_reset(dev, dir_type);

	return rc;
}

static int bnxt_flash_microcode(struct net_device *dev,
				u16 dir_type,
				const u8 *fw_data,
				size_t fw_size)
{
	struct bnxt_ucode_trailer *trailer;
	u32 calculated_crc;
	u32 stored_crc;
	int rc = 0;

	if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
						sizeof(*trailer)));
	if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
		netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
			   le32_to_cpu(trailer->sig));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->dir_type) != dir_type) {
		netdev_err(dev, "Expected microcode type: %d, read: %d\n",
			   dir_type, le16_to_cpu(trailer->dir_type));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->trailer_length) <
	    sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode trailer length: %d\n",
			   le16_to_cpu(trailer->trailer_length));
		return -EINVAL;
	}

	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev,
			   "CRC32 (%08lX) does not match calculated: %08lX\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);

	return rc;
}

static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_AVS:
	case BNX_DIR_TYPE_EXP_ROM_MBA:
	case BNX_DIR_TYPE_PCIE:
	case BNX_DIR_TYPE_TSCF_UCODE:
	case BNX_DIR_TYPE_EXT_PHY:
	case BNX_DIR_TYPE_CCM:
	case BNX_DIR_TYPE_ISCSI_BOOT:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_executable(u16 dir_type)
{
	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
	       bnxt_dir_type_is_other_exec_format(dir_type);
}

static int bnxt_flash_firmware_from_file(struct net_device *dev,
					 u16 dir_type,
					 const char *filename)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "Error %d requesting firmware file: %s\n",
			   rc, filename);
		return rc;
	}
	if (bnxt_dir_type_is_ape_bin_format(dir_type))
		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
	else if (bnxt_dir_type_is_other_exec_format(dir_type))
		rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
	else
		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
				      0, 0, 0, fw->data, fw->size);
	release_firmware(fw);
	return rc;
}

#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
#define MSG_INVALID_PKG "PKG install error : Invalid package"
#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
#define MSG_INVALID_DEV "PKG install error : Invalid device"
#define MSG_INTERNAL_ERR "PKG install error : Internal error"
#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
#define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"

static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
				    struct netlink_ext_ack *extack)
{
	switch (result) {
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
		return -EINVAL;
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
		return -ENOPKG;
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
		return -EPERM;
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
		return -EOPNOTSUPP;
	default:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
		return -EIO;
	}
}
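
/* Package install path: the package is streamed into the NVM UPDATE
 * area with batched HWRM_NVM_MODIFY requests using a DMA buffer of at
 * most BNXT_PKG_DMA_SIZE (256 KB) per chunk, after which
 * HWRM_NVM_INSTALL_UPDATE is issued to install it.  BNXT_NVM_MORE_FLAG
 * marks the chunks of a multi-chunk transfer and BNXT_NVM_LAST_FLAG is
 * OR'ed in on the final chunk.
 */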
#define BNXT_PKG_DMA_SIZE	0x40000
#define BNXT_NVM_MORE_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
#define BNXT_NVM_LAST_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))

static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
				    struct netlink_ext_ack *extack)
{
	u32 item_len;
	int rc;

	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
				  &item_len, NULL);
	if (rc) {
		BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
		return rc;
	}

	if (fw_size > item_len) {
		rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
				      BNX_DIR_ORDINAL_FIRST, 0, 1,
				      round_up(fw_size, 4096), NULL, 0);
		if (rc) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
			return rc;
		}
	}
	return 0;
}

int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
				   u32 install_type, struct netlink_ext_ack *extack)
{
	struct hwrm_nvm_install_update_input *install;
	struct hwrm_nvm_install_update_output *resp;
	struct hwrm_nvm_modify_input *modify;
	struct bnxt *bp = netdev_priv(dev);
	bool defrag_attempted = false;
	dma_addr_t dma_handle;
	u8 *kmem = NULL;
	u32 modify_len;
	u32 item_len;
	u8 cmd_err;
	u16 index;
	int rc;

	/* resize before flashing larger image than available space */
	rc = bnxt_resize_update_entry(dev, fw->size, extack);
	if (rc)
		return rc;

	bnxt_hwrm_fw_set_time(bp);

	rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
	if (rc)
		return rc;

	/* Try allocating a large DMA buffer first.  Older fw will
	 * cause excessive NVRAM erases when using small blocks.
	 */
	modify_len = roundup_pow_of_two(fw->size);
	modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
	while (1) {
		kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
		if (!kmem && modify_len > PAGE_SIZE)
			modify_len /= 2;
		else
			break;
	}
	if (!kmem) {
		hwrm_req_drop(bp, modify);
		return -ENOMEM;
	}

	rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
	if (rc) {
		hwrm_req_drop(bp, modify);
		return rc;
	}

	hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
	hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);

	hwrm_req_hold(bp, modify);
	modify->host_src_addr = cpu_to_le64(dma_handle);

	resp = hwrm_req_hold(bp, install);
	if ((install_type & 0xffff) == 0)
		install_type >>= 16;
	install->install_type = cpu_to_le32(install_type);

	do {
		u32 copied = 0, len = modify_len;

		rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
					  BNX_DIR_ORDINAL_FIRST,
					  BNX_DIR_EXT_NONE,
					  &index, &item_len, NULL);
		if (rc) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
			break;
		}
		if (fw->size > item_len) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
			rc = -EFBIG;
			break;
		}

		modify->dir_idx = cpu_to_le16(index);

		if (fw->size > modify_len)
			modify->flags = BNXT_NVM_MORE_FLAG;
		while (copied < fw->size) {
			u32 balance = fw->size - copied;

			if (balance <= modify_len) {
				len = balance;
				if (copied)
					modify->flags |= BNXT_NVM_LAST_FLAG;
			}
			memcpy(kmem, fw->data + copied, len);
			modify->len = cpu_to_le32(len);
			modify->offset = cpu_to_le32(copied);
			rc = hwrm_req_send(bp, modify);
			if (rc)
				goto pkg_abort;
			copied += len;
		}
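
		/* The whole package is now in the UPDATE area; ask the
		 * firmware to verify and install it.  Errors are decoded
		 * below and may trigger a defragment-and-retry pass.
		 */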
		rc = hwrm_req_send_silent(bp, install);
		if (!rc)
			break;

		if (defrag_attempted) {
			/* We have tried to defragment already in the previous
			 * iteration.  Return with the result for INSTALL_UPDATE
			 */
			break;
		}

		cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

		switch (cmd_err) {
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
			BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
			rc = -EALREADY;
			break;
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
			install->flags =
				cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);

			rc = hwrm_req_send_silent(bp, install);
			if (!rc)
				break;

			cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

			if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
				/* FW has cleared NVM area, driver will create
				 * UPDATE directory and try the flash again
				 */
				defrag_attempted = true;
				install->flags = 0;
				rc = bnxt_flash_nvram(bp->dev,
						      BNX_DIR_TYPE_UPDATE,
						      BNX_DIR_ORDINAL_FIRST,
						      0, 0, item_len, NULL, 0);
				if (!rc)
					break;
			}
			fallthrough;
		default:
			BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
		}
	} while (defrag_attempted && !rc);

pkg_abort:
	hwrm_req_drop(bp, modify);
	hwrm_req_drop(bp, install);

	if (resp->result) {
		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
			   (s8)resp->result, (int)resp->problem_item);
		rc = nvm_update_err_to_stderr(dev, resp->result, extack);
	}
	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}

static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
					u32 install_type, struct netlink_ext_ack *extack)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "PKG error %d requesting file: %s\n",
			   rc, filename);
		return rc;
	}

	rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);

	release_firmware(fw);

	return rc;
}

static int bnxt_flash_device(struct net_device *dev,
			     struct ethtool_flash *flash)
{
	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
		netdev_err(dev, "flashdev not supported from a virtual function\n");
		return -EINVAL;
	}

	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
	    flash->region > 0xffff)
		return bnxt_flash_package_from_file(dev, flash->data,
						    flash->region, NULL);

	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}

static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
{
	struct hwrm_nvm_get_dir_info_output *output;
	struct hwrm_nvm_get_dir_info_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
	if (rc)
		return rc;

	output = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		*entries = le32_to_cpu(output->entries);
		*length = le32_to_cpu(output->entry_length);
	}
	hwrm_req_drop(bp, req);
	return rc;
}

static int bnxt_get_eeprom_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_VF(bp))
		return 0;

	/* The -1 return value allows the entire 32-bit range of offsets to be
	 * passed via the ethtool command-line utility.
	 */
	return -1;
}

static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u32 dir_entries;
	u32 entry_length;
	u8 *buf;
	size_t buflen;
	dma_addr_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input *req;

	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	if (!dir_entries || !entry_length)
		return -EIO;

	/* Insert 2 bytes of directory info (count and size of entries) */
	if (len < 2)
		return -EINVAL;

	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	memset(data, 0xff, len);

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
	if (rc)
		return rc;

	buflen = mul_u32_u32(dir_entries, entry_length);
	buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}
	req->host_dest_addr = cpu_to_le64(dma_handle);

	hwrm_req_hold(bp, req); /* hold the slice */
	rc = hwrm_req_send(bp, req);
	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);
	hwrm_req_drop(bp, req);
	return rc;
}

int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
			u32 length, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u8 *buf;
	dma_addr_t dma_handle;
	struct hwrm_nvm_read_input *req;

	if (!length)
		return -EINVAL;

	rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
	if (rc)
		return rc;

	buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}

	req->host_dest_addr = cpu_to_le64(dma_handle);
	req->dir_idx = cpu_to_le16(index);
	req->offset = cpu_to_le32(offset);
	req->len = cpu_to_le32(length);

	hwrm_req_hold(bp, req); /* hold the slice */
	rc = hwrm_req_send(bp, req);
	if (rc == 0)
		memcpy(data, buf, length);
	hwrm_req_drop(bp, req);
	return rc;
}

int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
			 u16 ext, u16 *index, u32 *item_length,
			 u32 *data_length)
{
	struct hwrm_nvm_find_dir_entry_output *output;
	struct hwrm_nvm_find_dir_entry_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);
	if (rc)
		return rc;

	req->enables = 0;
	req->dir_idx = 0;
	req->dir_type = cpu_to_le16(type);
	req->dir_ordinal = cpu_to_le16(ordinal);
	req->dir_ext = cpu_to_le16(ext);
	req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
	output = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (rc == 0) {
		if (index)
			*index = le16_to_cpu(output->dir_idx);
		if (item_length)
			*item_length = le32_to_cpu(output->dir_item_length);
		if (data_length)
			*data_length = le32_to_cpu(output->dir_data_length);
	}
	hwrm_req_drop(bp, req);
	return rc;
}

static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
{
	char *retval = NULL;
	char *p;
	char *value;
	int field = 0;

	if (datalen < 1)
		return NULL;
	/* null-terminate the log data (removing last '\n'): */
	data[datalen - 1] = 0;
	for (p = data; *p != 0; p++) {
		field = 0;
		retval = NULL;
		while (*p != 0 && *p != '\n') {
			value = p;
			while (*p != 0 && *p != '\t' && *p != '\n')
				p++;
			if (field == desired_field)
				retval = value;
			if (*p != '\t')
				break;
			*p = 0;
			field++;
			p++;
		}
		if (*p == 0)
			break;
		*p = 0;
	}
	return retval;
}

int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
{
	struct bnxt *bp = netdev_priv(dev);
	u16 index = 0;
	char *pkgver;
	u32 pkglen;
	u8 *pkgbuf;
	int rc;

	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				  &index, NULL, &pkglen);
	if (rc)
		return rc;

	pkgbuf = kzalloc(pkglen, GFP_KERNEL);
	if (!pkgbuf) {
		dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
			pkglen);
		return -ENOMEM;
	}

	rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
	if (rc)
		goto err;

	pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
				   pkglen);
	if (pkgver && *pkgver != 0 && isdigit(*pkgver))
		strscpy(ver, pkgver, size);
	else
		rc = -ENOENT;

err:
	kfree(pkgbuf);

	return rc;
}

static void bnxt_get_pkgver(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	char buf[FW_VER_STR_LEN - 5];	/* leave room for the "/pkg " prefix */
	int len;

	if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
		len = strlen(bp->fw_ver_str);
		snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len,
			 "/pkg %s", buf);
	}
}
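
/* NVRAM access through ethtool -e uses a sparse offset encoding: offset 0
 * returns the directory listing, while any other offset selects a 1-based
 * directory index in bits 31:24 and a byte offset within that item in
 * bits 23:0 (e.g. offset 0x01000000 reads directory entry 0 from its
 * start).
 */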
static int bnxt_get_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	u32 index;
	u32 offset;

	if (eeprom->offset == 0) /* special offset value to get directory */
		return bnxt_get_nvram_directory(dev, eeprom->len, data);

	index = eeprom->offset >> 24;
	offset = eeprom->offset & 0xffffff;

	if (index == 0) {
		netdev_err(dev, "unsupported index value: %d\n", index);
		return -EINVAL;
	}

	return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
}

static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
{
	struct hwrm_nvm_erase_dir_entry_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY);
	if (rc)
		return rc;

	req->dir_idx = cpu_to_le16(index);
	return hwrm_req_send(bp, req);
}
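
/* ethtool -E encodes the operation in eeprom->magic.  The top 16 bits
 * select the directory entry type; type 0xffff selects a directory
 * operation instead, with the opcode in bits 15:8 (only erase, 0x0e, is
 * implemented) and a 1-based directory index in bits 7:0.  For erase,
 * the offset must be the bitwise complement of magic as a safety
 * interlock, e.g. magic 0xffff0e01 with offset 0x0000f1fe erases entry 1.
 * For ordinary writes, magic bits 15:0 carry dir_ext, offset bits 31:16
 * the ordinal, and offset bits 15:0 the attributes.
 */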
static int bnxt_set_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 index, dir_op;
	u16 type, ext, ordinal, attr;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "NVM write not supported from a virtual function\n");
		return -EINVAL;
	}

	type = eeprom->magic >> 16;

	if (type == 0xffff) { /* special value for directory operations */
		index = eeprom->magic & 0xff;
		dir_op = eeprom->magic >> 8;
		if (index == 0)
			return -EINVAL;
		switch (dir_op) {
		case 0x0e: /* erase */
			if (eeprom->offset != ~eeprom->magic)
				return -EINVAL;
			return bnxt_erase_nvram_directory(dev, index - 1);
		default:
			return -EINVAL;
		}
	}

	/* Create or re-write an NVM item: */
	if (bnxt_dir_type_is_executable(type))
		return -EOPNOTSUPP;
	ext = eeprom->magic & 0xffff;
	ordinal = eeprom->offset >> 16;
	attr = eeprom->offset & 0xffff;

	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data,
				eeprom->len);
}

static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
	struct bnxt *bp = netdev_priv(dev);
	struct ethtool_keee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	_bnxt_fw_to_linkmode(advertising, link_info->advertising);
	if (!edata->eee_enabled)
		goto eee_ok;

	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		netdev_warn(dev, "EEE requires autoneg\n");
		rc = -EINVAL;
		goto eee_exit;
	}
	if (edata->tx_lpi_enabled) {
		if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
				       edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
			netdev_warn(dev, "Valid LPI timer range is %d to %d microsecs\n",
				    bp->lpi_tmr_lo, bp->lpi_tmr_hi);
			rc = -EINVAL;
			goto eee_exit;
		} else if (!bp->lpi_tmr_hi) {
			edata->tx_lpi_timer = eee->tx_lpi_timer;
		}
	}
	if (linkmode_empty(edata->advertised)) {
		linkmode_and(edata->advertised, advertising, eee->supported);
	} else if (linkmode_andnot(tmp, edata->advertised, advertising)) {
		netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n");
		rc = -EINVAL;
		goto eee_exit;
	}

	linkmode_copy(eee->advertised, edata->advertised);
	eee->tx_lpi_enabled = edata->tx_lpi_enabled;
	eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
	eee->eee_enabled = edata->eee_enabled;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, false, true);

eee_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}

static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
		return -EOPNOTSUPP;

	*edata = bp->eee;
	if (!bp->eee.eee_enabled) {
		/* Preserve tx_lpi_timer so that the last value will be used
		 * by default when it is re-enabled.
		 */
		linkmode_zero(edata->advertised);
		edata->tx_lpi_enabled = 0;
	}

	if (!bp->eee.eee_active)
		linkmode_zero(edata->lp_advertised);

	return 0;
}

static int bnxt_set_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna,
			    const void *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u32 rx_copybreak;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		rx_copybreak = *(u32 *)data;
		if (rx_copybreak > BNXT_MAX_RX_COPYBREAK)
			return -ERANGE;
		if (rx_copybreak != bp->rx_copybreak) {
			if (netif_running(dev))
				return -EBUSY;
			bp->rx_copybreak = rx_copybreak;
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int bnxt_get_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna, void *data)
{
	struct bnxt *bp = netdev_priv(dev);

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = bp->rx_copybreak;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
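
/* Read module EEPROM contents over the I2C bus via firmware.  The
 * transfer is split into chunks of at most BNXT_MAX_PHY_I2C_RESP_SIZE
 * bytes, the payload limit of a single HWRM_PORT_PHY_I2C_READ response.
 */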
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
					    u16 page_number, u8 bank,
					    u16 start_addr, u16 data_length,
					    u8 *buf)
{
	struct hwrm_port_phy_i2c_read_output *output;
	struct hwrm_port_phy_i2c_read_input *req;
	int rc, byte_offset = 0;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
	if (rc)
		return rc;

	output = hwrm_req_hold(bp, req);
	req->i2c_slave_addr = i2c_addr;
	req->page_number = cpu_to_le16(page_number);
	req->port_id = cpu_to_le16(bp->pf.port_id);
	do {
		u16 xfer_size;

		xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
		data_length -= xfer_size;
		req->page_offset = cpu_to_le16(start_addr + byte_offset);
		req->data_length = xfer_size;
		req->enables =
			cpu_to_le32((start_addr + byte_offset ?
				     PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET :
				     0) |
				    (bank ?
				     PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER :
				     0));
		rc = hwrm_req_send(bp, req);
		if (!rc)
			memcpy(buf + byte_offset, output->data, xfer_size);
		byte_offset += xfer_size;
	} while (!rc && data_length > 0);
	hwrm_req_drop(bp, req);

	return rc;
}

static int bnxt_get_module_info(struct net_device *dev,
				struct ethtool_modinfo *modinfo)
{
	u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
		return -EPERM;

	/* No point in going further if phy status indicates
	 * module is not inserted or if it is powered down or
	 * if it is of type 10GBase-T
	 */
	if (bp->link_info.module_status >
	    PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
		return -EOPNOTSUPP;

	/* This feature is not supported in older firmware versions */
	if (bp->hwrm_spec_code < 0x10202)
		return -EOPNOTSUPP;

	rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0,
					      SFF_DIAG_SUPPORT_OFFSET + 1,
					      data);
	if (!rc) {
		u8 module_id = data[0];
		u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];

		switch (module_id) {
		case SFF_MODULE_ID_SFP:
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
			if (!diag_supported)
				modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP:
		case SFF_MODULE_ID_QSFP_PLUS:
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP28:
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
			break;
		default:
			rc = -EOPNOTSUPP;
			break;
		}
	}
	return rc;
}

static int bnxt_get_module_eeprom(struct net_device *dev,
				  struct ethtool_eeprom *eeprom,
				  u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u16 start = eeprom->offset, length = eeprom->len;
	int rc = 0;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
		return -EPERM;

	memset(data, 0, eeprom->len);

	/* Read A0 portion of the EEPROM */
	if (start < ETH_MODULE_SFF_8436_LEN) {
		if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
			length = ETH_MODULE_SFF_8436_LEN - start;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
						      start, length, data);
		if (rc)
			return rc;
		start += length;
		data += length;
		length = eeprom->len - length;
	}

	/* Read A2 portion of the EEPROM */
	if (length) {
		start -= ETH_MODULE_SFF_8436_LEN;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0,
						      start, length, data);
	}
	return rc;
}

static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack)
{
	if (bp->link_info.module_status <=
	    PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
		return 0;

	switch (bp->link_info.module_status) {
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
		NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
		break;
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED:
		NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted");
		break;
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT:
		NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault");
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown error");
		break;
	}
	return -EINVAL;
}

static int
bnxt_mod_eeprom_by_page_precheck(struct bnxt *bp,
				 const struct ethtool_module_eeprom *page_data,
				 struct netlink_ext_ack *extack)
{
	int rc;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Module read/write not permitted on untrusted VF");
		return -EPERM;
	}

	rc = bnxt_get_module_status(bp, extack);
	if (rc)
		return rc;

	if (bp->hwrm_spec_code < 0x10202) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware version too old");
		return -EINVAL;
	}

	if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware not capable of bank selection");
		return -EINVAL;
	}
	return 0;
}

static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
					  const struct ethtool_module_eeprom *page_data,
					  struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
	if (rc)
		return rc;

	rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
					      page_data->page, page_data->bank,
					      page_data->offset,
					      page_data->length,
					      page_data->data);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Module's eeprom read failed");
		return rc;
	}
	return page_data->length;
}
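
/* Write mirror of the chunked EEPROM read helper above: one held
 * HWRM_PORT_PHY_I2C_WRITE request is reused, with the page offset,
 * length and payload rewritten for each chunk.
 */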
static int bnxt_write_sfp_module_eeprom_info(struct bnxt *bp,
					     const struct ethtool_module_eeprom *page)
{
	struct hwrm_port_phy_i2c_write_input *req;
	int bytes_written = 0;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_WRITE);
	if (rc)
		return rc;

	hwrm_req_hold(bp, req);
	req->i2c_slave_addr = page->i2c_address << 1;
	req->page_number = cpu_to_le16(page->page);
	req->bank_number = page->bank;
	req->port_id = cpu_to_le16(bp->pf.port_id);
	req->enables = cpu_to_le32(PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET |
				   PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER);

	while (bytes_written < page->length) {
		u16 xfer_size;

		xfer_size = min_t(u16, page->length - bytes_written,
				  BNXT_MAX_PHY_I2C_RESP_SIZE);
		req->page_offset = cpu_to_le16(page->offset + bytes_written);
		req->data_length = xfer_size;
		memcpy(req->data, page->data + bytes_written, xfer_size);
		rc = hwrm_req_send(bp, req);
		if (rc)
			break;
		bytes_written += xfer_size;
	}

	hwrm_req_drop(bp, req);
	return rc;
}

static int bnxt_set_module_eeprom_by_page(struct net_device *dev,
					  const struct ethtool_module_eeprom *page_data,
					  struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
	if (rc)
		return rc;

	rc = bnxt_write_sfp_module_eeprom_info(bp, page_data);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Module's eeprom write failed");
		return rc;
	}
	return page_data->length;
}

static int bnxt_nway_reset(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
		return -EINVAL;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, true, false);

	return rc;
}

static int bnxt_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct hwrm_port_led_cfg_input *req;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_pf_info *pf = &bp->pf;
	struct bnxt_led_cfg *led_cfg;
	u8 led_state;
	__le16 duration;
	int rc, i;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	if (state == ETHTOOL_ID_ACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
		duration = cpu_to_le16(500);
	} else if (state == ETHTOOL_ID_INACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
		duration = cpu_to_le16(0);
	} else {
		return -EINVAL;
	}
	rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
	if (rc)
		return rc;

	req->port_id = cpu_to_le16(pf->port_id);
	req->num_leds = bp->num_leds;
	led_cfg = (struct bnxt_led_cfg *)&req->led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req->enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}
	return hwrm_req_send(bp, req);
}

static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
{
	struct hwrm_selftest_irq_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ);
	if (rc)
		return rc;

	req->cmpl_ring = cpu_to_le16(cmpl_ring);
	return hwrm_req_send(bp, req);
}

static int bnxt_test_irq(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
		int rc;

		rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
		if (rc)
			return rc;
	}
	return 0;
}

static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
{
	struct hwrm_port_mac_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
	if (rc)
		return rc;

	req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
	if (enable)
		req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
	else
		req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
	return hwrm_req_send(bp, req);
}

static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
{
	struct hwrm_port_phy_qcaps_output *resp;
	struct hwrm_port_phy_qcaps_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		*force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);

	hwrm_req_drop(bp, req);
	return rc;
}
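
/* Link autonegotiation typically cannot complete in loopback.  Unless
 * the PHY can negotiate in loopback mode (BNXT_PHY_FL_AN_PHY_LPBK) or a
 * speed is already forced, temporarily force one: the current speed if
 * the link is up, otherwise the lowest suitable forced speed supported
 * (10G, then 25/40/50G, with 1G as the fallback default).
 */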
static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
				    struct hwrm_port_phy_cfg_input *req)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 fw_advertising;
	u16 fw_speed;
	int rc;

	if (!link_info->autoneg ||
	    (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
		return 0;

	rc = bnxt_query_force_speeds(bp, &fw_advertising);
	if (rc)
		return rc;

	fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
	if (BNXT_LINK_IS_UP(bp))
		fw_speed = bp->link_info.link_speed;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;

	req->force_link_speed = cpu_to_le16(fw_speed);
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
				  PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_req_send(bp, req);
	req->flags = 0;
	req->force_link_speed = cpu_to_le16(0);
	return rc;
}

static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
{
	struct hwrm_port_phy_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;

	/* prevent bnxt_disable_an_for_lpbk() from consuming the request */
	hwrm_req_hold(bp, req);

	if (enable) {
		bnxt_disable_an_for_lpbk(bp, req);
		if (ext)
			req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
		else
			req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
	} else {
		req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
	}
	req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
	rc = hwrm_req_send(bp, req);
	hwrm_req_drop(bp, req);
	return rc;
}

static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    u32 raw_cons, int pkt_size)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct rx_cmp *rxcmp;
	u16 cp_cons, cons;
	u8 *data;
	u32 len;
	int i;

	rxr = bnapi->rx_ring;
	cp_cons = RING_CMP(raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data_ptr;
	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	if (len != pkt_size)
		return -EIO;
	i = ETH_ALEN;
	if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
		return -EIO;
	i += ETH_ALEN;
	for ( ; i < pkt_size; i++) {
		if (data[i] != (u8)(i & 0xff))
			return -EIO;
	}
	return 0;
}

static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			      int pkt_size)
{
	struct tx_cmp *txcmp;
	int rc = -EIO;
	u32 raw_cons;
	u32 cons;
	int i;

	raw_cons = cpr->cp_raw_cons;
	for (i = 0; i < 200; i++) {
		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons)) {
			udelay(5);
			continue;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP ||
		    TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) {
			rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
			raw_cons = NEXT_RAW_CMP(raw_cons);
			raw_cons = NEXT_RAW_CMP(raw_cons);
			break;
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
	}
	cpr->cp_raw_cons = raw_cons;
	return rc;
}
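
/* Transmit one self-addressed test frame on ring 0 and poll for the
 * looped-back copy.  The frame carries the port MAC address as both
 * destination and source, followed by an incrementing byte pattern that
 * bnxt_rx_loopback() verifies byte for byte.
 */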
static int bnxt_run_loopback(struct bnxt *bp)
{
	struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
	struct bnxt_cp_ring_info *cpr;
	int pkt_size, i = 0;
	struct sk_buff *skb;
	dma_addr_t map;
	u8 *data;
	int rc;

	cpr = &rxr->bnapi->cp_ring;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		cpr = rxr->rx_cpr;
	pkt_size = min(bp->dev->mtu + ETH_HLEN, max(BNXT_DEFAULT_RX_COPYBREAK,
						    bp->rx_copybreak));
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	data = skb_put(skb, pkt_size);
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	for ( ; i < pkt_size; i++)
		data[i] = (u8)(i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);

	/* Sync BD data before updating doorbell */
	wmb();

	bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	rc = bnxt_poll_loopback(bp, cpr, pkt_size);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
	dev_kfree_skb(skb);
	return rc;
}

static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
{
	struct hwrm_selftest_exec_output *resp;
	struct hwrm_selftest_exec_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC);
	if (rc)
		return rc;

	hwrm_req_timeout(bp, req, bp->test_info->timeout);
	req->flags = test_mask;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	*test_results = resp->test_success;
	hwrm_req_drop(bp, req);
	return rc;
}

#define BNXT_DRV_TESTS			4
#define BNXT_MACLPBK_TEST_IDX		(bp->num_tests - BNXT_DRV_TESTS)
#define BNXT_PHYLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 1)
#define BNXT_EXTLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 2)
#define BNXT_IRQ_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 3)
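
/* ethtool self-test entry point.  Results are laid out with the
 * firmware-reported tests first, followed by the BNXT_DRV_TESTS
 * driver-run tests (MAC, PHY and external loopback, then IRQ) in the
 * last four slots; a non-zero buf[] entry means that test failed.
 */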
static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
			   u64 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	bool do_ext_lpbk = false;
	bool offline = false;
	u8 test_results = 0;
	u8 test_mask = 0;
	int rc = 0, i;

	if (!bp->num_tests || !BNXT_PF(bp))
		return;

	memset(buf, 0, sizeof(u64) * bp->num_tests);
	if (etest->flags & ETH_TEST_FL_OFFLINE &&
	    bnxt_ulp_registered(bp->edev)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		netdev_warn(dev, "Offline tests cannot be run with RoCE driver loaded\n");
		return;
	}

	if (!netif_running(dev)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
	    (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
		do_ext_lpbk = true;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
			return;
		}
		offline = true;
	}

	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if (!(bp->test_info->offline_mask & bit_val))
			test_mask |= bit_val;
		else if (offline)
			test_mask |= bit_val;
	}
	if (!offline) {
		bnxt_run_fw_tests(bp, test_mask, &test_results);
	} else {
		bnxt_close_nic(bp, true, false);
		bnxt_run_fw_tests(bp, test_mask, &test_results);

		rc = bnxt_half_open_nic(bp);
		if (rc) {
			etest->flags |= ETH_TEST_FL_FAILED;
			return;
		}
		buf[BNXT_MACLPBK_TEST_IDX] = 1;
		if (bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK)
			goto skip_mac_loopback;

		bnxt_hwrm_mac_loopback(bp, true);
		msleep(250);
		if (bnxt_run_loopback(bp))
			etest->flags |= ETH_TEST_FL_FAILED;
		else
			buf[BNXT_MACLPBK_TEST_IDX] = 0;

		bnxt_hwrm_mac_loopback(bp, false);
skip_mac_loopback:
		buf[BNXT_PHYLPBK_TEST_IDX] = 1;
		if (bp->phy_flags & BNXT_PHY_FL_NO_PHY_LPBK)
			goto skip_phy_loopback;

		bnxt_hwrm_phy_loopback(bp, true, false);
		msleep(1000);
		if (bnxt_run_loopback(bp))
			etest->flags |= ETH_TEST_FL_FAILED;
		else
			buf[BNXT_PHYLPBK_TEST_IDX] = 0;
skip_phy_loopback:
		buf[BNXT_EXTLPBK_TEST_IDX] = 1;
		if (do_ext_lpbk) {
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
			bnxt_hwrm_phy_loopback(bp, true, true);
			msleep(1000);
			if (bnxt_run_loopback(bp))
				etest->flags |= ETH_TEST_FL_FAILED;
			else
				buf[BNXT_EXTLPBK_TEST_IDX] = 0;
		}
		bnxt_hwrm_phy_loopback(bp, false, false);
		bnxt_half_close_nic(bp);
		rc = bnxt_open_nic(bp, true, true);
	}
	if (rc || bnxt_test_irq(bp)) {
		buf[BNXT_IRQ_TEST_IDX] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if ((test_mask & bit_val) && !(test_results & bit_val)) {
			buf[i] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}
}

static int bnxt_reset(struct net_device *dev, u32 *flags)
{
	struct bnxt *bp = netdev_priv(dev);
	bool reload = false;
	u32 req = *flags;

	if (!req)
		return -EINVAL;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "Reset is not supported from a VF\n");
		return -EOPNOTSUPP;
	}

	if (pci_vfs_assigned(bp->pdev) &&
	    !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
		netdev_err(dev,
			   "Reset not allowed when VFs are assigned to VMs\n");
		return -EBUSY;
	}

	if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code >= 0x10803) {
			if (!bnxt_firmware_reset_chip(dev)) {
				netdev_info(dev, "Firmware reset request successful.\n");
				if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
					reload = true;
				*flags &= ~BNXT_FW_RESET_CHIP;
			}
		} else if (req == BNXT_FW_RESET_CHIP) {
			return -EOPNOTSUPP; /* only request, fail hard */
		}
	}

	if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code >= 0x10803) {
			if (!bnxt_firmware_reset_ap(dev)) {
				netdev_info(dev, "Reset application processor successful.\n");
				reload = true;
				*flags &= ~BNXT_FW_RESET_AP;
			}
		} else if (req == BNXT_FW_RESET_AP) {
			return -EOPNOTSUPP; /* only request, fail hard */
		}
	}

	if (reload)
		netdev_info(dev, "Reload driver to complete reset\n");

	return 0;
}

static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
{
	struct bnxt *bp = netdev_priv(dev);

	if (dump->flag > BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE) {
		netdev_info(dev,
			    "Supports only Live(0), Crash(1), Driver(2), Live with cached context(3) dumps.\n");
		return -EINVAL;
	}

	if (dump->flag == BNXT_DUMP_CRASH) {
		if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR &&
		    (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) {
			netdev_info(dev,
				    "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
			return -EOPNOTSUPP;
		} else if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) {
			netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n");
			return -EOPNOTSUPP;
		}
	}

	bp->dump_flag = dump->flag;
	return 0;
}

static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->hwrm_spec_code < 0x10801)
		return -EOPNOTSUPP;

	dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
			bp->ver_resp.hwrm_fw_min_8b << 16 |
			bp->ver_resp.hwrm_fw_bld_8b << 8 |
			bp->ver_resp.hwrm_fw_rsvd_8b;

	dump->flag = bp->dump_flag;
	dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);
	return 0;
}

static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
			      void *buf)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->hwrm_spec_code < 0x10801)
		return -EOPNOTSUPP;

	memset(buf, 0, dump->len);

	dump->flag = bp->dump_flag;
	return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);
}

static int bnxt_get_ts_info(struct net_device *dev,
			    struct kernel_ethtool_ts_info *info)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ptp_cfg *ptp;

	ptp = bp->ptp_cfg;
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;

	if (!ptp)
		return 0;

	info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
				 SOF_TIMESTAMPING_RX_HARDWARE |
				 SOF_TIMESTAMPING_RAW_HARDWARE;
	if (ptp->ptp_clock)
		info->phc_index = ptp_clock_index(ptp->ptp_clock);

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
		info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}

static void bnxt_hwrm_pcie_qstats(struct bnxt *bp)
{
	struct hwrm_pcie_qstats_output *resp;
	struct hwrm_pcie_qstats_input *req;

	bp->pcie_stat_len = 0;
	if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
		return;

	if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
		return;

	resp = hwrm_req_hold(bp, req);
	/* Cache the stats length only if the query succeeds */
	if (!__bnxt_hwrm_pcie_qstats(bp, req))
		bp->pcie_stat_len = min_t(u16,
					  le16_to_cpu(resp->pcie_stat_size),
					  sizeof(struct pcie_ctx_hw_stats_v2));
	hwrm_req_drop(bp, req);
}
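
/* Called when the driver (re)initializes ethtool state: caches the PCIe
 * stats size and package version, then queries HWRM_SELFTEST_QLIST for
 * the firmware self-tests and appends the driver's own loopback and IRQ
 * tests to the reported list.
 */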
void bnxt_ethtool_init(struct bnxt *bp)
{
	struct hwrm_selftest_qlist_output *resp;
	struct hwrm_selftest_qlist_input *req;
	struct bnxt_test_info *test_info;
	struct net_device *dev = bp->dev;
	int i, rc;

	bnxt_hwrm_pcie_qstats(bp);
	if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
		bnxt_get_pkgver(dev);

	bp->num_tests = 0;
	if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
		return;

	test_info = bp->test_info;
	if (!test_info) {
		test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
		if (!test_info)
			return;
		bp->test_info = test_info;
	}

	if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST))
		return;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (rc)
		goto ethtool_init_exit;

	bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
	if (bp->num_tests > BNXT_MAX_TEST)
		bp->num_tests = BNXT_MAX_TEST;

	test_info->offline_mask = resp->offline_tests;
	test_info->timeout = le16_to_cpu(resp->test_timeout);
	if (!test_info->timeout)
		test_info->timeout = HWRM_CMD_TIMEOUT;
	for (i = 0; i < bp->num_tests; i++) {
		char *str = test_info->string[i];
		char *fw_str = resp->test_name[i];

		if (i == BNXT_MACLPBK_TEST_IDX) {
			strcpy(str, "Mac loopback test (offline)");
		} else if (i == BNXT_PHYLPBK_TEST_IDX) {
			strcpy(str, "Phy loopback test (offline)");
		} else if (i == BNXT_EXTLPBK_TEST_IDX) {
			strcpy(str, "Ext loopback test (offline)");
		} else if (i == BNXT_IRQ_TEST_IDX) {
			strcpy(str, "Interrupt test (offline)");
		} else {
			snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
				 fw_str, test_info->offline_mask & (1 << i) ?
					"offline" : "online");
		}
	}

ethtool_init_exit:
	hwrm_req_drop(bp, req);
}

static void bnxt_get_eth_phy_stats(struct net_device *dev,
				   struct ethtool_eth_phy_stats *phy_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return;

	rx = bp->rx_port_stats_ext.sw_stats;
	phy_stats->SymbolErrorDuringCarrier =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err));
}

static void bnxt_get_eth_mac_stats(struct net_device *dev,
				   struct ethtool_eth_mac_stats *mac_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	mac_stats->FramesReceivedOK =
		BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames);
	mac_stats->FramesTransmittedOK =
		BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames);
	mac_stats->FrameCheckSequenceErrors =
		BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
	mac_stats->AlignmentErrors =
		BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
	mac_stats->OutOfRangeLengthField =
		BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames);
}

static void bnxt_get_eth_ctrl_stats(struct net_device *dev,
				    struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	ctrl_stats->MACControlFramesReceived =
		BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames);
}
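
/* RMON frame-size histogram buckets.  Each range maps 1:1 onto a
 * hardware frame-size counter and must stay in sync with the hist[] and
 * hist_tx[] assignments in bnxt_get_rmon_stats() below; the empty entry
 * terminates the table.
 */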
static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, 2047 },
	{ 2048, 4095 },
	{ 4096, 9216 },
	{ 9217, 16383 },
	{}
};

static void bnxt_get_rmon_stats(struct net_device *dev,
				struct ethtool_rmon_stats *rmon_stats,
				const struct ethtool_rmon_hist_range **ranges)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	rmon_stats->jabbers =
		BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
	rmon_stats->oversize_pkts =
		BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames);
	rmon_stats->undersize_pkts =
		BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames);

	rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames);
	rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames);
	rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames);
	rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames);
	rmon_stats->hist[4] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames);
	rmon_stats->hist[5] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames);
	rmon_stats->hist[6] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames);
	rmon_stats->hist[7] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames);
	rmon_stats->hist[8] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames);
	rmon_stats->hist[9] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames);

	rmon_stats->hist_tx[0] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames);
	rmon_stats->hist_tx[1] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames);
	rmon_stats->hist_tx[2] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames);
	rmon_stats->hist_tx[3] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames);
	rmon_stats->hist_tx[4] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames);
	rmon_stats->hist_tx[5] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames);
	rmon_stats->hist_tx[6] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames);
	rmon_stats->hist_tx[7] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames);
	rmon_stats->hist_tx[8] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames);
	rmon_stats->hist_tx[9] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames);

	*ranges = bnxt_rmon_ranges;
}

static void bnxt_get_ptp_stats(struct net_device *dev,
			       struct ethtool_ts_stats *ts_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

	if (ptp) {
		ts_stats->pkts = ptp->stats.ts_pkts;
		ts_stats->lost = ptp->stats.ts_lost;
		ts_stats->err = atomic64_read(&ptp->stats.ts_err);
	}
}

static void bnxt_get_link_ext_stats(struct net_device *dev,
				    struct ethtool_link_ext_stats *stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return;

	rx = bp->rx_port_stats_ext.sw_stats;
	stats->link_down_events =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events));
}

void bnxt_ethtool_free(struct bnxt *bp)
{
	kfree(bp->test_info);
	bp->test_info = NULL;
}

const struct ethtool_ops bnxt_ethtool_ops = {
	.cap_link_lanes_supported	= 1,
	.rxfh_per_ctx_key		= 1,
	.rxfh_max_num_contexts		= BNXT_MAX_ETH_RSS_CTX + 1,
	.rxfh_indir_space		= BNXT_MAX_RSS_TABLE_ENTRIES_P5,
	.rxfh_priv_size			= sizeof(struct bnxt_rss_ctx),
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
				     ETHTOOL_COALESCE_USE_CQE,
	.supported_ring_params	= ETHTOOL_RING_USE_TCP_DATA_SPLIT |
				  ETHTOOL_RING_USE_HDS_THRS,
	.get_link_ksettings	= bnxt_get_link_ksettings,
	.set_link_ksettings	= bnxt_set_link_ksettings,
	.get_fec_stats		= bnxt_get_fec_stats,
	.get_fecparam		= bnxt_get_fecparam,
	.set_fecparam		= bnxt_set_fecparam,
	.get_pause_stats	= bnxt_get_pause_stats,
	.get_pauseparam		= bnxt_get_pauseparam,
	.set_pauseparam		= bnxt_set_pauseparam,
	.get_drvinfo		= bnxt_get_drvinfo,
	.get_regs_len		= bnxt_get_regs_len,
	.get_regs		= bnxt_get_regs,
	.get_wol		= bnxt_get_wol,
	.set_wol		= bnxt_set_wol,
	.get_coalesce		= bnxt_get_coalesce,
	.set_coalesce		= bnxt_set_coalesce,
	.get_msglevel		= bnxt_get_msglevel,
	.set_msglevel		= bnxt_set_msglevel,
	.get_sset_count		= bnxt_get_sset_count,
	.get_strings		= bnxt_get_strings,
	.get_ethtool_stats	= bnxt_get_ethtool_stats,
	.set_ringparam		= bnxt_set_ringparam,
	.get_ringparam		= bnxt_get_ringparam,
	.get_channels		= bnxt_get_channels,
	.set_channels		= bnxt_set_channels,
	.get_rxnfc		= bnxt_get_rxnfc,
	.set_rxnfc		= bnxt_set_rxnfc,
	.get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
	.get_rxfh_key_size      = bnxt_get_rxfh_key_size,
	.get_rxfh               = bnxt_get_rxfh,
	.set_rxfh		= bnxt_set_rxfh,
	.get_rxfh_fields	= bnxt_get_rxfh_fields,
	.set_rxfh_fields	= bnxt_set_rxfh_fields,
	.create_rxfh_context	= bnxt_create_rxfh_context,
	.modify_rxfh_context	= bnxt_modify_rxfh_context,
	.remove_rxfh_context	= bnxt_remove_rxfh_context,
	.flash_device		= bnxt_flash_device,
	.get_eeprom_len         = bnxt_get_eeprom_len,
	.get_eeprom             = bnxt_get_eeprom,
	.set_eeprom		= bnxt_set_eeprom,
	.get_link		= bnxt_get_link,
	.get_link_ext_stats	= bnxt_get_link_ext_stats,
	.get_eee		= bnxt_get_eee,
	.set_eee		= bnxt_set_eee,
	.get_tunable		= bnxt_get_tunable,
	.set_tunable		= bnxt_set_tunable,
	.get_module_info	= bnxt_get_module_info,
	.get_module_eeprom	= bnxt_get_module_eeprom,
	.get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
	.set_module_eeprom_by_page = bnxt_set_module_eeprom_by_page,
	.nway_reset		= bnxt_nway_reset,
	.set_phys_id		= bnxt_set_phys_id,
	.self_test		= bnxt_self_test,
	.get_ts_info		= bnxt_get_ts_info,
	.reset			= bnxt_reset,
	.set_dump		= bnxt_set_dump,
	.get_dump_flag		= bnxt_get_dump_flag,
	.get_dump_data		= bnxt_get_dump_data,
	.get_eth_phy_stats	= bnxt_get_eth_phy_stats,
	.get_eth_mac_stats	= bnxt_get_eth_mac_stats,
	.get_eth_ctrl_stats	= bnxt_get_eth_ctrl_stats,
	.get_rmon_stats		= bnxt_get_rmon_stats,
	.get_ts_stats		= bnxt_get_ptp_stats,
};