/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/linkmode.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
#include <linux/utsname.h>
#include <linux/time.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
#include <net/netdev_queues.h>
#include <net/netlink.h>
#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"

/* Report an NVM error both via netlink extack (when the caller supplied
 * one) and unconditionally via the kernel log.
 */
#define BNXT_NVM_ERR_MSG(dev, extack, msg)			\
	do {							\
		if (extack)					\
			NL_SET_ERR_MSG_MOD(extack, msg);	\
		netdev_err(dev, "%s\n", msg);			\
	} while (0)

/* ethtool .get_msglevel hook: return the driver's debug message mask. */
static u32 bnxt_get_msglevel(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->msg_enable;
}

/* ethtool .set_msglevel hook: set the driver's debug message mask. */
static void bnxt_set_msglevel(struct net_device *dev, u32 value)
{
	struct bnxt *bp = netdev_priv(dev);

	bp->msg_enable = value;
}

/* ethtool .get_coalesce hook: report the cached RX/TX interrupt
 * coalescing parameters.  hw_coal values are stored in internal "bufs"
 * units; bufs_per_record converts them back to frames for ethtool.
 */
static int bnxt_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_coal *hw_coal;
	u16 mult;

	memset(coal, 0, sizeof(*coal));

	/* Adaptive RX coalescing (net_dim) is tracked as a driver flag. */
	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
	coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
	/* TIMER_RESET flag set means the ring is in CQE coalescing mode. */
	if (hw_coal->flags &
	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
		kernel_coal->use_cqe_mode_rx = true;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	coal->tx_coalesce_usecs = hw_coal->coal_ticks;
	coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
	if (hw_coal->flags &
	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
		kernel_coal->use_cqe_mode_tx = true;

	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;

	return 0;
}

/* ethtool .set_coalesce hook: validate and store new coalescing
 * parameters, then push them to firmware.  If the statistics update
 * interval changed, a full close/open is done so the stats contexts are
 * reconfigured; otherwise only the coalescing HWRM command is sent.
 */
static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	bool update_stats = false;
	struct bnxt_coal *hw_coal;
	int rc = 0;
	u16 mult;

	if (coal->use_adaptive_rx_coalesce) {
		bp->flags |= BNXT_FLAG_DIM;
	} else {
		if (bp->flags & BNXT_FLAG_DIM) {
			/* Turning adaptive mode off: skip straight to
			 * reprogramming so the static values take effect.
			 */
			bp->flags &= ~(BNXT_FLAG_DIM);
			goto reset_coalesce;
		}
	}

	/* CQE coalescing mode requires firmware TIMER_RESET support. */
	if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
	    !(bp->coal_cap.cmpl_params &
	      RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
		return -EOPNOTSUPP;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
	hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_rx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->tx_coalesce_usecs;
	hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_tx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
		u32 stats_ticks = coal->stats_block_coalesce_usecs;

		/* Allow 0, which means disable. */
		if (stats_ticks)
			stats_ticks = clamp_t(u32, stats_ticks,
					      BNXT_MIN_STATS_COAL_TICKS,
					      BNXT_MAX_STATS_COAL_TICKS);
		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
		bp->stats_coal_ticks = stats_ticks;
		if (bp->stats_coal_ticks)
			bp->current_interval =
				bp->stats_coal_ticks * HZ / 1000000;
		else
			bp->current_interval = BNXT_TIMER_INTERVAL;
		update_stats = true;
	}

reset_coalesce:
	if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
		if (update_stats) {
			/* Stats interval change needs a close/open cycle
			 * to reconfigure the firmware stats contexts.
			 */
			bnxt_close_nic(bp, true, false);
			rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_hwrm_set_coal(bp);
		}
	}

	return rc;
}

/* Per-ring RX hardware counter names, in sw_stats layout order. */
static const char * const bnxt_ring_rx_stats_str[] = {
	"rx_ucast_packets",
	"rx_mcast_packets",
	"rx_bcast_packets",
	"rx_discards",
	"rx_errors",
	"rx_ucast_bytes",
	"rx_mcast_bytes",
	"rx_bcast_bytes",
};

/* Per-ring TX hardware counter names, in sw_stats layout order. */
static const char * const bnxt_ring_tx_stats_str[] = {
	"tx_ucast_packets",
	"tx_mcast_packets",
	"tx_bcast_packets",
	"tx_errors",
	"tx_discards",
	"tx_ucast_bytes",
	"tx_mcast_bytes",
	"tx_bcast_bytes",
};

/* Per-ring TPA (LRO/GRO-HW) counter names for legacy chips. */
static const char * const bnxt_ring_tpa_stats_str[] = {
	"tpa_packets",
	"tpa_bytes",
	"tpa_events",
	"tpa_aborts",
};

/* Per-ring TPA counter names for chips with max_tpa_v2 support. */
static const char * const bnxt_ring_tpa2_stats_str[] = {
	"rx_tpa_eligible_pkt",
	"rx_tpa_eligible_bytes",
	"rx_tpa_pkt",
	"rx_tpa_bytes",
	"rx_tpa_errors",
	"rx_tpa_events",
};

/* Per-ring software counter names (cpr->sw_stats->rx layout order). */
static const char * const bnxt_rx_sw_stats_str[] = {
	"rx_l4_csum_errors",
	"rx_resets",
	"rx_buf_errors",
};

/* Common per-completion-ring software counter names. */
static const char * const bnxt_cmn_sw_stats_str[] = {
	"missed_irqs",
};

/* Build a { hw stats offset, name } table entry from a counter name. */
#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }

/* Two entries (duration + transitions) per PFC priority n. */
#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),	\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)

#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),	\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)

#define BNXT_RX_STATS_EXT_PFC_ENTRIES				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_TX_STATS_EXT_PFC_ENTRIES				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(7)

/* Two entries (bytes + packets) per CoS queue n. */
#define BNXT_RX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),		\
	BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)

#define BNXT_TX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),		\
	BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)

#define BNXT_RX_STATS_EXT_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_TX_STATS_EXT_COS_ENTRIES				\
	BNXT_TX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)

/* Per-priority entries store the cos0 offset as a base; the actual
 * offset is computed at read time by adding bp->pri2cos_idx[pri].
 */
#define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_RX_STATS_PRI_ENTRIES(counter)		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_TX_STATS_PRI_ENTRIES(counter)		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 7)

enum {
	RX_TOTAL_DISCARDS,
	TX_TOTAL_DISCARDS,
	RX_NETPOLL_DISCARDS,
};

/* Names for the aggregate ring error counters; must stay in the same
 * order as the fields of struct bnxt_total_ring_err_stats, which
 * bnxt_get_ethtool_stats() walks field by field.
 */
static const char *const bnxt_ring_err_stats_arr[] = {
	"rx_total_l4_csum_errors",
	"rx_total_resets",
	"rx_total_buf_errors",
	"rx_total_oom_discards",
	"rx_total_netpoll_discards",
	"rx_total_ring_discards",
	"tx_total_resets",
	"tx_total_ring_discards",
	"total_missed_irqs",
};

#define NUM_RING_RX_SW_STATS	ARRAY_SIZE(bnxt_rx_sw_stats_str)
#define NUM_RING_CMN_SW_STATS	ARRAY_SIZE(bnxt_cmn_sw_stats_str)
#define NUM_RING_RX_HW_STATS	ARRAY_SIZE(bnxt_ring_rx_stats_str)
#define NUM_RING_TX_HW_STATS	ARRAY_SIZE(bnxt_ring_tx_stats_str)

/* Port-level hardware counters: { offset into port stats block, name }. */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),
	BNXT_RX_STATS_ENTRY(rx_stat_discard),
	BNXT_RX_STATS_ENTRY(rx_stat_err),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
	BNXT_TX_STATS_ENTRY(tx_stat_discard),
	BNXT_TX_STATS_ENTRY(tx_stat_error),
};

/* Extended RX port counters; only the first fw_rx_stats_ext_size
 * entries are valid on a given firmware.
 */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
	BNXT_RX_STATS_EXT_COS_ENTRIES,
	BNXT_RX_STATS_EXT_PFC_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_bits),
	BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
	BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
	BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss),
};

/* Extended TX port counters, gated by fw_tx_stats_ext_size. */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_port_stats_ext_arr[] = {
	BNXT_TX_STATS_EXT_COS_ENTRIES,
	BNXT_TX_STATS_EXT_PFC_ENTRIES,
};

/* Per-priority views of the CoS counters (base_off + pri2cos_idx). */
static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_bytes_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_pkts_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_bytes_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_pkts_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};

#define BNXT_NUM_RING_ERR_STATS	ARRAY_SIZE(bnxt_ring_err_stats_arr)
#define BNXT_NUM_PORT_STATS	ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI			\
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))

/* Number of per-ring TPA counters for this chip generation, or 0 when
 * TPA is not supported.
 */
static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
	if (BNXT_SUPPORTS_TPA(bp)) {
		if (bp->max_tpa_v2) {
			if (BNXT_CHIP_P5(bp))
				return BNXT_NUM_TPA_RING_STATS_P5;
			return BNXT_NUM_TPA_RING_STATS_P7;
		}
		return BNXT_NUM_TPA_RING_STATS;
	}
	return 0;
}

/* Total number of per-ring counters across all RX/TX/completion rings. */
static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
	int rx, tx, cmn;

	rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
	     bnxt_get_num_tpa_ring_stats(bp);
	tx = NUM_RING_TX_HW_STATS;
	cmn = NUM_RING_CMN_SW_STATS;
	return rx * bp->rx_nr_rings +
	       tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) +
	       cmn * bp->cp_nr_rings;
}

static int
bnxt_get_num_stats(struct bnxt *bp)
{
	/* Grand total of counters reported to ethtool -S: per-ring stats
	 * plus the aggregate error, port, extended and per-priority
	 * groups when the corresponding capability flags are set.  Must
	 * stay in sync with bnxt_get_ethtool_stats()/bnxt_get_strings().
	 */
	int num_stats = bnxt_get_num_ring_stats(bp);
	int len;

	num_stats += BNXT_NUM_RING_ERR_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS)
		num_stats += BNXT_NUM_PORT_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		/* Firmware may support fewer extended counters than the
		 * driver knows about; report only the common subset.
		 */
		len = min_t(int, bp->fw_rx_stats_ext_size,
			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
		num_stats += len;
		len = min_t(int, bp->fw_tx_stats_ext_size,
			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
		num_stats += len;
		if (bp->pri2cos_valid)
			num_stats += BNXT_NUM_STATS_PRI;
	}

	return num_stats;
}

/* ethtool .get_sset_count hook. */
static int bnxt_get_sset_count(struct net_device *dev, int sset)
{
	struct bnxt *bp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return bnxt_get_num_stats(bp);
	case ETH_SS_TEST:
		if (!bp->num_tests)
			return -EOPNOTSUPP;
		return bp->num_tests;
	default:
		return -EOPNOTSUPP;
	}
}

/* RX rings occupy indices [0, rx_nr_rings) of the completion rings. */
static bool is_rx_ring(struct bnxt *bp, int ring_num)
{
	return ring_num < bp->rx_nr_rings;
}

/* TX rings start at 0 when rings are shared with RX, otherwise they
 * follow the RX rings.
 */
static bool is_tx_ring(struct bnxt *bp, int ring_num)
{
	int tx_base = 0;

	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
		tx_base = bp->rx_nr_rings;

	if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
		return true;
	return false;
}

/* ethtool .get_ethtool_stats hook: fill buf[] in exactly the order that
 * bnxt_get_strings() emits names and bnxt_get_num_stats() counts.
 */
static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct bnxt_total_ring_err_stats ring_err_stats = {0};
	struct bnxt *bp = netdev_priv(dev);
	u64 *curr, *prev;
	u32 tpa_stats;
	u32 i, j = 0;

	if (!bp->bnapi) {
		/* Rings not allocated: skip past the per-ring section so
		 * the later groups still land at their expected offsets.
		 */
		j += bnxt_get_num_ring_stats(bp);
		goto skip_ring_stats;
	}

	tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		u64 *sw_stats = cpr->stats.sw_stats;
		u64 *sw;
		int k;

		/* sw_stats layout: RX hw, TX hw, then TPA counters. */
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
				buf[j] = sw_stats[k];
		}
		if (is_tx_ring(bp, i)) {
			k = NUM_RING_RX_HW_STATS;
			for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
			     j++, k++)
				buf[j] = sw_stats[k];
		}
		if (!tpa_stats || !is_rx_ring(bp, i))
			goto skip_tpa_ring_stats;

		k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
		for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
			   tpa_stats; j++, k++)
			buf[j] = sw_stats[k];

skip_tpa_ring_stats:
		sw = (u64 *)&cpr->sw_stats->rx;
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
				buf[j] = sw[k];
		}

		sw = (u64 *)&cpr->sw_stats->cmn;
		for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
			buf[j] = sw[k];
	}

	bnxt_get_ring_err_stats(bp, &ring_err_stats);

skip_ring_stats:
	/* Walk struct bnxt_total_ring_err_stats field by field, adding
	 * the pre-reset snapshot so totals survive ring reconfiguration.
	 */
	curr = &ring_err_stats.rx_total_l4_csum_errors;
	prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors;
	for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++)
		buf[j] = *curr + *prev;

	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		u64 *port_stats = bp->port_stats.sw_stats;

		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
			buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
		u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
		u32 len;

		len = min_t(u32, bp->fw_rx_stats_ext_size,
			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(rx_port_stats_ext +
				   bnxt_port_stats_ext_arr[i].offset);
		}
		len = min_t(u32, bp->fw_tx_stats_ext_size,
			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(tx_port_stats_ext +
				   bnxt_tx_port_stats_ext_arr[i].offset);
		}
		if (bp->pri2cos_valid) {
			/* Per-priority counters: index the cos0 base by
			 * the firmware-provided priority->CoS mapping.
			 */
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
		}
	}
}

/* ethtool .get_strings hook: emit stat/test names in the same order as
 * bnxt_get_ethtool_stats() fills values.
 */
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	u32 i, j, num_str;
	const char *str;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < bp->cp_nr_rings; i++) {
			if (is_rx_ring(bp, i))
				for (j = 0; j < NUM_RING_RX_HW_STATS; j++) {
					str = bnxt_ring_rx_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
			if (is_tx_ring(bp, i))
				for (j = 0; j < NUM_RING_TX_HW_STATS; j++) {
					str = bnxt_ring_tx_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
			num_str = bnxt_get_num_tpa_ring_stats(bp);
			if (!num_str || !is_rx_ring(bp, i))
				goto skip_tpa_stats;

			if (bp->max_tpa_v2)
				for (j = 0; j < num_str; j++) {
					str = bnxt_ring_tpa2_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
			else
				for (j = 0; j < num_str; j++) {
					str = bnxt_ring_tpa_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
skip_tpa_stats:
			if (is_rx_ring(bp, i))
				for (j = 0; j < NUM_RING_RX_SW_STATS; j++) {
					str = bnxt_rx_sw_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
			for (j = 0; j < NUM_RING_CMN_SW_STATS; j++) {
				str = bnxt_cmn_sw_stats_str[j];
				ethtool_sprintf(&buf, "[%d]: %s", i, str);
			}
		}
		for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++)
			ethtool_puts(&buf, bnxt_ring_err_stats_arr[i]);

		if (bp->flags & BNXT_FLAG_PORT_STATS)
			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
				str = bnxt_port_stats_arr[i].string;
				ethtool_puts(&buf, str);
			}

		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
			u32 len;

			len = min_t(u32, bp->fw_rx_stats_ext_size,
				    ARRAY_SIZE(bnxt_port_stats_ext_arr));
			for (i = 0; i < len; i++) {
				str = bnxt_port_stats_ext_arr[i].string;
				ethtool_puts(&buf, str);
			}

			len = min_t(u32, bp->fw_tx_stats_ext_size,
				    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
			for (i = 0; i < len; i++) {
				str = bnxt_tx_port_stats_ext_arr[i].string;
				ethtool_puts(&buf, str);
			}

			if (bp->pri2cos_valid) {
				for (i = 0; i < 8; i++) {
					str = bnxt_rx_bytes_pri_arr[i].string;
					ethtool_puts(&buf, str);
				}

				for (i = 0; i < 8; i++) {
					str = bnxt_rx_pkts_pri_arr[i].string;
					ethtool_puts(&buf, str);
				}

				for (i = 0; i < 8; i++) {
					str = bnxt_tx_bytes_pri_arr[i].string;
					ethtool_puts(&buf, str);
				}

				for (i = 0; i < 8; i++) {
					str = bnxt_tx_pkts_pri_arr[i].string;
					ethtool_puts(&buf, str);
				}
			}
		}
		break;
	case ETH_SS_TEST:
		if (bp->num_tests)
			for (i = 0; i < bp->num_tests; i++)
				ethtool_puts(&buf, bp->test_info->string[i]);
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}

/* ethtool .get_ringparam hook: report ring size limits and current
 * sizes; jumbo (aggregation) ring limits depend on BNXT_FLAG_AGG_RINGS.
 */
static void bnxt_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
		ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
	} else {
		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
		ering->rx_jumbo_max_pending = 0;
		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
	}
	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
	ering->tx_pending = bp->tx_ring_size;

	kernel_ering->hds_thresh_max = BNXT_HDS_THRESHOLD_MAX;
}

/* ethtool .set_ringparam hook: validate the requested ring sizes and
 * header-data-split setting, then reconfigure (close/open if running).
 */
static int bnxt_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering,
			      struct kernel_ethtool_ringparam *kernel_ering,
			      struct netlink_ext_ack *extack)
{
	u8 tcp_data_split = kernel_ering->tcp_data_split;
	struct bnxt *bp = netdev_priv(dev);
	u8 hds_config_mod;

	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
		return -EINVAL;

	hds_config_mod = tcp_data_split != dev->cfg->hds_config;
	/* Explicitly disabling HDS is not supported; only "unknown"
	 * (auto) may clear it below.
	 */
	if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && hds_config_mod)
		return -EINVAL;

	if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
	    hds_config_mod && BNXT_RX_PAGE_MODE(bp)) {
		NL_SET_ERR_MSG_MOD(extack, "tcp-data-split is disallowed when XDP is attached");
		return -EINVAL;
	}

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	if (hds_config_mod) {
		if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
			bp->flags |= BNXT_FLAG_HDS;
		else if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
			bp->flags &= ~BNXT_FLAG_HDS;
	}

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}

/* ethtool .get_channels hook: report max and current RX/TX/combined
 * channel counts, accounting for TCs, XDP rings and scheduler inputs.
 */
static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_rx_rings, max_tx_rings, tcs;
	int max_tx_sch_inputs, tx_grps;

	/* Get the most up-to-date max_tx_sch_inputs. */
	if (netif_running(dev) && BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);
	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;

	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	/* Each TC (and XDP, if enabled) consumes one TX ring group. */
	tcs = bp->num_tc;
	tx_grps = max(tcs, 1);
	if (bp->tx_nr_rings_xdp)
		tx_grps++;
	max_tx_rings /= tx_grps;
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	/* Recompute for the non-shared case. */
	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
		max_rx_rings = 0;
		max_tx_rings = 0;
	}
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	if (tcs > 1)
		max_tx_rings /= tcs;

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
	} else {
		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			channel->rx_count = bp->rx_nr_rings;
			channel->tx_count = bp->tx_nr_rings_per_tc;
		}
	}
}

/* ethtool .set_channels hook: validate the request (combined vs.
 * separate RX/TX, XDP and RSS constraints), then resize the rings with
 * a close/open cycle when the interface is up.
 */
static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;
	int tx_cp;

	if (channel->other_count)
		return -EINVAL;

	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	/* Combined and separate RX/TX counts are mutually exclusive. */
	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = bp->num_tc;

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		/* One XDP TX ring per RX ring. */
		tx_xdp = req_rx_rings;
	}

	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
	    netif_is_rxfh_configured(dev)) {
		netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
		return -EINVAL;
	}

	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * before PF unload
			 */
		}
		bnxt_close_nic(bp, true, false);
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
	bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
			       tx_cp + bp->rx_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * to renable
			 */
		}
	} else {
		rc = bnxt_reserve_rings(bp, true);
	}

	return rc;
}

/* Collect the sw_ids of all active filters in a hash table into ids[],
 * starting at index 'start' and stopping at id_cnt entries.  Returns
 * the next free index.  Caller must hold rcu_read_lock().
 */
static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
				     int tbl_size, u32 *ids, u32 start,
				     u32 id_cnt)
{
	int i, j = start;

	if (j >= id_cnt)
		return j;
	for (i = 0; i < tbl_size; i++) {
		struct hlist_head *head;
		struct bnxt_filter_base *fltr;

		head = &tbl[i];
		hlist_for_each_entry_rcu(fltr, head, hash) {
			/* Skip unconfigured or firmware-deleted filters. */
			if (!fltr->flags ||
			    test_bit(BNXT_FLTR_FW_DELETED, &fltr->state))
				continue;
			ids[j++] = fltr->sw_id;
			if (j == id_cnt)
				return j;
		}
	}
	return j;
}

/* Look up an active filter by sw_id in a hash table.  Caller must hold
 * rcu_read_lock().  Returns NULL when not found.
 */
static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
						      struct hlist_head tbl[],
						      int tbl_size, u32 id)
{
	int i;

	for (i = 0; i < tbl_size; i++) {
		struct hlist_head *head;
		struct bnxt_filter_base *fltr;

		head = &tbl[i];
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (fltr->flags && fltr->sw_id == id)
				return fltr;
		}
	}
	return NULL;
}

/* ETHTOOL_GRXCLSRLALL: report the locations of all L2 and ntuple
 * classification rules.
 */
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	u32 count;

	cmd->data = bp->ntp_fltr_count;
	rcu_read_lock();
	count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0,
					  cmd->rule_cnt);
	cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
						  BNXT_NTP_FLTR_HASH_SIZE,
						  rule_locs, count,
						  cmd->rule_cnt);
	rcu_read_unlock();

	return 0;
}

static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
			(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_filter_base *fltr_base;
	struct bnxt_ntuple_filter *fltr;
	struct bnxt_flow_masks *fmasks;
	struct flow_keys *fkeys;
	int rc = -EINVAL;

	if (fs->location >= bp->max_fltr)
		return rc;

	/* Try the L2 filter table first; IDs are unique across both tables. */
	rcu_read_lock();
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE,
					  fs->location);
	if (fltr_base) {
		struct ethhdr *h_ether = &fs->h_u.ether_spec;
		struct ethhdr *m_ether = &fs->m_u.ether_spec;
		struct bnxt_l2_filter *l2_fltr;
		struct bnxt_l2_key *l2_key;

		l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
		l2_key = &l2_fltr->l2_key;
		fs->flow_type = ETHER_FLOW;
		/* L2 rules match the full destination MAC */
		ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr);
		eth_broadcast_addr(m_ether->h_dest);
		if (l2_key->vlan) {
			struct ethtool_flow_ext *m_ext = &fs->m_ext;
			struct ethtool_flow_ext *h_ext = &fs->h_ext;

			fs->flow_type |= FLOW_EXT;
			m_ext->vlan_tci = htons(0xfff);
			h_ext->vlan_tci = htons(l2_key->vlan);
		}
		if (fltr_base->flags & BNXT_ACT_RING_DST)
			fs->ring_cookie = fltr_base->rxq;
		if (fltr_base->flags & BNXT_ACT_FUNC_DST)
			fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) <<
					  ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		rcu_read_unlock();
		return 0;
	}
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
					  BNXT_NTP_FLTR_HASH_SIZE,
					  fs->location);
	if (!fltr_base) {
		rcu_read_unlock();
		return rc;
	}
	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);

	fkeys = &fltr->fkeys;
	fmasks = &fltr->fmasks;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
			fs->flow_type = IP_USER_FLOW;
			fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
			fs->h_u.usr_ip4_spec.proto = BNXT_IP_PROTO_WILDCARD;
			fs->m_u.usr_ip4_spec.proto = 0;
		} else if (fkeys->basic.ip_proto == IPPROTO_ICMP) {
			fs->flow_type = IP_USER_FLOW;
			fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
			fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
			fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK;
		} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
			fs->flow_type = TCP_V4_FLOW;
		} else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
			fs->flow_type = UDP_V4_FLOW;
		} else {
			goto fltr_err;
		}

		fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
		fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src;
		fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
		fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst;
		/* ports only apply to TCP/UDP flow types */
		if (fs->flow_type == TCP_V4_FLOW ||
		    fs->flow_type == UDP_V4_FLOW) {
			fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src;
			fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
		}
	} else {
		if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
			fs->flow_type = IPV6_USER_FLOW;
			fs->h_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_WILDCARD;
			fs->m_u.usr_ip6_spec.l4_proto = 0;
		} else if (fkeys->basic.ip_proto == IPPROTO_ICMPV6) {
			fs->flow_type = IPV6_USER_FLOW;
			fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
			fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK;
		} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
			fs->flow_type = TCP_V6_FLOW;
		} else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
			fs->flow_type = UDP_V6_FLOW;
		} else {
			goto fltr_err;
		}

		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
			fkeys->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] =
			fmasks->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
			fkeys->addrs.v6addrs.dst;
		*(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] =
			fmasks->addrs.v6addrs.dst;
		if (fs->flow_type == TCP_V6_FLOW ||
		    fs->flow_type == UDP_V6_FLOW) {
			fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src;
			fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst;
		}
	}

	/* Encode the filter's action: drop, RSS context, or specific ring. */
	if (fltr->base.flags & BNXT_ACT_DROP) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
		fs->flow_type |= FLOW_RSS;
		cmd->rss_context = fltr->base.fw_vnic_id;
	} else {
		fs->ring_cookie = fltr->base.rxq;
	}
	rc = 0;

fltr_err:
	rcu_read_unlock();

	return rc;
}

/* Map an ethtool RSS context index to the driver's private context struct,
 * or NULL if no context exists at that index.
 */
static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
							u32 index)
{
	struct ethtool_rxfh_context *ctx;

	ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
	if (!ctx)
		return NULL;
	return ethtool_rxfh_context_priv(ctx);
}

/* Allocate one DMA-coherent buffer holding both the VNIC RSS indirection
 * table and, immediately after it, the hash key.
 */
static int bnxt_alloc_vnic_rss_table(struct bnxt *bp,
				     struct bnxt_vnic_info *vnic)
{
	int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);

	vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
	vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
					     vnic->rss_table_size,
					     &vnic->rss_table_dma_addr,
					     GFP_KERNEL);
	if (!vnic->rss_table)
		return -ENOMEM;

	/* hash key lives right behind the indirection table */
	vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
	vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	return 0;
}

/* Install an L2 (destination MAC, optional VLAN) classification rule
 * steering to a ring or a VF.
 */
static int bnxt_add_l2_cls_rule(struct bnxt *bp,
				struct ethtool_rx_flow_spec *fs)
{
	u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	struct ethhdr *h_ether = &fs->h_u.ether_spec;
	struct ethhdr *m_ether = &fs->m_u.ether_spec;
	struct bnxt_l2_filter *fltr;
	struct bnxt_l2_key key;
	u16 vnic_id;
	u8 flags;
	int rc;

	if
 (BNXT_CHIP_P5_PLUS(bp))
		return -EOPNOTSUPP;

	/* only an exact (fully masked) destination MAC is supported */
	if (!is_broadcast_ether_addr(m_ether->h_dest))
		return -EINVAL;
	ether_addr_copy(key.dst_mac_addr, h_ether->h_dest);
	key.vlan = 0;
	if (fs->flow_type & FLOW_EXT) {
		struct ethtool_flow_ext *m_ext = &fs->m_ext;
		struct ethtool_flow_ext *h_ext = &fs->h_ext;

		/* VLAN, if given, must be an exact non-zero match */
		if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci)
			return -EINVAL;
		key.vlan = ntohs(h_ext->vlan_tci);
	}

	if (vf) {
		flags = BNXT_ACT_FUNC_DST;
		vnic_id = 0xffff;
		vf--;	/* ring cookie encodes VF index + 1 */
	} else {
		flags = BNXT_ACT_RING_DST;
		vnic_id = bp->vnic_info[ring + 1].fw_vnic_id;
	}
	fltr = bnxt_alloc_new_l2_filter(bp, &key, flags);
	if (IS_ERR(fltr))
		return PTR_ERR(fltr);

	fltr->base.fw_vnic_id = vnic_id;
	fltr->base.rxq = ring;
	fltr->base.vf_idx = vf;
	rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
	if (rc)
		bnxt_del_l2_filter(bp, fltr);
	else
		fs->location = fltr->base.sw_id;
	return rc;
}

/* Validate a user IPv4 flow spec: only an all-or-nothing protocol mask is
 * supported, and a fully-masked protocol may only be ICMP.
 */
static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
					struct ethtool_usrip4_spec *ip_mask)
{
	u8 mproto = ip_mask->proto;
	u8 sproto = ip_spec->proto;

	if (ip_mask->l4_4_bytes || ip_mask->tos ||
	    ip_spec->ip_ver != ETH_RX_NFC_IP4 ||
	    (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMP)))
		return false;
	return true;
}

/* IPv6 counterpart of bnxt_verify_ntuple_ip4_flow(); fully-masked protocol
 * may only be ICMPv6.
 */
static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
					struct ethtool_usrip6_spec *ip_mask)
{
	u8 mproto = ip_mask->l4_proto;
	u8 sproto = ip_spec->l4_proto;

	if (ip_mask->l4_4_bytes || ip_mask->tclass ||
	    (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMPV6)))
		return false;
	return true;
}

/* Install an ntuple (IP/TCP/UDP, v4 or v6) classification rule from
 * ETHTOOL_SRXCLSRLINS.
 */
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
				    struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	struct bnxt_ntuple_filter *new_fltr, *fltr;
	u32 flow_type = fs->flow_type & 0xff;
	struct bnxt_l2_filter *l2_fltr;
	struct bnxt_flow_masks *fmasks;
	struct flow_keys *fkeys;
	u32 idx, ring;
	int rc;
	u8 vf;

	if (!bp->vnic_info)
		return -EAGAIN;

	vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	/* ntuple rules cannot target a VF and take no extension fields */
	if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
		return -EOPNOTSUPP;

	if (flow_type == IP_USER_FLOW) {
		if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec,
						 &fs->m_u.usr_ip4_spec))
			return -EOPNOTSUPP;
	}

	if (flow_type == IPV6_USER_FLOW) {
		if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec,
						 &fs->m_u.usr_ip6_spec))
			return -EOPNOTSUPP;
	}

	new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
	if (!new_fltr)
		return -ENOMEM;

	/* take a reference on the default VNIC's L2 filter; dropped on any
	 * error path below
	 */
	l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
	atomic_inc(&l2_fltr->refcnt);
	new_fltr->l2_fltr = l2_fltr;
	fmasks = &new_fltr->fmasks;
	fkeys = &new_fltr->fkeys;

	rc = -EOPNOTSUPP;
	switch (flow_type) {
	case IP_USER_FLOW: {
		struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec;
		struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec;

		/* unmasked protocol means match any IP protocol */
		fkeys->basic.ip_proto = ip_mask->proto ? ip_spec->proto
						       : BNXT_IP_PROTO_WILDCARD;
		fkeys->basic.n_proto = htons(ETH_P_IP);
		fkeys->addrs.v4addrs.src = ip_spec->ip4src;
		fmasks->addrs.v4addrs.src = ip_mask->ip4src;
		fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
		fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
		break;
	}
	case TCP_V4_FLOW:
	case UDP_V4_FLOW: {
		struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
		struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec;

		fkeys->basic.ip_proto = IPPROTO_TCP;
		if (flow_type == UDP_V4_FLOW)
			fkeys->basic.ip_proto = IPPROTO_UDP;
		fkeys->basic.n_proto = htons(ETH_P_IP);
		fkeys->addrs.v4addrs.src = ip_spec->ip4src;
		fmasks->addrs.v4addrs.src = ip_mask->ip4src;
		fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
		fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
		fkeys->ports.src = ip_spec->psrc;
		fmasks->ports.src = ip_mask->psrc;
		fkeys->ports.dst = ip_spec->pdst;
		fmasks->ports.dst = ip_mask->pdst;
		break;
	}
	case IPV6_USER_FLOW: {
		struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec;
		struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec;

		fkeys->basic.ip_proto = ip_mask->l4_proto ? ip_spec->l4_proto
							  : BNXT_IP_PROTO_WILDCARD;
		fkeys->basic.n_proto = htons(ETH_P_IPV6);
		fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
		fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
		fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
		fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
		break;
	}
	case TCP_V6_FLOW:
	case UDP_V6_FLOW: {
		struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec;
		struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec;

		fkeys->basic.ip_proto = IPPROTO_TCP;
		if (flow_type == UDP_V6_FLOW)
			fkeys->basic.ip_proto = IPPROTO_UDP;
		fkeys->basic.n_proto = htons(ETH_P_IPV6);

		fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
		fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
		fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
		fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
		fkeys->ports.src = ip_spec->psrc;
		fmasks->ports.src = ip_mask->psrc;
		fkeys->ports.dst = ip_spec->pdst;
		fmasks->ports.dst = ip_mask->pdst;
		break;
	}
	default:
		rc = -EOPNOTSUPP;
		goto ntuple_err;
	}
	/* a rule with no mask at all would match everything; reject it */
	if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks)))
		goto ntuple_err;

	idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
	rcu_read_lock();
	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
	if (fltr) {
		rcu_read_unlock();
		rc = -EEXIST;
		goto ntuple_err;
	}
	rcu_read_unlock();

	new_fltr->base.flags = BNXT_ACT_NO_AGING;
	if (fs->flow_type & FLOW_RSS) {
		struct bnxt_rss_ctx *rss_ctx;

		new_fltr->base.fw_vnic_id = 0;
		new_fltr->base.flags |= BNXT_ACT_RSS_CTX;
		rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context);
		if (rss_ctx) {
			new_fltr->base.fw_vnic_id = rss_ctx->index;
		} else {
			rc = -EINVAL;
			goto ntuple_err;
		}
	}
	if (fs->ring_cookie == RX_CLS_FLOW_DISC)
		new_fltr->base.flags |= BNXT_ACT_DROP;
	else
		new_fltr->base.rxq = ring;
	__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
	if (!rc) {
		rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr);
		if (rc) {
			bnxt_del_ntp_filter(bp, new_fltr);
			return rc;
		}
		fs->location = new_fltr->base.sw_id;
		return 0;
	}

ntuple_err:
	/* drop the L2 filter reference taken above and free the new filter */
	atomic_dec(&l2_fltr->refcnt);
	kfree(new_fltr);
	return rc;
}

/* ETHTOOL_SRXCLSRLINS entry point: validate the request and dispatch to the
 * L2 or ntuple rule installer.
 */
static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	u32 ring, flow_type;
	int rc;
	u8 vf;

	if (!netif_running(bp->dev))
		return -EAGAIN;
	if (!(bp->flags & BNXT_FLAG_RFS))
		return -EPERM;
	/* the device chooses the rule ID; user may not pick a location */
	if (fs->location != RX_CLS_LOC_ANY)
		return -EINVAL;

	flow_type = fs->flow_type;
	if ((flow_type == IP_USER_FLOW ||
	     flow_type == IPV6_USER_FLOW) &&
	    !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
		return -EOPNOTSUPP;
	if (flow_type & FLOW_MAC_EXT)
		return -EINVAL;
	flow_type &= ~FLOW_EXT;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
		return bnxt_add_ntuple_cls_rule(bp, cmd);

	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	if (BNXT_VF(bp) && vf)
		return -EINVAL;
	if (BNXT_PF(bp) && vf > bp->pf.active_vfs)
		return -EINVAL;
	if (!vf && ring >= bp->rx_nr_rings)
		return -EINVAL;

	if (flow_type == ETHER_FLOW)
		rc = bnxt_add_l2_cls_rule(bp, fs);
	else
		rc = bnxt_add_ntuple_cls_rule(bp, cmd);
	return rc;
}

/* ETHTOOL_SRXCLSRLDEL: delete the L2 or ntuple rule at fs->location. */
static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	struct
 bnxt_filter_base *fltr_base;
	struct bnxt_ntuple_filter *fltr;
	u32 id = fs->location;

	rcu_read_lock();
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE, id);
	if (fltr_base) {
		struct bnxt_l2_filter *l2_fltr;

		l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
		rcu_read_unlock();
		bnxt_hwrm_l2_filter_free(bp, l2_fltr);
		bnxt_del_l2_filter(bp, l2_fltr);
		return 0;
	}
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
					  BNXT_NTP_FLTR_HASH_SIZE, id);
	if (!fltr_base) {
		rcu_read_unlock();
		return -ENOENT;
	}

	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
	/* aRFS-managed (aging) filters may not be deleted by the user */
	if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();
	bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr);
	bnxt_del_ntp_filter(bp, fltr);
	return 0;
}

/* RXH bits contributed by the plain IPv4 hash type, if enabled. */
static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

/* RXH bits contributed by the IPv6 hash types (plain or flow-label). */
static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
		return RXH_IP_SRC | RXH_IP_DST;
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL)
		return RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL;
	return 0;
}

/* Report which header fields feed the RSS hash for a given flow type. */
static int bnxt_get_rxfh_fields(struct net_device *dev,
				struct ethtool_rxfh_fields *cmd)
{
	struct bnxt *bp = netdev_priv(dev);

	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |=
RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case AH_ESP_V4_FLOW:
		if (bp->rss_hash_cfg &
		    (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
		     VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4))
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case AH_ESP_V6_FLOW:
		if (bp->rss_hash_cfg &
		    (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
		     VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6))
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}

/* Only the exact 4-tuple or 2-tuple field sets are configurable. */
#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)

/* Configure which header fields feed the RSS hash for a flow type. */
static int bnxt_set_rxfh_fields(struct net_device *dev,
				const struct ethtool_rxfh_fields *cmd,
				struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	int tuple, rc = 0;
	u32 rss_hash_cfg;

	rss_hash_cfg = bp->rss_hash_cfg;

	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE ||
		 cmd->data == (RXH_2TUPLE | RXH_IP6_FL))
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	if (cmd->data & RXH_IP6_FL &&
	    !(bp->rss_cap & BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP))
		return -EINVAL;

	/* 4-tuple hashing is per-protocol and may need a capability check */
	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (cmd->flow_type == AH_ESP_V4_FLOW) {
		if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) ||
				   !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP)))
			return -EINVAL;
		rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
				  VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4);
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
					VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4;
	} else if (cmd->flow_type == AH_ESP_V6_FLOW) {
		if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) ||
				   !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP)))
			return -EINVAL;
		rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
				  VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6);
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
					VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6;
	} else if (tuple == 4) {
		return -EINVAL;
	}

	/* 2-tuple (and none) settings apply per IP version */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
				  VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL);
		if (!tuple)
			break;
		if (cmd->data & RXH_IP6_FL)
			rss_hash_cfg |=
				VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL;
		else if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
		bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
	bp->rss_hash_cfg = rss_hash_cfg;
	/* a close/open cycle is needed to apply the new hash config */
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}

/* ethtool get_rxnfc dispatcher. */
static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->rx_nr_rings;
		break;

	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->ntp_fltr_count;
		cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL;
		break;

	case ETHTOOL_GRXCLSRLALL:
		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
		break;

	case ETHTOOL_GRXCLSRULE:
		rc = bnxt_grxclsrule(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}

/* ethtool set_rxnfc dispatcher. */
static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		rc = bnxt_srxclsrlins(bp, cmd);
		break;

	case ETHTOOL_SRXCLSRLDEL:
		rc = bnxt_srxclsrldel(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}
	return rc;
}

/* Size of the RSS indirection table exposed through ethtool. */
u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
		       BNXT_RSS_TABLE_ENTRIES_P5;
	return HW_HASH_INDEX_SIZE;
}

static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
{
	return HW_HASH_KEY_SIZE;
}

/* Report the indirection table and hash key, either for the default RSS
 * setup or for an additional RSS context selected by rxfh->rss_context.
 */
static int bnxt_get_rxfh(struct net_device *dev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct bnxt_rss_ctx *rss_ctx = NULL;
	struct bnxt *bp = netdev_priv(dev);
	u32 *indir_tbl = bp->rss_indir_tbl;
	struct bnxt_vnic_info *vnic;
	u32 i, tbl_size;

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (!bp->vnic_info)
		return 0;

	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
	if (rxfh->rss_context) {
		struct ethtool_rxfh_context *ctx;

		ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context);
		if (!ctx)
			return -EINVAL;
		indir_tbl = ethtool_rxfh_context_indir(ctx);
		rss_ctx = ethtool_rxfh_context_priv(ctx);
		vnic = &rss_ctx->vnic;
	}

	if (rxfh->indir && indir_tbl) {
		tbl_size = bnxt_get_rxfh_indir_size(dev);
		for (i = 0; i < tbl_size; i++)
			rxfh->indir[i] = indir_tbl[i];
	}

	if (rxfh->key && vnic->rss_hash_key)
		memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);

	return 0;
}

/* Apply a new hash key and/or indirection table, to either the default RSS
 * state (rss_ctx == NULL) or an additional RSS context.
 */
static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx,
			    struct bnxt_rss_ctx *rss_ctx,
			    const struct ethtool_rxfh_param *rxfh)
{
	if (rxfh->key) {
		if (rss_ctx) {
			memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key,
HW_HASH_KEY_SIZE); 1881 } else { 1882 memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE); 1883 bp->rss_hash_key_updated = true; 1884 } 1885 } 1886 if (rxfh->indir) { 1887 u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 1888 u32 *indir_tbl = bp->rss_indir_tbl; 1889 1890 if (rss_ctx) 1891 indir_tbl = ethtool_rxfh_context_indir(ctx); 1892 for (i = 0; i < tbl_size; i++) 1893 indir_tbl[i] = rxfh->indir[i]; 1894 pad = bp->rss_indir_tbl_entries - tbl_size; 1895 if (pad) 1896 memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl)); 1897 } 1898 } 1899 1900 static int bnxt_rxfh_context_check(struct bnxt *bp, 1901 const struct ethtool_rxfh_param *rxfh, 1902 struct netlink_ext_ack *extack) 1903 { 1904 if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) { 1905 NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported"); 1906 return -EOPNOTSUPP; 1907 } 1908 1909 if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) { 1910 NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported"); 1911 return -EOPNOTSUPP; 1912 } 1913 1914 if (!netif_running(bp->dev)) { 1915 NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down"); 1916 return -EAGAIN; 1917 } 1918 1919 return 0; 1920 } 1921 1922 static int bnxt_create_rxfh_context(struct net_device *dev, 1923 struct ethtool_rxfh_context *ctx, 1924 const struct ethtool_rxfh_param *rxfh, 1925 struct netlink_ext_ack *extack) 1926 { 1927 struct bnxt *bp = netdev_priv(dev); 1928 struct bnxt_rss_ctx *rss_ctx; 1929 struct bnxt_vnic_info *vnic; 1930 int rc; 1931 1932 rc = bnxt_rxfh_context_check(bp, rxfh, extack); 1933 if (rc) 1934 return rc; 1935 1936 if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) { 1937 NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u", 1938 BNXT_MAX_ETH_RSS_CTX); 1939 return -EINVAL; 1940 } 1941 1942 if (!bnxt_rfs_capable(bp, true)) { 1943 NL_SET_ERR_MSG_MOD(extack, "Out hardware resources"); 1944 return -ENOMEM; 1945 } 1946 1947 rss_ctx = ethtool_rxfh_context_priv(ctx); 1948 1949 bp->num_rss_ctx++; 1950 
1951 vnic = &rss_ctx->vnic; 1952 vnic->rss_ctx = ctx; 1953 vnic->flags |= BNXT_VNIC_RSSCTX_FLAG; 1954 vnic->vnic_id = BNXT_VNIC_ID_INVALID; 1955 rc = bnxt_alloc_vnic_rss_table(bp, vnic); 1956 if (rc) 1957 goto out; 1958 1959 /* Populate defaults in the context */ 1960 bnxt_set_dflt_rss_indir_tbl(bp, ctx); 1961 ctx->hfunc = ETH_RSS_HASH_TOP; 1962 memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE); 1963 memcpy(ethtool_rxfh_context_key(ctx), 1964 bp->rss_hash_key, HW_HASH_KEY_SIZE); 1965 1966 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings); 1967 if (rc) { 1968 NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC"); 1969 goto out; 1970 } 1971 1972 rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA); 1973 if (rc) { 1974 NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); 1975 goto out; 1976 } 1977 bnxt_modify_rss(bp, ctx, rss_ctx, rxfh); 1978 1979 rc = __bnxt_setup_vnic_p5(bp, vnic); 1980 if (rc) { 1981 NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); 1982 goto out; 1983 } 1984 1985 rss_ctx->index = rxfh->rss_context; 1986 return 0; 1987 out: 1988 bnxt_del_one_rss_ctx(bp, rss_ctx, true); 1989 return rc; 1990 } 1991 1992 static int bnxt_modify_rxfh_context(struct net_device *dev, 1993 struct ethtool_rxfh_context *ctx, 1994 const struct ethtool_rxfh_param *rxfh, 1995 struct netlink_ext_ack *extack) 1996 { 1997 struct bnxt *bp = netdev_priv(dev); 1998 struct bnxt_rss_ctx *rss_ctx; 1999 int rc; 2000 2001 rc = bnxt_rxfh_context_check(bp, rxfh, extack); 2002 if (rc) 2003 return rc; 2004 2005 rss_ctx = ethtool_rxfh_context_priv(ctx); 2006 2007 bnxt_modify_rss(bp, ctx, rss_ctx, rxfh); 2008 2009 return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic); 2010 } 2011 2012 static int bnxt_remove_rxfh_context(struct net_device *dev, 2013 struct ethtool_rxfh_context *ctx, 2014 u32 rss_context, 2015 struct netlink_ext_ack *extack) 2016 { 2017 struct bnxt *bp = netdev_priv(dev); 2018 struct bnxt_rss_ctx *rss_ctx; 2019 2020 rss_ctx = 
 ethtool_rxfh_context_priv(ctx);

	bnxt_del_one_rss_ctx(bp, rss_ctx, true);
	return 0;
}

/* ethtool set_rxfh for the default RSS state; additional contexts go
 * through the create/modify/remove context ops above.
 */
static int bnxt_set_rxfh(struct net_device *dev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	bnxt_modify_rss(bp, NULL, NULL, rxfh);

	/* close/open cycle reprograms the firmware with the new settings */
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}

/* ethtool get_drvinfo. */
static void bnxt_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct bnxt *bp = netdev_priv(dev);

	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
	strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	info->n_stats = bnxt_get_num_stats(bp);
	info->testinfo_len = bp->num_tests;
	/* TODO CHIMP_FW: eeprom dump details */
	info->eedump_len = 0;
	/* TODO CHIMP FW: reg dump details */
	info->regdump_len = 0;
}

/* Register dump length: PXP registers plus PCIe stats (PF only). */
static int bnxt_get_regs_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!BNXT_PF(bp))
		return -EOPNOTSUPP;

	return BNXT_PXP_REG_LEN + bp->pcie_stat_len;
}

/* Issue HWRM_PCIE_QSTATS and return a pointer to the DMA buffer the
 * firmware filled in, or NULL on failure.  Buffer is a hwrm_req_dma_slice
 * tied to @req's lifetime.
 */
static void *
__bnxt_hwrm_pcie_qstats(struct bnxt *bp, struct hwrm_pcie_qstats_input *req)
{
	struct pcie_ctx_hw_stats_v2 *hw_pcie_stats;
	dma_addr_t hw_pcie_stats_addr;
	int rc;

	hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
					   &hw_pcie_stats_addr);
	if (!hw_pcie_stats)
		return NULL;

	req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
	req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
	rc = hwrm_req_send(bp, req);

	return rc ?
 NULL : hw_pcie_stats;
}

/* [start, end] byte offsets within pcie_ctx_hw_stats_v2 that hold 32-bit
 * counters; everything outside these ranges is 64-bit.
 */
#define BNXT_PCIE_32B_ENTRY(start, end)			\
	 { offsetof(struct pcie_ctx_hw_stats_v2, start),\
	   offsetof(struct pcie_ctx_hw_stats_v2, end) }

static const struct {
	u16 start;
	u16 end;
} bnxt_pcie_32b_entries[] = {
	BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
	BNXT_PCIE_32B_ENTRY(pcie_tl_credit_nph_histogram[0], unused_1),
	BNXT_PCIE_32B_ENTRY(pcie_rd_latency_histogram[0], unused_2),
};

/* ethtool get_regs: dump PXP registers followed by PCIe statistics,
 * converting each firmware little-endian field to host order at its
 * native width (32- or 64-bit).
 */
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *_p)
{
	struct hwrm_pcie_qstats_output *resp;
	struct hwrm_pcie_qstats_input *req;
	struct bnxt *bp = netdev_priv(dev);
	u8 *src;

	regs->version = 0;
	if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED))
		bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);

	if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
		return;

	if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
		return;

	resp = hwrm_req_hold(bp, req);
	src = __bnxt_hwrm_pcie_qstats(bp, req);
	if (src) {
		u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
		int i, j, len;

		len = min(bp->pcie_stat_len, le16_to_cpu(resp->pcie_stat_size));
		/* version encodes which stats layout the dump contains */
		if (len <= sizeof(struct pcie_ctx_hw_stats))
			regs->version = 1;
		else if (len < sizeof(struct pcie_ctx_hw_stats_v2))
			regs->version = 2;
		else
			regs->version = 3;

		/* i walks the stats buffer; j tracks the current 32-bit
		 * range so fields are copied at the correct width
		 */
		for (i = 0, j = 0; i < len; ) {
			if (i >= bnxt_pcie_32b_entries[j].start &&
			    i <= bnxt_pcie_32b_entries[j].end) {
				u32 *dst32 = (u32 *)(dst + i);

				*dst32 = le32_to_cpu(*(__le32 *)(src + i));
				i += 4;
				if (i > bnxt_pcie_32b_entries[j].end &&
				    j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
					j++;
			} else {
				u64 *dst64 = (u64 *)(dst + i);

				*dst64 = le64_to_cpu(*(__le64 *)(src + i));
				i += 8;
			}
		}
	}
	hwrm_req_drop(bp, req);
} 2155 2156 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2157 { 2158 struct bnxt *bp = netdev_priv(dev); 2159 2160 wol->supported = 0; 2161 wol->wolopts = 0; 2162 memset(&wol->sopass, 0, sizeof(wol->sopass)); 2163 if (bp->flags & BNXT_FLAG_WOL_CAP) { 2164 wol->supported = WAKE_MAGIC; 2165 if (bp->wol) 2166 wol->wolopts = WAKE_MAGIC; 2167 } 2168 } 2169 2170 static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2171 { 2172 struct bnxt *bp = netdev_priv(dev); 2173 2174 if (wol->wolopts & ~WAKE_MAGIC) 2175 return -EINVAL; 2176 2177 if (wol->wolopts & WAKE_MAGIC) { 2178 if (!(bp->flags & BNXT_FLAG_WOL_CAP)) 2179 return -EINVAL; 2180 if (!bp->wol) { 2181 if (bnxt_hwrm_alloc_wol_fltr(bp)) 2182 return -EBUSY; 2183 bp->wol = 1; 2184 } 2185 } else { 2186 if (bp->wol) { 2187 if (bnxt_hwrm_free_wol_fltr(bp)) 2188 return -EBUSY; 2189 bp->wol = 0; 2190 } 2191 } 2192 return 0; 2193 } 2194 2195 /* TODO: support 25GB, 40GB, 50GB with different cable type */ 2196 void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds) 2197 { 2198 linkmode_zero(mode); 2199 2200 if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB) 2201 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode); 2202 if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB) 2203 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode); 2204 if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB) 2205 linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode); 2206 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) 2207 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode); 2208 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) 2209 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode); 2210 } 2211 2212 enum bnxt_media_type { 2213 BNXT_MEDIA_UNKNOWN = 0, 2214 BNXT_MEDIA_TP, 2215 BNXT_MEDIA_CR, 2216 BNXT_MEDIA_SR, 2217 BNXT_MEDIA_LR_ER_FR, 2218 BNXT_MEDIA_KR, 2219 BNXT_MEDIA_KX, 2220 BNXT_MEDIA_X, 2221 __BNXT_MEDIA_END, 2222 }; 2223 2224 static const enum bnxt_media_type bnxt_phy_types[] = { 
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
};

/* Classify the port's media.  The firmware media_type field takes
 * precedence (TP/DAC); otherwise fall back to the per-PHY-type table,
 * returning UNKNOWN for out-of-range or unlisted PHY types.
 */
static enum bnxt_media_type
bnxt_get_media(struct bnxt_link_info *link_info)
{
	switch (link_info->media_type) {
	case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP:
		return BNXT_MEDIA_TP;
	case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC:
		return BNXT_MEDIA_CR;
	default:
		if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types))
			return bnxt_phy_types[link_info->phy_type];
		return BNXT_MEDIA_UNKNOWN;
	}
}

/* Speed dimension used to index bnxt_link_modes and the per-encoding
 * speed-mask tables.  UNKNOWN must stay 0.
 */
enum bnxt_link_speed_indices {
	BNXT_LINK_SPEED_UNKNOWN = 0,
	BNXT_LINK_SPEED_100MB_IDX,
	BNXT_LINK_SPEED_1GB_IDX,
	BNXT_LINK_SPEED_10GB_IDX,
	BNXT_LINK_SPEED_25GB_IDX,
	BNXT_LINK_SPEED_40GB_IDX,
	BNXT_LINK_SPEED_50GB_IDX,
	BNXT_LINK_SPEED_100GB_IDX,
	BNXT_LINK_SPEED_200GB_IDX,
	BNXT_LINK_SPEED_400GB_IDX,
	__BNXT_LINK_SPEED_END
};

/* Map a firmware link-speed value to its speed index.  NRZ and PAM4
 * variants of the same nominal speed collapse to one index.
 */
static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed)
{
	switch (speed) {
	case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX;
	case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX;
	case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX;
	case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX;
	case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX;
	case BNXT_LINK_SPEED_50GB:
	case BNXT_LINK_SPEED_50GB_PAM4:
		return BNXT_LINK_SPEED_50GB_IDX;
	case BNXT_LINK_SPEED_100GB:
	case BNXT_LINK_SPEED_100GB_PAM4:
	case BNXT_LINK_SPEED_100GB_PAM4_112:
		return BNXT_LINK_SPEED_100GB_IDX;
	case BNXT_LINK_SPEED_200GB:
	case BNXT_LINK_SPEED_200GB_PAM4:
	case BNXT_LINK_SPEED_200GB_PAM4_112:
		return BNXT_LINK_SPEED_200GB_IDX;
	case BNXT_LINK_SPEED_400GB:
	case BNXT_LINK_SPEED_400GB_PAM4:
	case BNXT_LINK_SPEED_400GB_PAM4_112:
		return BNXT_LINK_SPEED_400GB_IDX;
	default: return BNXT_LINK_SPEED_UNKNOWN;
	}
}

/* [speed index][signal mode][media] -> ethtool link mode bit.
 * Zero-valued entries represent "no such mode" (see the comment in
 * bnxt_get_link_mode()).
 */
static const enum ethtool_link_mode_bit_indices
bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
	[BNXT_LINK_SPEED_100MB_IDX] = {
		{
			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_1GB_IDX] = {
		{
			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			/* historically baseT, but DAC is more correctly baseX */
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			[BNXT_MEDIA_KX] =
				ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
			[BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_10GB_IDX] = {
		{
			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
			[BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_25GB_IDX] = {
		{
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_40GB_IDX] = {
		{
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		},
	},
	/* From 50G up the signal mode matters: NRZ uses more lanes than
	 * PAM4 / PAM4-112 for the same nominal speed.
	 */
	[BNXT_LINK_SPEED_50GB_IDX] = {
		[BNXT_SIG_MODE_NRZ] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_100GB_IDX] = {
		[BNXT_SIG_MODE_NRZ] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] =
				ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4_112] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_200GB_IDX] = {
		[BNXT_SIG_MODE_PAM4] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4_112] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_400GB_IDX] = {
		[BNXT_SIG_MODE_PAM4] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4_112] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
		},
	},
};

#define BNXT_LINK_MODE_UNKNOWN -1

/* Resolve the currently active (or, without autoneg, the requested)
 * ethtool link mode from speed, signaling mode and media, adjusting
 * 100M/1G full-duplex modes to half-duplex when the link is not
 * full duplex.  Returns BNXT_LINK_MODE_UNKNOWN when no link or no
 * matching table entry.
 */
static enum ethtool_link_mode_bit_indices
bnxt_get_link_mode(struct bnxt_link_info *link_info)
{
	enum ethtool_link_mode_bit_indices link_mode;
	enum bnxt_link_speed_indices speed;
	enum bnxt_media_type media;
	u8 sig_mode;

	if (link_info->phy_link_status != BNXT_LINK_LINK)
		return BNXT_LINK_MODE_UNKNOWN;

	media = bnxt_get_media(link_info);
	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
		/* autoneg: report the negotiated speed/signaling */
		speed = bnxt_fw_speed_idx(link_info->link_speed);
		sig_mode = link_info->active_fec_sig_mode &
			PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
	} else {
		/* forced: report the requested speed/signaling */
		speed = bnxt_fw_speed_idx(link_info->req_link_speed);
		sig_mode = link_info->req_signal_mode;
	}
	if (sig_mode >= BNXT_SIG_MODE_MAX)
		return BNXT_LINK_MODE_UNKNOWN;

	/* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux
	 * link mode, but since no such devices exist, the zeroes in the
	 * map can be conveniently used to represent unknown link modes.
2470 */ 2471 link_mode = bnxt_link_modes[speed][sig_mode][media]; 2472 if (!link_mode) 2473 return BNXT_LINK_MODE_UNKNOWN; 2474 2475 switch (link_mode) { 2476 case ETHTOOL_LINK_MODE_100baseT_Full_BIT: 2477 if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL) 2478 link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT; 2479 break; 2480 case ETHTOOL_LINK_MODE_1000baseT_Full_BIT: 2481 if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL) 2482 link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT; 2483 break; 2484 default: 2485 break; 2486 } 2487 2488 return link_mode; 2489 } 2490 2491 static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info, 2492 struct ethtool_link_ksettings *lk_ksettings) 2493 { 2494 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2495 2496 if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) { 2497 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 2498 lk_ksettings->link_modes.supported); 2499 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, 2500 lk_ksettings->link_modes.supported); 2501 } 2502 2503 if (link_info->support_auto_speeds || link_info->support_auto_speeds2 || 2504 link_info->support_pam4_auto_speeds) 2505 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 2506 lk_ksettings->link_modes.supported); 2507 2508 if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 2509 return; 2510 2511 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) 2512 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 2513 lk_ksettings->link_modes.advertising); 2514 if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1) 2515 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, 2516 lk_ksettings->link_modes.advertising); 2517 if (link_info->lp_pause & BNXT_LINK_PAUSE_RX) 2518 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 2519 lk_ksettings->link_modes.lp_advertising); 2520 if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1) 2521 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, 2522 lk_ksettings->link_modes.lp_advertising); 2523 } 2524 2525 static 
const u16 bnxt_nrz_speed_masks[] = { 2526 [BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB, 2527 [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB, 2528 [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB, 2529 [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB, 2530 [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB, 2531 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB, 2532 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB, 2533 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */ 2534 }; 2535 2536 static const u16 bnxt_pam4_speed_masks[] = { 2537 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB, 2538 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB, 2539 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB, 2540 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */ 2541 }; 2542 2543 static const u16 bnxt_nrz_speeds2_masks[] = { 2544 [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB, 2545 [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB, 2546 [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB, 2547 [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB, 2548 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB, 2549 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB, 2550 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */ 2551 }; 2552 2553 static const u16 bnxt_pam4_speeds2_masks[] = { 2554 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4, 2555 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4, 2556 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4, 2557 [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4, 2558 }; 2559 2560 static const u16 bnxt_pam4_112_speeds2_masks[] = { 2561 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112, 2562 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112, 2563 [BNXT_LINK_SPEED_400GB_IDX] = 
BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112, 2564 }; 2565 2566 static enum bnxt_link_speed_indices 2567 bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk) 2568 { 2569 const u16 *speeds; 2570 int idx, len; 2571 2572 switch (sig_mode) { 2573 case BNXT_SIG_MODE_NRZ: 2574 if (phy_flags & BNXT_PHY_FL_SPEEDS2) { 2575 speeds = bnxt_nrz_speeds2_masks; 2576 len = ARRAY_SIZE(bnxt_nrz_speeds2_masks); 2577 } else { 2578 speeds = bnxt_nrz_speed_masks; 2579 len = ARRAY_SIZE(bnxt_nrz_speed_masks); 2580 } 2581 break; 2582 case BNXT_SIG_MODE_PAM4: 2583 if (phy_flags & BNXT_PHY_FL_SPEEDS2) { 2584 speeds = bnxt_pam4_speeds2_masks; 2585 len = ARRAY_SIZE(bnxt_pam4_speeds2_masks); 2586 } else { 2587 speeds = bnxt_pam4_speed_masks; 2588 len = ARRAY_SIZE(bnxt_pam4_speed_masks); 2589 } 2590 break; 2591 case BNXT_SIG_MODE_PAM4_112: 2592 speeds = bnxt_pam4_112_speeds2_masks; 2593 len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks); 2594 break; 2595 default: 2596 return BNXT_LINK_SPEED_UNKNOWN; 2597 } 2598 2599 for (idx = 0; idx < len; idx++) { 2600 if (speeds[idx] == speed_msk) 2601 return idx; 2602 } 2603 2604 return BNXT_LINK_SPEED_UNKNOWN; 2605 } 2606 2607 #define BNXT_FW_SPEED_MSK_BITS 16 2608 2609 static void 2610 __bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media, 2611 u8 sig_mode, u16 phy_flags, unsigned long *et_mask) 2612 { 2613 enum ethtool_link_mode_bit_indices link_mode; 2614 enum bnxt_link_speed_indices speed; 2615 u8 bit; 2616 2617 for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) { 2618 speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit); 2619 if (!speed) 2620 continue; 2621 2622 link_mode = bnxt_link_modes[speed][sig_mode][media]; 2623 if (!link_mode) 2624 continue; 2625 2626 linkmode_set_bit(link_mode, et_mask); 2627 } 2628 } 2629 2630 static void 2631 bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media, 2632 u8 sig_mode, u16 phy_flags, unsigned long *et_mask) 2633 { 2634 if (media) { 2635 
		__bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
					  et_mask);
		return;
	}

	/* list speeds for all media if unknown */
	for (media = 1; media < __BNXT_MEDIA_END; media++)
		__bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
					  et_mask);
}

/* Fill lk_ksettings->link_modes.supported from the firmware-supported
 * speed masks.  With the SPEEDS2 scheme one mask covers all encodings;
 * otherwise NRZ and PAM4 masks are separate and PAM4-112 is absent.
 */
static void
bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info,
				    enum bnxt_media_type media,
				    struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
	u16 phy_flags = bp->phy_flags;

	if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
		sp_nrz = link_info->support_speeds2;
		sp_pam4 = link_info->support_speeds2;
		sp_pam4_112 = link_info->support_speeds2;
	} else {
		sp_nrz = link_info->support_speeds;
		sp_pam4 = link_info->support_pam4_speeds;
	}
	bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
				lk_ksettings->link_modes.supported);
	bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
				lk_ksettings->link_modes.supported);
	bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
				phy_flags, lk_ksettings->link_modes.supported);
}

/* Fill lk_ksettings->link_modes.advertising from the driver's current
 * advertised speed masks (same SPEEDS2 split as above).
 */
static void
bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info,
				enum bnxt_media_type media,
				struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
	u16 phy_flags = bp->phy_flags;

	sp_nrz = link_info->advertising;
	if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
		sp_pam4 = link_info->advertising;
		sp_pam4_112 = link_info->advertising;
	} else {
		sp_pam4 = link_info->advertising_pam4;
	}
	bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
				lk_ksettings->link_modes.advertising);
	bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
				lk_ksettings->link_modes.advertising);
	bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
				phy_flags, lk_ksettings->link_modes.advertising);
}

/* Fill lk_ksettings->link_modes.lp_advertising from what the link
 * partner advertised (NRZ and PAM4 masks only).
 */
static void
bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info,
			       enum bnxt_media_type media,
			       struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 phy_flags = bp->phy_flags;

	bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media,
				BNXT_SIG_MODE_NRZ, phy_flags,
				lk_ksettings->link_modes.lp_advertising);
	bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media,
				BNXT_SIG_MODE_PAM4, phy_flags,
				lk_ksettings->link_modes.lp_advertising);
}

/* Toggle one firmware speed-mask bit (*speeds) toward the user-desired
 * state of one ethtool mode, tracking changed bits in *delta so that a
 * many(ethtool modes)-to-one(fw bit) mapping only flips each bit once.
 * A set bit on the installed media always wins.
 */
static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
			      u16 speed_msk, const unsigned long *et_mask,
			      enum ethtool_link_mode_bit_indices mode)
{
	bool mode_desired = linkmode_test_bit(mode, et_mask);

	/* mode == 0 marks an empty table slot, not 10baseT_Half. */
	if (!mode)
		return;

	/* enabled speeds for installed media should override */
	if (installed_media && mode_desired) {
		*speeds |= speed_msk;
		*delta |= speed_msk;
		return;
	}

	/* many to one mapping, only allow one change per fw_speed bit */
	if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) {
		*speeds ^= speed_msk;
		*delta |= speed_msk;
	}
}

/* Translate a user-supplied ethtool advertising bitmap into the
 * driver's firmware advertising masks, iterating every speed index and
 * media type and letting bnxt_update_speed() arbitrate conflicts.
 */
static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info,
				    const unsigned long *et_mask)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks;
	enum bnxt_media_type media = bnxt_get_media(link_info);
	u16 *adv, *adv_pam4, *adv_pam4_112 = NULL;
	u32 delta_pam4_112 = 0;
	u32 delta_pam4 = 0;
	u32 delta_nrz = 0;
	int i, m;

	adv = &link_info->advertising;
	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
		/* SPEEDS2: one advertising word shared by all encodings */
		adv_pam4 = &link_info->advertising;
		adv_pam4_112 = &link_info->advertising;
		sp_msks = bnxt_nrz_speeds2_masks;
		sp_pam4_msks = bnxt_pam4_speeds2_masks;
		sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks;
	} else {
		adv_pam4 = &link_info->advertising_pam4;
		sp_msks = bnxt_nrz_speed_masks;
		sp_pam4_msks = bnxt_pam4_speed_masks;
	}
	for (i = 1; i < __BNXT_LINK_SPEED_END; i++) {
		/* accept any legal media from user */
		for (m = 1; m < __BNXT_MEDIA_END; m++) {
			bnxt_update_speed(&delta_nrz, m == media,
					  adv, sp_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]);
			bnxt_update_speed(&delta_pam4, m == media,
					  adv_pam4, sp_pam4_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]);
			if (!adv_pam4_112)
				continue;

			bnxt_update_speed(&delta_pam4_112, m == media,
					  adv_pam4_112, sp_pam4_112_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]);
		}
	}
}

/* Report the advertised FEC modes.  With FEC off or FEC autoneg
 * disabled only FEC_NONE is advertised.
 */
static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
					      struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fec_cfg = link_info->fec_cfg;

	if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 lk_ksettings->link_modes.advertising);
		return;
	}
	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 lk_ksettings->link_modes.advertising);
	if (fec_cfg & BNXT_FEC_ENC_RS)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 lk_ksettings->link_modes.advertising);
	if (fec_cfg & BNXT_FEC_ENC_LLRS)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 lk_ksettings->link_modes.advertising);
}

/* Report the supported FEC modes from the firmware capability bits. */
static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
					   struct
					   ethtool_link_ksettings *lk_ksettings)
{
	u16 fec_cfg = link_info->fec_cfg;

	if (fec_cfg & BNXT_FEC_NONE) {
		/* FEC not supported at all: only FEC_NONE. */
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 lk_ksettings->link_modes.supported);
		return;
	}
	if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 lk_ksettings->link_modes.supported);
	if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 lk_ksettings->link_modes.supported);
	if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 lk_ksettings->link_modes.supported);
}

/* Convert a firmware link-speed code to an ethtool SPEED_* value,
 * SPEED_UNKNOWN if unrecognized.
 */
u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
{
	switch (fw_link_speed) {
	case BNXT_LINK_SPEED_100MB:
		return SPEED_100;
	case BNXT_LINK_SPEED_1GB:
		return SPEED_1000;
	case BNXT_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case BNXT_LINK_SPEED_10GB:
		return SPEED_10000;
	case BNXT_LINK_SPEED_20GB:
		return SPEED_20000;
	case BNXT_LINK_SPEED_25GB:
		return SPEED_25000;
	case BNXT_LINK_SPEED_40GB:
		return SPEED_40000;
	case BNXT_LINK_SPEED_50GB:
	case BNXT_LINK_SPEED_50GB_PAM4:
		return SPEED_50000;
	case BNXT_LINK_SPEED_100GB:
	case BNXT_LINK_SPEED_100GB_PAM4:
	case BNXT_LINK_SPEED_100GB_PAM4_112:
		return SPEED_100000;
	case BNXT_LINK_SPEED_200GB:
	case BNXT_LINK_SPEED_200GB_PAM4:
	case BNXT_LINK_SPEED_200GB_PAM4_112:
		return SPEED_200000;
	case BNXT_LINK_SPEED_400GB:
	case BNXT_LINK_SPEED_400GB_PAM4:
	case BNXT_LINK_SPEED_400GB_PAM4_112:
		return SPEED_400000;
	default:
		return SPEED_UNKNOWN;
	}
}

/* Fallback speed/duplex reporting when no ethtool link mode could be
 * resolved: use the live link speed when up, or the forced request
 * when autoneg is off.
 */
static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
				    struct bnxt_link_info *link_info)
{
	struct ethtool_link_settings *base = &lk_ksettings->base;

	if (link_info->link_state == BNXT_LINK_STATE_UP) {
		base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
		lk_ksettings->lanes = link_info->active_lanes;
	} else if (!link_info->autoneg) {
		base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
	}
}

/* ethtool get_link_ksettings handler: assemble supported/advertised/
 * link-partner modes, speed/duplex, autoneg state and port type under
 * bp->link_lock.  Always returns 0.
 */
static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct ethtool_link_settings *base = &lk_ksettings->base;
	enum ethtool_link_mode_bit_indices link_mode;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	enum bnxt_media_type media;

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising);
	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	base->duplex = DUPLEX_UNKNOWN;
	base->speed = SPEED_UNKNOWN;
	link_info = &bp->link_info;

	mutex_lock(&bp->link_lock);
	bnxt_get_ethtool_modes(link_info, lk_ksettings);
	media = bnxt_get_media(link_info);
	bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings);
	bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
	link_mode = bnxt_get_link_mode(link_info);
	if (link_mode != BNXT_LINK_MODE_UNKNOWN)
		ethtool_params_from_link_mode(lk_ksettings, link_mode);
	else
		bnxt_get_default_speeds(lk_ksettings, link_info);

	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				 lk_ksettings->link_modes.advertising);
		base->autoneg = AUTONEG_ENABLE;
		bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings);
		/* link-partner info is only meaningful with link up */
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			bnxt_get_all_ethtool_lp_speeds(link_info, media,
						       lk_ksettings);
	} else {
		base->autoneg = AUTONEG_DISABLE;
	}

	/* Derive the reported port type from the media class. */
	base->port = PORT_NONE;
	if (media == BNXT_MEDIA_TP) {
		base->port = PORT_TP;
		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
				 lk_ksettings->link_modes.advertising);
	} else if (media == BNXT_MEDIA_KR) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
				 lk_ksettings->link_modes.advertising);
	} else {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
				 lk_ksettings->link_modes.advertising);

		if (media == BNXT_MEDIA_CR)
			base->port = PORT_DA;
		else
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;
	mutex_unlock(&bp->link_lock);

	return 0;
}

/* Force a fixed link speed (autoneg off).  Picks a firmware speed code
 * and signaling mode from the supported-speed masks, preferring lower
 * lane counts when the user constrains 'lanes' (0 = don't care).
 * Returns -EINVAL on unsupported speed/lanes, -EALREADY if the
 * requested forced configuration is already in effect.
 */
static int
bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 support_pam4_spds = link_info->support_pam4_speeds;
	u16 support_spds2 = link_info->support_speeds2;
	u16 support_spds = link_info->support_speeds;
	u8 sig_mode = BNXT_SIG_MODE_NRZ;
	u32 lanes_needed = 1;
	u16 fw_speed = 0;

	switch (ethtool_speed) {
	case SPEED_100:
		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
		break;
	case SPEED_1000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB))
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
		break;
	case SPEED_2500:
		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
			fw_speed =
PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB; 2969 break; 2970 case SPEED_10000: 2971 if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) || 2972 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB)) 2973 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; 2974 break; 2975 case SPEED_20000: 2976 if (support_spds & BNXT_LINK_SPEED_MSK_20GB) { 2977 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB; 2978 lanes_needed = 2; 2979 } 2980 break; 2981 case SPEED_25000: 2982 if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) || 2983 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB)) 2984 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; 2985 break; 2986 case SPEED_40000: 2987 if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) || 2988 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) { 2989 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; 2990 lanes_needed = 4; 2991 } 2992 break; 2993 case SPEED_50000: 2994 if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) || 2995 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) && 2996 lanes != 1) { 2997 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; 2998 lanes_needed = 2; 2999 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) { 3000 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB; 3001 sig_mode = BNXT_SIG_MODE_PAM4; 3002 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) { 3003 fw_speed = BNXT_LINK_SPEED_50GB_PAM4; 3004 sig_mode = BNXT_SIG_MODE_PAM4; 3005 } 3006 break; 3007 case SPEED_100000: 3008 if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) || 3009 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) && 3010 lanes != 2 && lanes != 1) { 3011 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB; 3012 lanes_needed = 4; 3013 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) { 3014 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB; 3015 sig_mode = BNXT_SIG_MODE_PAM4; 3016 lanes_needed = 2; 3017 } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) && 3018 lanes != 1) { 3019 fw_speed = BNXT_LINK_SPEED_100GB_PAM4; 3020 sig_mode = 
BNXT_SIG_MODE_PAM4; 3021 lanes_needed = 2; 3022 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) { 3023 fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112; 3024 sig_mode = BNXT_SIG_MODE_PAM4_112; 3025 } 3026 break; 3027 case SPEED_200000: 3028 if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) { 3029 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB; 3030 sig_mode = BNXT_SIG_MODE_PAM4; 3031 lanes_needed = 4; 3032 } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) && 3033 lanes != 2) { 3034 fw_speed = BNXT_LINK_SPEED_200GB_PAM4; 3035 sig_mode = BNXT_SIG_MODE_PAM4; 3036 lanes_needed = 4; 3037 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) { 3038 fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112; 3039 sig_mode = BNXT_SIG_MODE_PAM4_112; 3040 lanes_needed = 2; 3041 } 3042 break; 3043 case SPEED_400000: 3044 if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) && 3045 lanes != 4) { 3046 fw_speed = BNXT_LINK_SPEED_400GB_PAM4; 3047 sig_mode = BNXT_SIG_MODE_PAM4; 3048 lanes_needed = 8; 3049 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) { 3050 fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112; 3051 sig_mode = BNXT_SIG_MODE_PAM4_112; 3052 lanes_needed = 4; 3053 } 3054 break; 3055 } 3056 3057 if (!fw_speed) { 3058 netdev_err(dev, "unsupported speed!\n"); 3059 return -EINVAL; 3060 } 3061 3062 if (lanes && lanes != lanes_needed) { 3063 netdev_err(dev, "unsupported number of lanes for speed\n"); 3064 return -EINVAL; 3065 } 3066 3067 if (link_info->req_link_speed == fw_speed && 3068 link_info->req_signal_mode == sig_mode && 3069 link_info->autoneg == 0) 3070 return -EALREADY; 3071 3072 link_info->req_link_speed = fw_speed; 3073 link_info->req_signal_mode = sig_mode; 3074 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; 3075 link_info->autoneg = 0; 3076 link_info->advertising = 0; 3077 link_info->advertising_pam4 = 0; 3078 3079 return 0; 3080 } 3081 3082 u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode) 3083 { 3084 u16 
fw_speed_mask = 0;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) ||
	    linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode))
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) ||
	    linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode))
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode))
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;

	return fw_speed_mask;
}

/* ethtool .set_link_ksettings handler.  Programs either the autoneg
 * advertised speeds or a single forced speed, then pushes the new link
 * settings to firmware if the interface is running.
 */
static int bnxt_set_link_ksettings(struct net_device *dev,
			   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u32 speed, lanes = 0;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (base->autoneg == AUTONEG_ENABLE) {
		bnxt_set_ethtool_speeds(link_info,
					lk_ksettings->link_modes.advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		/* Nothing advertised: fall back to everything supported. */
		if (!link_info->advertising && !link_info->advertising_pam4) {
			link_info->advertising = link_info->support_auto_speeds;
			link_info->advertising_pam4 =
				link_info->support_pam4_auto_speeds;
		}
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
			set_pause = true;
	} else {
		u8 phy_type = link_info->phy_type;

		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		lanes = lk_ksettings->lanes;
		rc = bnxt_force_link_speed(dev, speed, lanes);
		if (rc) {
			/* -EALREADY means no change is needed; not an error. */
			if (rc == -EALREADY)
				rc = 0;
			goto set_setting_exit;
		}
	}

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}

/* ethtool .get_fecparam handler: report configured and active FEC modes. */
static int bnxt_get_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fec)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	u8 active_fec;
	u16 fec_cfg;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	active_fec = link_info->active_fec_sig_mode &
		     PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
	if (fec_cfg & BNXT_FEC_NONE) {
		fec->fec = ETHTOOL_FEC_NONE;
		fec->active_fec = ETHTOOL_FEC_NONE;
		return 0;
	}
	if (fec_cfg & BNXT_FEC_AUTONEG)
		fec->fec |= ETHTOOL_FEC_AUTO;
	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
		fec->fec |= ETHTOOL_FEC_BASER;
	if (fec_cfg & BNXT_FEC_ENC_RS)
		fec->fec |= ETHTOOL_FEC_RS;
	if (fec_cfg & BNXT_FEC_ENC_LLRS)
		fec->fec |= ETHTOOL_FEC_LLRS;

	switch (active_fec) {
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_BASER;
		break;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_RS;
		break;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_LLRS;
		break;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_OFF;
		break;
	}
	return 0;
}

/* ethtool .get_fec_stats handler: FEC counters from extended port stats. */
static void bnxt_get_fec_stats(struct net_device *dev,
			       struct ethtool_fec_stats *fec_stats,
			       struct ethtool_fec_hist *hist)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return;

	rx = bp->rx_port_stats_ext.sw_stats;
	fec_stats->corrected_bits.total =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));

	/* Block counters only exist beyond the legacy ext-stats size. */
	if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY)
		return;

	fec_stats->corrected_blocks.total =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
	fec_stats->uncorrectable_blocks.total =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
}

/* Translate one forced ethtool FEC mode to firmware PHY config flags. */
static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
					 u32 fec)
{
	u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;

	if (fec & ETHTOOL_FEC_BASER)
		fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
	else if (fec & ETHTOOL_FEC_RS)
		fw_fec |= BNXT_FEC_RS_ON(link_info);
	else if (fec & ETHTOOL_FEC_LLRS)
		fw_fec |= BNXT_FEC_LLRS_ON;
	return fw_fec;
}

/* ethtool .set_fecparam handler: validate the requested FEC modes against
 * the device capabilities and apply them via HWRM_PORT_PHY_CFG.
 */
static int bnxt_set_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fecparam)
{
	struct hwrm_port_phy_cfg_input *req;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	u32 new_cfg, fec = fecparam->fec;
	u16 fec_cfg;
	int rc;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	if (fec_cfg & BNXT_FEC_NONE)
		return -EOPNOTSUPP;

	if (fec & ETHTOOL_FEC_OFF) {
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
			  BNXT_FEC_ALL_OFF(link_info);
		goto
apply_fec;
	}
	/* Reject any requested mode the hardware does not support. */
	if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
	    ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
	    ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
	    ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
		return -EINVAL;

	if (fec & ETHTOOL_FEC_AUTO) {
		/* FEC autoneg requires link autoneg to be enabled. */
		if (!link_info->autoneg)
			return -EINVAL;
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
	} else {
		new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
	}

apply_fec:
	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;
	req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_req_send(bp, req);
	/* update current settings */
	if (!rc) {
		mutex_lock(&bp->link_lock);
		bnxt_update_link(bp, false);
		mutex_unlock(&bp->link_lock);
	}
	return rc;
}

/* ethtool .get_pauseparam handler. */
static void bnxt_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (BNXT_VF(bp))
		return;
	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
	epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
	epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
}

/* ethtool .get_pause_stats handler: pause frame counters from port stats. */
static void bnxt_get_pause_stats(struct net_device *dev,
				 struct ethtool_pause_stats *epstat)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
	epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
}

/* ethtool .set_pauseparam handler: configure autonegotiated or forced
 * flow control.
 */
static int bnxt_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	int rc = 0;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (epause->autoneg) {
		/* Pause autoneg is only valid with speed autoneg enabled. */
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
			rc = -EINVAL;
			goto pause_exit;
		}

		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	} else {
		/* when transition from auto pause to force pause,
		 * force a link change
		 */
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->force_link_chng = true;
		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	}
	if (epause->rx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;

	if (epause->tx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_pause(bp);

pause_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}

/* ethtool .get_link handler. */
static u32 bnxt_get_link(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	/* TODO: handle MF, VF, driver close case */
	return BNXT_LINK_IS_UP(bp);
}

/* Query NVM device information from firmware (PF only). */
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
			       struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
{
	struct hwrm_nvm_get_dev_info_output *resp;
	struct hwrm_nvm_get_dev_info_input *req;
	int rc;

	if (BNXT_VF(bp))
		return -EOPNOTSUPP;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		memcpy(nvm_dev_info, resp, sizeof(*resp));
	hwrm_req_drop(bp, req);
	return rc;
}

static void bnxt_print_admin_err(struct bnxt *bp)
{
	netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}

/* Forward declaration; defined later in this file. */
int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
			 u16 ext, u16 *index, u32 *item_length,
			 u32 *data_length);

/* Write one NVRAM directory entry via HWRM_NVM_WRITE.  With NULL data,
 * only the directory entry itself (dir_item_len) is written/resized.
 */
int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
		     u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
		     u32 dir_item_len, const u8 *data,
		     size_t data_len)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_write_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
	if (rc)
		return rc;

	if (data_len && data) {
		dma_addr_t dma_handle;
		u8 *kmem;

		kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
		if (!kmem) {
			hwrm_req_drop(bp, req);
			return -ENOMEM;
		}

		req->dir_data_length = cpu_to_le32(data_len);

		memcpy(kmem, data, data_len);
		req->host_src_addr = cpu_to_le64(dma_handle);
	}

	/* NVM writes can be slow; allow the maximum HWRM timeout. */
	hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
	req->dir_type = cpu_to_le16(dir_type);
	req->dir_ordinal = cpu_to_le16(dir_ordinal);
	req->dir_ext = cpu_to_le16(dir_ext);
	req->dir_attr = cpu_to_le16(dir_attr);
	req->dir_item_length = cpu_to_le32(dir_item_len);
	rc = hwrm_req_send(bp, req);

	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}

/* Ask firmware to reset one of the embedded processors. */
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
			     u8 self_reset, u8 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_fw_reset_input *req;
	int rc;

	if (!bnxt_hwrm_reset_permitted(bp)) {
		netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver");
		return -EPERM;
	}

	rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
	if (rc)
		return rc;

	req->embedded_proc_type = proc_type;
	req->selfrst_status = self_reset;
	req->flags = flags;

	if
(proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
		/* Suppress error logging for AP resets. */
		rc = hwrm_req_send_silent(bp, req);
	} else {
		rc = hwrm_req_send(bp, req);
		if (rc == -EACCES)
			bnxt_print_admin_err(bp);
	}
	return rc;
}

/* Pick the embedded processor and self-reset policy for a given NVM
 * directory type, then request the firmware reset.
 */
static int bnxt_firmware_reset(struct net_device *dev,
			       enum bnxt_nvm_directory_type dir_type)
{
	u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
	u8 proc_type, flags = 0;

	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
	/* (e.g. when firmware isn't already running) */
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		/* Self-reset APE upon next PCIe reset: */
		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
		break;
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
		break;
	default:
		return -EINVAL;
	}

	return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
}

/* Reset the whole chip; graceful when firmware supports hot reset. */
static int bnxt_firmware_reset_chip(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 flags = 0;

	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
		flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;

	return bnxt_hwrm_firmware_reset(dev,
					FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
					FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
					flags);
}

/* Reset only the application processor (AP). */
static int bnxt_firmware_reset_ap(struct net_device *dev)
{
	return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
					FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
					0);
}

/* Validate a firmware image (header signature, code type, device family,
 * trailing CRC32) and flash it, then reset the matching embedded
 * processor on success.
 */
static int bnxt_flash_firmware(struct net_device *dev,
			       u16 dir_type,
			       const u8 *fw_data,
			       size_t fw_size)
{
	int rc = 0;
	u16 code_type;
	u32 stored_crc;
	u32 calculated_crc;
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	switch (dir_type) {
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_CHIMP_PATCH:
		code_type = CODE_CHIMP_PATCH;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	case BNX_DIR_TYPE_APE_PATCH:
		code_type = CODE_APE_PATCH;
		break;
	case BNX_DIR_TYPE_KONG_FW:
		code_type = CODE_KONG_FW;
		break;
	case BNX_DIR_TYPE_KONG_PATCH:
		code_type = CODE_KONG_PATCH;
		break;
	case BNX_DIR_TYPE_BONO_FW:
		code_type = CODE_BONO_FW;
		break;
	case BNX_DIR_TYPE_BONO_PATCH:
		code_type = CODE_BONO_PATCH;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
		return -EINVAL;
	}
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
		return -EINVAL;
	}
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
		return -EINVAL;
	}
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
		return -EINVAL;
	}
	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);
	if (rc == 0)	/* Firmware update successful */
		rc = bnxt_firmware_reset(dev, dir_type);

	return rc;
}

/* Validate a microcode image via its trailer (signature, type, length)
 * and trailing CRC32, then flash it.
 */
static int bnxt_flash_microcode(struct net_device *dev,
				u16 dir_type,
				const u8 *fw_data,
				size_t fw_size)
{
	struct bnxt_ucode_trailer *trailer;
	u32 calculated_crc;
	u32 stored_crc;
	int rc = 0;

	if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
						sizeof(*trailer)));
	if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
		netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
			   le32_to_cpu(trailer->sig));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->dir_type) != dir_type) {
		netdev_err(dev, "Expected microcode type: %d, read: %d\n",
			   dir_type, le16_to_cpu(trailer->dir_type));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->trailer_length) <
		sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode trailer length: %d\n",
			   le16_to_cpu(trailer->trailer_length));
		return -EINVAL;
	}

	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if
(calculated_crc != stored_crc) {
		netdev_err(dev,
			   "CRC32 (%08lX) does not match calculated: %08lX\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);

	return rc;
}

/* Directory types whose images carry a bnxt_fw_header (APE binary format). */
static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		return true;
	}

	return false;
}

/* Other executable directory types, flashed via the microcode path. */
static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_AVS:
	case BNX_DIR_TYPE_EXP_ROM_MBA:
	case BNX_DIR_TYPE_PCIE:
	case BNX_DIR_TYPE_TSCF_UCODE:
	case BNX_DIR_TYPE_EXT_PHY:
	case BNX_DIR_TYPE_CCM:
	case BNX_DIR_TYPE_ISCSI_BOOT:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_executable(u16 dir_type)
{
	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
	       bnxt_dir_type_is_other_exec_format(dir_type);
}

/* Load a firmware file from userspace and flash it using the validator
 * appropriate to the directory type.
 */
static int bnxt_flash_firmware_from_file(struct net_device *dev,
					 u16 dir_type,
					 const char *filename)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "Error %d requesting firmware file: %s\n",
			   rc, filename);
		return rc;
	}
	if (bnxt_dir_type_is_ape_bin_format(dir_type))
		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
	else if (bnxt_dir_type_is_other_exec_format(dir_type))
		rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
	else
		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
				      0, 0, 0, fw->data, fw->size);
	release_firmware(fw);
	return rc;
}

#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
#define MSG_INVALID_PKG "PKG install error : Invalid package"
#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
#define MSG_INVALID_DEV "PKG install error : Invalid device"
#define MSG_INTERNAL_ERR "PKG install error : Internal error"
#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
#define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"

/* Map an NVM_INSTALL_UPDATE result code to a standard errno, reporting
 * the failure via extack (when present) and the kernel log.
 */
static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
				    struct netlink_ext_ack *extack)
{
	switch (result) {
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
		return -EINVAL;
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
		return -ENOPKG;
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
		return -EPERM;
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
		return -EOPNOTSUPP;
	default:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
		return -EIO;
	}
}

#define BNXT_PKG_DMA_SIZE	0x40000
#define BNXT_NVM_MORE_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
#define BNXT_NVM_LAST_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))

/* Grow the NVRAM UPDATE area when the package is larger than the
 * existing directory entry.
 */
static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
				    struct netlink_ext_ack *extack)
{
	u32 item_len;
	int rc;

	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
				  &item_len, NULL);
	if (rc) {
		BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
		return rc;
	}

	if (fw_size > item_len) {
		rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
				      BNX_DIR_ORDINAL_FIRST, 0, 1,
				      round_up(fw_size, 4096), NULL, 0);
		if (rc) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
			return rc;
		}
	}
	return 0;
}

int
bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
			       u32 install_type, struct netlink_ext_ack *extack)
{
	struct hwrm_nvm_install_update_input *install;
	struct hwrm_nvm_install_update_output *resp;
	struct hwrm_nvm_modify_input *modify;
	struct bnxt *bp = netdev_priv(dev);
	bool defrag_attempted = false;
	dma_addr_t dma_handle;
	u8 *kmem = NULL;
	u32 modify_len;
	u32 item_len;
	u8 cmd_err;
	u16 index;
	int rc;

	/* resize before flashing larger image than available space */
	rc = bnxt_resize_update_entry(dev, fw->size, extack);
	if (rc)
		return rc;

	bnxt_hwrm_fw_set_time(bp);

	rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
	if (rc)
		return rc;

	/* Try allocating a large DMA buffer first. Older fw will
	 * cause excessive NVRAM erases when using small blocks.
	 */
	modify_len = roundup_pow_of_two(fw->size);
	modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
	while (1) {
		kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
		if (!kmem && modify_len > PAGE_SIZE)
			modify_len /= 2;	/* halve and retry on failure */
		else
			break;
	}
	if (!kmem) {
		hwrm_req_drop(bp, modify);
		return -ENOMEM;
	}

	rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
	if (rc) {
		hwrm_req_drop(bp, modify);
		return rc;
	}

	hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
	hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);

	hwrm_req_hold(bp, modify);
	modify->host_src_addr = cpu_to_le64(dma_handle);

	resp = hwrm_req_hold(bp, install);
	if ((install_type & 0xffff) == 0)
		install_type >>= 16;
	install->install_type = cpu_to_le32(install_type);

	do {
		u32 copied = 0, len = modify_len;

		rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
					  BNX_DIR_ORDINAL_FIRST,
					  BNX_DIR_EXT_NONE,
					  &index, &item_len, NULL);
		if (rc) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
			break;
		}
		if (fw->size > item_len) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
			rc = -EFBIG;
			break;
		}

		modify->dir_idx = cpu_to_le16(index);

		/* Stream the package in modify_len sized batches. */
		if (fw->size > modify_len)
			modify->flags = BNXT_NVM_MORE_FLAG;
		while (copied < fw->size) {
			u32 balance = fw->size - copied;

			if (balance <= modify_len) {
				len = balance;
				if (copied)
					modify->flags |= BNXT_NVM_LAST_FLAG;
			}
			memcpy(kmem, fw->data + copied, len);
			modify->len = cpu_to_le32(len);
			modify->offset = cpu_to_le32(copied);
			rc = hwrm_req_send(bp, modify);
			if (rc)
				goto pkg_abort;
			copied += len;
		}

		rc = hwrm_req_send_silent(bp, install);
		if (!rc)
			break;

		if (defrag_attempted) {
			/* We have tried to defragment already in the previous
			 * iteration. Return with the result for INSTALL_UPDATE
			 */
			break;
		}

		cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

		switch (cmd_err) {
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
			BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
			rc = -EALREADY;
			break;
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
			/* Retry the install with defragmentation allowed. */
			install->flags =
				cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);

			rc = hwrm_req_send_silent(bp, install);
			if (!rc)
				break;

			cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

			if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
				/* FW has cleared NVM area, driver will create
				 * UPDATE directory and try the flash again
				 */
				defrag_attempted = true;
				install->flags = 0;
				rc = bnxt_flash_nvram(bp->dev,
						      BNX_DIR_TYPE_UPDATE,
						      BNX_DIR_ORDINAL_FIRST,
						      0, 0, item_len, NULL, 0);
				if (!rc)
					break;
			}
			fallthrough;
		default:
			BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
		}
	} while (defrag_attempted && !rc);

pkg_abort:
	hwrm_req_drop(bp, modify);
	hwrm_req_drop(bp, install);

	if (resp->result) {
		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
			   (s8)resp->result, (int)resp->problem_item);
		rc = nvm_update_err_to_stderr(dev, resp->result, extack);
	}
	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}

/* Load a package file from userspace and install it into NVRAM. */
static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
					u32 install_type, struct netlink_ext_ack *extack)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "PKG error %d requesting file: %s\n",
			   rc, filename);
		return rc;
	}

	rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);

	release_firmware(fw);

	return rc;
}

/* ethtool .flash_device handler (PF only). */
static int bnxt_flash_device(struct net_device *dev,
			     struct ethtool_flash *flash)
{
	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
		netdev_err(dev, "flashdev not supported from a virtual function\n");
		return -EINVAL;
	}

	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
	    flash->region > 0xffff)
		return bnxt_flash_package_from_file(dev, flash->data,
						    flash->region, NULL);

	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}

/* Query NVRAM directory entry count and per-entry size. */
static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
{
	struct hwrm_nvm_get_dir_info_output *output;
	struct hwrm_nvm_get_dir_info_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
	if (rc)
		return rc;

	output = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		*entries =
le32_to_cpu(output->entries);
		*length = le32_to_cpu(output->entry_length);
	}
	hwrm_req_drop(bp, req);
	return rc;
}

static int bnxt_get_eeprom_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_VF(bp))
		return 0;

	/* The -1 return value allows the entire 32-bit range of offsets to be
	 * passed via the ethtool command-line utility.
	 */
	return -1;
}

/* Read the NVRAM directory into data: 2 header bytes (entry count and
 * entry size) followed by the raw directory entries, truncated to len.
 */
static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u32 dir_entries;
	u32 entry_length;
	u8 *buf;
	size_t buflen;
	dma_addr_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input *req;

	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	if (!dir_entries || !entry_length)
		return -EIO;

	/* Insert 2 bytes of directory info (count and size of entries) */
	if (len < 2)
		return -EINVAL;

	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	memset(data, 0xff, len);

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
	if (rc)
		return rc;

	buflen = mul_u32_u32(dir_entries, entry_length);
	buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}
	req->host_dest_addr = cpu_to_le64(dma_handle);

	hwrm_req_hold(bp, req); /* hold the slice */
	rc = hwrm_req_send(bp, req);
	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);
	hwrm_req_drop(bp, req);
	return rc;
}

/* Read length bytes of one NVRAM item (by directory index) into data. */
int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
			u32 length, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u8 *buf;
	dma_addr_t dma_handle;
	struct hwrm_nvm_read_input *req;

	if (!length)
		return -EINVAL;

	rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
	if (rc)
		return rc;

	buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}

	req->host_dest_addr = cpu_to_le64(dma_handle);
	req->dir_idx = cpu_to_le16(index);
	req->offset = cpu_to_le32(offset);
	req->len = cpu_to_le32(length);

	hwrm_req_hold(bp, req); /* hold the slice */
	rc = hwrm_req_send(bp, req);
	if (rc == 0)
		memcpy(data, buf, length);
	hwrm_req_drop(bp, req);
	return rc;
}

/* Locate an NVRAM directory entry by type/ordinal/ext; optionally return
 * its index, item length and data length via the out pointers.
 */
int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
			 u16 ext, u16 *index, u32 *item_length,
			 u32 *data_length)
{
	struct hwrm_nvm_find_dir_entry_output *output;
	struct hwrm_nvm_find_dir_entry_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);
	if (rc)
		return rc;

	req->enables = 0;
	req->dir_idx = 0;
	req->dir_type = cpu_to_le16(type);
	req->dir_ordinal = cpu_to_le16(ordinal);
	req->dir_ext = cpu_to_le16(ext);
	req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
	output = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (rc == 0) {
		if (index)
			*index = le16_to_cpu(output->dir_idx);
		if (item_length)
			*item_length = le32_to_cpu(output->dir_item_length);
		if (data_length)
			*data_length = le32_to_cpu(output->dir_data_length);
	}
	hwrm_req_drop(bp, req);
	return rc;
}

/* Return a pointer to the desired_field'th tab-separated field of a line
 * in the package log.  Modifies data in place by inserting NUL
 * terminators at field and line boundaries.
 */
static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
{
	char *retval = NULL;
	char *p;
	char *value;
	int field = 0;

	if (datalen < 1)
		return NULL;
	/* null-terminate the log data (removing last '\n'): */
	data[datalen - 1] = 0;
	for (p = data; *p != 0; p++) {
		field = 0;
		retval = NULL;
		while (*p != 0 && *p != '\n') {
			value = p;
			while (*p != 0 && *p != '\t' && *p != '\n')
				p++;
			if (field == desired_field)
				retval = value;
			if (*p != '\t')
				break;
			*p = 0;
			field++;
			p++;
		}
		if (*p == 0)
			break;
		*p = 0;
	}
	return retval;
}

/* Read the firmware package version string from the PKG_LOG NVRAM item. */
int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
{
	struct bnxt *bp = netdev_priv(dev);
	u16 index = 0;
	char *pkgver;
	u32 pkglen;
	u8 *pkgbuf;
	int rc;

	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				  &index, NULL, &pkglen);
	if (rc)
		return rc;

	pkgbuf = kzalloc(pkglen, GFP_KERNEL);
	if (!pkgbuf) {
		dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
			pkglen);
		return -ENOMEM;
	}

	rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
	if (rc)
		goto err;

	pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
				   pkglen);
	/* A valid version string starts with a digit. */
	if (pkgver && *pkgver != 0 && isdigit(*pkgver))
		strscpy(ver, pkgver, size);
	else
		rc = -ENOENT;

err:
	kfree(pkgbuf);

	return rc;
}

/* Append "/pkg <version>" to the firmware version string. */
static void bnxt_get_pkgver(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	char buf[FW_VER_STR_LEN - 5];
	int len;

	if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
		len = strlen(bp->fw_ver_str);
		snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len,
			 "/pkg %s", buf);
	}
}

static int
bnxt_get_eeprom(struct net_device *dev, 4248 struct ethtool_eeprom *eeprom, 4249 u8 *data) 4250 { 4251 u32 index; 4252 u32 offset; 4253 4254 if (eeprom->offset == 0) /* special offset value to get directory */ 4255 return bnxt_get_nvram_directory(dev, eeprom->len, data); 4256 4257 index = eeprom->offset >> 24; 4258 offset = eeprom->offset & 0xffffff; 4259 4260 if (index == 0) { 4261 netdev_err(dev, "unsupported index value: %d\n", index); 4262 return -EINVAL; 4263 } 4264 4265 return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data); 4266 } 4267 4268 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index) 4269 { 4270 struct hwrm_nvm_erase_dir_entry_input *req; 4271 struct bnxt *bp = netdev_priv(dev); 4272 int rc; 4273 4274 rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY); 4275 if (rc) 4276 return rc; 4277 4278 req->dir_idx = cpu_to_le16(index); 4279 return hwrm_req_send(bp, req); 4280 } 4281 4282 static int bnxt_set_eeprom(struct net_device *dev, 4283 struct ethtool_eeprom *eeprom, 4284 u8 *data) 4285 { 4286 struct bnxt *bp = netdev_priv(dev); 4287 u8 index, dir_op; 4288 u16 type, ext, ordinal, attr; 4289 4290 if (!BNXT_PF(bp)) { 4291 netdev_err(dev, "NVM write not supported from a virtual function\n"); 4292 return -EINVAL; 4293 } 4294 4295 type = eeprom->magic >> 16; 4296 4297 if (type == 0xffff) { /* special value for directory operations */ 4298 index = eeprom->magic & 0xff; 4299 dir_op = eeprom->magic >> 8; 4300 if (index == 0) 4301 return -EINVAL; 4302 switch (dir_op) { 4303 case 0x0e: /* erase */ 4304 if (eeprom->offset != ~eeprom->magic) 4305 return -EINVAL; 4306 return bnxt_erase_nvram_directory(dev, index - 1); 4307 default: 4308 return -EINVAL; 4309 } 4310 } 4311 4312 /* Create or re-write an NVM item: */ 4313 if (bnxt_dir_type_is_executable(type)) 4314 return -EOPNOTSUPP; 4315 ext = eeprom->magic & 0xffff; 4316 ordinal = eeprom->offset >> 16; 4317 attr = eeprom->offset & 0xffff; 4318 4319 return bnxt_flash_nvram(dev, 
type, ordinal, ext, attr, 0, data, 4320 eeprom->len); 4321 } 4322 4323 static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata) 4324 { 4325 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); 4326 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp); 4327 struct bnxt *bp = netdev_priv(dev); 4328 struct ethtool_keee *eee = &bp->eee; 4329 struct bnxt_link_info *link_info = &bp->link_info; 4330 int rc = 0; 4331 4332 if (!BNXT_PHY_CFG_ABLE(bp)) 4333 return -EOPNOTSUPP; 4334 4335 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 4336 return -EOPNOTSUPP; 4337 4338 mutex_lock(&bp->link_lock); 4339 _bnxt_fw_to_linkmode(advertising, link_info->advertising); 4340 if (!edata->eee_enabled) 4341 goto eee_ok; 4342 4343 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 4344 netdev_warn(dev, "EEE requires autoneg\n"); 4345 rc = -EINVAL; 4346 goto eee_exit; 4347 } 4348 if (edata->tx_lpi_enabled) { 4349 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi || 4350 edata->tx_lpi_timer < bp->lpi_tmr_lo)) { 4351 netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n", 4352 bp->lpi_tmr_lo, bp->lpi_tmr_hi); 4353 rc = -EINVAL; 4354 goto eee_exit; 4355 } else if (!bp->lpi_tmr_hi) { 4356 edata->tx_lpi_timer = eee->tx_lpi_timer; 4357 } 4358 } 4359 if (linkmode_empty(edata->advertised)) { 4360 linkmode_and(edata->advertised, advertising, eee->supported); 4361 } else if (linkmode_andnot(tmp, edata->advertised, advertising)) { 4362 netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n"); 4363 rc = -EINVAL; 4364 goto eee_exit; 4365 } 4366 4367 linkmode_copy(eee->advertised, edata->advertised); 4368 eee->tx_lpi_enabled = edata->tx_lpi_enabled; 4369 eee->tx_lpi_timer = edata->tx_lpi_timer; 4370 eee_ok: 4371 eee->eee_enabled = edata->eee_enabled; 4372 4373 if (netif_running(dev)) 4374 rc = bnxt_hwrm_set_link_setting(bp, false, true); 4375 4376 eee_exit: 4377 mutex_unlock(&bp->link_lock); 4378 return rc; 4379 } 4380 4381 static int bnxt_get_eee(struct net_device 
*dev, struct ethtool_keee *edata) 4382 { 4383 struct bnxt *bp = netdev_priv(dev); 4384 4385 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 4386 return -EOPNOTSUPP; 4387 4388 *edata = bp->eee; 4389 if (!bp->eee.eee_enabled) { 4390 /* Preserve tx_lpi_timer so that the last value will be used 4391 * by default when it is re-enabled. 4392 */ 4393 linkmode_zero(edata->advertised); 4394 edata->tx_lpi_enabled = 0; 4395 } 4396 4397 if (!bp->eee.eee_active) 4398 linkmode_zero(edata->lp_advertised); 4399 4400 return 0; 4401 } 4402 4403 static int bnxt_hwrm_pfcwd_qcfg(struct bnxt *bp, u16 *val) 4404 { 4405 struct hwrm_queue_pfcwd_timeout_qcfg_output *resp; 4406 struct hwrm_queue_pfcwd_timeout_qcfg_input *req; 4407 int rc; 4408 4409 rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCFG); 4410 if (rc) 4411 return rc; 4412 resp = hwrm_req_hold(bp, req); 4413 rc = hwrm_req_send(bp, req); 4414 if (!rc) 4415 *val = le16_to_cpu(resp->pfcwd_timeout_value); 4416 hwrm_req_drop(bp, req); 4417 return rc; 4418 } 4419 4420 static int bnxt_hwrm_pfcwd_cfg(struct bnxt *bp, u16 val) 4421 { 4422 struct hwrm_queue_pfcwd_timeout_cfg_input *req; 4423 int rc; 4424 4425 rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_CFG); 4426 if (rc) 4427 return rc; 4428 req->pfcwd_timeout_value = cpu_to_le16(val); 4429 rc = hwrm_req_send(bp, req); 4430 return rc; 4431 } 4432 4433 static int bnxt_set_tunable(struct net_device *dev, 4434 const struct ethtool_tunable *tuna, 4435 const void *data) 4436 { 4437 struct bnxt *bp = netdev_priv(dev); 4438 u32 rx_copybreak, val; 4439 4440 switch (tuna->id) { 4441 case ETHTOOL_RX_COPYBREAK: 4442 rx_copybreak = *(u32 *)data; 4443 if (rx_copybreak > BNXT_MAX_RX_COPYBREAK) 4444 return -ERANGE; 4445 if (rx_copybreak != bp->rx_copybreak) { 4446 if (netif_running(dev)) 4447 return -EBUSY; 4448 bp->rx_copybreak = rx_copybreak; 4449 } 4450 return 0; 4451 case ETHTOOL_PFC_PREVENTION_TOUT: 4452 if (BNXT_VF(bp) || !bp->max_pfcwd_tmo_ms) 4453 return -EOPNOTSUPP; 4454 4455 val = 
*(u16 *)data; 4456 if (val > bp->max_pfcwd_tmo_ms && 4457 val != PFC_STORM_PREVENTION_AUTO) 4458 return -EINVAL; 4459 return bnxt_hwrm_pfcwd_cfg(bp, val); 4460 default: 4461 return -EOPNOTSUPP; 4462 } 4463 } 4464 4465 static int bnxt_get_tunable(struct net_device *dev, 4466 const struct ethtool_tunable *tuna, void *data) 4467 { 4468 struct bnxt *bp = netdev_priv(dev); 4469 4470 switch (tuna->id) { 4471 case ETHTOOL_RX_COPYBREAK: 4472 *(u32 *)data = bp->rx_copybreak; 4473 break; 4474 case ETHTOOL_PFC_PREVENTION_TOUT: 4475 if (!bp->max_pfcwd_tmo_ms) 4476 return -EOPNOTSUPP; 4477 return bnxt_hwrm_pfcwd_qcfg(bp, data); 4478 default: 4479 return -EOPNOTSUPP; 4480 } 4481 4482 return 0; 4483 } 4484 4485 static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, 4486 u16 page_number, u8 bank, 4487 u16 start_addr, u16 data_length, 4488 u8 *buf) 4489 { 4490 struct hwrm_port_phy_i2c_read_output *output; 4491 struct hwrm_port_phy_i2c_read_input *req; 4492 int rc, byte_offset = 0; 4493 4494 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ); 4495 if (rc) 4496 return rc; 4497 4498 output = hwrm_req_hold(bp, req); 4499 req->i2c_slave_addr = i2c_addr; 4500 req->page_number = cpu_to_le16(page_number); 4501 req->port_id = cpu_to_le16(bp->pf.port_id); 4502 do { 4503 u16 xfer_size; 4504 4505 xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE); 4506 data_length -= xfer_size; 4507 req->page_offset = cpu_to_le16(start_addr + byte_offset); 4508 req->data_length = xfer_size; 4509 req->enables = 4510 cpu_to_le32((start_addr + byte_offset ? 4511 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 4512 0) | 4513 (bank ? 
4514 PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER : 4515 0)); 4516 rc = hwrm_req_send(bp, req); 4517 if (!rc) 4518 memcpy(buf + byte_offset, output->data, xfer_size); 4519 byte_offset += xfer_size; 4520 } while (!rc && data_length > 0); 4521 hwrm_req_drop(bp, req); 4522 4523 return rc; 4524 } 4525 4526 static int bnxt_get_module_info(struct net_device *dev, 4527 struct ethtool_modinfo *modinfo) 4528 { 4529 u8 data[SFF_DIAG_SUPPORT_OFFSET + 1]; 4530 struct bnxt *bp = netdev_priv(dev); 4531 int rc; 4532 4533 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4534 return -EPERM; 4535 4536 /* No point in going further if phy status indicates 4537 * module is not inserted or if it is powered down or 4538 * if it is of type 10GBase-T 4539 */ 4540 if (bp->link_info.module_status > 4541 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG) 4542 return -EOPNOTSUPP; 4543 4544 /* This feature is not supported in older firmware versions */ 4545 if (bp->hwrm_spec_code < 0x10202) 4546 return -EOPNOTSUPP; 4547 4548 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0, 4549 SFF_DIAG_SUPPORT_OFFSET + 1, 4550 data); 4551 if (!rc) { 4552 u8 module_id = data[0]; 4553 u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET]; 4554 4555 switch (module_id) { 4556 case SFF_MODULE_ID_SFP: 4557 modinfo->type = ETH_MODULE_SFF_8472; 4558 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 4559 if (!diag_supported) 4560 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 4561 break; 4562 case SFF_MODULE_ID_QSFP: 4563 case SFF_MODULE_ID_QSFP_PLUS: 4564 modinfo->type = ETH_MODULE_SFF_8436; 4565 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 4566 break; 4567 case SFF_MODULE_ID_QSFP28: 4568 modinfo->type = ETH_MODULE_SFF_8636; 4569 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 4570 break; 4571 default: 4572 rc = -EOPNOTSUPP; 4573 break; 4574 } 4575 } 4576 return rc; 4577 } 4578 4579 static int bnxt_get_module_eeprom(struct net_device *dev, 4580 struct ethtool_eeprom *eeprom, 4581 u8 *data) 4582 { 4583 struct bnxt *bp = 
netdev_priv(dev); 4584 u16 start = eeprom->offset, length = eeprom->len; 4585 int rc = 0; 4586 4587 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4588 return -EPERM; 4589 4590 memset(data, 0, eeprom->len); 4591 4592 /* Read A0 portion of the EEPROM */ 4593 if (start < ETH_MODULE_SFF_8436_LEN) { 4594 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN) 4595 length = ETH_MODULE_SFF_8436_LEN - start; 4596 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 4597 start, length, data); 4598 if (rc) 4599 return rc; 4600 start += length; 4601 data += length; 4602 length = eeprom->len - length; 4603 } 4604 4605 /* Read A2 portion of the EEPROM */ 4606 if (length) { 4607 start -= ETH_MODULE_SFF_8436_LEN; 4608 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0, 4609 start, length, data); 4610 } 4611 return rc; 4612 } 4613 4614 static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack) 4615 { 4616 if (bp->link_info.module_status <= 4617 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG) 4618 return 0; 4619 4620 switch (bp->link_info.module_status) { 4621 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 4622 NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down"); 4623 break; 4624 case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED: 4625 NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted"); 4626 break; 4627 case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT: 4628 NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault"); 4629 break; 4630 default: 4631 NL_SET_ERR_MSG_MOD(extack, "Unknown error"); 4632 break; 4633 } 4634 return -EINVAL; 4635 } 4636 4637 static int 4638 bnxt_mod_eeprom_by_page_precheck(struct bnxt *bp, 4639 const struct ethtool_module_eeprom *page_data, 4640 struct netlink_ext_ack *extack) 4641 { 4642 int rc; 4643 4644 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { 4645 NL_SET_ERR_MSG_MOD(extack, 4646 "Module read/write not permitted on untrusted VF"); 4647 return -EPERM; 4648 } 4649 4650 
rc = bnxt_get_module_status(bp, extack); 4651 if (rc) 4652 return rc; 4653 4654 if (bp->hwrm_spec_code < 0x10202) { 4655 NL_SET_ERR_MSG_MOD(extack, "Firmware version too old"); 4656 return -EINVAL; 4657 } 4658 4659 if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) { 4660 NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection"); 4661 return -EINVAL; 4662 } 4663 return 0; 4664 } 4665 4666 static int bnxt_get_module_eeprom_by_page(struct net_device *dev, 4667 const struct ethtool_module_eeprom *page_data, 4668 struct netlink_ext_ack *extack) 4669 { 4670 struct bnxt *bp = netdev_priv(dev); 4671 int rc; 4672 4673 rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack); 4674 if (rc) 4675 return rc; 4676 4677 rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1, 4678 page_data->page, page_data->bank, 4679 page_data->offset, 4680 page_data->length, 4681 page_data->data); 4682 if (rc) { 4683 NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom read failed"); 4684 return rc; 4685 } 4686 return page_data->length; 4687 } 4688 4689 static int bnxt_write_sfp_module_eeprom_info(struct bnxt *bp, 4690 const struct ethtool_module_eeprom *page) 4691 { 4692 struct hwrm_port_phy_i2c_write_input *req; 4693 int bytes_written = 0; 4694 int rc; 4695 4696 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_WRITE); 4697 if (rc) 4698 return rc; 4699 4700 hwrm_req_hold(bp, req); 4701 req->i2c_slave_addr = page->i2c_address << 1; 4702 req->page_number = cpu_to_le16(page->page); 4703 req->bank_number = page->bank; 4704 req->port_id = cpu_to_le16(bp->pf.port_id); 4705 req->enables = cpu_to_le32(PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET | 4706 PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER); 4707 4708 while (bytes_written < page->length) { 4709 u16 xfer_size; 4710 4711 xfer_size = min_t(u16, page->length - bytes_written, 4712 BNXT_MAX_PHY_I2C_RESP_SIZE); 4713 req->page_offset = cpu_to_le16(page->offset + bytes_written); 4714 req->data_length = xfer_size; 4715 
memcpy(req->data, page->data + bytes_written, xfer_size); 4716 rc = hwrm_req_send(bp, req); 4717 if (rc) 4718 break; 4719 bytes_written += xfer_size; 4720 } 4721 4722 hwrm_req_drop(bp, req); 4723 return rc; 4724 } 4725 4726 static int bnxt_set_module_eeprom_by_page(struct net_device *dev, 4727 const struct ethtool_module_eeprom *page_data, 4728 struct netlink_ext_ack *extack) 4729 { 4730 struct bnxt *bp = netdev_priv(dev); 4731 int rc; 4732 4733 rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack); 4734 if (rc) 4735 return rc; 4736 4737 rc = bnxt_write_sfp_module_eeprom_info(bp, page_data); 4738 if (rc) { 4739 NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom write failed"); 4740 return rc; 4741 } 4742 return page_data->length; 4743 } 4744 4745 static int bnxt_nway_reset(struct net_device *dev) 4746 { 4747 int rc = 0; 4748 4749 struct bnxt *bp = netdev_priv(dev); 4750 struct bnxt_link_info *link_info = &bp->link_info; 4751 4752 if (!BNXT_PHY_CFG_ABLE(bp)) 4753 return -EOPNOTSUPP; 4754 4755 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) 4756 return -EINVAL; 4757 4758 if (netif_running(dev)) 4759 rc = bnxt_hwrm_set_link_setting(bp, true, false); 4760 4761 return rc; 4762 } 4763 4764 static int bnxt_set_phys_id(struct net_device *dev, 4765 enum ethtool_phys_id_state state) 4766 { 4767 struct hwrm_port_led_cfg_input *req; 4768 struct bnxt *bp = netdev_priv(dev); 4769 struct bnxt_pf_info *pf = &bp->pf; 4770 struct bnxt_led_cfg *led_cfg; 4771 u8 led_state; 4772 __le16 duration; 4773 int rc, i; 4774 4775 if (!bp->num_leds || BNXT_VF(bp)) 4776 return -EOPNOTSUPP; 4777 4778 if (state == ETHTOOL_ID_ACTIVE) { 4779 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT; 4780 duration = cpu_to_le16(500); 4781 } else if (state == ETHTOOL_ID_INACTIVE) { 4782 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT; 4783 duration = cpu_to_le16(0); 4784 } else { 4785 return -EINVAL; 4786 } 4787 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG); 4788 if (rc) 4789 return rc; 4790 4791 req->port_id 
= cpu_to_le16(pf->port_id); 4792 req->num_leds = bp->num_leds; 4793 led_cfg = (struct bnxt_led_cfg *)&req->led0_id; 4794 for (i = 0; i < bp->num_leds; i++, led_cfg++) { 4795 req->enables |= BNXT_LED_DFLT_ENABLES(i); 4796 led_cfg->led_id = bp->leds[i].led_id; 4797 led_cfg->led_state = led_state; 4798 led_cfg->led_blink_on = duration; 4799 led_cfg->led_blink_off = duration; 4800 led_cfg->led_group_id = bp->leds[i].led_group_id; 4801 } 4802 return hwrm_req_send(bp, req); 4803 } 4804 4805 static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring) 4806 { 4807 struct hwrm_selftest_irq_input *req; 4808 int rc; 4809 4810 rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ); 4811 if (rc) 4812 return rc; 4813 4814 req->cmpl_ring = cpu_to_le16(cmpl_ring); 4815 return hwrm_req_send(bp, req); 4816 } 4817 4818 static int bnxt_test_irq(struct bnxt *bp) 4819 { 4820 int i; 4821 4822 for (i = 0; i < bp->cp_nr_rings; i++) { 4823 u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id; 4824 int rc; 4825 4826 rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring); 4827 if (rc) 4828 return rc; 4829 } 4830 return 0; 4831 } 4832 4833 static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable) 4834 { 4835 struct hwrm_port_mac_cfg_input *req; 4836 int rc; 4837 4838 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); 4839 if (rc) 4840 return rc; 4841 4842 req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK); 4843 if (enable) 4844 req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL; 4845 else 4846 req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; 4847 return hwrm_req_send(bp, req); 4848 } 4849 4850 static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds) 4851 { 4852 struct hwrm_port_phy_qcaps_output *resp; 4853 struct hwrm_port_phy_qcaps_input *req; 4854 int rc; 4855 4856 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); 4857 if (rc) 4858 return rc; 4859 4860 resp = hwrm_req_hold(bp, req); 4861 rc = hwrm_req_send(bp, req); 4862 if (!rc) 4863 *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode); 4864 4865 
hwrm_req_drop(bp, req); 4866 return rc; 4867 } 4868 4869 static int bnxt_disable_an_for_lpbk(struct bnxt *bp, 4870 struct hwrm_port_phy_cfg_input *req) 4871 { 4872 struct bnxt_link_info *link_info = &bp->link_info; 4873 u16 fw_advertising; 4874 u16 fw_speed; 4875 int rc; 4876 4877 if (!link_info->autoneg || 4878 (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK)) 4879 return 0; 4880 4881 rc = bnxt_query_force_speeds(bp, &fw_advertising); 4882 if (rc) 4883 return rc; 4884 4885 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; 4886 if (BNXT_LINK_IS_UP(bp)) 4887 fw_speed = bp->link_info.link_speed; 4888 else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB) 4889 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; 4890 else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB) 4891 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; 4892 else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB) 4893 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; 4894 else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB) 4895 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; 4896 4897 req->force_link_speed = cpu_to_le16(fw_speed); 4898 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE | 4899 PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 4900 rc = hwrm_req_send(bp, req); 4901 req->flags = 0; 4902 req->force_link_speed = cpu_to_le16(0); 4903 return rc; 4904 } 4905 4906 static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext) 4907 { 4908 struct hwrm_port_phy_cfg_input *req; 4909 int rc; 4910 4911 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 4912 if (rc) 4913 return rc; 4914 4915 /* prevent bnxt_disable_an_for_lpbk() from consuming the request */ 4916 hwrm_req_hold(bp, req); 4917 4918 if (enable) { 4919 bnxt_disable_an_for_lpbk(bp, req); 4920 if (ext) 4921 req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL; 4922 else 4923 req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; 4924 } else { 4925 req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE; 4926 } 4927 req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK); 4928 rc = 
hwrm_req_send(bp, req); 4929 hwrm_req_drop(bp, req); 4930 return rc; 4931 } 4932 4933 static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 4934 u32 raw_cons, int pkt_size) 4935 { 4936 struct bnxt_napi *bnapi = cpr->bnapi; 4937 struct bnxt_rx_ring_info *rxr; 4938 struct bnxt_sw_rx_bd *rx_buf; 4939 struct rx_cmp *rxcmp; 4940 u16 cp_cons, cons; 4941 u8 *data; 4942 u32 len; 4943 int i; 4944 4945 rxr = bnapi->rx_ring; 4946 cp_cons = RING_CMP(raw_cons); 4947 rxcmp = (struct rx_cmp *) 4948 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 4949 cons = rxcmp->rx_cmp_opaque; 4950 rx_buf = &rxr->rx_buf_ring[cons]; 4951 data = rx_buf->data_ptr; 4952 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; 4953 if (len != pkt_size) 4954 return -EIO; 4955 i = ETH_ALEN; 4956 if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr)) 4957 return -EIO; 4958 i += ETH_ALEN; 4959 for ( ; i < pkt_size; i++) { 4960 if (data[i] != (u8)(i & 0xff)) 4961 return -EIO; 4962 } 4963 return 0; 4964 } 4965 4966 static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 4967 int pkt_size) 4968 { 4969 struct tx_cmp *txcmp; 4970 int rc = -EIO; 4971 u32 raw_cons; 4972 u32 cons; 4973 int i; 4974 4975 raw_cons = cpr->cp_raw_cons; 4976 for (i = 0; i < 200; i++) { 4977 cons = RING_CMP(raw_cons); 4978 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 4979 4980 if (!TX_CMP_VALID(txcmp, raw_cons)) { 4981 udelay(5); 4982 continue; 4983 } 4984 4985 /* The valid test of the entry must be done first before 4986 * reading any further. 
4987 */ 4988 dma_rmb(); 4989 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP || 4990 TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) { 4991 rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size); 4992 raw_cons = NEXT_RAW_CMP(raw_cons); 4993 raw_cons = NEXT_RAW_CMP(raw_cons); 4994 break; 4995 } 4996 raw_cons = NEXT_RAW_CMP(raw_cons); 4997 } 4998 cpr->cp_raw_cons = raw_cons; 4999 return rc; 5000 } 5001 5002 static int bnxt_run_loopback(struct bnxt *bp) 5003 { 5004 struct bnxt_tx_ring_info *txr = &bp->tx_ring[0]; 5005 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 5006 struct bnxt_cp_ring_info *cpr; 5007 int pkt_size, i = 0; 5008 struct sk_buff *skb; 5009 dma_addr_t map; 5010 u8 *data; 5011 int rc; 5012 5013 cpr = &rxr->bnapi->cp_ring; 5014 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5015 cpr = rxr->rx_cpr; 5016 pkt_size = min(bp->dev->mtu + ETH_HLEN, max(BNXT_DEFAULT_RX_COPYBREAK, 5017 bp->rx_copybreak)); 5018 skb = netdev_alloc_skb(bp->dev, pkt_size); 5019 if (!skb) 5020 return -ENOMEM; 5021 data = skb_put(skb, pkt_size); 5022 ether_addr_copy(&data[i], bp->dev->dev_addr); 5023 i += ETH_ALEN; 5024 ether_addr_copy(&data[i], bp->dev->dev_addr); 5025 i += ETH_ALEN; 5026 for ( ; i < pkt_size; i++) 5027 data[i] = (u8)(i & 0xff); 5028 5029 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, 5030 DMA_TO_DEVICE); 5031 if (dma_mapping_error(&bp->pdev->dev, map)) { 5032 dev_kfree_skb(skb); 5033 return -EIO; 5034 } 5035 bnxt_xmit_bd(bp, txr, map, pkt_size, NULL); 5036 5037 /* Sync BD data before updating doorbell */ 5038 wmb(); 5039 5040 bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); 5041 rc = bnxt_poll_loopback(bp, cpr, pkt_size); 5042 5043 dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE); 5044 dev_kfree_skb(skb); 5045 return rc; 5046 } 5047 5048 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results) 5049 { 5050 struct hwrm_selftest_exec_output *resp; 5051 struct hwrm_selftest_exec_input *req; 5052 int rc; 5053 5054 rc = hwrm_req_init(bp, req, 
HWRM_SELFTEST_EXEC); 5055 if (rc) 5056 return rc; 5057 5058 hwrm_req_timeout(bp, req, bp->test_info->timeout); 5059 req->flags = test_mask; 5060 5061 resp = hwrm_req_hold(bp, req); 5062 rc = hwrm_req_send(bp, req); 5063 *test_results = resp->test_success; 5064 hwrm_req_drop(bp, req); 5065 return rc; 5066 } 5067 5068 #define BNXT_DRV_TESTS 4 5069 #define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS) 5070 #define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1) 5071 #define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2) 5072 #define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3) 5073 5074 static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, 5075 u64 *buf) 5076 { 5077 struct bnxt *bp = netdev_priv(dev); 5078 bool do_ext_lpbk = false; 5079 bool offline = false; 5080 u8 test_results = 0; 5081 u8 test_mask = 0; 5082 int rc = 0, i; 5083 5084 if (!bp->num_tests || !BNXT_PF(bp)) 5085 return; 5086 5087 memset(buf, 0, sizeof(u64) * bp->num_tests); 5088 if (etest->flags & ETH_TEST_FL_OFFLINE && 5089 bnxt_ulp_registered(bp->edev)) { 5090 etest->flags |= ETH_TEST_FL_FAILED; 5091 netdev_warn(dev, "Offline tests cannot be run with RoCE driver loaded\n"); 5092 return; 5093 } 5094 5095 if (!netif_running(dev)) { 5096 etest->flags |= ETH_TEST_FL_FAILED; 5097 return; 5098 } 5099 5100 if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) && 5101 (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK)) 5102 do_ext_lpbk = true; 5103 5104 if (etest->flags & ETH_TEST_FL_OFFLINE) { 5105 if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) { 5106 etest->flags |= ETH_TEST_FL_FAILED; 5107 netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n"); 5108 return; 5109 } 5110 offline = true; 5111 } 5112 5113 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) { 5114 u8 bit_val = 1 << i; 5115 5116 if (!(bp->test_info->offline_mask & bit_val)) 5117 test_mask |= bit_val; 5118 else if (offline) 5119 test_mask |= bit_val; 5120 } 5121 if (!offline) { 5122 
bnxt_run_fw_tests(bp, test_mask, &test_results); 5123 } else { 5124 bnxt_close_nic(bp, true, false); 5125 bnxt_run_fw_tests(bp, test_mask, &test_results); 5126 5127 rc = bnxt_half_open_nic(bp); 5128 if (rc) { 5129 etest->flags |= ETH_TEST_FL_FAILED; 5130 return; 5131 } 5132 buf[BNXT_MACLPBK_TEST_IDX] = 1; 5133 if (bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK) 5134 goto skip_mac_loopback; 5135 5136 bnxt_hwrm_mac_loopback(bp, true); 5137 msleep(250); 5138 if (bnxt_run_loopback(bp)) 5139 etest->flags |= ETH_TEST_FL_FAILED; 5140 else 5141 buf[BNXT_MACLPBK_TEST_IDX] = 0; 5142 5143 bnxt_hwrm_mac_loopback(bp, false); 5144 skip_mac_loopback: 5145 buf[BNXT_PHYLPBK_TEST_IDX] = 1; 5146 if (bp->phy_flags & BNXT_PHY_FL_NO_PHY_LPBK) 5147 goto skip_phy_loopback; 5148 5149 bnxt_hwrm_phy_loopback(bp, true, false); 5150 msleep(1000); 5151 if (bnxt_run_loopback(bp)) 5152 etest->flags |= ETH_TEST_FL_FAILED; 5153 else 5154 buf[BNXT_PHYLPBK_TEST_IDX] = 0; 5155 skip_phy_loopback: 5156 buf[BNXT_EXTLPBK_TEST_IDX] = 1; 5157 if (do_ext_lpbk) { 5158 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 5159 bnxt_hwrm_phy_loopback(bp, true, true); 5160 msleep(1000); 5161 if (bnxt_run_loopback(bp)) 5162 etest->flags |= ETH_TEST_FL_FAILED; 5163 else 5164 buf[BNXT_EXTLPBK_TEST_IDX] = 0; 5165 } 5166 bnxt_hwrm_phy_loopback(bp, false, false); 5167 bnxt_half_close_nic(bp); 5168 rc = bnxt_open_nic(bp, true, true); 5169 } 5170 if (rc || bnxt_test_irq(bp)) { 5171 buf[BNXT_IRQ_TEST_IDX] = 1; 5172 etest->flags |= ETH_TEST_FL_FAILED; 5173 } 5174 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) { 5175 u8 bit_val = 1 << i; 5176 5177 if ((test_mask & bit_val) && !(test_results & bit_val)) { 5178 buf[i] = 1; 5179 etest->flags |= ETH_TEST_FL_FAILED; 5180 } 5181 } 5182 } 5183 5184 static int bnxt_reset(struct net_device *dev, u32 *flags) 5185 { 5186 struct bnxt *bp = netdev_priv(dev); 5187 bool reload = false; 5188 u32 req = *flags; 5189 5190 if (!req) 5191 return -EINVAL; 5192 5193 if (!BNXT_PF(bp)) { 5194 
netdev_err(dev, "Reset is not supported from a VF\n"); 5195 return -EOPNOTSUPP; 5196 } 5197 5198 if (pci_vfs_assigned(bp->pdev) && 5199 !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) { 5200 netdev_err(dev, 5201 "Reset not allowed when VFs are assigned to VMs\n"); 5202 return -EBUSY; 5203 } 5204 5205 if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) { 5206 /* This feature is not supported in older firmware versions */ 5207 if (bp->hwrm_spec_code >= 0x10803) { 5208 if (!bnxt_firmware_reset_chip(dev)) { 5209 netdev_info(dev, "Firmware reset request successful.\n"); 5210 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) 5211 reload = true; 5212 *flags &= ~BNXT_FW_RESET_CHIP; 5213 } 5214 } else if (req == BNXT_FW_RESET_CHIP) { 5215 return -EOPNOTSUPP; /* only request, fail hard */ 5216 } 5217 } 5218 5219 if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) { 5220 /* This feature is not supported in older firmware versions */ 5221 if (bp->hwrm_spec_code >= 0x10803) { 5222 if (!bnxt_firmware_reset_ap(dev)) { 5223 netdev_info(dev, "Reset application processor successful.\n"); 5224 reload = true; 5225 *flags &= ~BNXT_FW_RESET_AP; 5226 } 5227 } else if (req == BNXT_FW_RESET_AP) { 5228 return -EOPNOTSUPP; /* only request, fail hard */ 5229 } 5230 } 5231 5232 if (reload) 5233 netdev_info(dev, "Reload driver to complete reset\n"); 5234 5235 return 0; 5236 } 5237 5238 static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump) 5239 { 5240 struct bnxt *bp = netdev_priv(dev); 5241 5242 if (dump->flag > BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE) { 5243 netdev_info(dev, 5244 "Supports only Live(0), Crash(1), Driver(2), Live with cached context(3) dumps.\n"); 5245 return -EINVAL; 5246 } 5247 5248 if (dump->flag == BNXT_DUMP_CRASH) { 5249 if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR && 5250 (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) { 5251 netdev_info(dev, 5252 "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n"); 5253 return -EOPNOTSUPP; 5254 } else if 
(!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) { 5255 netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n"); 5256 return -EOPNOTSUPP; 5257 } 5258 } 5259 5260 bp->dump_flag = dump->flag; 5261 return 0; 5262 } 5263 5264 static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) 5265 { 5266 struct bnxt *bp = netdev_priv(dev); 5267 5268 if (bp->hwrm_spec_code < 0x10801) 5269 return -EOPNOTSUPP; 5270 5271 dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 | 5272 bp->ver_resp.hwrm_fw_min_8b << 16 | 5273 bp->ver_resp.hwrm_fw_bld_8b << 8 | 5274 bp->ver_resp.hwrm_fw_rsvd_8b; 5275 5276 dump->flag = bp->dump_flag; 5277 dump->len = bnxt_get_coredump_length(bp, bp->dump_flag); 5278 return 0; 5279 } 5280 5281 static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, 5282 void *buf) 5283 { 5284 struct bnxt *bp = netdev_priv(dev); 5285 5286 if (bp->hwrm_spec_code < 0x10801) 5287 return -EOPNOTSUPP; 5288 5289 memset(buf, 0, dump->len); 5290 5291 dump->flag = bp->dump_flag; 5292 return bnxt_get_coredump(bp, dump->flag, buf, &dump->len); 5293 } 5294 5295 static int bnxt_get_ts_info(struct net_device *dev, 5296 struct kernel_ethtool_ts_info *info) 5297 { 5298 struct bnxt *bp = netdev_priv(dev); 5299 struct bnxt_ptp_cfg *ptp; 5300 5301 ptp = bp->ptp_cfg; 5302 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE; 5303 5304 if (!ptp) 5305 return 0; 5306 5307 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | 5308 SOF_TIMESTAMPING_RX_HARDWARE | 5309 SOF_TIMESTAMPING_RAW_HARDWARE; 5310 if (ptp->ptp_clock) 5311 info->phc_index = ptp_clock_index(ptp->ptp_clock); 5312 5313 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 5314 5315 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 5316 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 5317 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 5318 5319 if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) 5320 info->rx_filters |= (1 << 
HWTSTAMP_FILTER_ALL); 5321 return 0; 5322 } 5323 5324 static void bnxt_hwrm_pcie_qstats(struct bnxt *bp) 5325 { 5326 struct hwrm_pcie_qstats_output *resp; 5327 struct hwrm_pcie_qstats_input *req; 5328 5329 bp->pcie_stat_len = 0; 5330 if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) 5331 return; 5332 5333 if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS)) 5334 return; 5335 5336 resp = hwrm_req_hold(bp, req); 5337 if (__bnxt_hwrm_pcie_qstats(bp, req)) 5338 bp->pcie_stat_len = min_t(u16, 5339 le16_to_cpu(resp->pcie_stat_size), 5340 sizeof(struct pcie_ctx_hw_stats_v2)); 5341 hwrm_req_drop(bp, req); 5342 } 5343 5344 void bnxt_ethtool_init(struct bnxt *bp) 5345 { 5346 struct hwrm_selftest_qlist_output *resp; 5347 struct hwrm_selftest_qlist_input *req; 5348 struct bnxt_test_info *test_info; 5349 struct net_device *dev = bp->dev; 5350 int i, rc; 5351 5352 bnxt_hwrm_pcie_qstats(bp); 5353 if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER)) 5354 bnxt_get_pkgver(dev); 5355 5356 bp->num_tests = 0; 5357 if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp)) 5358 return; 5359 5360 test_info = bp->test_info; 5361 if (!test_info) { 5362 test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL); 5363 if (!test_info) 5364 return; 5365 bp->test_info = test_info; 5366 } 5367 5368 if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST)) 5369 return; 5370 5371 resp = hwrm_req_hold(bp, req); 5372 rc = hwrm_req_send_silent(bp, req); 5373 if (rc) 5374 goto ethtool_init_exit; 5375 5376 bp->num_tests = resp->num_tests + BNXT_DRV_TESTS; 5377 if (bp->num_tests > BNXT_MAX_TEST) 5378 bp->num_tests = BNXT_MAX_TEST; 5379 5380 test_info->offline_mask = resp->offline_tests; 5381 test_info->timeout = le16_to_cpu(resp->test_timeout); 5382 if (!test_info->timeout) 5383 test_info->timeout = HWRM_CMD_TIMEOUT; 5384 for (i = 0; i < bp->num_tests; i++) { 5385 char *str = test_info->string[i]; 5386 char *fw_str = resp->test_name[i]; 5387 5388 if (i == BNXT_MACLPBK_TEST_IDX) { 5389 strcpy(str, "Mac loopback test (offline)"); 5390 } 
else if (i == BNXT_PHYLPBK_TEST_IDX) { 5391 strcpy(str, "Phy loopback test (offline)"); 5392 } else if (i == BNXT_EXTLPBK_TEST_IDX) { 5393 strcpy(str, "Ext loopback test (offline)"); 5394 } else if (i == BNXT_IRQ_TEST_IDX) { 5395 strcpy(str, "Interrupt_test (offline)"); 5396 } else { 5397 snprintf(str, ETH_GSTRING_LEN, "%s test (%s)", 5398 fw_str, test_info->offline_mask & (1 << i) ? 5399 "offline" : "online"); 5400 } 5401 } 5402 5403 ethtool_init_exit: 5404 hwrm_req_drop(bp, req); 5405 } 5406 5407 static void bnxt_get_eth_phy_stats(struct net_device *dev, 5408 struct ethtool_eth_phy_stats *phy_stats) 5409 { 5410 struct bnxt *bp = netdev_priv(dev); 5411 u64 *rx; 5412 5413 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 5414 return; 5415 5416 rx = bp->rx_port_stats_ext.sw_stats; 5417 phy_stats->SymbolErrorDuringCarrier = 5418 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err)); 5419 } 5420 5421 static void bnxt_get_eth_mac_stats(struct net_device *dev, 5422 struct ethtool_eth_mac_stats *mac_stats) 5423 { 5424 struct bnxt *bp = netdev_priv(dev); 5425 u64 *rx, *tx; 5426 5427 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 5428 return; 5429 5430 rx = bp->port_stats.sw_stats; 5431 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 5432 5433 mac_stats->FramesReceivedOK = 5434 BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames); 5435 mac_stats->FramesTransmittedOK = 5436 BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames); 5437 mac_stats->FrameCheckSequenceErrors = 5438 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 5439 mac_stats->AlignmentErrors = 5440 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 5441 mac_stats->OutOfRangeLengthField = 5442 BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames); 5443 } 5444 5445 static void bnxt_get_eth_ctrl_stats(struct net_device *dev, 5446 struct ethtool_eth_ctrl_stats *ctrl_stats) 5447 { 5448 struct bnxt *bp = netdev_priv(dev); 5449 u64 *rx; 5450 5451 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 
5452 return; 5453 5454 rx = bp->port_stats.sw_stats; 5455 ctrl_stats->MACControlFramesReceived = 5456 BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames); 5457 } 5458 5459 static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = { 5460 { 0, 64 }, 5461 { 65, 127 }, 5462 { 128, 255 }, 5463 { 256, 511 }, 5464 { 512, 1023 }, 5465 { 1024, 1518 }, 5466 { 1519, 2047 }, 5467 { 2048, 4095 }, 5468 { 4096, 9216 }, 5469 { 9217, 16383 }, 5470 {} 5471 }; 5472 5473 static void bnxt_get_rmon_stats(struct net_device *dev, 5474 struct ethtool_rmon_stats *rmon_stats, 5475 const struct ethtool_rmon_hist_range **ranges) 5476 { 5477 struct bnxt *bp = netdev_priv(dev); 5478 u64 *rx, *tx; 5479 5480 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 5481 return; 5482 5483 rx = bp->port_stats.sw_stats; 5484 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 5485 5486 rmon_stats->jabbers = 5487 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 5488 rmon_stats->oversize_pkts = 5489 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames); 5490 rmon_stats->undersize_pkts = 5491 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames); 5492 5493 rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames); 5494 rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames); 5495 rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames); 5496 rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames); 5497 rmon_stats->hist[4] = 5498 BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames); 5499 rmon_stats->hist[5] = 5500 BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames); 5501 rmon_stats->hist[6] = 5502 BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames); 5503 rmon_stats->hist[7] = 5504 BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames); 5505 rmon_stats->hist[8] = 5506 BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames); 5507 rmon_stats->hist[9] = 5508 BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames); 5509 5510 rmon_stats->hist_tx[0] = 5511 
BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames); 5512 rmon_stats->hist_tx[1] = 5513 BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames); 5514 rmon_stats->hist_tx[2] = 5515 BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames); 5516 rmon_stats->hist_tx[3] = 5517 BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames); 5518 rmon_stats->hist_tx[4] = 5519 BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames); 5520 rmon_stats->hist_tx[5] = 5521 BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames); 5522 rmon_stats->hist_tx[6] = 5523 BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames); 5524 rmon_stats->hist_tx[7] = 5525 BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames); 5526 rmon_stats->hist_tx[8] = 5527 BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames); 5528 rmon_stats->hist_tx[9] = 5529 BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames); 5530 5531 *ranges = bnxt_rmon_ranges; 5532 } 5533 5534 static void bnxt_get_ptp_stats(struct net_device *dev, 5535 struct ethtool_ts_stats *ts_stats) 5536 { 5537 struct bnxt *bp = netdev_priv(dev); 5538 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 5539 5540 if (ptp) { 5541 ts_stats->pkts = ptp->stats.ts_pkts; 5542 ts_stats->lost = ptp->stats.ts_lost; 5543 ts_stats->err = atomic64_read(&ptp->stats.ts_err); 5544 } 5545 } 5546 5547 static void bnxt_get_link_ext_stats(struct net_device *dev, 5548 struct ethtool_link_ext_stats *stats) 5549 { 5550 struct bnxt *bp = netdev_priv(dev); 5551 u64 *rx; 5552 5553 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 5554 return; 5555 5556 rx = bp->rx_port_stats_ext.sw_stats; 5557 stats->link_down_events = 5558 *(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events)); 5559 } 5560 5561 void bnxt_ethtool_free(struct bnxt *bp) 5562 { 5563 kfree(bp->test_info); 5564 bp->test_info = NULL; 5565 } 5566 5567 const struct ethtool_ops bnxt_ethtool_ops = { 5568 .cap_link_lanes_supported = 1, 5569 .rxfh_per_ctx_key = 1, 5570 .rxfh_max_num_contexts = BNXT_MAX_ETH_RSS_CTX + 1, 5571 .rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5, 
5572 .rxfh_priv_size = sizeof(struct bnxt_rss_ctx), 5573 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 5574 ETHTOOL_COALESCE_MAX_FRAMES | 5575 ETHTOOL_COALESCE_USECS_IRQ | 5576 ETHTOOL_COALESCE_MAX_FRAMES_IRQ | 5577 ETHTOOL_COALESCE_STATS_BLOCK_USECS | 5578 ETHTOOL_COALESCE_USE_ADAPTIVE_RX | 5579 ETHTOOL_COALESCE_USE_CQE, 5580 .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT | 5581 ETHTOOL_RING_USE_HDS_THRS, 5582 .get_link_ksettings = bnxt_get_link_ksettings, 5583 .set_link_ksettings = bnxt_set_link_ksettings, 5584 .get_fec_stats = bnxt_get_fec_stats, 5585 .get_fecparam = bnxt_get_fecparam, 5586 .set_fecparam = bnxt_set_fecparam, 5587 .get_pause_stats = bnxt_get_pause_stats, 5588 .get_pauseparam = bnxt_get_pauseparam, 5589 .set_pauseparam = bnxt_set_pauseparam, 5590 .get_drvinfo = bnxt_get_drvinfo, 5591 .get_regs_len = bnxt_get_regs_len, 5592 .get_regs = bnxt_get_regs, 5593 .get_wol = bnxt_get_wol, 5594 .set_wol = bnxt_set_wol, 5595 .get_coalesce = bnxt_get_coalesce, 5596 .set_coalesce = bnxt_set_coalesce, 5597 .get_msglevel = bnxt_get_msglevel, 5598 .set_msglevel = bnxt_set_msglevel, 5599 .get_sset_count = bnxt_get_sset_count, 5600 .get_strings = bnxt_get_strings, 5601 .get_ethtool_stats = bnxt_get_ethtool_stats, 5602 .set_ringparam = bnxt_set_ringparam, 5603 .get_ringparam = bnxt_get_ringparam, 5604 .get_channels = bnxt_get_channels, 5605 .set_channels = bnxt_set_channels, 5606 .get_rxnfc = bnxt_get_rxnfc, 5607 .set_rxnfc = bnxt_set_rxnfc, 5608 .get_rxfh_indir_size = bnxt_get_rxfh_indir_size, 5609 .get_rxfh_key_size = bnxt_get_rxfh_key_size, 5610 .get_rxfh = bnxt_get_rxfh, 5611 .set_rxfh = bnxt_set_rxfh, 5612 .get_rxfh_fields = bnxt_get_rxfh_fields, 5613 .set_rxfh_fields = bnxt_set_rxfh_fields, 5614 .create_rxfh_context = bnxt_create_rxfh_context, 5615 .modify_rxfh_context = bnxt_modify_rxfh_context, 5616 .remove_rxfh_context = bnxt_remove_rxfh_context, 5617 .flash_device = bnxt_flash_device, 5618 .get_eeprom_len = bnxt_get_eeprom_len, 5619 
.get_eeprom = bnxt_get_eeprom, 5620 .set_eeprom = bnxt_set_eeprom, 5621 .get_link = bnxt_get_link, 5622 .get_link_ext_stats = bnxt_get_link_ext_stats, 5623 .get_eee = bnxt_get_eee, 5624 .set_eee = bnxt_set_eee, 5625 .get_tunable = bnxt_get_tunable, 5626 .set_tunable = bnxt_set_tunable, 5627 .get_module_info = bnxt_get_module_info, 5628 .get_module_eeprom = bnxt_get_module_eeprom, 5629 .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page, 5630 .set_module_eeprom_by_page = bnxt_set_module_eeprom_by_page, 5631 .nway_reset = bnxt_nway_reset, 5632 .set_phys_id = bnxt_set_phys_id, 5633 .self_test = bnxt_self_test, 5634 .get_ts_info = bnxt_get_ts_info, 5635 .reset = bnxt_reset, 5636 .set_dump = bnxt_set_dump, 5637 .get_dump_flag = bnxt_get_dump_flag, 5638 .get_dump_data = bnxt_get_dump_data, 5639 .get_eth_phy_stats = bnxt_get_eth_phy_stats, 5640 .get_eth_mac_stats = bnxt_get_eth_mac_stats, 5641 .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats, 5642 .get_rmon_stats = bnxt_get_rmon_stats, 5643 .get_ts_stats = bnxt_get_ptp_stats, 5644 }; 5645