/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/linkmode.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
#include <linux/utsname.h>
#include <linux/time.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
#include <net/netdev_queues.h>
#include <net/netlink.h>
#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"

/* Report an NVM error via netlink extack (when the caller supplied one)
 * and always mirror it to the kernel log so it is never silently lost.
 */
#define BNXT_NVM_ERR_MSG(dev, extack, msg)			\
	do {							\
		if (extack)					\
			NL_SET_ERR_MSG_MOD(extack, msg);	\
		netdev_err(dev, "%s\n", msg);			\
	} while (0)

/* ethtool: return the driver's debug message level bitmap. */
static u32 bnxt_get_msglevel(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->msg_enable;
}

/* ethtool: set the driver's debug message level bitmap. */
static void bnxt_set_msglevel(struct net_device *dev, u32 value)
{
	struct bnxt *bp = netdev_priv(dev);

	bp->msg_enable = value;
}

/* ethtool -c: report the current interrupt coalescing configuration.
 * coal_bufs is stored in hardware buffer-record units; bufs_per_record
 * (mult) converts it back to frames for ethtool.
 */
static int bnxt_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_coal *hw_coal;
	u16 mult;

	memset(coal, 0, sizeof(*coal));

	/* Adaptive RX coalescing is tracked via the DIM flag. */
	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
	coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
	/* TIMER_RESET in the stored flags indicates CQE-based coalescing. */
	if (hw_coal->flags &
	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
		kernel_coal->use_cqe_mode_rx = true;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	coal->tx_coalesce_usecs = hw_coal->coal_ticks;
	coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
	if (hw_coal->flags &
	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
		kernel_coal->use_cqe_mode_tx = true;

	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;

	return 0;
}

/* ethtool -C: apply a new interrupt coalescing configuration.
 *
 * Turning adaptive RX (DIM) off jumps straight to reprogramming the
 * rings with the previously stored static values; the CQE-mode check
 * and per-queue parameter updates below are skipped in that case.
 * Changing the statistics block interval requires a full close/open
 * cycle; otherwise a lighter HWRM coalescing update is sufficient.
 */
static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	bool update_stats = false;
	struct bnxt_coal *hw_coal;
	int rc = 0;
	u16 mult;

	if (coal->use_adaptive_rx_coalesce) {
		bp->flags |= BNXT_FLAG_DIM;
	} else {
		if (bp->flags & BNXT_FLAG_DIM) {
			bp->flags &= ~(BNXT_FLAG_DIM);
			goto reset_coalesce;
		}
	}

	/* CQE-based coalescing requires firmware TIMER_RESET support. */
	if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
	    !(bp->coal_cap.cmpl_params &
	      RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
		return -EOPNOTSUPP;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
	hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_rx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->tx_coalesce_usecs;
	hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_tx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
		u32 stats_ticks = coal->stats_block_coalesce_usecs;

		/* Allow 0, which means disable. */
		if (stats_ticks)
			stats_ticks = clamp_t(u32, stats_ticks,
					      BNXT_MIN_STATS_COAL_TICKS,
					      BNXT_MAX_STATS_COAL_TICKS);
		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
		bp->stats_coal_ticks = stats_ticks;
		if (bp->stats_coal_ticks)
			bp->current_interval =
				bp->stats_coal_ticks * HZ / 1000000;
		else
			bp->current_interval = BNXT_TIMER_INTERVAL;
		update_stats = true;
	}

reset_coalesce:
	if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
		if (update_stats) {
			/* Stats interval change needs a ring teardown. */
			bnxt_close_nic(bp, true, false);
			rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_hwrm_set_coal(bp);
		}
	}

	return rc;
}

/* Names of the per-ring hardware counters.  The order of each table
 * must match the layout of the ring's sw_stats mirror consumed by
 * bnxt_get_ethtool_stats() and the strings emitted by bnxt_get_strings().
 */
static const char * const bnxt_ring_rx_stats_str[] = {
	"rx_ucast_packets",
	"rx_mcast_packets",
	"rx_bcast_packets",
	"rx_discards",
	"rx_errors",
	"rx_ucast_bytes",
	"rx_mcast_bytes",
	"rx_bcast_bytes",
};

static const char * const bnxt_ring_tx_stats_str[] = {
	"tx_ucast_packets",
	"tx_mcast_packets",
	"tx_bcast_packets",
	"tx_errors",
	"tx_discards",
	"tx_ucast_bytes",
	"tx_mcast_bytes",
	"tx_bcast_bytes",
};

/* Legacy TPA (LRO/GRO aggregation) counter names. */
static const char * const bnxt_ring_tpa_stats_str[] = {
	"tpa_packets",
	"tpa_bytes",
	"tpa_events",
	"tpa_aborts",
};

/* TPA v2 counter names (used when bp->max_tpa_v2 is set). */
static const char * const bnxt_ring_tpa2_stats_str[] = {
	"rx_tpa_eligible_pkt",
	"rx_tpa_eligible_bytes",
	"rx_tpa_pkt",
	"rx_tpa_bytes",
	"rx_tpa_errors",
	"rx_tpa_events",
};

/* Per-ring software (driver-maintained) RX counter names. */
static const char * const bnxt_rx_sw_stats_str[] = {
	"rx_l4_csum_errors",
	"rx_resets",
	"rx_buf_errors",
};

/* Per-ring software counters common to all ring types. */
static const char * const bnxt_cmn_sw_stats_str[] = {
	"missed_irqs",
};

/* Helpers that build { offset, name } entries for the port statistics
 * tables below; the offset locates the counter in the firmware stats
 * block and the name is the stringified counter identifier.
 */
#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }

/* Per-priority PFC duration/transition counter pairs. */
#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),	\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)

#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),	\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)

#define BNXT_RX_STATS_EXT_PFC_ENTRIES				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_TX_STATS_EXT_PFC_ENTRIES				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(7)

/* Per-CoS-queue byte/packet counter pairs. */
#define BNXT_RX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),		\
	BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)

#define BNXT_TX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),		\
	BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)

#define BNXT_RX_STATS_EXT_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_TX_STATS_EXT_COS_ENTRIES				\
	BNXT_TX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)

/* Per-priority entries store the cos0 offset as a base; the actual CoS
 * queue index (from bp->pri2cos_idx[]) is added at read time in
 * bnxt_get_ethtool_stats().
 */
#define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_RX_STATS_PRI_ENTRIES(counter)		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_TX_STATS_PRI_ENTRIES(counter)		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 7)

enum {
	RX_TOTAL_DISCARDS,
	TX_TOTAL_DISCARDS,
	RX_NETPOLL_DISCARDS,
};

/* Names for the accumulated ring error totals; order must match the
 * field layout of struct bnxt_total_ring_err_stats (walked with a
 * pointer starting at rx_total_l4_csum_errors in
 * bnxt_get_ethtool_stats()).
 */
static const char *const bnxt_ring_err_stats_arr[] = {
	"rx_total_l4_csum_errors",
	"rx_total_resets",
	"rx_total_buf_errors",
	"rx_total_oom_discards",
	"rx_total_netpoll_discards",
	"rx_total_ring_discards",
	"tx_total_resets",
	"tx_total_ring_discards",
	"total_missed_irqs",
};

#define NUM_RING_RX_SW_STATS	ARRAY_SIZE(bnxt_rx_sw_stats_str)
#define NUM_RING_CMN_SW_STATS	ARRAY_SIZE(bnxt_cmn_sw_stats_str)
#define NUM_RING_RX_HW_STATS	ARRAY_SIZE(bnxt_ring_rx_stats_str)
#define NUM_RING_TX_HW_STATS	ARRAY_SIZE(bnxt_ring_tx_stats_str)

/* Port-level MAC statistics: offset into the firmware port stats block
 * plus the ethtool string for each counter.
 */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),
	BNXT_RX_STATS_ENTRY(rx_stat_discard),
	BNXT_RX_STATS_ENTRY(rx_stat_err),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
	BNXT_TX_STATS_ENTRY(tx_stat_discard),
	BNXT_TX_STATS_ENTRY(tx_stat_error),
};

/* Extended RX port statistics; only the first fw_rx_stats_ext_size
 * entries are valid for a given firmware.
 */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
	BNXT_RX_STATS_EXT_COS_ENTRIES,
	BNXT_RX_STATS_EXT_PFC_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_bits),
	BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
	BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
	BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss),
};

/* Extended TX port statistics, gated by fw_tx_stats_ext_size. */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_port_stats_ext_arr[] = {
	BNXT_TX_STATS_EXT_COS_ENTRIES,
	BNXT_TX_STATS_EXT_PFC_ENTRIES,
};

/* Per-priority views of the per-CoS counters; base_off is the cos0
 * offset, adjusted by the priority-to-CoS mapping at read time.
 */
static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_bytes_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_pkts_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_bytes_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_pkts_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};

#define BNXT_NUM_RING_ERR_STATS	ARRAY_SIZE(bnxt_ring_err_stats_arr)
#define BNXT_NUM_PORT_STATS	ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI			\
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))

/* Number of per-ring TPA counters for this chip, or 0 when TPA is not
 * supported.  TPA v2 capable chips use the larger v2 counter sets.
 */
static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
	if (BNXT_SUPPORTS_TPA(bp)) {
		if (bp->max_tpa_v2) {
			if (BNXT_CHIP_P5(bp))
				return BNXT_NUM_TPA_RING_STATS_P5;
			return BNXT_NUM_TPA_RING_STATS_P7;
		}
		return BNXT_NUM_TPA_RING_STATS;
	}
	return 0;
}

/* Total count of per-ring counters across all RX, TX, and completion
 * rings; must agree with what bnxt_get_ethtool_stats() emits.
 */
static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
	int rx, tx, cmn;

	rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
	     bnxt_get_num_tpa_ring_stats(bp);
	tx = NUM_RING_TX_HW_STATS;
	cmn = NUM_RING_CMN_SW_STATS;
	return rx * bp->rx_nr_rings +
	       tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) +
	       cmn * bp->cp_nr_rings;
}

/* Total ethtool stats count: ring stats, ring error totals, plus
 * whichever port stat sets the device supports.
 */
static int bnxt_get_num_stats(struct bnxt *bp)
{
	int num_stats = bnxt_get_num_ring_stats(bp);
	int len;

	num_stats += BNXT_NUM_RING_ERR_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS)
		num_stats += BNXT_NUM_PORT_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		/* Firmware may support fewer ext counters than we know. */
		len = min_t(int, bp->fw_rx_stats_ext_size,
			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
		num_stats += len;
		len = min_t(int, bp->fw_tx_stats_ext_size,
			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
		num_stats += len;
		if (bp->pri2cos_valid)
			num_stats += BNXT_NUM_STATS_PRI;
	}

	return num_stats;
}

/* ethtool string-set sizes (statistics and self-test names). */
static int bnxt_get_sset_count(struct net_device *dev, int sset)
{
	struct bnxt *bp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return bnxt_get_num_stats(bp);
	case ETH_SS_TEST:
		if (!bp->num_tests)
			return -EOPNOTSUPP;
		return bp->num_tests;
	default:
		return -EOPNOTSUPP;
	}
}

/* RX rings occupy completion ring indices [0, rx_nr_rings). */
static bool is_rx_ring(struct bnxt *bp, int ring_num)
{
	return ring_num < bp->rx_nr_rings;
}

/* TX rings share the RX indices when rings are shared; otherwise they
 * start after the last RX ring.
 */
static bool is_tx_ring(struct bnxt *bp, int ring_num)
{
	int tx_base = 0;

	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
		tx_base = bp->rx_nr_rings;

	if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
		return true;
	return false;
}

/* ethtool -S: fill buf[] with all counters.  The output index j must
 * advance in exact lockstep with the names emitted by
 * bnxt_get_strings().  When the rings are not allocated (!bp->bnapi)
 * the per-ring section is skipped and only the totals and port stats
 * are reported.
 */
static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct bnxt_total_ring_err_stats ring_err_stats = {0};
	struct bnxt *bp = netdev_priv(dev);
	u64 *curr, *prev;
	u32 tpa_stats;
	u32 i, j = 0;

	if (!bp->bnapi) {
		j += bnxt_get_num_ring_stats(bp);
		goto skip_ring_stats;
	}

	tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		u64 *sw_stats = cpr->stats.sw_stats;
		u64 *sw;
		int k;

		/* HW RX counters occupy the front of sw_stats. */
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
				buf[j] = sw_stats[k];
		}
		/* HW TX counters follow the RX counters. */
		if (is_tx_ring(bp, i)) {
			k = NUM_RING_RX_HW_STATS;
			for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
			     j++, k++)
				buf[j] = sw_stats[k];
		}
		if (!tpa_stats || !is_rx_ring(bp, i))
			goto skip_tpa_ring_stats;

		/* TPA counters come after the RX and TX HW counters. */
		k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
		for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
			   tpa_stats; j++, k++)
			buf[j] = sw_stats[k];

skip_tpa_ring_stats:
		sw = (u64 *)&cpr->sw_stats->rx;
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
				buf[j] = sw[k];
		}

		sw = (u64 *)&cpr->sw_stats->cmn;
		for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
			buf[j] = sw[k];
	}

	bnxt_get_ring_err_stats(bp, &ring_err_stats);

skip_ring_stats:
	/* Walk struct bnxt_total_ring_err_stats field by field, adding
	 * the totals saved across the last reset (ring_err_stats_prev).
	 */
	curr = &ring_err_stats.rx_total_l4_csum_errors;
	prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors;
	for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++)
		buf[j] = *curr + *prev;

	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		u64 *port_stats = bp->port_stats.sw_stats;

		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
			buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
		u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
		u32 len;

		len = min_t(u32, bp->fw_rx_stats_ext_size,
			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(rx_port_stats_ext +
				   bnxt_port_stats_ext_arr[i].offset);
		}
		len = min_t(u32, bp->fw_tx_stats_ext_size,
			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(tx_port_stats_ext +
				   bnxt_tx_port_stats_ext_arr[i].offset);
		}
		if (bp->pri2cos_valid) {
			/* Translate per-priority stats through the
			 * priority-to-CoS-queue map.
			 */
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				u8 cos_idx = bp->pri2cos_idx[i];
				long n;

				n = bnxt_tx_bytes_pri_arr[i].base_off + cos_idx;
				buf[j] = *(tx_port_stats_ext + n);
				/* cos0/cos1 shared: fold cos1 into cos0. */
				if (bp->cos0_cos1_shared && !cos_idx)
					buf[j] += *(tx_port_stats_ext + n + 1);
			}
			for (i = 0; i < 8; i++, j++) {
				u8 cos_idx = bp->pri2cos_idx[i];
				long n;

				n = bnxt_tx_pkts_pri_arr[i].base_off + cos_idx;
				buf[j] = *(tx_port_stats_ext + n);
				if (bp->cos0_cos1_shared && !cos_idx)
					buf[j] += *(tx_port_stats_ext + n + 1);
			}
		}
	}
}

/* ethtool string sets: emit counter/test names in the same order and
 * count as bnxt_get_ethtool_stats() / bnxt_get_num_stats().
 */
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	u32 i, j, num_str;
	const char *str;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < bp->cp_nr_rings; i++) {
			if (is_rx_ring(bp, i))
				for (j = 0; j < NUM_RING_RX_HW_STATS; j++) {
					str = bnxt_ring_rx_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
			if (is_tx_ring(bp, i))
				for (j = 0; j < NUM_RING_TX_HW_STATS; j++) {
					str = bnxt_ring_tx_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
			num_str = bnxt_get_num_tpa_ring_stats(bp);
			if (!num_str || !is_rx_ring(bp, i))
				goto skip_tpa_stats;

			if (bp->max_tpa_v2)
				for (j = 0; j < num_str; j++) {
					str = bnxt_ring_tpa2_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
			else
				for (j = 0; j < num_str; j++) {
					str = bnxt_ring_tpa_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
skip_tpa_stats:
			if (is_rx_ring(bp, i))
				for (j = 0; j < NUM_RING_RX_SW_STATS; j++) {
					str = bnxt_rx_sw_stats_str[j];
					ethtool_sprintf(&buf, "[%d]: %s", i,
							str);
				}
			for (j = 0; j < NUM_RING_CMN_SW_STATS; j++) {
				str = bnxt_cmn_sw_stats_str[j];
				ethtool_sprintf(&buf, "[%d]: %s", i, str);
			}
		}
		for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++)
			ethtool_puts(&buf, bnxt_ring_err_stats_arr[i]);

		if (bp->flags & BNXT_FLAG_PORT_STATS)
			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
				str = bnxt_port_stats_arr[i].string;
				ethtool_puts(&buf, str);
			}

		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
			u32 len;

			len = min_t(u32, bp->fw_rx_stats_ext_size,
				    ARRAY_SIZE(bnxt_port_stats_ext_arr));
			for (i = 0; i < len; i++) {
				str = bnxt_port_stats_ext_arr[i].string;
				ethtool_puts(&buf, str);
			}

			len = min_t(u32, bp->fw_tx_stats_ext_size,
				    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
			for (i = 0; i < len; i++) {
				str = bnxt_tx_port_stats_ext_arr[i].string;
				ethtool_puts(&buf, str);
			}

			if (bp->pri2cos_valid) {
				for (i = 0; i < 8; i++) {
					str = bnxt_rx_bytes_pri_arr[i].string;
					ethtool_puts(&buf, str);
				}

				for (i = 0; i < 8; i++) {
					str = bnxt_rx_pkts_pri_arr[i].string;
					ethtool_puts(&buf, str);
				}

				for (i = 0; i < 8; i++) {
					str = bnxt_tx_bytes_pri_arr[i].string;
					ethtool_puts(&buf, str);
				}

				for (i = 0; i < 8; i++) {
					str = bnxt_tx_pkts_pri_arr[i].string;
					ethtool_puts(&buf, str);
				}
			}
		}
		break;
	case ETH_SS_TEST:
		if (bp->num_tests)
			for (i = 0; i < bp->num_tests; i++)
				ethtool_puts(&buf, bp->test_info->string[i]);
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}

/* ethtool -g: report ring sizes.  With aggregation rings enabled,
 * jumbo descriptors are available and TCP header/data split is on.
 */
static void bnxt_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
		ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
	} else {
		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
		ering->rx_jumbo_max_pending = 0;
		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
	}
	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
	ering->tx_pending = bp->tx_ring_size;

	kernel_ering->hds_thresh_max = BNXT_HDS_THRESHOLD_MAX;
}

/* ethtool -G: set ring sizes and the header/data-split (HDS) mode.
 * Explicitly disabling HDS is rejected; only the "unknown" (auto)
 * setting clears the flag.  HDS cannot be enabled while an XDP
 * program has the device in page mode.  A running interface is
 * closed and reopened to apply the new sizes.
 */
static int bnxt_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering,
			      struct kernel_ethtool_ringparam *kernel_ering,
			      struct netlink_ext_ack *extack)
{
	u8 tcp_data_split = kernel_ering->tcp_data_split;
	struct bnxt *bp = netdev_priv(dev);
	u8 hds_config_mod;

	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
		return -EINVAL;

	hds_config_mod = tcp_data_split != dev->cfg->hds_config;
	if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && hds_config_mod)
		return -EINVAL;

	if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
	    hds_config_mod && BNXT_RX_PAGE_MODE(bp)) {
		NL_SET_ERR_MSG_MOD(extack, "tcp-data-split is disallowed when XDP is attached");
		return -EINVAL;
	}

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	if (hds_config_mod) {
		if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
			bp->flags |= BNXT_FLAG_HDS;
		else if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
			bp->flags &= ~BNXT_FLAG_HDS;
	}

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}
/* ethtool -l: report maximum and currently configured channel counts.
 * Max TX is bounded by the firmware's TX scheduler inputs and divided
 * among TC groups (plus one extra group when XDP TX rings exist).
 */
static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_rx_rings, max_tx_rings, tcs;
	int max_tx_sch_inputs, tx_grps;

	/* Get the most up-to-date max_tx_sch_inputs. */
	if (netif_running(dev) && BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);
	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;

	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	tcs = bp->num_tc;
	tx_grps = max(tcs, 1);
	if (bp->tx_nr_rings_xdp)
		tx_grps++;
	max_tx_rings /= tx_grps;
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	/* Re-query without ring sharing for the separate rx/tx maxima. */
	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
		max_rx_rings = 0;
		max_tx_rings = 0;
	}
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	if (tcs > 1)
		max_tx_rings /= tcs;

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
	} else {
		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			channel->rx_count = bp->rx_nr_rings;
			channel->tx_count = bp->tx_nr_rings_per_tc;
		}
	}
}

/* ethtool -L: reconfigure channel counts.  Combined and separate
 * rx/tx modes are mutually exclusive; XDP requires combined mode.
 * Ring counts are validated against available resources before the
 * interface is torn down and rebuilt.
 */
static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;

	if (channel->other_count)
		return -EINVAL;

	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = bp->num_tc;

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		/* One XDP TX ring per RX ring. */
		tx_xdp = req_rx_rings;
	}

	/* A different RSS table size invalidates user-configured RSS. */
	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
	    (netif_is_rxfh_configured(dev) || bp->num_rss_ctx)) {
		netdev_warn(dev, "RSS table size change required, RSS table entries must be default (with no additional RSS contexts present) to proceed\n");
		return -EINVAL;
	}

	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * before PF unload
			 */
		}
		bnxt_close_nic(bp, true, false);
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	bnxt_set_cp_rings(bp, sh);

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * to renable
			 */
		}
	} else {
		rc = bnxt_reserve_rings(bp, true);
	}

	return rc;
}

/* Collect the sw_ids of all live filters in a hash table into ids[],
 * starting at index 'start' and stopping at id_cnt entries.  Filters
 * with no flags set or marked firmware-deleted are skipped.  Returns
 * the next free index.  Caller must hold the RCU read lock.
 */
static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
				     int tbl_size, u32 *ids, u32 start,
				     u32 id_cnt)
{
	int i, j = start;

	if (j >= id_cnt)
		return j;
	for (i = 0; i < tbl_size; i++) {
		struct hlist_head *head;
		struct bnxt_filter_base *fltr;

		head = &tbl[i];
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (!fltr->flags ||
			    test_bit(BNXT_FLTR_FW_DELETED, &fltr->state))
				continue;
			ids[j++] = fltr->sw_id;
			if (j == id_cnt)
				return j;
		}
	}
	return j;
}

/* Look up a live filter by sw_id in a hash table, or NULL if absent.
 * Caller must hold the RCU read lock.
 */
static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
						      struct hlist_head tbl[],
						      int tbl_size, u32 id)
{
	int i;

	for (i = 0; i < tbl_size; i++) {
		struct hlist_head *head;
		struct bnxt_filter_base *fltr;

		head = &tbl[i];
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (fltr->flags && fltr->sw_id == id)
				return fltr;
		}
	}
	return NULL;
}

/* ETHTOOL_GRXCLSRLALL: report all rule locations, gathering ids from
 * both the L2 and the ntuple filter tables.
 */
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	u32 count;

	cmd->data = bp->ntp_fltr_count;
	rcu_read_lock();
	count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0,
					  cmd->rule_cnt);
	cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
						  BNXT_NTP_FLTR_HASH_SIZE,
						  rule_locs, count,
						  cmd->rule_cnt);
	rcu_read_unlock();

	return 0;
}

static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
(struct ethtool_rx_flow_spec *)&cmd->fs; 1100 struct bnxt_filter_base *fltr_base; 1101 struct bnxt_ntuple_filter *fltr; 1102 struct bnxt_flow_masks *fmasks; 1103 struct flow_keys *fkeys; 1104 int rc = -EINVAL; 1105 1106 if (fs->location >= bp->max_fltr) 1107 return rc; 1108 1109 rcu_read_lock(); 1110 fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl, 1111 BNXT_L2_FLTR_HASH_SIZE, 1112 fs->location); 1113 if (fltr_base) { 1114 struct ethhdr *h_ether = &fs->h_u.ether_spec; 1115 struct ethhdr *m_ether = &fs->m_u.ether_spec; 1116 struct bnxt_l2_filter *l2_fltr; 1117 struct bnxt_l2_key *l2_key; 1118 1119 l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base); 1120 l2_key = &l2_fltr->l2_key; 1121 fs->flow_type = ETHER_FLOW; 1122 ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr); 1123 eth_broadcast_addr(m_ether->h_dest); 1124 if (l2_key->vlan) { 1125 struct ethtool_flow_ext *m_ext = &fs->m_ext; 1126 struct ethtool_flow_ext *h_ext = &fs->h_ext; 1127 1128 fs->flow_type |= FLOW_EXT; 1129 m_ext->vlan_tci = htons(0xfff); 1130 h_ext->vlan_tci = htons(l2_key->vlan); 1131 } 1132 if (fltr_base->flags & BNXT_ACT_RING_DST) 1133 fs->ring_cookie = fltr_base->rxq; 1134 if (fltr_base->flags & BNXT_ACT_FUNC_DST) 1135 fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) << 1136 ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; 1137 rcu_read_unlock(); 1138 return 0; 1139 } 1140 fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl, 1141 BNXT_NTP_FLTR_HASH_SIZE, 1142 fs->location); 1143 if (!fltr_base) { 1144 rcu_read_unlock(); 1145 return rc; 1146 } 1147 fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base); 1148 1149 fkeys = &fltr->fkeys; 1150 fmasks = &fltr->fmasks; 1151 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 1152 if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) { 1153 fs->flow_type = IP_USER_FLOW; 1154 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 1155 fs->h_u.usr_ip4_spec.proto = BNXT_IP_PROTO_WILDCARD; 1156 fs->m_u.usr_ip4_spec.proto = 0; 1157 } else if 
(fkeys->basic.ip_proto == IPPROTO_ICMP) { 1158 fs->flow_type = IP_USER_FLOW; 1159 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 1160 fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP; 1161 fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK; 1162 } else if (fkeys->basic.ip_proto == IPPROTO_TCP) { 1163 fs->flow_type = TCP_V4_FLOW; 1164 } else if (fkeys->basic.ip_proto == IPPROTO_UDP) { 1165 fs->flow_type = UDP_V4_FLOW; 1166 } else { 1167 goto fltr_err; 1168 } 1169 1170 fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; 1171 fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src; 1172 fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; 1173 fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst; 1174 if (fs->flow_type == TCP_V4_FLOW || 1175 fs->flow_type == UDP_V4_FLOW) { 1176 fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src; 1177 fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src; 1178 fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst; 1179 fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst; 1180 } 1181 } else { 1182 if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) { 1183 fs->flow_type = IPV6_USER_FLOW; 1184 fs->h_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_WILDCARD; 1185 fs->m_u.usr_ip6_spec.l4_proto = 0; 1186 } else if (fkeys->basic.ip_proto == IPPROTO_ICMPV6) { 1187 fs->flow_type = IPV6_USER_FLOW; 1188 fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6; 1189 fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK; 1190 } else if (fkeys->basic.ip_proto == IPPROTO_TCP) { 1191 fs->flow_type = TCP_V6_FLOW; 1192 } else if (fkeys->basic.ip_proto == IPPROTO_UDP) { 1193 fs->flow_type = UDP_V6_FLOW; 1194 } else { 1195 goto fltr_err; 1196 } 1197 1198 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] = 1199 fkeys->addrs.v6addrs.src; 1200 *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] = 1201 fmasks->addrs.v6addrs.src; 1202 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] = 1203 fkeys->addrs.v6addrs.dst; 1204 *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] = 1205 
fmasks->addrs.v6addrs.dst; 1206 if (fs->flow_type == TCP_V6_FLOW || 1207 fs->flow_type == UDP_V6_FLOW) { 1208 fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src; 1209 fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src; 1210 fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst; 1211 fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst; 1212 } 1213 } 1214 1215 if (fltr->base.flags & BNXT_ACT_DROP) { 1216 fs->ring_cookie = RX_CLS_FLOW_DISC; 1217 } else if (fltr->base.flags & BNXT_ACT_RSS_CTX) { 1218 fs->flow_type |= FLOW_RSS; 1219 cmd->rss_context = fltr->base.fw_vnic_id; 1220 } else { 1221 fs->ring_cookie = fltr->base.rxq; 1222 } 1223 rc = 0; 1224 1225 fltr_err: 1226 rcu_read_unlock(); 1227 1228 return rc; 1229 } 1230 1231 static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp, 1232 u32 index) 1233 { 1234 struct ethtool_rxfh_context *ctx; 1235 1236 ctx = xa_load(&bp->dev->ethtool->rss_ctx, index); 1237 if (!ctx) 1238 return NULL; 1239 return ethtool_rxfh_context_priv(ctx); 1240 } 1241 1242 static int bnxt_alloc_vnic_rss_table(struct bnxt *bp, 1243 struct bnxt_vnic_info *vnic) 1244 { 1245 int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); 1246 1247 vnic->rss_table_size = size + HW_HASH_KEY_SIZE; 1248 vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev, 1249 vnic->rss_table_size, 1250 &vnic->rss_table_dma_addr, 1251 GFP_KERNEL); 1252 if (!vnic->rss_table) 1253 return -ENOMEM; 1254 1255 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 1256 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 1257 return 0; 1258 } 1259 1260 static int bnxt_add_l2_cls_rule(struct bnxt *bp, 1261 struct ethtool_rx_flow_spec *fs) 1262 { 1263 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); 1264 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); 1265 struct ethhdr *h_ether = &fs->h_u.ether_spec; 1266 struct ethhdr *m_ether = &fs->m_u.ether_spec; 1267 struct bnxt_l2_filter *fltr; 1268 struct bnxt_l2_key key; 1269 u16 vnic_id; 1270 u8 flags; 1271 int rc; 1272 1273 if 
(BNXT_CHIP_P5_PLUS(bp)) 1274 return -EOPNOTSUPP; 1275 1276 if (!is_broadcast_ether_addr(m_ether->h_dest)) 1277 return -EINVAL; 1278 ether_addr_copy(key.dst_mac_addr, h_ether->h_dest); 1279 key.vlan = 0; 1280 if (fs->flow_type & FLOW_EXT) { 1281 struct ethtool_flow_ext *m_ext = &fs->m_ext; 1282 struct ethtool_flow_ext *h_ext = &fs->h_ext; 1283 1284 if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci) 1285 return -EINVAL; 1286 key.vlan = ntohs(h_ext->vlan_tci); 1287 } 1288 1289 if (vf) { 1290 flags = BNXT_ACT_FUNC_DST; 1291 vnic_id = 0xffff; 1292 vf--; 1293 } else { 1294 flags = BNXT_ACT_RING_DST; 1295 vnic_id = bp->vnic_info[ring + 1].fw_vnic_id; 1296 } 1297 fltr = bnxt_alloc_new_l2_filter(bp, &key, flags); 1298 if (IS_ERR(fltr)) 1299 return PTR_ERR(fltr); 1300 1301 fltr->base.fw_vnic_id = vnic_id; 1302 fltr->base.rxq = ring; 1303 fltr->base.vf_idx = vf; 1304 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr); 1305 if (rc) 1306 bnxt_del_l2_filter(bp, fltr); 1307 else 1308 fs->location = fltr->base.sw_id; 1309 return rc; 1310 } 1311 1312 static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec, 1313 struct ethtool_usrip4_spec *ip_mask) 1314 { 1315 u8 mproto = ip_mask->proto; 1316 u8 sproto = ip_spec->proto; 1317 1318 if (ip_mask->l4_4_bytes || ip_mask->tos || 1319 ip_spec->ip_ver != ETH_RX_NFC_IP4 || 1320 (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMP))) 1321 return false; 1322 return true; 1323 } 1324 1325 static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec, 1326 struct ethtool_usrip6_spec *ip_mask) 1327 { 1328 u8 mproto = ip_mask->l4_proto; 1329 u8 sproto = ip_spec->l4_proto; 1330 1331 if (ip_mask->l4_4_bytes || ip_mask->tclass || 1332 (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMPV6))) 1333 return false; 1334 return true; 1335 } 1336 1337 static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, 1338 struct ethtool_rxnfc *cmd) 1339 { 1340 struct ethtool_rx_flow_spec *fs = &cmd->fs; 
1341 struct bnxt_ntuple_filter *new_fltr, *fltr; 1342 u32 flow_type = fs->flow_type & 0xff; 1343 struct bnxt_l2_filter *l2_fltr; 1344 struct bnxt_flow_masks *fmasks; 1345 struct flow_keys *fkeys; 1346 u32 idx; 1347 int rc; 1348 1349 if (!bp->vnic_info) 1350 return -EAGAIN; 1351 1352 if (fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) 1353 return -EOPNOTSUPP; 1354 1355 if (fs->ring_cookie != RX_CLS_FLOW_DISC && 1356 ethtool_get_flow_spec_ring_vf(fs->ring_cookie)) 1357 return -EOPNOTSUPP; 1358 1359 if (flow_type == IP_USER_FLOW) { 1360 if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec, 1361 &fs->m_u.usr_ip4_spec)) 1362 return -EOPNOTSUPP; 1363 } 1364 1365 if (flow_type == IPV6_USER_FLOW) { 1366 if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec, 1367 &fs->m_u.usr_ip6_spec)) 1368 return -EOPNOTSUPP; 1369 } 1370 1371 new_fltr = kzalloc_obj(*new_fltr); 1372 if (!new_fltr) 1373 return -ENOMEM; 1374 1375 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; 1376 atomic_inc(&l2_fltr->refcnt); 1377 new_fltr->l2_fltr = l2_fltr; 1378 fmasks = &new_fltr->fmasks; 1379 fkeys = &new_fltr->fkeys; 1380 1381 rc = -EOPNOTSUPP; 1382 switch (flow_type) { 1383 case IP_USER_FLOW: { 1384 struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec; 1385 struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec; 1386 1387 fkeys->basic.ip_proto = ip_mask->proto ? 
ip_spec->proto 1388 : BNXT_IP_PROTO_WILDCARD; 1389 fkeys->basic.n_proto = htons(ETH_P_IP); 1390 fkeys->addrs.v4addrs.src = ip_spec->ip4src; 1391 fmasks->addrs.v4addrs.src = ip_mask->ip4src; 1392 fkeys->addrs.v4addrs.dst = ip_spec->ip4dst; 1393 fmasks->addrs.v4addrs.dst = ip_mask->ip4dst; 1394 break; 1395 } 1396 case TCP_V4_FLOW: 1397 case UDP_V4_FLOW: { 1398 struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec; 1399 struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec; 1400 1401 fkeys->basic.ip_proto = IPPROTO_TCP; 1402 if (flow_type == UDP_V4_FLOW) 1403 fkeys->basic.ip_proto = IPPROTO_UDP; 1404 fkeys->basic.n_proto = htons(ETH_P_IP); 1405 fkeys->addrs.v4addrs.src = ip_spec->ip4src; 1406 fmasks->addrs.v4addrs.src = ip_mask->ip4src; 1407 fkeys->addrs.v4addrs.dst = ip_spec->ip4dst; 1408 fmasks->addrs.v4addrs.dst = ip_mask->ip4dst; 1409 fkeys->ports.src = ip_spec->psrc; 1410 fmasks->ports.src = ip_mask->psrc; 1411 fkeys->ports.dst = ip_spec->pdst; 1412 fmasks->ports.dst = ip_mask->pdst; 1413 break; 1414 } 1415 case IPV6_USER_FLOW: { 1416 struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec; 1417 struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec; 1418 1419 fkeys->basic.ip_proto = ip_mask->l4_proto ? 
ip_spec->l4_proto 1420 : BNXT_IP_PROTO_WILDCARD; 1421 fkeys->basic.n_proto = htons(ETH_P_IPV6); 1422 fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src; 1423 fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src; 1424 fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst; 1425 fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst; 1426 break; 1427 } 1428 case TCP_V6_FLOW: 1429 case UDP_V6_FLOW: { 1430 struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec; 1431 struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec; 1432 1433 fkeys->basic.ip_proto = IPPROTO_TCP; 1434 if (flow_type == UDP_V6_FLOW) 1435 fkeys->basic.ip_proto = IPPROTO_UDP; 1436 fkeys->basic.n_proto = htons(ETH_P_IPV6); 1437 1438 fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src; 1439 fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src; 1440 fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst; 1441 fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst; 1442 fkeys->ports.src = ip_spec->psrc; 1443 fmasks->ports.src = ip_mask->psrc; 1444 fkeys->ports.dst = ip_spec->pdst; 1445 fmasks->ports.dst = ip_mask->pdst; 1446 break; 1447 } 1448 default: 1449 rc = -EOPNOTSUPP; 1450 goto ntuple_err; 1451 } 1452 if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks))) 1453 goto ntuple_err; 1454 1455 idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL); 1456 rcu_read_lock(); 1457 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx); 1458 if (fltr) { 1459 rcu_read_unlock(); 1460 rc = -EEXIST; 1461 goto ntuple_err; 1462 } 1463 rcu_read_unlock(); 1464 1465 new_fltr->base.flags = BNXT_ACT_NO_AGING; 1466 if (fs->flow_type & FLOW_RSS) { 1467 struct bnxt_rss_ctx *rss_ctx; 1468 1469 new_fltr->base.fw_vnic_id = 0; 1470 new_fltr->base.flags |= BNXT_ACT_RSS_CTX; 1471 rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context); 1472 if (rss_ctx) { 1473 new_fltr->base.fw_vnic_id = rss_ctx->index; 1474 } else { 1475 rc = 
-EINVAL; 1476 goto ntuple_err; 1477 } 1478 } 1479 if (fs->ring_cookie == RX_CLS_FLOW_DISC) 1480 new_fltr->base.flags |= BNXT_ACT_DROP; 1481 else 1482 new_fltr->base.rxq = ethtool_get_flow_spec_ring(fs->ring_cookie); 1483 __set_bit(BNXT_FLTR_VALID, &new_fltr->base.state); 1484 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx); 1485 if (!rc) { 1486 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr); 1487 if (rc) { 1488 bnxt_del_ntp_filter(bp, new_fltr); 1489 return rc; 1490 } 1491 fs->location = new_fltr->base.sw_id; 1492 return 0; 1493 } 1494 1495 ntuple_err: 1496 atomic_dec(&l2_fltr->refcnt); 1497 kfree(new_fltr); 1498 return rc; 1499 } 1500 1501 static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd) 1502 { 1503 struct ethtool_rx_flow_spec *fs = &cmd->fs; 1504 u32 ring, flow_type; 1505 int rc; 1506 u8 vf; 1507 1508 if (!netif_running(bp->dev)) 1509 return -EAGAIN; 1510 if (!(bp->flags & BNXT_FLAG_RFS)) 1511 return -EPERM; 1512 if (fs->location != RX_CLS_LOC_ANY) 1513 return -EINVAL; 1514 1515 flow_type = fs->flow_type; 1516 if ((flow_type == IP_USER_FLOW || 1517 flow_type == IPV6_USER_FLOW) && 1518 !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO)) 1519 return -EOPNOTSUPP; 1520 if (flow_type & FLOW_MAC_EXT) 1521 return -EINVAL; 1522 flow_type &= ~FLOW_EXT; 1523 1524 if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW) 1525 return bnxt_add_ntuple_cls_rule(bp, cmd); 1526 1527 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); 1528 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); 1529 if (BNXT_VF(bp) && vf) 1530 return -EINVAL; 1531 if (BNXT_PF(bp) && vf > bp->pf.active_vfs) 1532 return -EINVAL; 1533 if (!vf && ring >= bp->rx_nr_rings) 1534 return -EINVAL; 1535 1536 if (flow_type == ETHER_FLOW) 1537 rc = bnxt_add_l2_cls_rule(bp, fs); 1538 else 1539 rc = bnxt_add_ntuple_cls_rule(bp, cmd); 1540 return rc; 1541 } 1542 1543 static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd) 1544 { 1545 struct 
ethtool_rx_flow_spec *fs = &cmd->fs; 1546 struct bnxt_filter_base *fltr_base; 1547 struct bnxt_ntuple_filter *fltr; 1548 u32 id = fs->location; 1549 1550 rcu_read_lock(); 1551 fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl, 1552 BNXT_L2_FLTR_HASH_SIZE, id); 1553 if (fltr_base) { 1554 struct bnxt_l2_filter *l2_fltr; 1555 1556 l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base); 1557 rcu_read_unlock(); 1558 bnxt_hwrm_l2_filter_free(bp, l2_fltr); 1559 bnxt_del_l2_filter(bp, l2_fltr); 1560 return 0; 1561 } 1562 fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl, 1563 BNXT_NTP_FLTR_HASH_SIZE, id); 1564 if (!fltr_base) { 1565 rcu_read_unlock(); 1566 return -ENOENT; 1567 } 1568 1569 fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base); 1570 if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) { 1571 rcu_read_unlock(); 1572 return -EINVAL; 1573 } 1574 rcu_read_unlock(); 1575 bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr); 1576 bnxt_del_ntp_filter(bp, fltr); 1577 return 0; 1578 } 1579 1580 static u64 get_ethtool_ipv4_rss(struct bnxt *bp) 1581 { 1582 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4) 1583 return RXH_IP_SRC | RXH_IP_DST; 1584 return 0; 1585 } 1586 1587 static u64 get_ethtool_ipv6_rss(struct bnxt *bp) 1588 { 1589 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6) 1590 return RXH_IP_SRC | RXH_IP_DST; 1591 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL) 1592 return RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL; 1593 return 0; 1594 } 1595 1596 static int bnxt_get_rxfh_fields(struct net_device *dev, 1597 struct ethtool_rxfh_fields *cmd) 1598 { 1599 struct bnxt *bp = netdev_priv(dev); 1600 1601 cmd->data = 0; 1602 switch (cmd->flow_type) { 1603 case TCP_V4_FLOW: 1604 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) 1605 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1606 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1607 cmd->data |= get_ethtool_ipv4_rss(bp); 1608 break; 1609 case UDP_V4_FLOW: 1610 if (bp->rss_hash_cfg & 
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4) 1611 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1612 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1613 fallthrough; 1614 case AH_ESP_V4_FLOW: 1615 if (bp->rss_hash_cfg & 1616 (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 | 1617 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4)) 1618 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1619 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1620 fallthrough; 1621 case SCTP_V4_FLOW: 1622 case AH_V4_FLOW: 1623 case ESP_V4_FLOW: 1624 case IPV4_FLOW: 1625 cmd->data |= get_ethtool_ipv4_rss(bp); 1626 break; 1627 1628 case TCP_V6_FLOW: 1629 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) 1630 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1631 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1632 cmd->data |= get_ethtool_ipv6_rss(bp); 1633 break; 1634 case UDP_V6_FLOW: 1635 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6) 1636 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1637 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1638 fallthrough; 1639 case AH_ESP_V6_FLOW: 1640 if (bp->rss_hash_cfg & 1641 (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 | 1642 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6)) 1643 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1644 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1645 fallthrough; 1646 case SCTP_V6_FLOW: 1647 case AH_V6_FLOW: 1648 case ESP_V6_FLOW: 1649 case IPV6_FLOW: 1650 cmd->data |= get_ethtool_ipv6_rss(bp); 1651 break; 1652 } 1653 return 0; 1654 } 1655 1656 #define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3) 1657 #define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST) 1658 1659 static int bnxt_set_rxfh_fields(struct net_device *dev, 1660 const struct ethtool_rxfh_fields *cmd, 1661 struct netlink_ext_ack *extack) 1662 { 1663 struct bnxt *bp = netdev_priv(dev); 1664 int tuple, rc = 0; 1665 u32 rss_hash_cfg; 1666 1667 rss_hash_cfg = bp->rss_hash_cfg; 1668 1669 if (cmd->data == RXH_4TUPLE) 1670 tuple = 4; 1671 else if (cmd->data == RXH_2TUPLE || 1672 cmd->data == (RXH_2TUPLE | RXH_IP6_FL)) 1673 tuple = 2; 1674 else if (!cmd->data) 1675 tuple = 0; 1676 else 1677 return 
-EINVAL; 1678 1679 if (cmd->data & RXH_IP6_FL && 1680 !(bp->rss_cap & BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP)) 1681 return -EINVAL; 1682 1683 if (cmd->flow_type == TCP_V4_FLOW) { 1684 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4; 1685 if (tuple == 4) 1686 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4; 1687 } else if (cmd->flow_type == UDP_V4_FLOW) { 1688 if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP)) 1689 return -EINVAL; 1690 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4; 1691 if (tuple == 4) 1692 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4; 1693 } else if (cmd->flow_type == TCP_V6_FLOW) { 1694 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 1695 if (tuple == 4) 1696 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 1697 } else if (cmd->flow_type == UDP_V6_FLOW) { 1698 if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP)) 1699 return -EINVAL; 1700 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 1701 if (tuple == 4) 1702 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 1703 } else if (cmd->flow_type == AH_ESP_V4_FLOW) { 1704 if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) || 1705 !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP))) 1706 return -EINVAL; 1707 rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 | 1708 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4); 1709 if (tuple == 4) 1710 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 | 1711 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4; 1712 } else if (cmd->flow_type == AH_ESP_V6_FLOW) { 1713 if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) || 1714 !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP))) 1715 return -EINVAL; 1716 rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 | 1717 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6); 1718 if (tuple == 4) 1719 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 | 1720 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6; 1721 } else if (tuple == 4) { 1722 return -EINVAL; 1723 } 1724 1725 switch 
(cmd->flow_type) { 1726 case TCP_V4_FLOW: 1727 case UDP_V4_FLOW: 1728 case SCTP_V4_FLOW: 1729 case AH_ESP_V4_FLOW: 1730 case AH_V4_FLOW: 1731 case ESP_V4_FLOW: 1732 case IPV4_FLOW: 1733 if (tuple == 2) 1734 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4; 1735 else if (!tuple) 1736 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4; 1737 break; 1738 1739 case TCP_V6_FLOW: 1740 case UDP_V6_FLOW: 1741 case SCTP_V6_FLOW: 1742 case AH_ESP_V6_FLOW: 1743 case AH_V6_FLOW: 1744 case ESP_V6_FLOW: 1745 case IPV6_FLOW: 1746 rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 1747 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL); 1748 if (!tuple) 1749 break; 1750 if (cmd->data & RXH_IP6_FL) 1751 rss_hash_cfg |= 1752 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL; 1753 else if (tuple == 2) 1754 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6; 1755 break; 1756 } 1757 1758 if (bp->rss_hash_cfg == rss_hash_cfg) 1759 return 0; 1760 1761 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) 1762 bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg; 1763 bp->rss_hash_cfg = rss_hash_cfg; 1764 if (netif_running(bp->dev)) { 1765 bnxt_close_nic(bp, false, false); 1766 rc = bnxt_open_nic(bp, false, false); 1767 } 1768 return rc; 1769 } 1770 1771 static u32 bnxt_get_rx_ring_count(struct net_device *dev) 1772 { 1773 struct bnxt *bp = netdev_priv(dev); 1774 1775 return bp->rx_nr_rings; 1776 } 1777 1778 static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 1779 u32 *rule_locs) 1780 { 1781 struct bnxt *bp = netdev_priv(dev); 1782 int rc = 0; 1783 1784 switch (cmd->cmd) { 1785 case ETHTOOL_GRXCLSRLCNT: 1786 cmd->rule_cnt = bp->ntp_fltr_count; 1787 cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL; 1788 break; 1789 1790 case ETHTOOL_GRXCLSRLALL: 1791 rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs); 1792 break; 1793 1794 case ETHTOOL_GRXCLSRULE: 1795 rc = bnxt_grxclsrule(bp, cmd); 1796 break; 1797 1798 default: 1799 rc = -EOPNOTSUPP; 1800 break; 1801 } 1802 1803 return rc; 1804 } 1805 1806 
static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 1807 { 1808 struct bnxt *bp = netdev_priv(dev); 1809 int rc; 1810 1811 switch (cmd->cmd) { 1812 case ETHTOOL_SRXCLSRLINS: 1813 rc = bnxt_srxclsrlins(bp, cmd); 1814 break; 1815 1816 case ETHTOOL_SRXCLSRLDEL: 1817 rc = bnxt_srxclsrldel(bp, cmd); 1818 break; 1819 1820 default: 1821 rc = -EOPNOTSUPP; 1822 break; 1823 } 1824 return rc; 1825 } 1826 1827 u32 bnxt_get_rxfh_indir_size(struct net_device *dev) 1828 { 1829 struct bnxt *bp = netdev_priv(dev); 1830 1831 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1832 return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) * 1833 BNXT_RSS_TABLE_ENTRIES_P5; 1834 return HW_HASH_INDEX_SIZE; 1835 } 1836 1837 static u32 bnxt_get_rxfh_key_size(struct net_device *dev) 1838 { 1839 return HW_HASH_KEY_SIZE; 1840 } 1841 1842 static int bnxt_get_rxfh(struct net_device *dev, 1843 struct ethtool_rxfh_param *rxfh) 1844 { 1845 struct bnxt_rss_ctx *rss_ctx = NULL; 1846 struct bnxt *bp = netdev_priv(dev); 1847 u32 *indir_tbl = bp->rss_indir_tbl; 1848 struct bnxt_vnic_info *vnic; 1849 u32 i, tbl_size; 1850 1851 rxfh->hfunc = ETH_RSS_HASH_TOP; 1852 1853 if (!bp->vnic_info) 1854 return 0; 1855 1856 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 1857 if (rxfh->rss_context) { 1858 struct ethtool_rxfh_context *ctx; 1859 1860 ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context); 1861 if (!ctx) 1862 return -EINVAL; 1863 indir_tbl = ethtool_rxfh_context_indir(ctx); 1864 rss_ctx = ethtool_rxfh_context_priv(ctx); 1865 vnic = &rss_ctx->vnic; 1866 } 1867 1868 if (rxfh->indir && indir_tbl) { 1869 tbl_size = bnxt_get_rxfh_indir_size(dev); 1870 for (i = 0; i < tbl_size; i++) 1871 rxfh->indir[i] = indir_tbl[i]; 1872 } 1873 1874 if (rxfh->key && vnic->rss_hash_key) 1875 memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE); 1876 1877 return 0; 1878 } 1879 1880 static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx, 1881 struct bnxt_rss_ctx *rss_ctx, 1882 const struct 
ethtool_rxfh_param *rxfh) 1883 { 1884 if (rxfh->key) { 1885 if (rss_ctx) { 1886 memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key, 1887 HW_HASH_KEY_SIZE); 1888 } else { 1889 memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE); 1890 bp->rss_hash_key_updated = true; 1891 } 1892 } 1893 if (rxfh->indir) { 1894 u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 1895 u32 *indir_tbl = bp->rss_indir_tbl; 1896 1897 if (rss_ctx) 1898 indir_tbl = ethtool_rxfh_context_indir(ctx); 1899 for (i = 0; i < tbl_size; i++) 1900 indir_tbl[i] = rxfh->indir[i]; 1901 pad = bp->rss_indir_tbl_entries - tbl_size; 1902 if (pad) 1903 memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl)); 1904 } 1905 } 1906 1907 static int bnxt_rxfh_context_check(struct bnxt *bp, 1908 const struct ethtool_rxfh_param *rxfh, 1909 struct netlink_ext_ack *extack) 1910 { 1911 if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) { 1912 NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported"); 1913 return -EOPNOTSUPP; 1914 } 1915 1916 if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) { 1917 NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported"); 1918 return -EOPNOTSUPP; 1919 } 1920 1921 if (!netif_running(bp->dev)) { 1922 NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down"); 1923 return -EAGAIN; 1924 } 1925 1926 return 0; 1927 } 1928 1929 static int bnxt_create_rxfh_context(struct net_device *dev, 1930 struct ethtool_rxfh_context *ctx, 1931 const struct ethtool_rxfh_param *rxfh, 1932 struct netlink_ext_ack *extack) 1933 { 1934 struct bnxt *bp = netdev_priv(dev); 1935 struct bnxt_rss_ctx *rss_ctx; 1936 struct bnxt_vnic_info *vnic; 1937 int rc; 1938 1939 rc = bnxt_rxfh_context_check(bp, rxfh, extack); 1940 if (rc) 1941 return rc; 1942 1943 if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) { 1944 NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u", 1945 BNXT_MAX_ETH_RSS_CTX); 1946 return -EINVAL; 1947 } 1948 1949 if (!bnxt_rfs_capable(bp, true)) { 1950 NL_SET_ERR_MSG_MOD(extack, "Out 
hardware resources"); 1951 return -ENOMEM; 1952 } 1953 1954 rss_ctx = ethtool_rxfh_context_priv(ctx); 1955 1956 bp->num_rss_ctx++; 1957 1958 vnic = &rss_ctx->vnic; 1959 vnic->rss_ctx = ctx; 1960 vnic->flags |= BNXT_VNIC_RSSCTX_FLAG; 1961 vnic->vnic_id = BNXT_VNIC_ID_INVALID; 1962 rc = bnxt_alloc_vnic_rss_table(bp, vnic); 1963 if (rc) 1964 goto out; 1965 1966 /* Populate defaults in the context */ 1967 bnxt_set_dflt_rss_indir_tbl(bp, ctx); 1968 ctx->hfunc = ETH_RSS_HASH_TOP; 1969 memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE); 1970 memcpy(ethtool_rxfh_context_key(ctx), 1971 bp->rss_hash_key, HW_HASH_KEY_SIZE); 1972 1973 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings); 1974 if (rc) { 1975 NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC"); 1976 goto out; 1977 } 1978 1979 rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA); 1980 if (rc) { 1981 NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); 1982 goto out; 1983 } 1984 bnxt_modify_rss(bp, ctx, rss_ctx, rxfh); 1985 1986 rc = __bnxt_setup_vnic_p5(bp, vnic); 1987 if (rc) { 1988 NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); 1989 goto out; 1990 } 1991 1992 rss_ctx->index = rxfh->rss_context; 1993 return 0; 1994 out: 1995 bnxt_del_one_rss_ctx(bp, rss_ctx, true); 1996 return rc; 1997 } 1998 1999 static int bnxt_modify_rxfh_context(struct net_device *dev, 2000 struct ethtool_rxfh_context *ctx, 2001 const struct ethtool_rxfh_param *rxfh, 2002 struct netlink_ext_ack *extack) 2003 { 2004 struct bnxt *bp = netdev_priv(dev); 2005 struct bnxt_rss_ctx *rss_ctx; 2006 int rc; 2007 2008 rc = bnxt_rxfh_context_check(bp, rxfh, extack); 2009 if (rc) 2010 return rc; 2011 2012 rss_ctx = ethtool_rxfh_context_priv(ctx); 2013 2014 bnxt_modify_rss(bp, ctx, rss_ctx, rxfh); 2015 2016 return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic); 2017 } 2018 2019 static int bnxt_remove_rxfh_context(struct net_device *dev, 2020 struct ethtool_rxfh_context *ctx, 2021 u32 rss_context, 2022 struct netlink_ext_ack 
*extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_rss_ctx *rss_ctx;

	rss_ctx = ethtool_rxfh_context_priv(ctx);

	bnxt_del_one_rss_ctx(bp, rss_ctx, true);
	return 0;
}

/* ethtool .set_rxfh handler: apply a new RSS key/indirection table to the
 * default RSS context.  Only the Toeplitz hash function is supported.  If
 * the interface is running, the NIC is closed and reopened so the new
 * settings take effect in hardware.
 */
static int bnxt_set_rxfh(struct net_device *dev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	bnxt_modify_rss(bp, NULL, NULL, rxfh);

	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}

/* ethtool .get_drvinfo handler: report driver name, firmware version and
 * PCI bus info.  EEPROM/register dump lengths are reported as zero here;
 * the register dump length comes from .get_regs_len instead.
 */
static void bnxt_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct bnxt *bp = netdev_priv(dev);

	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
	strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	info->n_stats = bnxt_get_num_stats(bp);
	info->testinfo_len = bp->num_tests;
	/* TODO CHIMP_FW: eeprom dump details */
	info->eedump_len = 0;
	/* TODO CHIMP FW: reg dump details */
	info->regdump_len = 0;
}

/* ethtool .get_regs_len handler: register dumps are only supported on the
 * PF; the dump is the fixed PXP register block followed by the PCIe
 * statistics area.
 */
static int bnxt_get_regs_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!BNXT_PF(bp))
		return -EOPNOTSUPP;

	return BNXT_PXP_REG_LEN + bp->pcie_stat_len;
}

/* Send HWRM_PCIE_QSTATS and return a pointer to the DMA buffer holding the
 * PCIe statistics, or NULL on failure.  The buffer is a DMA slice of the
 * request and stays valid until the caller drops the request.
 */
static void *
__bnxt_hwrm_pcie_qstats(struct bnxt *bp, struct hwrm_pcie_qstats_input *req)
{
	struct pcie_ctx_hw_stats_v2 *hw_pcie_stats;
	dma_addr_t hw_pcie_stats_addr;
	int rc;

	hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
					   &hw_pcie_stats_addr);
	if (!hw_pcie_stats)
		return NULL;

	req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
	req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
	rc = hwrm_req_send(bp, req);

	return rc ? NULL : hw_pcie_stats;
}

/* Byte ranges inside struct pcie_ctx_hw_stats_v2 that hold 32-bit
 * counters; everything outside these ranges is treated as 64-bit.
 */
#define BNXT_PCIE_32B_ENTRY(start, end)			\
	{ offsetof(struct pcie_ctx_hw_stats_v2, start),\
	  offsetof(struct pcie_ctx_hw_stats_v2, end) }

static const struct {
	u16 start;
	u16 end;
} bnxt_pcie_32b_entries[] = {
	BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
	BNXT_PCIE_32B_ENTRY(pcie_tl_credit_nph_histogram[0], unused_1),
	BNXT_PCIE_32B_ENTRY(pcie_rd_latency_histogram[0], unused_2),
};

/* ethtool .get_regs handler: dump the PXP registers (unless firmware
 * restricts register access), then append byte-swapped PCIe statistics.
 * regs->version encodes which PCIe stats layout was returned (1 = legacy,
 * 2 = partial v2, 3 = full v2).
 */
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *_p)
{
	struct hwrm_pcie_qstats_output *resp;
	struct hwrm_pcie_qstats_input *req;
	struct bnxt *bp = netdev_priv(dev);
	u8 *src;

	regs->version = 0;
	if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED))
		bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);

	if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
		return;

	if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
		return;

	resp = hwrm_req_hold(bp, req);
	src = __bnxt_hwrm_pcie_qstats(bp, req);
	if (src) {
		u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
		int i, j, len;

		/* never copy more than the caller's buffer was sized for */
		len = min(bp->pcie_stat_len, le16_to_cpu(resp->pcie_stat_size));
		if (len <= sizeof(struct pcie_ctx_hw_stats))
			regs->version = 1;
		else if (len < sizeof(struct pcie_ctx_hw_stats_v2))
			regs->version = 2;
		else
			regs->version = 3;

		/* Stats are little-endian: convert 32-bit fields inside the
		 * bnxt_pcie_32b_entries ranges, 64-bit fields elsewhere.
		 * j only advances while more 32-bit ranges remain.
		 */
		for (i = 0, j = 0; i < len; ) {
			if (i >= bnxt_pcie_32b_entries[j].start &&
			    i <= bnxt_pcie_32b_entries[j].end) {
				u32 *dst32 = (u32 *)(dst + i);

				*dst32 = le32_to_cpu(*(__le32 *)(src + i));
				i += 4;
				if (i > bnxt_pcie_32b_entries[j].end &&
				    j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
					j++;
			} else {
				u64 *dst64 = (u64 *)(dst + i);

				*dst64 = le64_to_cpu(*(__le64 *)(src + i));
				i += 8;
			}
		}
	}
	hwrm_req_drop(bp, req);
}

/* ethtool .get_wol handler: only magic-packet wake is supported, and only
 * when firmware reported the WoL capability.
 */
static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnxt *bp = netdev_priv(dev);

	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
	if (bp->flags & BNXT_FLAG_WOL_CAP) {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
	}
}

/* ethtool .set_wol handler: enable or disable magic-packet wake by
 * allocating or freeing the firmware WoL filter.  bp->wol tracks the
 * current state so the filter is only (de)allocated on a change.
 */
static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnxt *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (!(bp->flags & BNXT_FLAG_WOL_CAP))
			return -EINVAL;
		if (!bp->wol) {
			if (bnxt_hwrm_alloc_wol_fltr(bp))
				return -EBUSY;
			bp->wol = 1;
		}
	} else {
		if (bp->wol) {
			if (bnxt_hwrm_free_wol_fltr(bp))
				return -EBUSY;
			bp->wol = 0;
		}
	}
	return 0;
}

/* Translate a firmware NRZ speed-support mask into an ethtool link-mode
 * bitmap.  Only one representative link mode per speed is set.
 */
/* TODO: support 25GB, 40GB, 50GB with different cable type */
void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds)
{
	linkmode_zero(mode);

	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
		linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode);
	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode);
}

/* Media classes used, together with speed and signalling mode, to select
 * the matching ethtool link-mode bit in bnxt_link_modes[].
 */
enum bnxt_media_type {
	BNXT_MEDIA_UNKNOWN = 0,
	BNXT_MEDIA_TP,
	BNXT_MEDIA_CR,
	BNXT_MEDIA_SR,
	BNXT_MEDIA_LR_ER_FR,
	BNXT_MEDIA_KR,
	BNXT_MEDIA_KX,
	BNXT_MEDIA_X,
	__BNXT_MEDIA_END,
};

/* Map firmware PHY type codes to media classes.  Unlisted PHY types are
 * zero-initialized and thus read as BNXT_MEDIA_UNKNOWN.
 */
static const enum bnxt_media_type bnxt_phy_types[] = {
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
};

/* Derive the media class for the current link.  The firmware media type
 * takes precedence; otherwise fall back to the PHY type lookup table.
 */
static enum bnxt_media_type
bnxt_get_media(struct bnxt_link_info *link_info)
{
	switch (link_info->media_type) {
	case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP:
		return BNXT_MEDIA_TP;
	case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC:
		return BNXT_MEDIA_CR;
	default:
		if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types))
			return bnxt_phy_types[link_info->phy_type];
		return BNXT_MEDIA_UNKNOWN;
	}
}

/* Dense speed indices used to address bnxt_link_modes[] and the
 * per-signalling-mode speed mask tables below.
 */
enum bnxt_link_speed_indices {
	BNXT_LINK_SPEED_UNKNOWN = 0,
	BNXT_LINK_SPEED_100MB_IDX,
	BNXT_LINK_SPEED_1GB_IDX,
	BNXT_LINK_SPEED_10GB_IDX,
	BNXT_LINK_SPEED_25GB_IDX,
	BNXT_LINK_SPEED_40GB_IDX,
	BNXT_LINK_SPEED_50GB_IDX,
	BNXT_LINK_SPEED_100GB_IDX,
	BNXT_LINK_SPEED_200GB_IDX,
	BNXT_LINK_SPEED_400GB_IDX,
	__BNXT_LINK_SPEED_END
};

/* Map a firmware link speed value to its dense speed index; PAM4
 * variants of a speed collapse to the same index.
 */
static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed)
{
	switch (speed) {
	case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX;
	case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX;
	case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX;
	case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX;
	case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX;
	case BNXT_LINK_SPEED_50GB:
	case BNXT_LINK_SPEED_50GB_PAM4:
		return BNXT_LINK_SPEED_50GB_IDX;
	case BNXT_LINK_SPEED_100GB:
	case BNXT_LINK_SPEED_100GB_PAM4:
	case BNXT_LINK_SPEED_100GB_PAM4_112:
		return BNXT_LINK_SPEED_100GB_IDX;
	case BNXT_LINK_SPEED_200GB:
	case BNXT_LINK_SPEED_200GB_PAM4:
	case BNXT_LINK_SPEED_200GB_PAM4_112:
		return BNXT_LINK_SPEED_200GB_IDX;
	case BNXT_LINK_SPEED_400GB:
	case BNXT_LINK_SPEED_400GB_PAM4:
	case BNXT_LINK_SPEED_400GB_PAM4_112:
		return BNXT_LINK_SPEED_400GB_IDX;
	default: return BNXT_LINK_SPEED_UNKNOWN;
	}
}

/* 3-D lookup: (speed index, signalling mode, media class) -> ethtool link
 * mode bit.  Unpopulated entries are zero and treated as "no link mode".
 */
static const enum ethtool_link_mode_bit_indices
bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
	[BNXT_LINK_SPEED_100MB_IDX] = {
		{
			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_1GB_IDX] = {
		{
			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			/* historically baseT, but DAC is more
correctly baseX */
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			[BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
			[BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_10GB_IDX] = {
		{
			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
			[BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_25GB_IDX] = {
		{
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_40GB_IDX] = {
		{
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_50GB_IDX] = {
		[BNXT_SIG_MODE_NRZ] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_100GB_IDX] = {
		[BNXT_SIG_MODE_NRZ] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4_112] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_200GB_IDX] = {
		[BNXT_SIG_MODE_PAM4] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4_112] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_400GB_IDX] = {
		[BNXT_SIG_MODE_PAM4] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4_112] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
		},
	},
};

#define BNXT_LINK_MODE_UNKNOWN -1

/* Determine the ethtool link mode bit for the current (or requested)
 * link.  Returns BNXT_LINK_MODE_UNKNOWN when the link is down or no
 * table entry matches.  For 100M/1G TP modes, downgrade to the half-
 * duplex bit when the link is not full duplex.
 */
static enum ethtool_link_mode_bit_indices
bnxt_get_link_mode(struct bnxt_link_info *link_info)
{
	enum ethtool_link_mode_bit_indices link_mode;
	enum bnxt_link_speed_indices speed;
	enum bnxt_media_type media;
	u8 sig_mode;

	if (link_info->phy_link_status != BNXT_LINK_LINK)
		return BNXT_LINK_MODE_UNKNOWN;

	media = bnxt_get_media(link_info);
	/* with autoneg, report the negotiated link; otherwise the forced
	 * speed/signal mode that was requested
	 */
	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
		speed = bnxt_fw_speed_idx(link_info->link_speed);
		sig_mode = link_info->active_fec_sig_mode &
			   PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
	} else {
		speed = bnxt_fw_speed_idx(link_info->req_link_speed);
		sig_mode = link_info->req_signal_mode;
	}
	if (sig_mode >= BNXT_SIG_MODE_MAX)
		return BNXT_LINK_MODE_UNKNOWN;

	/* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux
	 * link mode, but since no such devices exist, the zeroes in the
	 * map can be conveniently used to represent unknown link modes.
	 */
	link_mode = bnxt_link_modes[speed][sig_mode][media];
	if (!link_mode)
		return BNXT_LINK_MODE_UNKNOWN;

	switch (link_mode) {
	case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
		if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
			link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT;
		break;
	case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
		if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
			link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT;
		break;
	default:
		break;
	}

	return link_mode;
}

/* Fill the Pause/Asym_Pause and Autoneg bits of the supported,
 * advertising and lp_advertising masks.  Pause advertisement bits are
 * only reported when flow-control autoneg is enabled; a single bit set
 * in the RX/TX pair maps to Asym_Pause.
 */
static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);

	if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				 lk_ksettings->link_modes.supported);
	}

	if (link_info->support_auto_speeds || link_info->support_auto_speeds2 ||
	    link_info->support_pam4_auto_speeds)
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				 lk_ksettings->link_modes.supported);

	if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		return;

	if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX)
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				 lk_ksettings->link_modes.advertising);
	if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1)
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				 lk_ksettings->link_modes.advertising);
	if (link_info->lp_pause & BNXT_LINK_PAUSE_RX)
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				 lk_ksettings->link_modes.lp_advertising);
	if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1)
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				 lk_ksettings->link_modes.lp_advertising);
}

static
/* Per-signalling-mode firmware speed masks, indexed by the dense speed
 * indices.  A zero entry means the speed has no mask in that encoding.
 */
const u16 bnxt_nrz_speed_masks[] = {
	[BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB,
	[BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB,
	[BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB,
	[BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB,
	[BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB,
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};

static const u16 bnxt_pam4_speed_masks[] = {
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};

static const u16 bnxt_nrz_speeds2_masks[] = {
	[BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB,
	[BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB,
	[BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB,
	[BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB,
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};

static const u16 bnxt_pam4_speeds2_masks[] = {
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4,
	[BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4,
};

static const u16 bnxt_pam4_112_speeds2_masks[] = {
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112,
	[BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112,
};

/* Reverse lookup: find the dense speed index whose mask (for the given
 * signalling mode and PHY capabilities) equals speed_msk.  Returns
 * BNXT_LINK_SPEED_UNKNOWN when no table entry matches.
 */
static enum bnxt_link_speed_indices
bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk)
{
	const u16 *speeds;
	int idx, len;

	switch (sig_mode) {
	case BNXT_SIG_MODE_NRZ:
		if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
			speeds = bnxt_nrz_speeds2_masks;
			len = ARRAY_SIZE(bnxt_nrz_speeds2_masks);
		} else {
			speeds = bnxt_nrz_speed_masks;
			len = ARRAY_SIZE(bnxt_nrz_speed_masks);
		}
		break;
	case BNXT_SIG_MODE_PAM4:
		if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
			speeds = bnxt_pam4_speeds2_masks;
			len = ARRAY_SIZE(bnxt_pam4_speeds2_masks);
		} else {
			speeds = bnxt_pam4_speed_masks;
			len = ARRAY_SIZE(bnxt_pam4_speed_masks);
		}
		break;
	case BNXT_SIG_MODE_PAM4_112:
		speeds = bnxt_pam4_112_speeds2_masks;
		len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks);
		break;
	default:
		return BNXT_LINK_SPEED_UNKNOWN;
	}

	for (idx = 0; idx < len; idx++) {
		if (speeds[idx] == speed_msk)
			return idx;
	}

	return BNXT_LINK_SPEED_UNKNOWN;
}

#define BNXT_FW_SPEED_MSK_BITS 16

/* For each set bit in the firmware speed mask, resolve the matching
 * ethtool link mode for (speed, sig_mode, media) and set it in et_mask.
 * Bits that map to no known speed or link mode are skipped.
 */
static void
__bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
			  u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
{
	enum ethtool_link_mode_bit_indices link_mode;
	enum bnxt_link_speed_indices speed;
	u8 bit;

	for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) {
		speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit);
		if (!speed)
			continue;

		link_mode = bnxt_link_modes[speed][sig_mode][media];
		if (!link_mode)
			continue;

		linkmode_set_bit(link_mode, et_mask);
	}
}

/* Wrapper of the above: when the media is unknown, report the union of
 * link modes over every media class.
 */
static void
bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
			u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
{
	if (media) {
		__bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
					  et_mask);
		return;
	}

	/* list speeds for all media if unknown */
	for (media = 1; media < __BNXT_MEDIA_END; media++)
		__bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
					  et_mask);
}

/* Populate the "supported" link-mode mask from the firmware support
 * masks.  SPEEDS2-capable PHYs report all signalling modes in one
 * support_speeds2 mask; older PHYs use separate NRZ/PAM4 masks and have
 * no PAM4-112 speeds (sp_pam4_112 stays 0).
 */
static void
bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info,
				    enum bnxt_media_type media,
				    struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
	u16 phy_flags = bp->phy_flags;

	if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
		sp_nrz = link_info->support_speeds2;
		sp_pam4 = link_info->support_speeds2;
		sp_pam4_112 = link_info->support_speeds2;
	} else {
		sp_nrz = link_info->support_speeds;
		sp_pam4 = link_info->support_pam4_speeds;
	}
	bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
				lk_ksettings->link_modes.supported);
	bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
				lk_ksettings->link_modes.supported);
	bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
				phy_flags, lk_ksettings->link_modes.supported);
}

/* Populate the "advertising" link-mode mask from the locally advertised
 * firmware speed masks, using the same SPEEDS2 split as above.
 */
static void
bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info,
				enum bnxt_media_type media,
				struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
	u16 phy_flags = bp->phy_flags;

	sp_nrz = link_info->advertising;
	if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
		sp_pam4 = link_info->advertising;
		sp_pam4_112 = link_info->advertising;
	} else {
		sp_pam4 = link_info->advertising_pam4;
	}
	bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
				lk_ksettings->link_modes.advertising);
	bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
				lk_ksettings->link_modes.advertising);
	bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
				phy_flags, lk_ksettings->link_modes.advertising);
}

/* Populate the link partner's advertised link modes from the NRZ and
 * PAM4 masks reported by firmware.
 */
static void
bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info,
			       enum bnxt_media_type media,
			       struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 phy_flags = bp->phy_flags;

	bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media,
				BNXT_SIG_MODE_NRZ, phy_flags,
				lk_ksettings->link_modes.lp_advertising);
	bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media,
				BNXT_SIG_MODE_PAM4, phy_flags,
				lk_ksettings->link_modes.lp_advertising);
}

/* Toggle one firmware speed-mask bit (*speeds) to follow whether the
 * corresponding ethtool link mode is set in et_mask.  *delta records
 * which mask bits have already been changed, since several ethtool modes
 * can map to the same firmware bit; the installed media's setting wins.
 */
static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
			      u16 speed_msk, const unsigned long *et_mask,
			      enum ethtool_link_mode_bit_indices mode)
{
	bool mode_desired = linkmode_test_bit(mode, et_mask);

	if (!mode)
		return;

	/* enabled speeds for installed media should override */
	if (installed_media && mode_desired) {
		*speeds |= speed_msk;
		*delta |= speed_msk;
		return;
	}

	/* many to one mapping, only allow one change per fw_speed bit */
	if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) {
		*speeds ^= speed_msk;
		*delta |= speed_msk;
	}
}

/* Convert a user-supplied ethtool advertising mask into the firmware
 * advertising speed masks, walking every speed index and media class and
 * updating the NRZ / PAM4 / PAM4-112 masks as applicable for this PHY.
 */
static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info,
				    const unsigned long *et_mask)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks;
	enum bnxt_media_type media = bnxt_get_media(link_info);
	u16 *adv, *adv_pam4, *adv_pam4_112 = NULL;
	u32 delta_pam4_112 = 0;
	u32 delta_pam4 = 0;
	u32 delta_nrz = 0;
	int i, m;

	adv = &link_info->advertising;
	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
		/* SPEEDS2 devices keep all signalling modes in one mask */
		adv_pam4 = &link_info->advertising;
		adv_pam4_112 = &link_info->advertising;
		sp_msks = bnxt_nrz_speeds2_masks;
		sp_pam4_msks = bnxt_pam4_speeds2_masks;
		sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks;
	} else {
		adv_pam4 = &link_info->advertising_pam4;
		sp_msks = bnxt_nrz_speed_masks;
		sp_pam4_msks = bnxt_pam4_speed_masks;
	}
	for (i = 1; i < __BNXT_LINK_SPEED_END; i++) {
		/* accept any legal media from user */
		for (m = 1; m < __BNXT_MEDIA_END; m++) {
			bnxt_update_speed(&delta_nrz, m == media,
					  adv, sp_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]);
			bnxt_update_speed(&delta_pam4, m == media,
					  adv_pam4, sp_pam4_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]);
			if (!adv_pam4_112)
				continue;

			bnxt_update_speed(&delta_pam4_112, m == media,
					  adv_pam4_112, sp_pam4_112_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]);
		}
	}
}

/* Report the advertised FEC modes.  With FEC disabled or FEC autoneg off,
 * only FEC_NONE is advertised.
 */
static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
					      struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fec_cfg = link_info->fec_cfg;

	if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 lk_ksettings->link_modes.advertising);
		return;
	}
	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 lk_ksettings->link_modes.advertising);
	if (fec_cfg & BNXT_FEC_ENC_RS)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 lk_ksettings->link_modes.advertising);
	if (fec_cfg & BNXT_FEC_ENC_LLRS)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 lk_ksettings->link_modes.advertising);
}

/* Report the supported FEC modes based on the firmware FEC capability
 * bits; BNXT_FEC_NONE means only FEC_NONE is supported.
 */
static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
					   struct
ethtool_link_ksettings *lk_ksettings)
{
	u16 fec_cfg = link_info->fec_cfg;

	if (fec_cfg & BNXT_FEC_NONE) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 lk_ksettings->link_modes.supported);
		return;
	}
	if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 lk_ksettings->link_modes.supported);
	if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 lk_ksettings->link_modes.supported);
	if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 lk_ksettings->link_modes.supported);
}

/* Convert a firmware link speed code to an ethtool SPEED_* value.
 * NRZ/PAM4 variants of the same rate return the same ethtool speed.
 */
u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
{
	switch (fw_link_speed) {
	case BNXT_LINK_SPEED_100MB:
		return SPEED_100;
	case BNXT_LINK_SPEED_1GB:
		return SPEED_1000;
	case BNXT_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case BNXT_LINK_SPEED_10GB:
		return SPEED_10000;
	case BNXT_LINK_SPEED_20GB:
		return SPEED_20000;
	case BNXT_LINK_SPEED_25GB:
		return SPEED_25000;
	case BNXT_LINK_SPEED_40GB:
		return SPEED_40000;
	case BNXT_LINK_SPEED_50GB:
	case BNXT_LINK_SPEED_50GB_PAM4:
		return SPEED_50000;
	case BNXT_LINK_SPEED_100GB:
	case BNXT_LINK_SPEED_100GB_PAM4:
	case BNXT_LINK_SPEED_100GB_PAM4_112:
		return SPEED_100000;
	case BNXT_LINK_SPEED_200GB:
	case BNXT_LINK_SPEED_200GB_PAM4:
	case BNXT_LINK_SPEED_200GB_PAM4_112:
		return SPEED_200000;
	case BNXT_LINK_SPEED_400GB:
	case BNXT_LINK_SPEED_400GB_PAM4:
	case BNXT_LINK_SPEED_400GB_PAM4_112:
		return SPEED_400000;
	default:
		return SPEED_UNKNOWN;
	}
}

/* Fill base speed/duplex (and lanes) when no exact ethtool link mode was
 * resolved: from the live link if up, or from the forced settings when
 * autoneg is off.
 */
static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
				    struct bnxt_link_info *link_info)
{
	struct ethtool_link_settings *base = &lk_ksettings->base;

	if (link_info->link_state == BNXT_LINK_STATE_UP) {
		base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
		lk_ksettings->lanes = link_info->active_lanes;
	} else if (!link_info->autoneg) {
		base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
	}
}

/* ethtool .get_link_ksettings handler: report supported/advertised/link-
 * partner modes, FEC, speed, duplex, autoneg and port type, all under
 * bp->link_lock to keep the snapshot consistent.
 */
static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct ethtool_link_settings *base = &lk_ksettings->base;
	enum ethtool_link_mode_bit_indices link_mode;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	enum bnxt_media_type media;

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising);
	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	base->duplex = DUPLEX_UNKNOWN;
	base->speed = SPEED_UNKNOWN;
	link_info = &bp->link_info;

	mutex_lock(&bp->link_lock);
	bnxt_get_ethtool_modes(link_info, lk_ksettings);
	media = bnxt_get_media(link_info);
	bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings);
	bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
	link_mode = bnxt_get_link_mode(link_info);
	if (link_mode != BNXT_LINK_MODE_UNKNOWN)
		ethtool_params_from_link_mode(lk_ksettings, link_mode);
	else
		bnxt_get_default_speeds(lk_ksettings, link_info);

	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				 lk_ksettings->link_modes.advertising);
		base->autoneg = AUTONEG_ENABLE;
		bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings);
		/* link partner modes are only meaningful while link is up */
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			bnxt_get_all_ethtool_lp_speeds(link_info, media,
						       lk_ksettings);
	} else {
		base->autoneg = AUTONEG_DISABLE;
	}

	base->port = PORT_NONE;
	if (media == BNXT_MEDIA_TP) {
		base->port = PORT_TP;
		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
				 lk_ksettings->link_modes.advertising);
	} else if (media == BNXT_MEDIA_KR) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
				 lk_ksettings->link_modes.advertising);
	} else {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
				 lk_ksettings->link_modes.advertising);

		if (media == BNXT_MEDIA_CR)
			base->port = PORT_DA;
		else
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;
	mutex_unlock(&bp->link_lock);

	return 0;
}

/* Validate and record a forced link speed (and optional lane count)
 * request.  Picks the firmware speed code and signalling mode matching
 * the device's support masks; lanes == 0 means "don't care".  Returns
 * -EINVAL for unsupported speed/lanes, -EALREADY if nothing changes.
 */
static int
bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 support_pam4_spds = link_info->support_pam4_speeds;
	u16 support_spds2 = link_info->support_speeds2;
	u16 support_spds = link_info->support_speeds;
	u8 sig_mode = BNXT_SIG_MODE_NRZ;
	u32 lanes_needed = 1;
	u16 fw_speed = 0;

	switch (ethtool_speed) {
	case SPEED_100:
		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
		break;
	case SPEED_1000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB))
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
		break;
	case SPEED_2500:
		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
			fw_speed =
PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB; 2976 break; 2977 case SPEED_10000: 2978 if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) || 2979 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB)) 2980 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; 2981 break; 2982 case SPEED_20000: 2983 if (support_spds & BNXT_LINK_SPEED_MSK_20GB) { 2984 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB; 2985 lanes_needed = 2; 2986 } 2987 break; 2988 case SPEED_25000: 2989 if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) || 2990 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB)) 2991 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; 2992 break; 2993 case SPEED_40000: 2994 if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) || 2995 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) { 2996 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; 2997 lanes_needed = 4; 2998 } 2999 break; 3000 case SPEED_50000: 3001 if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) || 3002 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) && 3003 lanes != 1) { 3004 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; 3005 lanes_needed = 2; 3006 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) { 3007 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB; 3008 sig_mode = BNXT_SIG_MODE_PAM4; 3009 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) { 3010 fw_speed = BNXT_LINK_SPEED_50GB_PAM4; 3011 sig_mode = BNXT_SIG_MODE_PAM4; 3012 } 3013 break; 3014 case SPEED_100000: 3015 if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) || 3016 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) && 3017 lanes != 2 && lanes != 1) { 3018 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB; 3019 lanes_needed = 4; 3020 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) { 3021 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB; 3022 sig_mode = BNXT_SIG_MODE_PAM4; 3023 lanes_needed = 2; 3024 } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) && 3025 lanes != 1) { 3026 fw_speed = BNXT_LINK_SPEED_100GB_PAM4; 3027 sig_mode = 
BNXT_SIG_MODE_PAM4; 3028 lanes_needed = 2; 3029 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) { 3030 fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112; 3031 sig_mode = BNXT_SIG_MODE_PAM4_112; 3032 } 3033 break; 3034 case SPEED_200000: 3035 if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) { 3036 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB; 3037 sig_mode = BNXT_SIG_MODE_PAM4; 3038 lanes_needed = 4; 3039 } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) && 3040 lanes != 2) { 3041 fw_speed = BNXT_LINK_SPEED_200GB_PAM4; 3042 sig_mode = BNXT_SIG_MODE_PAM4; 3043 lanes_needed = 4; 3044 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) { 3045 fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112; 3046 sig_mode = BNXT_SIG_MODE_PAM4_112; 3047 lanes_needed = 2; 3048 } 3049 break; 3050 case SPEED_400000: 3051 if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) && 3052 lanes != 4) { 3053 fw_speed = BNXT_LINK_SPEED_400GB_PAM4; 3054 sig_mode = BNXT_SIG_MODE_PAM4; 3055 lanes_needed = 8; 3056 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) { 3057 fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112; 3058 sig_mode = BNXT_SIG_MODE_PAM4_112; 3059 lanes_needed = 4; 3060 } 3061 break; 3062 } 3063 3064 if (!fw_speed) { 3065 netdev_err(dev, "unsupported speed!\n"); 3066 return -EINVAL; 3067 } 3068 3069 if (lanes && lanes != lanes_needed) { 3070 netdev_err(dev, "unsupported number of lanes for speed\n"); 3071 return -EINVAL; 3072 } 3073 3074 if (link_info->req_link_speed == fw_speed && 3075 link_info->req_signal_mode == sig_mode && 3076 link_info->autoneg == 0) 3077 return -EALREADY; 3078 3079 link_info->req_link_speed = fw_speed; 3080 link_info->req_signal_mode = sig_mode; 3081 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; 3082 link_info->autoneg = 0; 3083 link_info->advertising = 0; 3084 link_info->advertising_pam4 = 0; 3085 3086 return 0; 3087 } 3088 3089 u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode) 3090 { 3091 u16 
fw_speed_mask = 0;

	/* Half-duplex modes map to the same firmware speed bit as their
	 * full-duplex counterparts.
	 */
	if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) ||
	    linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode))
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) ||
	    linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode))
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode))
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;

	return fw_speed_mask;
}

/* ethtool .set_link_ksettings handler.  Programs either autonegotiated
 * advertised modes or a forced speed/duplex/lane combination, then pushes
 * the new settings to firmware if the interface is up.
 */
static int bnxt_set_link_ksettings(struct net_device *dev,
				   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u32 speed, lanes = 0;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (base->autoneg == AUTONEG_ENABLE) {
		bnxt_set_ethtool_speeds(link_info,
					lk_ksettings->link_modes.advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		/* Empty advertisement: advertise everything supported */
		if (!link_info->advertising && !link_info->advertising_pam4) {
			link_info->advertising = link_info->support_auto_speeds;
			link_info->advertising_pam4 =
				link_info->support_pam4_auto_speeds;
		}
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
			set_pause = true;
	} else {
		u8 phy_type = link_info->phy_type;

		/* Twisted-pair PHYs cannot run with forced speed */
		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		lanes = lk_ksettings->lanes;
		rc = bnxt_force_link_speed(dev, speed, lanes);
		if (rc) {
			/* -EALREADY means the settings are already in
			 * effect; report success without touching firmware.
			 */
			if (rc == -EALREADY)
				rc = 0;
			goto set_setting_exit;
		}
	}

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}

/* ethtool .get_fecparam handler.  Reports configured FEC modes from
 * link_info->fec_cfg and the currently active FEC encoding.
 */
static int bnxt_get_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fec)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	u8 active_fec;
	u16 fec_cfg;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	active_fec = link_info->active_fec_sig_mode &
		     PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
	/* BNXT_FEC_NONE means FEC is not supported at all on this port */
	if (fec_cfg & BNXT_FEC_NONE) {
		fec->fec = ETHTOOL_FEC_NONE;
		fec->active_fec = ETHTOOL_FEC_NONE;
		return 0;
	}
	if (fec_cfg & BNXT_FEC_AUTONEG)
		fec->fec |= ETHTOOL_FEC_AUTO;
	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
		fec->fec |= ETHTOOL_FEC_BASER;
	if (fec_cfg & BNXT_FEC_ENC_RS)
		fec->fec |= ETHTOOL_FEC_RS;
	if (fec_cfg & BNXT_FEC_ENC_LLRS)
		fec->fec |= ETHTOOL_FEC_LLRS;

	switch (active_fec) {
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_BASER;
		break;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_RS;
		break;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_LLRS;
		break;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_OFF;
		break;
	}
	return 0;
}

/* One histogram bin per codeword symbol-error count (0..15), matching
 * the firmware's accumulated_codewords_err_s[] array; terminated by a
 * zeroed sentinel entry.
 */
static const struct ethtool_fec_hist_range bnxt_fec_ranges[] = {
	{  0,  0},
	{  1,  1},
	{  2,  2},
	{  3,  3},
	{  4,  4},
	{  5,  5},
	{  6,  6},
	{  7,  7},
	{  8,  8},
	{  9,  9},
	{ 10, 10},
	{ 11, 11},
	{ 12, 12},
	{ 13, 13},
	{ 14, 14},
	{ 15, 15},
	{  0,  0},
};

/* Query the FEC codeword-error histogram counters from firmware
 * (HWRM_PORT_PHY_FDRSTAT) and fill in @hist.  Silently does nothing if
 * the device does not advertise FDR statistics support or on any
 * firmware error (best effort).
 */
static void bnxt_hwrm_port_phy_fdrstat(struct bnxt *bp,
				       struct ethtool_fec_hist *hist)
{
	struct ethtool_fec_hist_value *values = hist->values;
	struct hwrm_port_phy_fdrstat_output *resp;
	struct hwrm_port_phy_fdrstat_input *req;
	int rc, i;

	if (!(bp->phy_flags & BNXT_PHY_FL_FDRSTATS))
		return;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_FDRSTAT);
	if (rc)
		return;

	req->port_id = cpu_to_le16(bp->pf.port_id);
	req->ops = cpu_to_le16(PORT_PHY_FDRSTAT_REQ_OPS_COUNTER);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		hist->ranges = bnxt_fec_ranges;
		for (i = 0; i <= 15; i++) {
			__le64 sum = resp->accumulated_codewords_err_s[i];

			values[i].sum = le64_to_cpu(sum);
		}
	}
	hwrm_req_drop(bp, req);
}

/* ethtool .get_fec_stats handler.  Reads FEC counters out of the cached
 * extended port statistics; only available on PFs with extended port
 * stats enabled.
 */
static void bnxt_get_fec_stats(struct net_device *dev,
			       struct ethtool_fec_stats *fec_stats,
			       struct ethtool_fec_hist *hist)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return;

	rx = bp->rx_port_stats_ext.sw_stats;
	fec_stats->corrected_bits.total =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));

	/* The block counters only exist beyond the legacy stats area */
	if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY)
		return;

	fec_stats->corrected_blocks.total =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
	fec_stats->uncorrectable_blocks.total =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
	bnxt_hwrm_port_phy_fdrstat(bp, hist);
}

/* Translate a forced ethtool FEC selection into PORT_PHY_CFG request
 * flags.  Only one encoding is honored, in BASER -> RS -> LLRS priority
 * order; autoneg of FEC is always disabled on this path.
 */
static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
					 u32 fec)
{
	u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;

	if (fec & ETHTOOL_FEC_BASER)
		fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
	else if (fec & ETHTOOL_FEC_RS)
		fw_fec |= BNXT_FEC_RS_ON(link_info);
	else if (fec & ETHTOOL_FEC_LLRS)
		fw_fec |= BNXT_FEC_LLRS_ON;
	return fw_fec;
}

/* ethtool .set_fecparam handler.  Validates the requested FEC modes
 * against the port's capabilities, sends a PORT_PHY_CFG with a PHY reset,
 * and refreshes the cached link state on success.
 */
static int bnxt_set_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fecparam)
{
	struct hwrm_port_phy_cfg_input *req;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	u32 new_cfg, fec = fecparam->fec;
	u16 fec_cfg;
	int rc;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	if (fec_cfg & BNXT_FEC_NONE)
		return -EOPNOTSUPP;

	if (fec & ETHTOOL_FEC_OFF) {
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
			  BNXT_FEC_ALL_OFF(link_info);
		goto apply_fec;
	}
	/* Reject any requested mode the hardware cannot do */
	if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
	    ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
	    ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
	    ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
		return -EINVAL;

	if (fec & ETHTOOL_FEC_AUTO) {
		/* FEC autoneg only makes sense when link autoneg is on */
		if (!link_info->autoneg)
			return -EINVAL;
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
	} else {
		new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
	}

apply_fec:
	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;
	req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_req_send(bp, req);
	/* update current settings */
	if (!rc) {
		mutex_lock(&bp->link_lock);
		bnxt_update_link(bp, false);
		mutex_unlock(&bp->link_lock);
	}
	return rc;
}

/* ethtool .get_pauseparam handler.  Reports the requested (not
 * necessarily negotiated) flow-control configuration; VFs have no
 * pause control and report all zeros.
 */
static void bnxt_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (BNXT_VF(bp))
		return;
	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
	epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
	epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
}

/* ethtool .get_pause_stats handler.  Pulls pause frame counters out of
 * the cached port statistics (PF with port stats only).
 */
static void bnxt_get_pause_stats(struct net_device *dev,
				 struct ethtool_pause_stats *epstat)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	/* TX counters live in the second half of the stats block */
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
	epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
}

/* ethtool .set_pauseparam handler.  Autoneg pause requires link autoneg;
 * forced pause clears the autoneg-pause bit and may force a link change.
 */
static int bnxt_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	int rc = 0;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (epause->autoneg) {
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
			rc = -EINVAL;
			goto pause_exit;
		}

		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	} else {
		/* when transition from auto pause to force pause,
		 * force a link change
		 */
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->force_link_chng = true;
		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	}
	if (epause->rx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;

	if (epause->tx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_pause(bp);

pause_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}

/* ethtool .get_link handler. */
static u32 bnxt_get_link(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	/* TODO: handle MF, VF, driver close case */
	return BNXT_LINK_IS_UP(bp);
}

/* ethtool .get_link_ext_state handler.  Maps the firmware's link-down
 * reason bits to the generic ethtool extended link states.  Returns
 * -ENODATA when the link is up or no known reason is set.
 */
static int bnxt_get_link_ext_state(struct net_device *dev,
				   struct ethtool_link_ext_state_info *info)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 reason;

	if (BNXT_LINK_IS_UP(bp))
		return -ENODATA;

	reason = bp->link_info.link_down_reason;
	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF) {
		info->link_ext_state = ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE;
		info->link_training = ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT;
		return 0;
	}
	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED) {
		info->link_ext_state = ETHTOOL_LINK_EXT_STATE_NO_CABLE;
		return 0;
	}
	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION) {
		info->link_ext_state = ETHTOOL_LINK_EXT_STATE_OTP_SPEED_VIOLATION;
		return 0;
	}
	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT) {
		info->link_ext_state = ETHTOOL_LINK_EXT_STATE_MODULE;
		return 0;
	}
	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST) {
		info->link_ext_state = ETHTOOL_LINK_EXT_STATE_BMC_REQUEST_DOWN;
		return 0;
	}
	return -ENODATA;
}

/* Fetch NVM device info (HWRM_NVM_GET_DEV_INFO) into @nvm_dev_info.
 * Not available on VFs.
 */
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
			       struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
{
	struct hwrm_nvm_get_dev_info_output *resp;
struct hwrm_nvm_get_dev_info_input *req;
	int rc;

	if (BNXT_VF(bp))
		return -EOPNOTSUPP;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		memcpy(nvm_dev_info, resp, sizeof(*resp));
	hwrm_req_drop(bp, req);
	return rc;
}

/* Log the common "insufficient privilege" diagnostic for -EACCES from
 * flash/reset HWRM commands.
 */
static void bnxt_print_admin_err(struct bnxt *bp)
{
	netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}

int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
			 u16 ext, u16 *index, u32 *item_length,
			 u32 *data_length);

/* Write an NVRAM directory entry via HWRM_NVM_WRITE.
 * @dir_item_len: allocated size for the entry (0 keeps existing size);
 * @data/@data_len: payload to write, may be NULL/0 to only (re)size.
 * Uses the maximum HWRM timeout since NVM writes can be slow.
 */
int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
		     u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
		     u32 dir_item_len, const u8 *data,
		     size_t data_len)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_write_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
	if (rc)
		return rc;

	if (data_len && data) {
		dma_addr_t dma_handle;
		u8 *kmem;

		kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
		if (!kmem) {
			hwrm_req_drop(bp, req);
			return -ENOMEM;
		}

		req->dir_data_length = cpu_to_le32(data_len);

		memcpy(kmem, data, data_len);
		req->host_src_addr = cpu_to_le64(dma_handle);
	}

	hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
	req->dir_type = cpu_to_le16(dir_type);
	req->dir_ordinal = cpu_to_le16(dir_ordinal);
	req->dir_ext = cpu_to_le16(dir_ext);
	req->dir_attr = cpu_to_le16(dir_attr);
	req->dir_item_length = cpu_to_le32(dir_item_len);
	rc = hwrm_req_send(bp, req);

	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}

/* Issue HWRM_FW_RESET for the given embedded processor.  Refused up
 * front if firmware has inhibited resets (e.g. a remote driver holds
 * the device).  AP resets are sent silently since failure is expected
 * on some configurations.
 */
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
			     u8 self_reset, u8 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_fw_reset_input *req;
	int rc;

	if (!bnxt_hwrm_reset_permitted(bp)) {
		netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver");
		return -EPERM;
	}

	rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
	if (rc)
		return rc;

	req->embedded_proc_type = proc_type;
	req->selfrst_status = self_reset;
	req->flags = flags;

	if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
		rc = hwrm_req_send_silent(bp, req);
	} else {
		rc = hwrm_req_send(bp, req);
		if (rc == -EACCES)
			bnxt_print_admin_err(bp);
	}
	return rc;
}

/* Map a flashed NVM directory type to the embedded processor that must
 * be reset for the new image to take effect, then issue the reset.
 */
static int bnxt_firmware_reset(struct net_device *dev,
			       enum bnxt_nvm_directory_type dir_type)
{
	u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
	u8 proc_type, flags = 0;

	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
	/* (e.g. when firmware isn't already running) */
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		/* Self-reset APE upon next PCIe reset: */
		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
		break;
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
		break;
	default:
		return -EINVAL;
	}

	return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
}

/* Full chip reset, graceful if the firmware supports hot reset. */
static int bnxt_firmware_reset_chip(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 flags = 0;

	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
		flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;

	return bnxt_hwrm_firmware_reset(dev,
					FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
					FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
					flags);
}

/* Reset only the application processor (no self-reset). */
static int bnxt_firmware_reset_ap(struct net_device *dev)
{
	return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
					FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
					0);
}

/* Validate an APE-binary-format firmware image (header signature, code
 * type, device family, trailing CRC32), flash it to NVRAM, and reset the
 * corresponding processor on success.
 */
static int bnxt_flash_firmware(struct net_device *dev,
			       u16 dir_type,
			       const u8 *fw_data,
			       size_t fw_size)
{
	int rc = 0;
	u16 code_type;
	u32 stored_crc;
	u32 calculated_crc;
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	switch (dir_type) {
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_CHIMP_PATCH:
		code_type = CODE_CHIMP_PATCH;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	case BNX_DIR_TYPE_APE_PATCH:
		code_type = CODE_APE_PATCH;
		break;
	case BNX_DIR_TYPE_KONG_FW:
		code_type = CODE_KONG_FW;
		break;
	case BNX_DIR_TYPE_KONG_PATCH:
		code_type = CODE_KONG_PATCH;
		break;
	case BNX_DIR_TYPE_BONO_FW:
		code_type = CODE_BONO_FW;
		break;
	case BNX_DIR_TYPE_BONO_PATCH:
		code_type = CODE_BONO_PATCH;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
		return -EINVAL;
	}
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
		return -EINVAL;
	}
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
		return -EINVAL;
	}
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
		return -EINVAL;
	}
	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);
	if (rc == 0)	/* Firmware update successful */
		rc = bnxt_firmware_reset(dev, dir_type);

	return rc;
}

/* Validate a microcode image via its trailer (signature, directory type,
 * trailer length, trailing CRC32) and flash it.  Unlike bnxt_flash_firmware()
 * no processor reset is performed.
 */
static int bnxt_flash_microcode(struct net_device *dev,
				u16 dir_type,
				const u8 *fw_data,
				size_t fw_size)
{
	struct bnxt_ucode_trailer *trailer;
	u32 calculated_crc;
	u32 stored_crc;
	int rc = 0;

	if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
						sizeof(*trailer)));
	if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
		netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
			   le32_to_cpu(trailer->sig));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->dir_type) != dir_type) {
		netdev_err(dev, "Expected microcode type: %d, read: %d\n",
			   dir_type, le16_to_cpu(trailer->dir_type));
		return
-EINVAL;
	}
	if (le16_to_cpu(trailer->trailer_length) <
		sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode trailer length: %d\n",
			   le16_to_cpu(trailer->trailer_length));
		return -EINVAL;
	}

	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev,
			   "CRC32 (%08lX) does not match calculated: %08lX\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);

	return rc;
}

/* Directory types whose images use the APE binary format (validated by
 * bnxt_flash_firmware()).
 */
static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		return true;
	}

	return false;
}

/* Other executable directory types (validated by bnxt_flash_microcode()). */
static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_AVS:
	case BNX_DIR_TYPE_EXP_ROM_MBA:
	case BNX_DIR_TYPE_PCIE:
	case BNX_DIR_TYPE_TSCF_UCODE:
	case BNX_DIR_TYPE_EXT_PHY:
	case BNX_DIR_TYPE_CCM:
	case BNX_DIR_TYPE_ISCSI_BOOT:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
		return true;
	}

	return false;
}

/* True for any directory type that holds executable code. */
static bool bnxt_dir_type_is_executable(u16 dir_type)
{
	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
	       bnxt_dir_type_is_other_exec_format(dir_type);
}

/* Load @filename via request_firmware() and flash it with the validator
 * appropriate for @dir_type (APE binary, microcode, or raw NVRAM write).
 */
static int bnxt_flash_firmware_from_file(struct net_device *dev,
					 u16 dir_type,
					 const char *filename)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "Error %d requesting firmware file: %s\n",
			   rc, filename);
		return rc;
	}
	if (bnxt_dir_type_is_ape_bin_format(dir_type))
		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
	else if (bnxt_dir_type_is_other_exec_format(dir_type))
		rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
	else
		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
				      0, 0, 0, fw->data, fw->size);
	release_firmware(fw);
	return rc;
}

#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
#define MSG_INVALID_PKG "PKG install error : Invalid package"
#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
#define MSG_INVALID_DEV "PKG install error : Invalid device"
#define MSG_INTERNAL_ERR "PKG install error : Internal error"
#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
#define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"

/* Convert an NVM_INSTALL_UPDATE result code to a standard errno, logging
 * a human-readable diagnostic via extack (if provided) and the kernel log.
 */
static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
				    struct netlink_ext_ack *extack)
{
	switch (result) {
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
		return -EINVAL;
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
		return -ENOPKG;
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
		return -EPERM;
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
		return -EOPNOTSUPP;
	default:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
		return -EIO;
	}
}

#define BNXT_PKG_DMA_SIZE	0x40000
#define BNXT_NVM_MORE_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
#define BNXT_NVM_LAST_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))

/* Ask firmware to defragment NVRAM (HWRM_NVM_DEFRAG); slow, so use the
 * maximum HWRM timeout.
 */
static int bnxt_hwrm_nvm_defrag(struct bnxt *bp)
{
	struct hwrm_nvm_defrag_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_DEFRAG);
	if (rc)
		return rc;
	req->flags = cpu_to_le32(NVM_DEFRAG_REQ_FLAGS_DEFRAG);
hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);

	return hwrm_req_send(bp, req);
}

/* Grow the NVRAM UPDATE directory entry to at least @fw_size bytes.
 * On -ENOSPC, defragments NVRAM once and retries the resize.
 */
static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
				    struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	bool retry = false;
	u32 item_len;
	int rc;

	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
				  &item_len, NULL);
	if (rc) {
		BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
		return rc;
	}

	if (fw_size > item_len) {
		do {
			/* Resize only (NULL data); size rounded up to a
			 * 4K NVRAM block boundary.
			 */
			rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
					      BNX_DIR_ORDINAL_FIRST, 0, 1,
					      round_up(fw_size, 4096), NULL,
					      0);

			if (rc == -ENOSPC) {
				/* One defrag attempt, then give up */
				if (retry || bnxt_hwrm_nvm_defrag(bp))
					break;
				retry = true;
			}
		} while (rc == -ENOSPC);

		if (rc) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
			return rc;
		}
	}
	return 0;
}

/* Flash a full firmware package: stream the image into the NVRAM UPDATE
 * area with batched NVM_MODIFY requests, then commit it with
 * NVM_INSTALL_UPDATE.  On a fragmentation error the install is retried
 * with defrag allowed; if firmware then reports NO_SPACE it has wiped
 * the NVM area, so the UPDATE entry is recreated and the whole flash is
 * retried once more.
 */
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
				   u32 install_type, struct netlink_ext_ack *extack)
{
	struct hwrm_nvm_install_update_input *install;
	struct hwrm_nvm_install_update_output *resp;
	struct hwrm_nvm_modify_input *modify;
	struct bnxt *bp = netdev_priv(dev);
	bool defrag_attempted = false;
	dma_addr_t dma_handle;
	u8 *kmem = NULL;
	u32 modify_len;
	u32 item_len;
	u8 cmd_err;
	u16 index;
	int rc;

	/* resize before flashing larger image than available space */
	rc = bnxt_resize_update_entry(dev, fw->size, extack);
	if (rc)
		return rc;

	bnxt_hwrm_fw_set_time(bp);

	rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
	if (rc)
		return rc;

	/* Try allocating a large DMA buffer first.  Older fw will
	 * cause excessive NVRAM erases when using small blocks.
	 */
	modify_len = roundup_pow_of_two(fw->size);
	modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
	while (1) {
		kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
		if (!kmem && modify_len > PAGE_SIZE)
			modify_len /= 2;
		else
			break;
	}
	if (!kmem) {
		hwrm_req_drop(bp, modify);
		return -ENOMEM;
	}

	rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
	if (rc) {
		hwrm_req_drop(bp, modify);
		return rc;
	}

	hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
	hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);

	hwrm_req_hold(bp, modify);
	modify->host_src_addr = cpu_to_le64(dma_handle);

	resp = hwrm_req_hold(bp, install);
	/* Install type may be passed in either 16-bit half */
	if ((install_type & 0xffff) == 0)
		install_type >>= 16;
	install->install_type = cpu_to_le32(install_type);

	do {
		u32 copied = 0, len = modify_len;

		rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
					  BNX_DIR_ORDINAL_FIRST,
					  BNX_DIR_EXT_NONE,
					  &index, &item_len, NULL);
		if (rc) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
			break;
		}
		if (fw->size > item_len) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
			rc = -EFBIG;
			break;
		}

		modify->dir_idx = cpu_to_le16(index);

		/* Batch mode when the image spans multiple DMA chunks */
		if (fw->size > modify_len)
			modify->flags = BNXT_NVM_MORE_FLAG;
		while (copied < fw->size) {
			u32 balance = fw->size - copied;

			if (balance <= modify_len) {
				len = balance;
				if (copied)
					modify->flags |= BNXT_NVM_LAST_FLAG;
			}
			memcpy(kmem, fw->data + copied, len);
			modify->len = cpu_to_le32(len);
			modify->offset = cpu_to_le32(copied);
			rc = hwrm_req_send(bp, modify);
			if (rc)
				goto pkg_abort;
			copied += len;
		}

		rc = hwrm_req_send_silent(bp, install);
		if (!rc)
			break;

		if (defrag_attempted) {
			/* We have tried to defragment already in the previous
			 * iteration.  Return with the result for INSTALL_UPDATE
			 */
			break;
		}

		cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

		switch (cmd_err) {
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
			BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
			rc = -EALREADY;
			break;
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
			install->flags =
				cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);

			rc = hwrm_req_send_silent(bp, install);
			if (!rc)
				break;

			cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

			if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
				/* FW has cleared NVM area, driver will create
				 * UPDATE directory and try the flash again
				 */
				defrag_attempted = true;
				install->flags = 0;
				rc = bnxt_flash_nvram(bp->dev,
						      BNX_DIR_TYPE_UPDATE,
						      BNX_DIR_ORDINAL_FIRST,
						      0, 0, item_len, NULL, 0);
				if (!rc)
					break;
			}
			fallthrough;
		default:
			BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
		}
	} while (defrag_attempted && !rc);

pkg_abort:
	hwrm_req_drop(bp, modify);
	hwrm_req_drop(bp, install);

	if (resp->result) {
		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
			   (s8)resp->result, (int)resp->problem_item);
		rc = nvm_update_err_to_stderr(dev, resp->result, extack);
	}
	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}

/* Load a firmware package file and hand it to
 * bnxt_flash_package_from_fw_obj().
 */
static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
					u32 install_type, struct netlink_ext_ack *extack)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "PKG error %d requesting file: %s\n",
			   rc, filename);
		return rc;
	}

	rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);

	release_firmware(fw);

	return rc;
}

/* ethtool .flash_device handler.  ALL_REGIONS (or a region value above
 * 0xffff) selects a whole-package flash; otherwise the region is an NVM
 * directory type flashed individually.  PF only.
 */
static int bnxt_flash_device(struct net_device *dev,
			     struct ethtool_flash *flash)
{
	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
		netdev_err(dev, "flashdev not supported from a virtual function\n");
		return -EINVAL;
	}

	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
	    flash->region > 0xffff)
		return bnxt_flash_package_from_file(dev, flash->data,
						    flash->region, NULL);

	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}

/* Query the NVRAM directory entry count and per-entry length
 * (HWRM_NVM_GET_DIR_INFO).
 */
static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
{
	struct hwrm_nvm_get_dir_info_output *output;
	struct hwrm_nvm_get_dir_info_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
	if (rc)
		return rc;

	output = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		*entries = le32_to_cpu(output->entries);
		*length = le32_to_cpu(output->entry_length);
	}
	hwrm_req_drop(bp, req);
	return rc;
}

/* ethtool .get_eeprom_len handler.  VFs have no NVRAM access. */
static int bnxt_get_eeprom_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_VF(bp))
		return 0;

	/* The -1 return value allows the entire 32-bit range of offsets to be
	 * passed via the ethtool command-line utility.
4161 */ 4162 return -1; 4163 } 4164 4165 static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) 4166 { 4167 struct bnxt *bp = netdev_priv(dev); 4168 int rc; 4169 u32 dir_entries; 4170 u32 entry_length; 4171 u8 *buf; 4172 size_t buflen; 4173 dma_addr_t dma_handle; 4174 struct hwrm_nvm_get_dir_entries_input *req; 4175 4176 rc = nvm_get_dir_info(dev, &dir_entries, &entry_length); 4177 if (rc != 0) 4178 return rc; 4179 4180 if (!dir_entries || !entry_length) 4181 return -EIO; 4182 4183 /* Insert 2 bytes of directory info (count and size of entries) */ 4184 if (len < 2) 4185 return -EINVAL; 4186 4187 *data++ = dir_entries; 4188 *data++ = entry_length; 4189 len -= 2; 4190 memset(data, 0xff, len); 4191 4192 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES); 4193 if (rc) 4194 return rc; 4195 4196 buflen = mul_u32_u32(dir_entries, entry_length); 4197 buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle); 4198 if (!buf) { 4199 hwrm_req_drop(bp, req); 4200 return -ENOMEM; 4201 } 4202 req->host_dest_addr = cpu_to_le64(dma_handle); 4203 4204 hwrm_req_hold(bp, req); /* hold the slice */ 4205 rc = hwrm_req_send(bp, req); 4206 if (rc == 0) 4207 memcpy(data, buf, len > buflen ? 
buflen : len); 4208 hwrm_req_drop(bp, req); 4209 return rc; 4210 } 4211 4212 int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, 4213 u32 length, u8 *data) 4214 { 4215 struct bnxt *bp = netdev_priv(dev); 4216 int rc; 4217 u8 *buf; 4218 dma_addr_t dma_handle; 4219 struct hwrm_nvm_read_input *req; 4220 4221 if (!length) 4222 return -EINVAL; 4223 4224 rc = hwrm_req_init(bp, req, HWRM_NVM_READ); 4225 if (rc) 4226 return rc; 4227 4228 buf = hwrm_req_dma_slice(bp, req, length, &dma_handle); 4229 if (!buf) { 4230 hwrm_req_drop(bp, req); 4231 return -ENOMEM; 4232 } 4233 4234 req->host_dest_addr = cpu_to_le64(dma_handle); 4235 req->dir_idx = cpu_to_le16(index); 4236 req->offset = cpu_to_le32(offset); 4237 req->len = cpu_to_le32(length); 4238 4239 hwrm_req_hold(bp, req); /* hold the slice */ 4240 rc = hwrm_req_send(bp, req); 4241 if (rc == 0) 4242 memcpy(data, buf, length); 4243 hwrm_req_drop(bp, req); 4244 return rc; 4245 } 4246 4247 int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, 4248 u16 ext, u16 *index, u32 *item_length, 4249 u32 *data_length) 4250 { 4251 struct hwrm_nvm_find_dir_entry_output *output; 4252 struct hwrm_nvm_find_dir_entry_input *req; 4253 struct bnxt *bp = netdev_priv(dev); 4254 int rc; 4255 4256 rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY); 4257 if (rc) 4258 return rc; 4259 4260 req->enables = 0; 4261 req->dir_idx = 0; 4262 req->dir_type = cpu_to_le16(type); 4263 req->dir_ordinal = cpu_to_le16(ordinal); 4264 req->dir_ext = cpu_to_le16(ext); 4265 req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; 4266 output = hwrm_req_hold(bp, req); 4267 rc = hwrm_req_send_silent(bp, req); 4268 if (rc == 0) { 4269 if (index) 4270 *index = le16_to_cpu(output->dir_idx); 4271 if (item_length) 4272 *item_length = le32_to_cpu(output->dir_item_length); 4273 if (data_length) 4274 *data_length = le32_to_cpu(output->dir_data_length); 4275 } 4276 hwrm_req_drop(bp, req); 4277 return rc; 4278 } 4279 4280 static char 
*bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen) 4281 { 4282 char *retval = NULL; 4283 char *p; 4284 char *value; 4285 int field = 0; 4286 4287 if (datalen < 1) 4288 return NULL; 4289 /* null-terminate the log data (removing last '\n'): */ 4290 data[datalen - 1] = 0; 4291 for (p = data; *p != 0; p++) { 4292 field = 0; 4293 retval = NULL; 4294 while (*p != 0 && *p != '\n') { 4295 value = p; 4296 while (*p != 0 && *p != '\t' && *p != '\n') 4297 p++; 4298 if (field == desired_field) 4299 retval = value; 4300 if (*p != '\t') 4301 break; 4302 *p = 0; 4303 field++; 4304 p++; 4305 } 4306 if (*p == 0) 4307 break; 4308 *p = 0; 4309 } 4310 return retval; 4311 } 4312 4313 int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size) 4314 { 4315 struct bnxt *bp = netdev_priv(dev); 4316 u16 index = 0; 4317 char *pkgver; 4318 u32 pkglen; 4319 u8 *pkgbuf; 4320 int rc; 4321 4322 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, 4323 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 4324 &index, NULL, &pkglen); 4325 if (rc) 4326 return rc; 4327 4328 pkgbuf = kzalloc(pkglen, GFP_KERNEL); 4329 if (!pkgbuf) { 4330 dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n", 4331 pkglen); 4332 return -ENOMEM; 4333 } 4334 4335 rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf); 4336 if (rc) 4337 goto err; 4338 4339 pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf, 4340 pkglen); 4341 if (pkgver && *pkgver != 0 && isdigit(*pkgver)) 4342 strscpy(ver, pkgver, size); 4343 else 4344 rc = -ENOENT; 4345 4346 err: 4347 kfree(pkgbuf); 4348 4349 return rc; 4350 } 4351 4352 static void bnxt_get_pkgver(struct net_device *dev) 4353 { 4354 struct bnxt *bp = netdev_priv(dev); 4355 char buf[FW_VER_STR_LEN - 5]; 4356 int len; 4357 4358 if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) { 4359 len = strlen(bp->fw_ver_str); 4360 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len, 4361 "/pkg %s", buf); 4362 } 4363 } 4364 4365 static int 
bnxt_get_eeprom(struct net_device *dev, 4366 struct ethtool_eeprom *eeprom, 4367 u8 *data) 4368 { 4369 u32 index; 4370 u32 offset; 4371 4372 if (eeprom->offset == 0) /* special offset value to get directory */ 4373 return bnxt_get_nvram_directory(dev, eeprom->len, data); 4374 4375 index = eeprom->offset >> 24; 4376 offset = eeprom->offset & 0xffffff; 4377 4378 if (index == 0) { 4379 netdev_err(dev, "unsupported index value: %d\n", index); 4380 return -EINVAL; 4381 } 4382 4383 return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data); 4384 } 4385 4386 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index) 4387 { 4388 struct hwrm_nvm_erase_dir_entry_input *req; 4389 struct bnxt *bp = netdev_priv(dev); 4390 int rc; 4391 4392 rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY); 4393 if (rc) 4394 return rc; 4395 4396 req->dir_idx = cpu_to_le16(index); 4397 return hwrm_req_send(bp, req); 4398 } 4399 4400 static int bnxt_set_eeprom(struct net_device *dev, 4401 struct ethtool_eeprom *eeprom, 4402 u8 *data) 4403 { 4404 struct bnxt *bp = netdev_priv(dev); 4405 u8 index, dir_op; 4406 u16 type, ext, ordinal, attr; 4407 4408 if (!BNXT_PF(bp)) { 4409 netdev_err(dev, "NVM write not supported from a virtual function\n"); 4410 return -EINVAL; 4411 } 4412 4413 type = eeprom->magic >> 16; 4414 4415 if (type == 0xffff) { /* special value for directory operations */ 4416 index = eeprom->magic & 0xff; 4417 dir_op = eeprom->magic >> 8; 4418 if (index == 0) 4419 return -EINVAL; 4420 switch (dir_op) { 4421 case 0x0e: /* erase */ 4422 if (eeprom->offset != ~eeprom->magic) 4423 return -EINVAL; 4424 return bnxt_erase_nvram_directory(dev, index - 1); 4425 default: 4426 return -EINVAL; 4427 } 4428 } 4429 4430 /* Create or re-write an NVM item: */ 4431 if (bnxt_dir_type_is_executable(type)) 4432 return -EOPNOTSUPP; 4433 ext = eeprom->magic & 0xffff; 4434 ordinal = eeprom->offset >> 16; 4435 attr = eeprom->offset & 0xffff; 4436 4437 return bnxt_flash_nvram(dev, 
type, ordinal, ext, attr, 0, data, 4438 eeprom->len); 4439 } 4440 4441 static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata) 4442 { 4443 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); 4444 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp); 4445 struct bnxt *bp = netdev_priv(dev); 4446 struct ethtool_keee *eee = &bp->eee; 4447 struct bnxt_link_info *link_info = &bp->link_info; 4448 int rc = 0; 4449 4450 if (!BNXT_PHY_CFG_ABLE(bp)) 4451 return -EOPNOTSUPP; 4452 4453 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 4454 return -EOPNOTSUPP; 4455 4456 mutex_lock(&bp->link_lock); 4457 _bnxt_fw_to_linkmode(advertising, link_info->advertising); 4458 if (!edata->eee_enabled) 4459 goto eee_ok; 4460 4461 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 4462 netdev_warn(dev, "EEE requires autoneg\n"); 4463 rc = -EINVAL; 4464 goto eee_exit; 4465 } 4466 if (edata->tx_lpi_enabled) { 4467 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi || 4468 edata->tx_lpi_timer < bp->lpi_tmr_lo)) { 4469 netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n", 4470 bp->lpi_tmr_lo, bp->lpi_tmr_hi); 4471 rc = -EINVAL; 4472 goto eee_exit; 4473 } else if (!bp->lpi_tmr_hi) { 4474 edata->tx_lpi_timer = eee->tx_lpi_timer; 4475 } 4476 } 4477 if (linkmode_empty(edata->advertised)) { 4478 linkmode_and(edata->advertised, advertising, eee->supported); 4479 } else if (linkmode_andnot(tmp, edata->advertised, advertising)) { 4480 netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n"); 4481 rc = -EINVAL; 4482 goto eee_exit; 4483 } 4484 4485 linkmode_copy(eee->advertised, edata->advertised); 4486 eee->tx_lpi_enabled = edata->tx_lpi_enabled; 4487 eee->tx_lpi_timer = edata->tx_lpi_timer; 4488 eee_ok: 4489 eee->eee_enabled = edata->eee_enabled; 4490 4491 if (netif_running(dev)) 4492 rc = bnxt_hwrm_set_link_setting(bp, false, true); 4493 4494 eee_exit: 4495 mutex_unlock(&bp->link_lock); 4496 return rc; 4497 } 4498 4499 static int bnxt_get_eee(struct net_device 
*dev, struct ethtool_keee *edata) 4500 { 4501 struct bnxt *bp = netdev_priv(dev); 4502 4503 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 4504 return -EOPNOTSUPP; 4505 4506 *edata = bp->eee; 4507 if (!bp->eee.eee_enabled) { 4508 /* Preserve tx_lpi_timer so that the last value will be used 4509 * by default when it is re-enabled. 4510 */ 4511 linkmode_zero(edata->advertised); 4512 edata->tx_lpi_enabled = 0; 4513 } 4514 4515 if (!bp->eee.eee_active) 4516 linkmode_zero(edata->lp_advertised); 4517 4518 return 0; 4519 } 4520 4521 static int bnxt_hwrm_pfcwd_qcfg(struct bnxt *bp, u16 *val) 4522 { 4523 struct hwrm_queue_pfcwd_timeout_qcfg_output *resp; 4524 struct hwrm_queue_pfcwd_timeout_qcfg_input *req; 4525 int rc; 4526 4527 rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCFG); 4528 if (rc) 4529 return rc; 4530 resp = hwrm_req_hold(bp, req); 4531 rc = hwrm_req_send(bp, req); 4532 if (!rc) 4533 *val = le16_to_cpu(resp->pfcwd_timeout_value); 4534 hwrm_req_drop(bp, req); 4535 return rc; 4536 } 4537 4538 static int bnxt_hwrm_pfcwd_cfg(struct bnxt *bp, u16 val) 4539 { 4540 struct hwrm_queue_pfcwd_timeout_cfg_input *req; 4541 int rc; 4542 4543 rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_CFG); 4544 if (rc) 4545 return rc; 4546 req->pfcwd_timeout_value = cpu_to_le16(val); 4547 rc = hwrm_req_send(bp, req); 4548 return rc; 4549 } 4550 4551 static int bnxt_set_tunable(struct net_device *dev, 4552 const struct ethtool_tunable *tuna, 4553 const void *data) 4554 { 4555 struct bnxt *bp = netdev_priv(dev); 4556 u32 rx_copybreak, val; 4557 4558 switch (tuna->id) { 4559 case ETHTOOL_RX_COPYBREAK: 4560 rx_copybreak = *(u32 *)data; 4561 if (rx_copybreak > BNXT_MAX_RX_COPYBREAK) 4562 return -ERANGE; 4563 if (rx_copybreak != bp->rx_copybreak) { 4564 if (netif_running(dev)) 4565 return -EBUSY; 4566 bp->rx_copybreak = rx_copybreak; 4567 } 4568 return 0; 4569 case ETHTOOL_PFC_PREVENTION_TOUT: 4570 if (BNXT_VF(bp) || !bp->max_pfcwd_tmo_ms) 4571 return -EOPNOTSUPP; 4572 4573 val = 
*(u16 *)data; 4574 if (val > bp->max_pfcwd_tmo_ms && 4575 val != PFC_STORM_PREVENTION_AUTO) 4576 return -EINVAL; 4577 return bnxt_hwrm_pfcwd_cfg(bp, val); 4578 default: 4579 return -EOPNOTSUPP; 4580 } 4581 } 4582 4583 static int bnxt_get_tunable(struct net_device *dev, 4584 const struct ethtool_tunable *tuna, void *data) 4585 { 4586 struct bnxt *bp = netdev_priv(dev); 4587 4588 switch (tuna->id) { 4589 case ETHTOOL_RX_COPYBREAK: 4590 *(u32 *)data = bp->rx_copybreak; 4591 break; 4592 case ETHTOOL_PFC_PREVENTION_TOUT: 4593 if (!bp->max_pfcwd_tmo_ms) 4594 return -EOPNOTSUPP; 4595 return bnxt_hwrm_pfcwd_qcfg(bp, data); 4596 default: 4597 return -EOPNOTSUPP; 4598 } 4599 4600 return 0; 4601 } 4602 4603 static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, 4604 u16 page_number, u8 bank, 4605 u16 start_addr, u16 data_length, 4606 u8 *buf) 4607 { 4608 struct hwrm_port_phy_i2c_read_output *output; 4609 struct hwrm_port_phy_i2c_read_input *req; 4610 int rc, byte_offset = 0; 4611 4612 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ); 4613 if (rc) 4614 return rc; 4615 4616 output = hwrm_req_hold(bp, req); 4617 req->i2c_slave_addr = i2c_addr; 4618 req->page_number = cpu_to_le16(page_number); 4619 req->port_id = cpu_to_le16(bp->pf.port_id); 4620 do { 4621 u16 xfer_size; 4622 4623 xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE); 4624 data_length -= xfer_size; 4625 req->page_offset = cpu_to_le16(start_addr + byte_offset); 4626 req->data_length = xfer_size; 4627 req->enables = 4628 cpu_to_le32((start_addr + byte_offset ? 4629 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 4630 0) | 4631 (bank ? 
4632 PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER : 4633 0)); 4634 rc = hwrm_req_send(bp, req); 4635 if (!rc) 4636 memcpy(buf + byte_offset, output->data, xfer_size); 4637 byte_offset += xfer_size; 4638 } while (!rc && data_length > 0); 4639 hwrm_req_drop(bp, req); 4640 4641 return rc; 4642 } 4643 4644 static int bnxt_get_module_info(struct net_device *dev, 4645 struct ethtool_modinfo *modinfo) 4646 { 4647 u8 data[SFF_DIAG_SUPPORT_OFFSET + 1]; 4648 struct bnxt *bp = netdev_priv(dev); 4649 int rc; 4650 4651 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4652 return -EPERM; 4653 4654 /* No point in going further if phy status indicates 4655 * module is not inserted or if it is powered down or 4656 * if it is of type 10GBase-T 4657 */ 4658 if (bp->link_info.module_status > 4659 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG) 4660 return -EOPNOTSUPP; 4661 4662 /* This feature is not supported in older firmware versions */ 4663 if (bp->hwrm_spec_code < 0x10202) 4664 return -EOPNOTSUPP; 4665 4666 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0, 4667 SFF_DIAG_SUPPORT_OFFSET + 1, 4668 data); 4669 if (!rc) { 4670 u8 module_id = data[0]; 4671 u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET]; 4672 4673 switch (module_id) { 4674 case SFF_MODULE_ID_SFP: 4675 modinfo->type = ETH_MODULE_SFF_8472; 4676 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 4677 if (!diag_supported) 4678 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 4679 break; 4680 case SFF_MODULE_ID_QSFP: 4681 case SFF_MODULE_ID_QSFP_PLUS: 4682 modinfo->type = ETH_MODULE_SFF_8436; 4683 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 4684 break; 4685 case SFF_MODULE_ID_QSFP28: 4686 modinfo->type = ETH_MODULE_SFF_8636; 4687 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 4688 break; 4689 default: 4690 rc = -EOPNOTSUPP; 4691 break; 4692 } 4693 } 4694 return rc; 4695 } 4696 4697 static int bnxt_get_module_eeprom(struct net_device *dev, 4698 struct ethtool_eeprom *eeprom, 4699 u8 *data) 4700 { 4701 struct bnxt *bp = 
netdev_priv(dev); 4702 u16 start = eeprom->offset, length = eeprom->len; 4703 int rc = 0; 4704 4705 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4706 return -EPERM; 4707 4708 memset(data, 0, eeprom->len); 4709 4710 /* Read A0 portion of the EEPROM */ 4711 if (start < ETH_MODULE_SFF_8436_LEN) { 4712 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN) 4713 length = ETH_MODULE_SFF_8436_LEN - start; 4714 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 4715 start, length, data); 4716 if (rc) 4717 return rc; 4718 start += length; 4719 data += length; 4720 length = eeprom->len - length; 4721 } 4722 4723 /* Read A2 portion of the EEPROM */ 4724 if (length) { 4725 start -= ETH_MODULE_SFF_8436_LEN; 4726 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0, 4727 start, length, data); 4728 } 4729 return rc; 4730 } 4731 4732 static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack) 4733 { 4734 if (bp->link_info.module_status <= 4735 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG) 4736 return 0; 4737 4738 if (bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET || 4739 bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE){ 4740 NL_SET_ERR_MSG_MOD(extack, "Operation not supported as PHY type is Base-T"); 4741 return -EOPNOTSUPP; 4742 } 4743 switch (bp->link_info.module_status) { 4744 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 4745 NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down"); 4746 break; 4747 case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED: 4748 NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted"); 4749 break; 4750 case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT: 4751 NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault"); 4752 break; 4753 default: 4754 NL_SET_ERR_MSG_MOD(extack, "Unknown error"); 4755 break; 4756 } 4757 return -EINVAL; 4758 } 4759 4760 static int 4761 bnxt_mod_eeprom_by_page_precheck(struct bnxt *bp, 4762 const struct 
ethtool_module_eeprom *page_data, 4763 struct netlink_ext_ack *extack) 4764 { 4765 int rc; 4766 4767 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { 4768 NL_SET_ERR_MSG_MOD(extack, 4769 "Module read/write not permitted on untrusted VF"); 4770 return -EPERM; 4771 } 4772 4773 rc = bnxt_get_module_status(bp, extack); 4774 if (rc) 4775 return rc; 4776 4777 if (bp->hwrm_spec_code < 0x10202) { 4778 NL_SET_ERR_MSG_MOD(extack, "Firmware version too old"); 4779 return -EINVAL; 4780 } 4781 4782 if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) { 4783 NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection"); 4784 return -EINVAL; 4785 } 4786 return 0; 4787 } 4788 4789 static int bnxt_get_module_eeprom_by_page(struct net_device *dev, 4790 const struct ethtool_module_eeprom *page_data, 4791 struct netlink_ext_ack *extack) 4792 { 4793 struct bnxt *bp = netdev_priv(dev); 4794 int rc; 4795 4796 rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack); 4797 if (rc) 4798 return rc; 4799 4800 rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1, 4801 page_data->page, page_data->bank, 4802 page_data->offset, 4803 page_data->length, 4804 page_data->data); 4805 if (rc) { 4806 NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom read failed"); 4807 return rc; 4808 } 4809 return page_data->length; 4810 } 4811 4812 static int bnxt_write_sfp_module_eeprom_info(struct bnxt *bp, 4813 const struct ethtool_module_eeprom *page) 4814 { 4815 struct hwrm_port_phy_i2c_write_input *req; 4816 int bytes_written = 0; 4817 int rc; 4818 4819 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_WRITE); 4820 if (rc) 4821 return rc; 4822 4823 hwrm_req_hold(bp, req); 4824 req->i2c_slave_addr = page->i2c_address << 1; 4825 req->page_number = cpu_to_le16(page->page); 4826 req->bank_number = page->bank; 4827 req->port_id = cpu_to_le16(bp->pf.port_id); 4828 req->enables = cpu_to_le32(PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET | 4829 PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER); 4830 
4831 while (bytes_written < page->length) { 4832 u16 xfer_size; 4833 4834 xfer_size = min_t(u16, page->length - bytes_written, 4835 BNXT_MAX_PHY_I2C_RESP_SIZE); 4836 req->page_offset = cpu_to_le16(page->offset + bytes_written); 4837 req->data_length = xfer_size; 4838 memcpy(req->data, page->data + bytes_written, xfer_size); 4839 rc = hwrm_req_send(bp, req); 4840 if (rc) 4841 break; 4842 bytes_written += xfer_size; 4843 } 4844 4845 hwrm_req_drop(bp, req); 4846 return rc; 4847 } 4848 4849 static int bnxt_set_module_eeprom_by_page(struct net_device *dev, 4850 const struct ethtool_module_eeprom *page_data, 4851 struct netlink_ext_ack *extack) 4852 { 4853 struct bnxt *bp = netdev_priv(dev); 4854 int rc; 4855 4856 rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack); 4857 if (rc) 4858 return rc; 4859 4860 rc = bnxt_write_sfp_module_eeprom_info(bp, page_data); 4861 if (rc) { 4862 NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom write failed"); 4863 return rc; 4864 } 4865 return page_data->length; 4866 } 4867 4868 static int bnxt_nway_reset(struct net_device *dev) 4869 { 4870 int rc = 0; 4871 4872 struct bnxt *bp = netdev_priv(dev); 4873 struct bnxt_link_info *link_info = &bp->link_info; 4874 4875 if (!BNXT_PHY_CFG_ABLE(bp)) 4876 return -EOPNOTSUPP; 4877 4878 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) 4879 return -EINVAL; 4880 4881 if (netif_running(dev)) 4882 rc = bnxt_hwrm_set_link_setting(bp, true, false); 4883 4884 return rc; 4885 } 4886 4887 static int bnxt_set_phys_id(struct net_device *dev, 4888 enum ethtool_phys_id_state state) 4889 { 4890 struct hwrm_port_led_cfg_input *req; 4891 struct bnxt *bp = netdev_priv(dev); 4892 struct bnxt_pf_info *pf = &bp->pf; 4893 struct bnxt_led_cfg *led_cfg; 4894 u8 led_state; 4895 __le16 duration; 4896 int rc, i; 4897 4898 if (!bp->num_leds || BNXT_VF(bp)) 4899 return -EOPNOTSUPP; 4900 4901 if (state == ETHTOOL_ID_ACTIVE) { 4902 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT; 4903 duration = cpu_to_le16(500); 4904 } else 
if (state == ETHTOOL_ID_INACTIVE) { 4905 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT; 4906 duration = cpu_to_le16(0); 4907 } else { 4908 return -EINVAL; 4909 } 4910 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG); 4911 if (rc) 4912 return rc; 4913 4914 req->port_id = cpu_to_le16(pf->port_id); 4915 req->num_leds = bp->num_leds; 4916 led_cfg = (struct bnxt_led_cfg *)&req->led0_id; 4917 for (i = 0; i < bp->num_leds; i++, led_cfg++) { 4918 req->enables |= BNXT_LED_DFLT_ENABLES(i); 4919 led_cfg->led_id = bp->leds[i].led_id; 4920 led_cfg->led_state = led_state; 4921 led_cfg->led_blink_on = duration; 4922 led_cfg->led_blink_off = duration; 4923 led_cfg->led_group_id = bp->leds[i].led_group_id; 4924 } 4925 return hwrm_req_send(bp, req); 4926 } 4927 4928 static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring) 4929 { 4930 struct hwrm_selftest_irq_input *req; 4931 int rc; 4932 4933 rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ); 4934 if (rc) 4935 return rc; 4936 4937 req->cmpl_ring = cpu_to_le16(cmpl_ring); 4938 return hwrm_req_send(bp, req); 4939 } 4940 4941 static int bnxt_test_irq(struct bnxt *bp) 4942 { 4943 int i; 4944 4945 for (i = 0; i < bp->cp_nr_rings; i++) { 4946 u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id; 4947 int rc; 4948 4949 rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring); 4950 if (rc) 4951 return rc; 4952 } 4953 return 0; 4954 } 4955 4956 static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable) 4957 { 4958 struct hwrm_port_mac_cfg_input *req; 4959 int rc; 4960 4961 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); 4962 if (rc) 4963 return rc; 4964 4965 req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK); 4966 if (enable) 4967 req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL; 4968 else 4969 req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; 4970 return hwrm_req_send(bp, req); 4971 } 4972 4973 static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds) 4974 { 4975 struct hwrm_port_phy_qcaps_output *resp; 4976 struct hwrm_port_phy_qcaps_input *req; 
4977 int rc; 4978 4979 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); 4980 if (rc) 4981 return rc; 4982 4983 resp = hwrm_req_hold(bp, req); 4984 rc = hwrm_req_send(bp, req); 4985 if (!rc) 4986 *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode); 4987 4988 hwrm_req_drop(bp, req); 4989 return rc; 4990 } 4991 4992 static int bnxt_disable_an_for_lpbk(struct bnxt *bp, 4993 struct hwrm_port_phy_cfg_input *req) 4994 { 4995 struct bnxt_link_info *link_info = &bp->link_info; 4996 u16 fw_advertising; 4997 u16 fw_speed; 4998 int rc; 4999 5000 if (!link_info->autoneg || 5001 (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK)) 5002 return 0; 5003 5004 rc = bnxt_query_force_speeds(bp, &fw_advertising); 5005 if (rc) 5006 return rc; 5007 5008 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; 5009 if (BNXT_LINK_IS_UP(bp)) 5010 fw_speed = bp->link_info.link_speed; 5011 else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB) 5012 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; 5013 else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB) 5014 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; 5015 else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB) 5016 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; 5017 else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB) 5018 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; 5019 5020 req->force_link_speed = cpu_to_le16(fw_speed); 5021 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE | 5022 PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 5023 rc = hwrm_req_send(bp, req); 5024 req->flags = 0; 5025 req->force_link_speed = cpu_to_le16(0); 5026 return rc; 5027 } 5028 5029 static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext) 5030 { 5031 struct hwrm_port_phy_cfg_input *req; 5032 int rc; 5033 5034 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 5035 if (rc) 5036 return rc; 5037 5038 /* prevent bnxt_disable_an_for_lpbk() from consuming the request */ 5039 hwrm_req_hold(bp, req); 5040 5041 if (enable) { 5042 bnxt_disable_an_for_lpbk(bp, 
req); 5043 if (ext) 5044 req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL; 5045 else 5046 req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; 5047 } else { 5048 req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE; 5049 } 5050 req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK); 5051 rc = hwrm_req_send(bp, req); 5052 hwrm_req_drop(bp, req); 5053 return rc; 5054 } 5055 5056 static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 5057 u32 raw_cons, int pkt_size) 5058 { 5059 struct bnxt_napi *bnapi = cpr->bnapi; 5060 struct bnxt_rx_ring_info *rxr; 5061 struct bnxt_sw_rx_bd *rx_buf; 5062 struct rx_cmp *rxcmp; 5063 u16 cp_cons, cons; 5064 u8 *data; 5065 u32 len; 5066 int i; 5067 5068 rxr = bnapi->rx_ring; 5069 cp_cons = RING_CMP(raw_cons); 5070 rxcmp = (struct rx_cmp *) 5071 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 5072 cons = rxcmp->rx_cmp_opaque; 5073 rx_buf = &rxr->rx_buf_ring[cons]; 5074 data = rx_buf->data_ptr; 5075 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; 5076 if (len != pkt_size) 5077 return -EIO; 5078 i = ETH_ALEN; 5079 if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr)) 5080 return -EIO; 5081 i += ETH_ALEN; 5082 for ( ; i < pkt_size; i++) { 5083 if (data[i] != (u8)(i & 0xff)) 5084 return -EIO; 5085 } 5086 return 0; 5087 } 5088 5089 static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 5090 int pkt_size) 5091 { 5092 struct tx_cmp *txcmp; 5093 int rc = -EIO; 5094 u32 raw_cons; 5095 u32 cons; 5096 int i; 5097 5098 raw_cons = cpr->cp_raw_cons; 5099 for (i = 0; i < 200; i++) { 5100 cons = RING_CMP(raw_cons); 5101 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 5102 5103 if (!TX_CMP_VALID(txcmp, raw_cons)) { 5104 udelay(5); 5105 continue; 5106 } 5107 5108 /* The valid test of the entry must be done first before 5109 * reading any further. 
5110 */ 5111 dma_rmb(); 5112 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP || 5113 TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) { 5114 rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size); 5115 raw_cons = NEXT_RAW_CMP(raw_cons); 5116 raw_cons = NEXT_RAW_CMP(raw_cons); 5117 break; 5118 } 5119 raw_cons = NEXT_RAW_CMP(raw_cons); 5120 } 5121 cpr->cp_raw_cons = raw_cons; 5122 return rc; 5123 } 5124 5125 static int bnxt_run_loopback(struct bnxt *bp) 5126 { 5127 struct bnxt_tx_ring_info *txr = &bp->tx_ring[0]; 5128 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 5129 struct bnxt_cp_ring_info *cpr; 5130 int pkt_size, i = 0; 5131 struct sk_buff *skb; 5132 dma_addr_t map; 5133 u8 *data; 5134 int rc; 5135 5136 cpr = &rxr->bnapi->cp_ring; 5137 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5138 cpr = rxr->rx_cpr; 5139 pkt_size = min(bp->dev->mtu + ETH_HLEN, max(BNXT_DEFAULT_RX_COPYBREAK, 5140 bp->rx_copybreak)); 5141 skb = netdev_alloc_skb(bp->dev, pkt_size); 5142 if (!skb) 5143 return -ENOMEM; 5144 data = skb_put(skb, pkt_size); 5145 ether_addr_copy(&data[i], bp->dev->dev_addr); 5146 i += ETH_ALEN; 5147 ether_addr_copy(&data[i], bp->dev->dev_addr); 5148 i += ETH_ALEN; 5149 for ( ; i < pkt_size; i++) 5150 data[i] = (u8)(i & 0xff); 5151 5152 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, 5153 DMA_TO_DEVICE); 5154 if (dma_mapping_error(&bp->pdev->dev, map)) { 5155 dev_kfree_skb(skb); 5156 return -EIO; 5157 } 5158 bnxt_xmit_bd(bp, txr, map, pkt_size, NULL); 5159 5160 /* Sync BD data before updating doorbell */ 5161 wmb(); 5162 5163 bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); 5164 rc = bnxt_poll_loopback(bp, cpr, pkt_size); 5165 5166 dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE); 5167 dev_kfree_skb(skb); 5168 return rc; 5169 } 5170 5171 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results) 5172 { 5173 struct hwrm_selftest_exec_output *resp; 5174 struct hwrm_selftest_exec_input *req; 5175 int rc; 5176 5177 rc = hwrm_req_init(bp, req, 
HWRM_SELFTEST_EXEC); 5178 if (rc) 5179 return rc; 5180 5181 hwrm_req_timeout(bp, req, bp->test_info->timeout); 5182 req->flags = test_mask; 5183 5184 resp = hwrm_req_hold(bp, req); 5185 rc = hwrm_req_send(bp, req); 5186 *test_results = resp->test_success; 5187 hwrm_req_drop(bp, req); 5188 return rc; 5189 } 5190 5191 #define BNXT_DRV_TESTS 4 5192 #define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS) 5193 #define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1) 5194 #define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2) 5195 #define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3) 5196 5197 static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, 5198 u64 *buf) 5199 { 5200 struct bnxt *bp = netdev_priv(dev); 5201 bool do_ext_lpbk = false; 5202 bool offline = false; 5203 u8 test_results = 0; 5204 u8 test_mask = 0; 5205 int rc = 0, i; 5206 5207 if (!bp->num_tests || !BNXT_PF(bp)) 5208 return; 5209 5210 memset(buf, 0, sizeof(u64) * bp->num_tests); 5211 if (etest->flags & ETH_TEST_FL_OFFLINE && 5212 bnxt_ulp_registered(bp->edev)) { 5213 etest->flags |= ETH_TEST_FL_FAILED; 5214 netdev_warn(dev, "Offline tests cannot be run with RoCE driver loaded\n"); 5215 return; 5216 } 5217 5218 if (!netif_running(dev)) { 5219 etest->flags |= ETH_TEST_FL_FAILED; 5220 return; 5221 } 5222 5223 if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) && 5224 (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK)) 5225 do_ext_lpbk = true; 5226 5227 if (etest->flags & ETH_TEST_FL_OFFLINE) { 5228 if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) { 5229 etest->flags |= ETH_TEST_FL_FAILED; 5230 netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n"); 5231 return; 5232 } 5233 offline = true; 5234 } 5235 5236 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) { 5237 u8 bit_val = 1 << i; 5238 5239 if (!(bp->test_info->offline_mask & bit_val)) 5240 test_mask |= bit_val; 5241 else if (offline) 5242 test_mask |= bit_val; 5243 } 5244 if (!offline) { 5245 
bnxt_run_fw_tests(bp, test_mask, &test_results); 5246 } else { 5247 bnxt_close_nic(bp, true, false); 5248 bnxt_run_fw_tests(bp, test_mask, &test_results); 5249 5250 rc = bnxt_half_open_nic(bp); 5251 if (rc) { 5252 etest->flags |= ETH_TEST_FL_FAILED; 5253 return; 5254 } 5255 buf[BNXT_MACLPBK_TEST_IDX] = 1; 5256 if (bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK) 5257 goto skip_mac_loopback; 5258 5259 bnxt_hwrm_mac_loopback(bp, true); 5260 msleep(250); 5261 if (bnxt_run_loopback(bp)) 5262 etest->flags |= ETH_TEST_FL_FAILED; 5263 else 5264 buf[BNXT_MACLPBK_TEST_IDX] = 0; 5265 5266 bnxt_hwrm_mac_loopback(bp, false); 5267 skip_mac_loopback: 5268 buf[BNXT_PHYLPBK_TEST_IDX] = 1; 5269 if (bp->phy_flags & BNXT_PHY_FL_NO_PHY_LPBK) 5270 goto skip_phy_loopback; 5271 5272 bnxt_hwrm_phy_loopback(bp, true, false); 5273 msleep(1000); 5274 if (bnxt_run_loopback(bp)) 5275 etest->flags |= ETH_TEST_FL_FAILED; 5276 else 5277 buf[BNXT_PHYLPBK_TEST_IDX] = 0; 5278 skip_phy_loopback: 5279 buf[BNXT_EXTLPBK_TEST_IDX] = 1; 5280 if (do_ext_lpbk) { 5281 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 5282 bnxt_hwrm_phy_loopback(bp, true, true); 5283 msleep(1000); 5284 if (bnxt_run_loopback(bp)) 5285 etest->flags |= ETH_TEST_FL_FAILED; 5286 else 5287 buf[BNXT_EXTLPBK_TEST_IDX] = 0; 5288 } 5289 bnxt_hwrm_phy_loopback(bp, false, false); 5290 bnxt_half_close_nic(bp); 5291 rc = bnxt_open_nic(bp, true, true); 5292 } 5293 if (rc || bnxt_test_irq(bp)) { 5294 buf[BNXT_IRQ_TEST_IDX] = 1; 5295 etest->flags |= ETH_TEST_FL_FAILED; 5296 } 5297 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) { 5298 u8 bit_val = 1 << i; 5299 5300 if ((test_mask & bit_val) && !(test_results & bit_val)) { 5301 buf[i] = 1; 5302 etest->flags |= ETH_TEST_FL_FAILED; 5303 } 5304 } 5305 } 5306 5307 static int bnxt_reset(struct net_device *dev, u32 *flags) 5308 { 5309 struct bnxt *bp = netdev_priv(dev); 5310 bool reload = false; 5311 u32 req = *flags; 5312 5313 if (!req) 5314 return -EINVAL; 5315 5316 if (!BNXT_PF(bp)) { 5317 
netdev_err(dev, "Reset is not supported from a VF\n"); 5318 return -EOPNOTSUPP; 5319 } 5320 5321 if (pci_vfs_assigned(bp->pdev) && 5322 !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) { 5323 netdev_err(dev, 5324 "Reset not allowed when VFs are assigned to VMs\n"); 5325 return -EBUSY; 5326 } 5327 5328 if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) { 5329 /* This feature is not supported in older firmware versions */ 5330 if (bp->hwrm_spec_code >= 0x10803) { 5331 if (!bnxt_firmware_reset_chip(dev)) { 5332 netdev_info(dev, "Firmware reset request successful.\n"); 5333 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) 5334 reload = true; 5335 *flags &= ~BNXT_FW_RESET_CHIP; 5336 } 5337 } else if (req == BNXT_FW_RESET_CHIP) { 5338 return -EOPNOTSUPP; /* only request, fail hard */ 5339 } 5340 } 5341 5342 if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) { 5343 /* This feature is not supported in older firmware versions */ 5344 if (bp->hwrm_spec_code >= 0x10803) { 5345 if (!bnxt_firmware_reset_ap(dev)) { 5346 netdev_info(dev, "Reset application processor successful.\n"); 5347 reload = true; 5348 *flags &= ~BNXT_FW_RESET_AP; 5349 } 5350 } else if (req == BNXT_FW_RESET_AP) { 5351 return -EOPNOTSUPP; /* only request, fail hard */ 5352 } 5353 } 5354 5355 if (reload) 5356 netdev_info(dev, "Reload driver to complete reset\n"); 5357 5358 return 0; 5359 } 5360 5361 static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump) 5362 { 5363 struct bnxt *bp = netdev_priv(dev); 5364 5365 if (dump->flag > BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE) { 5366 netdev_info(dev, 5367 "Supports only Live(0), Crash(1), Driver(2), Live with cached context(3) dumps.\n"); 5368 return -EINVAL; 5369 } 5370 5371 if (dump->flag == BNXT_DUMP_CRASH) { 5372 if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR && 5373 (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) { 5374 netdev_info(dev, 5375 "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n"); 5376 return -EOPNOTSUPP; 5377 } else if 
(!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) { 5378 netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n"); 5379 return -EOPNOTSUPP; 5380 } 5381 } 5382 5383 bp->dump_flag = dump->flag; 5384 return 0; 5385 } 5386 5387 static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) 5388 { 5389 struct bnxt *bp = netdev_priv(dev); 5390 5391 if (bp->hwrm_spec_code < 0x10801) 5392 return -EOPNOTSUPP; 5393 5394 dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 | 5395 bp->ver_resp.hwrm_fw_min_8b << 16 | 5396 bp->ver_resp.hwrm_fw_bld_8b << 8 | 5397 bp->ver_resp.hwrm_fw_rsvd_8b; 5398 5399 dump->flag = bp->dump_flag; 5400 dump->len = bnxt_get_coredump_length(bp, bp->dump_flag); 5401 return 0; 5402 } 5403 5404 static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, 5405 void *buf) 5406 { 5407 struct bnxt *bp = netdev_priv(dev); 5408 5409 if (bp->hwrm_spec_code < 0x10801) 5410 return -EOPNOTSUPP; 5411 5412 memset(buf, 0, dump->len); 5413 5414 dump->flag = bp->dump_flag; 5415 return bnxt_get_coredump(bp, dump->flag, buf, &dump->len); 5416 } 5417 5418 static int bnxt_get_ts_info(struct net_device *dev, 5419 struct kernel_ethtool_ts_info *info) 5420 { 5421 struct bnxt *bp = netdev_priv(dev); 5422 struct bnxt_ptp_cfg *ptp; 5423 5424 ptp = bp->ptp_cfg; 5425 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE; 5426 5427 if (!ptp) 5428 return 0; 5429 5430 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | 5431 SOF_TIMESTAMPING_RX_HARDWARE | 5432 SOF_TIMESTAMPING_RAW_HARDWARE; 5433 if (ptp->ptp_clock) 5434 info->phc_index = ptp_clock_index(ptp->ptp_clock); 5435 5436 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 5437 5438 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 5439 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 5440 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 5441 5442 if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) 5443 info->rx_filters |= (1 << 
HWTSTAMP_FILTER_ALL); 5444 return 0; 5445 } 5446 5447 static void bnxt_hwrm_pcie_qstats(struct bnxt *bp) 5448 { 5449 struct hwrm_pcie_qstats_output *resp; 5450 struct hwrm_pcie_qstats_input *req; 5451 5452 bp->pcie_stat_len = 0; 5453 if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) 5454 return; 5455 5456 if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS)) 5457 return; 5458 5459 resp = hwrm_req_hold(bp, req); 5460 if (__bnxt_hwrm_pcie_qstats(bp, req)) 5461 bp->pcie_stat_len = min_t(u16, 5462 le16_to_cpu(resp->pcie_stat_size), 5463 sizeof(struct pcie_ctx_hw_stats_v2)); 5464 hwrm_req_drop(bp, req); 5465 } 5466 5467 void bnxt_ethtool_init(struct bnxt *bp) 5468 { 5469 struct hwrm_selftest_qlist_output *resp; 5470 struct hwrm_selftest_qlist_input *req; 5471 struct bnxt_test_info *test_info; 5472 struct net_device *dev = bp->dev; 5473 int i, rc; 5474 5475 bnxt_hwrm_pcie_qstats(bp); 5476 if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER)) 5477 bnxt_get_pkgver(dev); 5478 5479 bp->num_tests = 0; 5480 if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp)) 5481 return; 5482 5483 test_info = bp->test_info; 5484 if (!test_info) { 5485 test_info = kzalloc_obj(*bp->test_info); 5486 if (!test_info) 5487 return; 5488 bp->test_info = test_info; 5489 } 5490 5491 if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST)) 5492 return; 5493 5494 resp = hwrm_req_hold(bp, req); 5495 rc = hwrm_req_send_silent(bp, req); 5496 if (rc) 5497 goto ethtool_init_exit; 5498 5499 bp->num_tests = resp->num_tests + BNXT_DRV_TESTS; 5500 if (bp->num_tests > BNXT_MAX_TEST) 5501 bp->num_tests = BNXT_MAX_TEST; 5502 5503 test_info->offline_mask = resp->offline_tests; 5504 test_info->timeout = le16_to_cpu(resp->test_timeout); 5505 if (!test_info->timeout) 5506 test_info->timeout = HWRM_CMD_TIMEOUT; 5507 for (i = 0; i < bp->num_tests; i++) { 5508 char *str = test_info->string[i]; 5509 char *fw_str = resp->test_name[i]; 5510 5511 if (i == BNXT_MACLPBK_TEST_IDX) { 5512 strcpy(str, "Mac loopback test (offline)"); 5513 } else if (i == 
BNXT_PHYLPBK_TEST_IDX) { 5514 strcpy(str, "Phy loopback test (offline)"); 5515 } else if (i == BNXT_EXTLPBK_TEST_IDX) { 5516 strcpy(str, "Ext loopback test (offline)"); 5517 } else if (i == BNXT_IRQ_TEST_IDX) { 5518 strcpy(str, "Interrupt_test (offline)"); 5519 } else { 5520 snprintf(str, ETH_GSTRING_LEN, "%s test (%s)", 5521 fw_str, test_info->offline_mask & (1 << i) ? 5522 "offline" : "online"); 5523 } 5524 } 5525 5526 ethtool_init_exit: 5527 hwrm_req_drop(bp, req); 5528 } 5529 5530 static void bnxt_get_eth_phy_stats(struct net_device *dev, 5531 struct ethtool_eth_phy_stats *phy_stats) 5532 { 5533 struct bnxt *bp = netdev_priv(dev); 5534 u64 *rx; 5535 5536 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 5537 return; 5538 5539 rx = bp->rx_port_stats_ext.sw_stats; 5540 phy_stats->SymbolErrorDuringCarrier = 5541 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err)); 5542 } 5543 5544 static void bnxt_get_eth_mac_stats(struct net_device *dev, 5545 struct ethtool_eth_mac_stats *mac_stats) 5546 { 5547 struct bnxt *bp = netdev_priv(dev); 5548 u64 *rx, *tx; 5549 5550 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 5551 return; 5552 5553 rx = bp->port_stats.sw_stats; 5554 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 5555 5556 mac_stats->FramesReceivedOK = 5557 BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames); 5558 mac_stats->FramesTransmittedOK = 5559 BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames); 5560 mac_stats->FrameCheckSequenceErrors = 5561 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 5562 mac_stats->AlignmentErrors = 5563 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 5564 mac_stats->OutOfRangeLengthField = 5565 BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames); 5566 } 5567 5568 static void bnxt_get_eth_ctrl_stats(struct net_device *dev, 5569 struct ethtool_eth_ctrl_stats *ctrl_stats) 5570 { 5571 struct bnxt *bp = netdev_priv(dev); 5572 u64 *rx; 5573 5574 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 5575 return; 5576 
5577 rx = bp->port_stats.sw_stats; 5578 ctrl_stats->MACControlFramesReceived = 5579 BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames); 5580 } 5581 5582 static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = { 5583 { 0, 64 }, 5584 { 65, 127 }, 5585 { 128, 255 }, 5586 { 256, 511 }, 5587 { 512, 1023 }, 5588 { 1024, 1518 }, 5589 { 1519, 2047 }, 5590 { 2048, 4095 }, 5591 { 4096, 9216 }, 5592 { 9217, 16383 }, 5593 {} 5594 }; 5595 5596 static void bnxt_get_rmon_stats(struct net_device *dev, 5597 struct ethtool_rmon_stats *rmon_stats, 5598 const struct ethtool_rmon_hist_range **ranges) 5599 { 5600 struct bnxt *bp = netdev_priv(dev); 5601 u64 *rx, *tx; 5602 5603 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 5604 return; 5605 5606 rx = bp->port_stats.sw_stats; 5607 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 5608 5609 rmon_stats->jabbers = 5610 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 5611 rmon_stats->oversize_pkts = 5612 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames); 5613 rmon_stats->undersize_pkts = 5614 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames); 5615 5616 rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames); 5617 rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames); 5618 rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames); 5619 rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames); 5620 rmon_stats->hist[4] = 5621 BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames); 5622 rmon_stats->hist[5] = 5623 BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames); 5624 rmon_stats->hist[6] = 5625 BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames); 5626 rmon_stats->hist[7] = 5627 BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames); 5628 rmon_stats->hist[8] = 5629 BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames); 5630 rmon_stats->hist[9] = 5631 BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames); 5632 5633 rmon_stats->hist_tx[0] = 5634 BNXT_GET_TX_PORT_STATS64(tx, 
tx_64b_frames); 5635 rmon_stats->hist_tx[1] = 5636 BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames); 5637 rmon_stats->hist_tx[2] = 5638 BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames); 5639 rmon_stats->hist_tx[3] = 5640 BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames); 5641 rmon_stats->hist_tx[4] = 5642 BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames); 5643 rmon_stats->hist_tx[5] = 5644 BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames); 5645 rmon_stats->hist_tx[6] = 5646 BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames); 5647 rmon_stats->hist_tx[7] = 5648 BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames); 5649 rmon_stats->hist_tx[8] = 5650 BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames); 5651 rmon_stats->hist_tx[9] = 5652 BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames); 5653 5654 *ranges = bnxt_rmon_ranges; 5655 } 5656 5657 static void bnxt_get_ptp_stats(struct net_device *dev, 5658 struct ethtool_ts_stats *ts_stats) 5659 { 5660 struct bnxt *bp = netdev_priv(dev); 5661 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 5662 5663 if (ptp) { 5664 ts_stats->pkts = ptp->stats.ts_pkts; 5665 ts_stats->lost = ptp->stats.ts_lost; 5666 ts_stats->err = atomic64_read(&ptp->stats.ts_err); 5667 } 5668 } 5669 5670 static void bnxt_get_link_ext_stats(struct net_device *dev, 5671 struct ethtool_link_ext_stats *stats) 5672 { 5673 struct bnxt *bp = netdev_priv(dev); 5674 u64 *rx; 5675 5676 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 5677 return; 5678 5679 rx = bp->rx_port_stats_ext.sw_stats; 5680 stats->link_down_events = 5681 *(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events)); 5682 } 5683 5684 void bnxt_ethtool_free(struct bnxt *bp) 5685 { 5686 kfree(bp->test_info); 5687 bp->test_info = NULL; 5688 } 5689 5690 const struct ethtool_ops bnxt_ethtool_ops = { 5691 .cap_link_lanes_supported = 1, 5692 .rxfh_per_ctx_key = 1, 5693 .rxfh_max_num_contexts = BNXT_MAX_ETH_RSS_CTX + 1, 5694 .rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5, 5695 .rxfh_priv_size = 
sizeof(struct bnxt_rss_ctx), 5696 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 5697 ETHTOOL_COALESCE_MAX_FRAMES | 5698 ETHTOOL_COALESCE_USECS_IRQ | 5699 ETHTOOL_COALESCE_MAX_FRAMES_IRQ | 5700 ETHTOOL_COALESCE_STATS_BLOCK_USECS | 5701 ETHTOOL_COALESCE_USE_ADAPTIVE_RX | 5702 ETHTOOL_COALESCE_USE_CQE, 5703 .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT | 5704 ETHTOOL_RING_USE_HDS_THRS, 5705 .get_link_ksettings = bnxt_get_link_ksettings, 5706 .set_link_ksettings = bnxt_set_link_ksettings, 5707 .get_fec_stats = bnxt_get_fec_stats, 5708 .get_fecparam = bnxt_get_fecparam, 5709 .set_fecparam = bnxt_set_fecparam, 5710 .get_pause_stats = bnxt_get_pause_stats, 5711 .get_pauseparam = bnxt_get_pauseparam, 5712 .set_pauseparam = bnxt_set_pauseparam, 5713 .get_drvinfo = bnxt_get_drvinfo, 5714 .get_regs_len = bnxt_get_regs_len, 5715 .get_regs = bnxt_get_regs, 5716 .get_wol = bnxt_get_wol, 5717 .set_wol = bnxt_set_wol, 5718 .get_coalesce = bnxt_get_coalesce, 5719 .set_coalesce = bnxt_set_coalesce, 5720 .get_msglevel = bnxt_get_msglevel, 5721 .set_msglevel = bnxt_set_msglevel, 5722 .get_sset_count = bnxt_get_sset_count, 5723 .get_strings = bnxt_get_strings, 5724 .get_ethtool_stats = bnxt_get_ethtool_stats, 5725 .set_ringparam = bnxt_set_ringparam, 5726 .get_ringparam = bnxt_get_ringparam, 5727 .get_channels = bnxt_get_channels, 5728 .set_channels = bnxt_set_channels, 5729 .get_rxnfc = bnxt_get_rxnfc, 5730 .set_rxnfc = bnxt_set_rxnfc, 5731 .get_rx_ring_count = bnxt_get_rx_ring_count, 5732 .get_rxfh_indir_size = bnxt_get_rxfh_indir_size, 5733 .get_rxfh_key_size = bnxt_get_rxfh_key_size, 5734 .get_rxfh = bnxt_get_rxfh, 5735 .set_rxfh = bnxt_set_rxfh, 5736 .get_rxfh_fields = bnxt_get_rxfh_fields, 5737 .set_rxfh_fields = bnxt_set_rxfh_fields, 5738 .create_rxfh_context = bnxt_create_rxfh_context, 5739 .modify_rxfh_context = bnxt_modify_rxfh_context, 5740 .remove_rxfh_context = bnxt_remove_rxfh_context, 5741 .flash_device = bnxt_flash_device, 5742 .get_eeprom_len = 
bnxt_get_eeprom_len, 5743 .get_eeprom = bnxt_get_eeprom, 5744 .set_eeprom = bnxt_set_eeprom, 5745 .get_link = bnxt_get_link, 5746 .get_link_ext_state = bnxt_get_link_ext_state, 5747 .get_link_ext_stats = bnxt_get_link_ext_stats, 5748 .get_eee = bnxt_get_eee, 5749 .set_eee = bnxt_set_eee, 5750 .get_tunable = bnxt_get_tunable, 5751 .set_tunable = bnxt_set_tunable, 5752 .get_module_info = bnxt_get_module_info, 5753 .get_module_eeprom = bnxt_get_module_eeprom, 5754 .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page, 5755 .set_module_eeprom_by_page = bnxt_set_module_eeprom_by_page, 5756 .nway_reset = bnxt_nway_reset, 5757 .set_phys_id = bnxt_set_phys_id, 5758 .self_test = bnxt_self_test, 5759 .get_ts_info = bnxt_get_ts_info, 5760 .reset = bnxt_reset, 5761 .set_dump = bnxt_set_dump, 5762 .get_dump_flag = bnxt_get_dump_flag, 5763 .get_dump_data = bnxt_get_dump_data, 5764 .get_eth_phy_stats = bnxt_get_eth_phy_stats, 5765 .get_eth_mac_stats = bnxt_get_eth_mac_stats, 5766 .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats, 5767 .get_rmon_stats = bnxt_get_rmon_stats, 5768 .get_ts_stats = bnxt_get_ptp_stats, 5769 }; 5770