1 /* Broadcom NetXtreme-C/E network driver. 2 * 3 * Copyright (c) 2014-2016 Broadcom Corporation 4 * Copyright (c) 2016-2017 Broadcom Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation. 9 */ 10 11 #include <linux/bitops.h> 12 #include <linux/ctype.h> 13 #include <linux/stringify.h> 14 #include <linux/ethtool.h> 15 #include <linux/ethtool_netlink.h> 16 #include <linux/linkmode.h> 17 #include <linux/interrupt.h> 18 #include <linux/pci.h> 19 #include <linux/etherdevice.h> 20 #include <linux/crc32.h> 21 #include <linux/firmware.h> 22 #include <linux/utsname.h> 23 #include <linux/time.h> 24 #include <linux/ptp_clock_kernel.h> 25 #include <linux/net_tstamp.h> 26 #include <linux/timecounter.h> 27 #include <net/netdev_queues.h> 28 #include <net/netlink.h> 29 #include <linux/bnxt/hsi.h> 30 #include "bnxt.h" 31 #include "bnxt_hwrm.h" 32 #include "bnxt_ulp.h" 33 #include "bnxt_xdp.h" 34 #include "bnxt_ptp.h" 35 #include "bnxt_ethtool.h" 36 #include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */ 37 #include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */ 38 #include "bnxt_coredump.h" 39 40 #define BNXT_NVM_ERR_MSG(dev, extack, msg) \ 41 do { \ 42 if (extack) \ 43 NL_SET_ERR_MSG_MOD(extack, msg); \ 44 netdev_err(dev, "%s\n", msg); \ 45 } while (0) 46 47 static u32 bnxt_get_msglevel(struct net_device *dev) 48 { 49 struct bnxt *bp = netdev_priv(dev); 50 51 return bp->msg_enable; 52 } 53 54 static void bnxt_set_msglevel(struct net_device *dev, u32 value) 55 { 56 struct bnxt *bp = netdev_priv(dev); 57 58 bp->msg_enable = value; 59 } 60 61 static int bnxt_get_coalesce(struct net_device *dev, 62 struct ethtool_coalesce *coal, 63 struct kernel_ethtool_coalesce *kernel_coal, 64 struct netlink_ext_ack *extack) 65 { 66 struct bnxt *bp = netdev_priv(dev); 67 struct bnxt_coal *hw_coal; 68 u16 mult; 69 70 memset(coal, 
0, sizeof(*coal)); 71 72 coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM; 73 74 hw_coal = &bp->rx_coal; 75 mult = hw_coal->bufs_per_record; 76 coal->rx_coalesce_usecs = hw_coal->coal_ticks; 77 coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult; 78 coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; 79 coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; 80 if (hw_coal->flags & 81 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET) 82 kernel_coal->use_cqe_mode_rx = true; 83 84 hw_coal = &bp->tx_coal; 85 mult = hw_coal->bufs_per_record; 86 coal->tx_coalesce_usecs = hw_coal->coal_ticks; 87 coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult; 88 coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; 89 coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; 90 if (hw_coal->flags & 91 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET) 92 kernel_coal->use_cqe_mode_tx = true; 93 94 coal->stats_block_coalesce_usecs = bp->stats_coal_ticks; 95 96 return 0; 97 } 98 99 static int bnxt_set_coalesce(struct net_device *dev, 100 struct ethtool_coalesce *coal, 101 struct kernel_ethtool_coalesce *kernel_coal, 102 struct netlink_ext_ack *extack) 103 { 104 struct bnxt *bp = netdev_priv(dev); 105 bool update_stats = false; 106 struct bnxt_coal *hw_coal; 107 int rc = 0; 108 u16 mult; 109 110 if (coal->use_adaptive_rx_coalesce) { 111 bp->flags |= BNXT_FLAG_DIM; 112 } else { 113 if (bp->flags & BNXT_FLAG_DIM) { 114 bp->flags &= ~(BNXT_FLAG_DIM); 115 goto reset_coalesce; 116 } 117 } 118 119 if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) && 120 !(bp->coal_cap.cmpl_params & 121 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)) 122 return -EOPNOTSUPP; 123 124 hw_coal = &bp->rx_coal; 125 mult = hw_coal->bufs_per_record; 126 hw_coal->coal_ticks = coal->rx_coalesce_usecs; 127 hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult; 128 hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq; 129 
hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult; 130 hw_coal->flags &= 131 ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 132 if (kernel_coal->use_cqe_mode_rx) 133 hw_coal->flags |= 134 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 135 136 hw_coal = &bp->tx_coal; 137 mult = hw_coal->bufs_per_record; 138 hw_coal->coal_ticks = coal->tx_coalesce_usecs; 139 hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult; 140 hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq; 141 hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult; 142 hw_coal->flags &= 143 ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 144 if (kernel_coal->use_cqe_mode_tx) 145 hw_coal->flags |= 146 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 147 148 if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) { 149 u32 stats_ticks = coal->stats_block_coalesce_usecs; 150 151 /* Allow 0, which means disable. */ 152 if (stats_ticks) 153 stats_ticks = clamp_t(u32, stats_ticks, 154 BNXT_MIN_STATS_COAL_TICKS, 155 BNXT_MAX_STATS_COAL_TICKS); 156 stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS); 157 bp->stats_coal_ticks = stats_ticks; 158 if (bp->stats_coal_ticks) 159 bp->current_interval = 160 bp->stats_coal_ticks * HZ / 1000000; 161 else 162 bp->current_interval = BNXT_TIMER_INTERVAL; 163 update_stats = true; 164 } 165 166 reset_coalesce: 167 if (test_bit(BNXT_STATE_OPEN, &bp->state)) { 168 if (update_stats) { 169 bnxt_close_nic(bp, true, false); 170 rc = bnxt_open_nic(bp, true, false); 171 } else { 172 rc = bnxt_hwrm_set_coal(bp); 173 } 174 } 175 176 return rc; 177 } 178 179 static const char * const bnxt_ring_rx_stats_str[] = { 180 "rx_ucast_packets", 181 "rx_mcast_packets", 182 "rx_bcast_packets", 183 "rx_discards", 184 "rx_errors", 185 "rx_ucast_bytes", 186 "rx_mcast_bytes", 187 "rx_bcast_bytes", 188 }; 189 190 static const char * const bnxt_ring_tx_stats_str[] = { 191 "tx_ucast_packets", 192 "tx_mcast_packets", 193 
"tx_bcast_packets", 194 "tx_errors", 195 "tx_discards", 196 "tx_ucast_bytes", 197 "tx_mcast_bytes", 198 "tx_bcast_bytes", 199 }; 200 201 static const char * const bnxt_ring_tpa_stats_str[] = { 202 "tpa_packets", 203 "tpa_bytes", 204 "tpa_events", 205 "tpa_aborts", 206 }; 207 208 static const char * const bnxt_ring_tpa2_stats_str[] = { 209 "rx_tpa_eligible_pkt", 210 "rx_tpa_eligible_bytes", 211 "rx_tpa_pkt", 212 "rx_tpa_bytes", 213 "rx_tpa_errors", 214 "rx_tpa_events", 215 }; 216 217 static const char * const bnxt_rx_sw_stats_str[] = { 218 "rx_l4_csum_errors", 219 "rx_resets", 220 "rx_buf_errors", 221 }; 222 223 static const char * const bnxt_cmn_sw_stats_str[] = { 224 "missed_irqs", 225 }; 226 227 #define BNXT_RX_STATS_ENTRY(counter) \ 228 { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) } 229 230 #define BNXT_TX_STATS_ENTRY(counter) \ 231 { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) } 232 233 #define BNXT_RX_STATS_EXT_ENTRY(counter) \ 234 { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) } 235 236 #define BNXT_TX_STATS_EXT_ENTRY(counter) \ 237 { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) } 238 239 #define BNXT_RX_STATS_EXT_PFC_ENTRY(n) \ 240 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us), \ 241 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions) 242 243 #define BNXT_TX_STATS_EXT_PFC_ENTRY(n) \ 244 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us), \ 245 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions) 246 247 #define BNXT_RX_STATS_EXT_PFC_ENTRIES \ 248 BNXT_RX_STATS_EXT_PFC_ENTRY(0), \ 249 BNXT_RX_STATS_EXT_PFC_ENTRY(1), \ 250 BNXT_RX_STATS_EXT_PFC_ENTRY(2), \ 251 BNXT_RX_STATS_EXT_PFC_ENTRY(3), \ 252 BNXT_RX_STATS_EXT_PFC_ENTRY(4), \ 253 BNXT_RX_STATS_EXT_PFC_ENTRY(5), \ 254 BNXT_RX_STATS_EXT_PFC_ENTRY(6), \ 255 BNXT_RX_STATS_EXT_PFC_ENTRY(7) 256 257 #define BNXT_TX_STATS_EXT_PFC_ENTRIES \ 258 BNXT_TX_STATS_EXT_PFC_ENTRY(0), \ 259 BNXT_TX_STATS_EXT_PFC_ENTRY(1), \ 260 BNXT_TX_STATS_EXT_PFC_ENTRY(2), \ 261 
BNXT_TX_STATS_EXT_PFC_ENTRY(3), \ 262 BNXT_TX_STATS_EXT_PFC_ENTRY(4), \ 263 BNXT_TX_STATS_EXT_PFC_ENTRY(5), \ 264 BNXT_TX_STATS_EXT_PFC_ENTRY(6), \ 265 BNXT_TX_STATS_EXT_PFC_ENTRY(7) 266 267 #define BNXT_RX_STATS_EXT_COS_ENTRY(n) \ 268 BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n), \ 269 BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n) 270 271 #define BNXT_TX_STATS_EXT_COS_ENTRY(n) \ 272 BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n), \ 273 BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n) 274 275 #define BNXT_RX_STATS_EXT_COS_ENTRIES \ 276 BNXT_RX_STATS_EXT_COS_ENTRY(0), \ 277 BNXT_RX_STATS_EXT_COS_ENTRY(1), \ 278 BNXT_RX_STATS_EXT_COS_ENTRY(2), \ 279 BNXT_RX_STATS_EXT_COS_ENTRY(3), \ 280 BNXT_RX_STATS_EXT_COS_ENTRY(4), \ 281 BNXT_RX_STATS_EXT_COS_ENTRY(5), \ 282 BNXT_RX_STATS_EXT_COS_ENTRY(6), \ 283 BNXT_RX_STATS_EXT_COS_ENTRY(7) \ 284 285 #define BNXT_TX_STATS_EXT_COS_ENTRIES \ 286 BNXT_TX_STATS_EXT_COS_ENTRY(0), \ 287 BNXT_TX_STATS_EXT_COS_ENTRY(1), \ 288 BNXT_TX_STATS_EXT_COS_ENTRY(2), \ 289 BNXT_TX_STATS_EXT_COS_ENTRY(3), \ 290 BNXT_TX_STATS_EXT_COS_ENTRY(4), \ 291 BNXT_TX_STATS_EXT_COS_ENTRY(5), \ 292 BNXT_TX_STATS_EXT_COS_ENTRY(6), \ 293 BNXT_TX_STATS_EXT_COS_ENTRY(7) \ 294 295 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \ 296 BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \ 297 BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n) 298 299 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \ 300 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \ 301 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \ 302 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \ 303 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \ 304 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \ 305 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \ 306 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \ 307 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7) 308 309 #define BNXT_RX_STATS_PRI_ENTRY(counter, n) \ 310 { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \ 311 __stringify(counter##_pri##n) } 312 313 #define BNXT_TX_STATS_PRI_ENTRY(counter, n) \ 314 { 
BNXT_TX_STATS_EXT_OFFSET(counter##_cos0), \ 315 __stringify(counter##_pri##n) } 316 317 #define BNXT_RX_STATS_PRI_ENTRIES(counter) \ 318 BNXT_RX_STATS_PRI_ENTRY(counter, 0), \ 319 BNXT_RX_STATS_PRI_ENTRY(counter, 1), \ 320 BNXT_RX_STATS_PRI_ENTRY(counter, 2), \ 321 BNXT_RX_STATS_PRI_ENTRY(counter, 3), \ 322 BNXT_RX_STATS_PRI_ENTRY(counter, 4), \ 323 BNXT_RX_STATS_PRI_ENTRY(counter, 5), \ 324 BNXT_RX_STATS_PRI_ENTRY(counter, 6), \ 325 BNXT_RX_STATS_PRI_ENTRY(counter, 7) 326 327 #define BNXT_TX_STATS_PRI_ENTRIES(counter) \ 328 BNXT_TX_STATS_PRI_ENTRY(counter, 0), \ 329 BNXT_TX_STATS_PRI_ENTRY(counter, 1), \ 330 BNXT_TX_STATS_PRI_ENTRY(counter, 2), \ 331 BNXT_TX_STATS_PRI_ENTRY(counter, 3), \ 332 BNXT_TX_STATS_PRI_ENTRY(counter, 4), \ 333 BNXT_TX_STATS_PRI_ENTRY(counter, 5), \ 334 BNXT_TX_STATS_PRI_ENTRY(counter, 6), \ 335 BNXT_TX_STATS_PRI_ENTRY(counter, 7) 336 337 enum { 338 RX_TOTAL_DISCARDS, 339 TX_TOTAL_DISCARDS, 340 RX_NETPOLL_DISCARDS, 341 }; 342 343 static const char *const bnxt_ring_err_stats_arr[] = { 344 "rx_total_l4_csum_errors", 345 "rx_total_resets", 346 "rx_total_buf_errors", 347 "rx_total_oom_discards", 348 "rx_total_netpoll_discards", 349 "rx_total_ring_discards", 350 "tx_total_resets", 351 "tx_total_ring_discards", 352 "total_missed_irqs", 353 }; 354 355 #define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str) 356 #define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str) 357 #define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str) 358 #define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str) 359 360 static const struct { 361 long offset; 362 char string[ETH_GSTRING_LEN]; 363 } bnxt_port_stats_arr[] = { 364 BNXT_RX_STATS_ENTRY(rx_64b_frames), 365 BNXT_RX_STATS_ENTRY(rx_65b_127b_frames), 366 BNXT_RX_STATS_ENTRY(rx_128b_255b_frames), 367 BNXT_RX_STATS_ENTRY(rx_256b_511b_frames), 368 BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames), 369 BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames), 370 BNXT_RX_STATS_ENTRY(rx_good_vlan_frames), 371 
BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames), 372 BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames), 373 BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames), 374 BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames), 375 BNXT_RX_STATS_ENTRY(rx_total_frames), 376 BNXT_RX_STATS_ENTRY(rx_ucast_frames), 377 BNXT_RX_STATS_ENTRY(rx_mcast_frames), 378 BNXT_RX_STATS_ENTRY(rx_bcast_frames), 379 BNXT_RX_STATS_ENTRY(rx_fcs_err_frames), 380 BNXT_RX_STATS_ENTRY(rx_ctrl_frames), 381 BNXT_RX_STATS_ENTRY(rx_pause_frames), 382 BNXT_RX_STATS_ENTRY(rx_pfc_frames), 383 BNXT_RX_STATS_ENTRY(rx_align_err_frames), 384 BNXT_RX_STATS_ENTRY(rx_ovrsz_frames), 385 BNXT_RX_STATS_ENTRY(rx_jbr_frames), 386 BNXT_RX_STATS_ENTRY(rx_mtu_err_frames), 387 BNXT_RX_STATS_ENTRY(rx_tagged_frames), 388 BNXT_RX_STATS_ENTRY(rx_double_tagged_frames), 389 BNXT_RX_STATS_ENTRY(rx_good_frames), 390 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0), 391 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1), 392 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2), 393 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3), 394 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4), 395 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5), 396 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6), 397 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7), 398 BNXT_RX_STATS_ENTRY(rx_undrsz_frames), 399 BNXT_RX_STATS_ENTRY(rx_eee_lpi_events), 400 BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration), 401 BNXT_RX_STATS_ENTRY(rx_bytes), 402 BNXT_RX_STATS_ENTRY(rx_runt_bytes), 403 BNXT_RX_STATS_ENTRY(rx_runt_frames), 404 BNXT_RX_STATS_ENTRY(rx_stat_discard), 405 BNXT_RX_STATS_ENTRY(rx_stat_err), 406 407 BNXT_TX_STATS_ENTRY(tx_64b_frames), 408 BNXT_TX_STATS_ENTRY(tx_65b_127b_frames), 409 BNXT_TX_STATS_ENTRY(tx_128b_255b_frames), 410 BNXT_TX_STATS_ENTRY(tx_256b_511b_frames), 411 BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames), 412 BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames), 413 BNXT_TX_STATS_ENTRY(tx_good_vlan_frames), 414 BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames), 415 BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames), 416 
BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames), 417 BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames), 418 BNXT_TX_STATS_ENTRY(tx_good_frames), 419 BNXT_TX_STATS_ENTRY(tx_total_frames), 420 BNXT_TX_STATS_ENTRY(tx_ucast_frames), 421 BNXT_TX_STATS_ENTRY(tx_mcast_frames), 422 BNXT_TX_STATS_ENTRY(tx_bcast_frames), 423 BNXT_TX_STATS_ENTRY(tx_pause_frames), 424 BNXT_TX_STATS_ENTRY(tx_pfc_frames), 425 BNXT_TX_STATS_ENTRY(tx_jabber_frames), 426 BNXT_TX_STATS_ENTRY(tx_fcs_err_frames), 427 BNXT_TX_STATS_ENTRY(tx_err), 428 BNXT_TX_STATS_ENTRY(tx_fifo_underruns), 429 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0), 430 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1), 431 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2), 432 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3), 433 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4), 434 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5), 435 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6), 436 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7), 437 BNXT_TX_STATS_ENTRY(tx_eee_lpi_events), 438 BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration), 439 BNXT_TX_STATS_ENTRY(tx_total_collisions), 440 BNXT_TX_STATS_ENTRY(tx_bytes), 441 BNXT_TX_STATS_ENTRY(tx_xthol_frames), 442 BNXT_TX_STATS_ENTRY(tx_stat_discard), 443 BNXT_TX_STATS_ENTRY(tx_stat_error), 444 }; 445 446 static const struct { 447 long offset; 448 char string[ETH_GSTRING_LEN]; 449 } bnxt_port_stats_ext_arr[] = { 450 BNXT_RX_STATS_EXT_ENTRY(link_down_events), 451 BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events), 452 BNXT_RX_STATS_EXT_ENTRY(resume_pause_events), 453 BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events), 454 BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events), 455 BNXT_RX_STATS_EXT_COS_ENTRIES, 456 BNXT_RX_STATS_EXT_PFC_ENTRIES, 457 BNXT_RX_STATS_EXT_ENTRY(rx_bits), 458 BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold), 459 BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err), 460 BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits), 461 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES, 462 BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks), 463 
BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks), 464 BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss), 465 }; 466 467 static const struct { 468 long offset; 469 char string[ETH_GSTRING_LEN]; 470 } bnxt_tx_port_stats_ext_arr[] = { 471 BNXT_TX_STATS_EXT_COS_ENTRIES, 472 BNXT_TX_STATS_EXT_PFC_ENTRIES, 473 }; 474 475 static const struct { 476 long base_off; 477 char string[ETH_GSTRING_LEN]; 478 } bnxt_rx_bytes_pri_arr[] = { 479 BNXT_RX_STATS_PRI_ENTRIES(rx_bytes), 480 }; 481 482 static const struct { 483 long base_off; 484 char string[ETH_GSTRING_LEN]; 485 } bnxt_rx_pkts_pri_arr[] = { 486 BNXT_RX_STATS_PRI_ENTRIES(rx_packets), 487 }; 488 489 static const struct { 490 long base_off; 491 char string[ETH_GSTRING_LEN]; 492 } bnxt_tx_bytes_pri_arr[] = { 493 BNXT_TX_STATS_PRI_ENTRIES(tx_bytes), 494 }; 495 496 static const struct { 497 long base_off; 498 char string[ETH_GSTRING_LEN]; 499 } bnxt_tx_pkts_pri_arr[] = { 500 BNXT_TX_STATS_PRI_ENTRIES(tx_packets), 501 }; 502 503 #define BNXT_NUM_RING_ERR_STATS ARRAY_SIZE(bnxt_ring_err_stats_arr) 504 #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) 505 #define BNXT_NUM_STATS_PRI \ 506 (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \ 507 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \ 508 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \ 509 ARRAY_SIZE(bnxt_tx_pkts_pri_arr)) 510 511 static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp) 512 { 513 if (BNXT_SUPPORTS_TPA(bp)) { 514 if (bp->max_tpa_v2) { 515 if (BNXT_CHIP_P5(bp)) 516 return BNXT_NUM_TPA_RING_STATS_P5; 517 return BNXT_NUM_TPA_RING_STATS_P7; 518 } 519 return BNXT_NUM_TPA_RING_STATS; 520 } 521 return 0; 522 } 523 524 static int bnxt_get_num_ring_stats(struct bnxt *bp) 525 { 526 int rx, tx, cmn; 527 528 rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS + 529 bnxt_get_num_tpa_ring_stats(bp); 530 tx = NUM_RING_TX_HW_STATS; 531 cmn = NUM_RING_CMN_SW_STATS; 532 return rx * bp->rx_nr_rings + 533 tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) + 534 cmn * bp->cp_nr_rings; 535 } 536 537 static int 
bnxt_get_num_stats(struct bnxt *bp) 538 { 539 int num_stats = bnxt_get_num_ring_stats(bp); 540 int len; 541 542 num_stats += BNXT_NUM_RING_ERR_STATS; 543 544 if (bp->flags & BNXT_FLAG_PORT_STATS) 545 num_stats += BNXT_NUM_PORT_STATS; 546 547 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 548 len = min_t(int, bp->fw_rx_stats_ext_size, 549 ARRAY_SIZE(bnxt_port_stats_ext_arr)); 550 num_stats += len; 551 len = min_t(int, bp->fw_tx_stats_ext_size, 552 ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); 553 num_stats += len; 554 if (bp->pri2cos_valid) 555 num_stats += BNXT_NUM_STATS_PRI; 556 } 557 558 return num_stats; 559 } 560 561 static int bnxt_get_sset_count(struct net_device *dev, int sset) 562 { 563 struct bnxt *bp = netdev_priv(dev); 564 565 switch (sset) { 566 case ETH_SS_STATS: 567 return bnxt_get_num_stats(bp); 568 case ETH_SS_TEST: 569 if (!bp->num_tests) 570 return -EOPNOTSUPP; 571 return bp->num_tests; 572 default: 573 return -EOPNOTSUPP; 574 } 575 } 576 577 static bool is_rx_ring(struct bnxt *bp, int ring_num) 578 { 579 return ring_num < bp->rx_nr_rings; 580 } 581 582 static bool is_tx_ring(struct bnxt *bp, int ring_num) 583 { 584 int tx_base = 0; 585 586 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 587 tx_base = bp->rx_nr_rings; 588 589 if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings)) 590 return true; 591 return false; 592 } 593 594 static void bnxt_get_ethtool_stats(struct net_device *dev, 595 struct ethtool_stats *stats, u64 *buf) 596 { 597 struct bnxt_total_ring_err_stats ring_err_stats = {0}; 598 struct bnxt *bp = netdev_priv(dev); 599 u64 *curr, *prev; 600 u32 tpa_stats; 601 u32 i, j = 0; 602 603 if (!bp->bnapi) { 604 j += bnxt_get_num_ring_stats(bp); 605 goto skip_ring_stats; 606 } 607 608 tpa_stats = bnxt_get_num_tpa_ring_stats(bp); 609 for (i = 0; i < bp->cp_nr_rings; i++) { 610 struct bnxt_napi *bnapi = bp->bnapi[i]; 611 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 612 u64 *sw_stats = cpr->stats.sw_stats; 613 u64 *sw; 614 int k; 615 616 if 
(is_rx_ring(bp, i)) { 617 for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++) 618 buf[j] = sw_stats[k]; 619 } 620 if (is_tx_ring(bp, i)) { 621 k = NUM_RING_RX_HW_STATS; 622 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; 623 j++, k++) 624 buf[j] = sw_stats[k]; 625 } 626 if (!tpa_stats || !is_rx_ring(bp, i)) 627 goto skip_tpa_ring_stats; 628 629 k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; 630 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS + 631 tpa_stats; j++, k++) 632 buf[j] = sw_stats[k]; 633 634 skip_tpa_ring_stats: 635 sw = (u64 *)&cpr->sw_stats->rx; 636 if (is_rx_ring(bp, i)) { 637 for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++) 638 buf[j] = sw[k]; 639 } 640 641 sw = (u64 *)&cpr->sw_stats->cmn; 642 for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++) 643 buf[j] = sw[k]; 644 } 645 646 bnxt_get_ring_err_stats(bp, &ring_err_stats); 647 648 skip_ring_stats: 649 curr = &ring_err_stats.rx_total_l4_csum_errors; 650 prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors; 651 for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++) 652 buf[j] = *curr + *prev; 653 654 if (bp->flags & BNXT_FLAG_PORT_STATS) { 655 u64 *port_stats = bp->port_stats.sw_stats; 656 657 for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) 658 buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset); 659 } 660 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 661 u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats; 662 u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats; 663 u32 len; 664 665 len = min_t(u32, bp->fw_rx_stats_ext_size, 666 ARRAY_SIZE(bnxt_port_stats_ext_arr)); 667 for (i = 0; i < len; i++, j++) { 668 buf[j] = *(rx_port_stats_ext + 669 bnxt_port_stats_ext_arr[i].offset); 670 } 671 len = min_t(u32, bp->fw_tx_stats_ext_size, 672 ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); 673 for (i = 0; i < len; i++, j++) { 674 buf[j] = *(tx_port_stats_ext + 675 bnxt_tx_port_stats_ext_arr[i].offset); 676 } 677 if (bp->pri2cos_valid) { 678 for (i = 0; i < 8; i++, j++) { 679 long n = 
bnxt_rx_bytes_pri_arr[i].base_off + 680 bp->pri2cos_idx[i]; 681 682 buf[j] = *(rx_port_stats_ext + n); 683 } 684 for (i = 0; i < 8; i++, j++) { 685 long n = bnxt_rx_pkts_pri_arr[i].base_off + 686 bp->pri2cos_idx[i]; 687 688 buf[j] = *(rx_port_stats_ext + n); 689 } 690 for (i = 0; i < 8; i++, j++) { 691 u8 cos_idx = bp->pri2cos_idx[i]; 692 long n; 693 694 n = bnxt_tx_bytes_pri_arr[i].base_off + cos_idx; 695 buf[j] = *(tx_port_stats_ext + n); 696 if (bp->cos0_cos1_shared && !cos_idx) 697 buf[j] += *(tx_port_stats_ext + n + 1); 698 } 699 for (i = 0; i < 8; i++, j++) { 700 u8 cos_idx = bp->pri2cos_idx[i]; 701 long n; 702 703 n = bnxt_tx_pkts_pri_arr[i].base_off + cos_idx; 704 buf[j] = *(tx_port_stats_ext + n); 705 if (bp->cos0_cos1_shared && !cos_idx) 706 buf[j] += *(tx_port_stats_ext + n + 1); 707 } 708 } 709 } 710 } 711 712 static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 713 { 714 struct bnxt *bp = netdev_priv(dev); 715 u32 i, j, num_str; 716 const char *str; 717 718 switch (stringset) { 719 case ETH_SS_STATS: 720 for (i = 0; i < bp->cp_nr_rings; i++) { 721 if (is_rx_ring(bp, i)) 722 for (j = 0; j < NUM_RING_RX_HW_STATS; j++) { 723 str = bnxt_ring_rx_stats_str[j]; 724 ethtool_sprintf(&buf, "[%d]: %s", i, 725 str); 726 } 727 if (is_tx_ring(bp, i)) 728 for (j = 0; j < NUM_RING_TX_HW_STATS; j++) { 729 str = bnxt_ring_tx_stats_str[j]; 730 ethtool_sprintf(&buf, "[%d]: %s", i, 731 str); 732 } 733 num_str = bnxt_get_num_tpa_ring_stats(bp); 734 if (!num_str || !is_rx_ring(bp, i)) 735 goto skip_tpa_stats; 736 737 if (bp->max_tpa_v2) 738 for (j = 0; j < num_str; j++) { 739 str = bnxt_ring_tpa2_stats_str[j]; 740 ethtool_sprintf(&buf, "[%d]: %s", i, 741 str); 742 } 743 else 744 for (j = 0; j < num_str; j++) { 745 str = bnxt_ring_tpa_stats_str[j]; 746 ethtool_sprintf(&buf, "[%d]: %s", i, 747 str); 748 } 749 skip_tpa_stats: 750 if (is_rx_ring(bp, i)) 751 for (j = 0; j < NUM_RING_RX_SW_STATS; j++) { 752 str = bnxt_rx_sw_stats_str[j]; 753 
ethtool_sprintf(&buf, "[%d]: %s", i, 754 str); 755 } 756 for (j = 0; j < NUM_RING_CMN_SW_STATS; j++) { 757 str = bnxt_cmn_sw_stats_str[j]; 758 ethtool_sprintf(&buf, "[%d]: %s", i, str); 759 } 760 } 761 for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) 762 ethtool_puts(&buf, bnxt_ring_err_stats_arr[i]); 763 764 if (bp->flags & BNXT_FLAG_PORT_STATS) 765 for (i = 0; i < BNXT_NUM_PORT_STATS; i++) { 766 str = bnxt_port_stats_arr[i].string; 767 ethtool_puts(&buf, str); 768 } 769 770 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 771 u32 len; 772 773 len = min_t(u32, bp->fw_rx_stats_ext_size, 774 ARRAY_SIZE(bnxt_port_stats_ext_arr)); 775 for (i = 0; i < len; i++) { 776 str = bnxt_port_stats_ext_arr[i].string; 777 ethtool_puts(&buf, str); 778 } 779 780 len = min_t(u32, bp->fw_tx_stats_ext_size, 781 ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); 782 for (i = 0; i < len; i++) { 783 str = bnxt_tx_port_stats_ext_arr[i].string; 784 ethtool_puts(&buf, str); 785 } 786 787 if (bp->pri2cos_valid) { 788 for (i = 0; i < 8; i++) { 789 str = bnxt_rx_bytes_pri_arr[i].string; 790 ethtool_puts(&buf, str); 791 } 792 793 for (i = 0; i < 8; i++) { 794 str = bnxt_rx_pkts_pri_arr[i].string; 795 ethtool_puts(&buf, str); 796 } 797 798 for (i = 0; i < 8; i++) { 799 str = bnxt_tx_bytes_pri_arr[i].string; 800 ethtool_puts(&buf, str); 801 } 802 803 for (i = 0; i < 8; i++) { 804 str = bnxt_tx_pkts_pri_arr[i].string; 805 ethtool_puts(&buf, str); 806 } 807 } 808 } 809 break; 810 case ETH_SS_TEST: 811 if (bp->num_tests) 812 for (i = 0; i < bp->num_tests; i++) 813 ethtool_puts(&buf, bp->test_info->string[i]); 814 break; 815 default: 816 netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n", 817 stringset); 818 break; 819 } 820 } 821 822 static void bnxt_get_ringparam(struct net_device *dev, 823 struct ethtool_ringparam *ering, 824 struct kernel_ethtool_ringparam *kernel_ering, 825 struct netlink_ext_ack *extack) 826 { 827 struct bnxt *bp = netdev_priv(dev); 828 829 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 830 
ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA; 831 ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT; 832 kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED; 833 } else { 834 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT; 835 ering->rx_jumbo_max_pending = 0; 836 kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED; 837 } 838 ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT; 839 840 ering->rx_pending = bp->rx_ring_size; 841 ering->rx_jumbo_pending = bp->rx_agg_ring_size; 842 ering->tx_pending = bp->tx_ring_size; 843 844 kernel_ering->hds_thresh_max = BNXT_HDS_THRESHOLD_MAX; 845 } 846 847 static int bnxt_set_ringparam(struct net_device *dev, 848 struct ethtool_ringparam *ering, 849 struct kernel_ethtool_ringparam *kernel_ering, 850 struct netlink_ext_ack *extack) 851 { 852 u8 tcp_data_split = kernel_ering->tcp_data_split; 853 struct bnxt *bp = netdev_priv(dev); 854 u8 hds_config_mod; 855 856 if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) || 857 (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) || 858 (ering->tx_pending < BNXT_MIN_TX_DESC_CNT)) 859 return -EINVAL; 860 861 hds_config_mod = tcp_data_split != dev->cfg->hds_config; 862 if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && hds_config_mod) 863 return -EINVAL; 864 865 if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED && 866 hds_config_mod && BNXT_RX_PAGE_MODE(bp)) { 867 NL_SET_ERR_MSG_MOD(extack, "tcp-data-split is disallowed when XDP is attached"); 868 return -EINVAL; 869 } 870 871 if (netif_running(dev)) 872 bnxt_close_nic(bp, false, false); 873 874 if (hds_config_mod) { 875 if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED) 876 bp->flags |= BNXT_FLAG_HDS; 877 else if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN) 878 bp->flags &= ~BNXT_FLAG_HDS; 879 } 880 881 bp->rx_ring_size = ering->rx_pending; 882 bp->tx_ring_size = ering->tx_pending; 883 bnxt_set_ring_params(bp); 884 885 if (netif_running(dev)) 886 return bnxt_open_nic(bp, false, false); 887 888 return 0; 889 } 890 
891 static void bnxt_get_channels(struct net_device *dev, 892 struct ethtool_channels *channel) 893 { 894 struct bnxt *bp = netdev_priv(dev); 895 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 896 int max_rx_rings, max_tx_rings, tcs; 897 int max_tx_sch_inputs, tx_grps; 898 899 /* Get the most up-to-date max_tx_sch_inputs. */ 900 if (netif_running(dev) && BNXT_NEW_RM(bp)) 901 bnxt_hwrm_func_resc_qcaps(bp, false); 902 max_tx_sch_inputs = hw_resc->max_tx_sch_inputs; 903 904 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true); 905 if (max_tx_sch_inputs) 906 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs); 907 908 tcs = bp->num_tc; 909 tx_grps = max(tcs, 1); 910 if (bp->tx_nr_rings_xdp) 911 tx_grps++; 912 max_tx_rings /= tx_grps; 913 channel->max_combined = min_t(int, max_rx_rings, max_tx_rings); 914 915 if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) { 916 max_rx_rings = 0; 917 max_tx_rings = 0; 918 } 919 if (max_tx_sch_inputs) 920 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs); 921 922 if (tcs > 1) 923 max_tx_rings /= tcs; 924 925 channel->max_rx = max_rx_rings; 926 channel->max_tx = max_tx_rings; 927 channel->max_other = 0; 928 if (bp->flags & BNXT_FLAG_SHARED_RINGS) { 929 channel->combined_count = bp->rx_nr_rings; 930 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 931 channel->combined_count--; 932 } else { 933 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) { 934 channel->rx_count = bp->rx_nr_rings; 935 channel->tx_count = bp->tx_nr_rings_per_tc; 936 } 937 } 938 } 939 940 static int bnxt_set_channels(struct net_device *dev, 941 struct ethtool_channels *channel) 942 { 943 struct bnxt *bp = netdev_priv(dev); 944 int req_tx_rings, req_rx_rings, tcs; 945 bool sh = false; 946 int tx_xdp = 0; 947 int rc = 0; 948 int tx_cp; 949 950 if (channel->other_count) 951 return -EINVAL; 952 953 if (!channel->combined_count && 954 (!channel->rx_count || !channel->tx_count)) 955 return -EINVAL; 956 957 if (channel->combined_count && 958 (channel->rx_count || 
			   channel->tx_count))
		return -EINVAL;

	/* Nitro A0 has no configurable dedicated rings */
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = bp->num_tc;

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		/* XDP TX rings shadow the RX rings, which only works with
		 * shared (combined) completion rings.
		 */
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		tx_xdp = req_rx_rings;
	}

	/* A ring-count change that alters the RSS table size would
	 * invalidate a user-configured indirection table.
	 */
	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
	    netif_is_rxfh_configured(dev)) {
		netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
		return -EINVAL;
	}

	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * before PF unload
			 */
		}
		bnxt_close_nic(bp, true, false);
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
	bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
			       tx_cp + bp->rx_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * to renable
			 */
		}
	} else {
		rc = bnxt_reserve_rings(bp, true);
	}

	return rc;
}

/* Append the sw_ids of all live filters in hash table @tbl to @ids,
 * starting at slot @start and never writing past @id_cnt entries.
 * Filters with cleared flags or marked FW_DELETED are skipped.
 * Caller must hold rcu_read_lock().  Returns the next free slot index.
 */
static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
				     int tbl_size, u32 *ids, u32 start,
				     u32 id_cnt)
{
	int i, j = start;

	if (j >= id_cnt)
		return j;
	for (i = 0; i < tbl_size; i++) {
		struct hlist_head *head;
		struct bnxt_filter_base *fltr;

		head = &tbl[i];
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (!fltr->flags ||
			    test_bit(BNXT_FLTR_FW_DELETED, &fltr->state))
				continue;
			ids[j++] = fltr->sw_id;
			if (j == id_cnt)
				return j;
		}
	}
	return j;
}

/* Look up a filter by sw_id in hash table @tbl.  Caller must hold
 * rcu_read_lock().  Returns NULL if no live filter matches @id.
 */
static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
						      struct hlist_head tbl[],
						      int tbl_size, u32 id)
{
	int i;

	for (i = 0; i < tbl_size; i++) {
		struct hlist_head *head;
		struct bnxt_filter_base *fltr;

		head = &tbl[i];
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (fltr->flags && fltr->sw_id == id)
				return fltr;
		}
	}
	return NULL;
}

/* ETHTOOL_GRXCLSRLALL: fill @rule_locs with the IDs of all l2 and
 * ntuple classification rules (l2 rules first).
 */
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	u32 count;

	cmd->data = bp->ntp_fltr_count;
	rcu_read_lock();
	count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0,
					  cmd->rule_cnt);
	cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
						  BNXT_NTP_FLTR_HASH_SIZE,
						  rule_locs, count,
						  cmd->rule_cnt);
	rcu_read_unlock();

	return 0;
}

/* ETHTOOL_GRXCLSRULE: translate the stored l2 or ntuple filter at
 * fs->location back into an ethtool_rx_flow_spec for user space.
 */
static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_filter_base *fltr_base;
	struct bnxt_ntuple_filter *fltr;
	struct bnxt_flow_masks *fmasks;
	struct flow_keys *fkeys;
	int rc = -EINVAL;

	if (fs->location >= bp->max_fltr)
		return rc;

	rcu_read_lock();
	/* l2 (MAC/VLAN) rules are checked first, then ntuple rules */
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE,
					  fs->location);
	if (fltr_base) {
		struct ethhdr *h_ether = &fs->h_u.ether_spec;
		struct ethhdr *m_ether = &fs->m_u.ether_spec;
		struct bnxt_l2_filter *l2_fltr;
		struct bnxt_l2_key *l2_key;

		l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
		l2_key = &l2_fltr->l2_key;
		fs->flow_type = ETHER_FLOW;
		ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr);
		/* dest MAC is always fully matched */
		eth_broadcast_addr(m_ether->h_dest);
		if (l2_key->vlan) {
			struct ethtool_flow_ext *m_ext = &fs->m_ext;
			struct ethtool_flow_ext *h_ext = &fs->h_ext;

			fs->flow_type |= FLOW_EXT;
			m_ext->vlan_tci = htons(0xfff);
			h_ext->vlan_tci = htons(l2_key->vlan);
		}
		if (fltr_base->flags & BNXT_ACT_RING_DST)
			fs->ring_cookie = fltr_base->rxq;
		if (fltr_base->flags & BNXT_ACT_FUNC_DST)
			fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) <<
					  ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		rcu_read_unlock();
		return 0;
	}
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
					  BNXT_NTP_FLTR_HASH_SIZE,
					  fs->location);
	if (!fltr_base) {
		rcu_read_unlock();
		return rc;
	}
	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);

	fkeys = &fltr->fkeys;
	fmasks = &fltr->fmasks;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
			fs->flow_type = IP_USER_FLOW;
			fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
			fs->h_u.usr_ip4_spec.proto = BNXT_IP_PROTO_WILDCARD;
			fs->m_u.usr_ip4_spec.proto = 0;
		} else if (fkeys->basic.ip_proto == IPPROTO_ICMP) {
			fs->flow_type = IP_USER_FLOW;
			fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
			fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
			fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK;
		} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
			fs->flow_type = TCP_V4_FLOW;
		} else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
			fs->flow_type = UDP_V4_FLOW;
		} else {
			goto fltr_err;
		}

		fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
		fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src;
		fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
		fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst;
		if (fs->flow_type == TCP_V4_FLOW ||
		    fs->flow_type == UDP_V4_FLOW) {
			fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src;
			fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
		}
	} else {
		if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
			fs->flow_type = IPV6_USER_FLOW;
			fs->h_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_WILDCARD;
			fs->m_u.usr_ip6_spec.l4_proto = 0;
		} else if (fkeys->basic.ip_proto == IPPROTO_ICMPV6) {
			fs->flow_type = IPV6_USER_FLOW;
			fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
			fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK;
		} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
			fs->flow_type = TCP_V6_FLOW;
		} else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
			fs->flow_type = UDP_V6_FLOW;
		} else {
			goto fltr_err;
		}

		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
			fkeys->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] =
			fmasks->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
			fkeys->addrs.v6addrs.dst;
		*(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] =
			fmasks->addrs.v6addrs.dst;
		if (fs->flow_type == TCP_V6_FLOW ||
		    fs->flow_type == UDP_V6_FLOW) {
			fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src;
			fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst;
		}
	}

	/* Report the rule's action: drop, RSS context, or RX ring */
	if (fltr->base.flags & BNXT_ACT_DROP) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
		fs->flow_type |= FLOW_RSS;
		cmd->rss_context = fltr->base.fw_vnic_id;
	} else {
		fs->ring_cookie = fltr->base.rxq;
	}
	rc = 0;

fltr_err:
	rcu_read_unlock();

	return rc;
}

/* Map an ethtool RSS context index to its driver-private bnxt_rss_ctx,
 * or NULL if no such context exists.
 */
static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
							u32 index)
{
	struct ethtool_rxfh_context *ctx;

	ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
	if (!ctx)
		return NULL;
	return ethtool_rxfh_context_priv(ctx);
}

/* Allocate one DMA-coherent buffer holding both the VNIC RSS indirection
 * table and, immediately after it, the hash key.
 */
static int bnxt_alloc_vnic_rss_table(struct bnxt *bp,
				     struct bnxt_vnic_info *vnic)
{
	int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);

	vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
	vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
					     vnic->rss_table_size,
					     &vnic->rss_table_dma_addr,
					     GFP_KERNEL);
	if (!vnic->rss_table)
		return -ENOMEM;

	/* hash key lives right behind the (cache-aligned) table */
	vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
	vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	return 0;
}

/* Install an ETHER_FLOW (l2 MAC/VLAN) classification rule from ethtool. */
static int bnxt_add_l2_cls_rule(struct bnxt *bp,
				struct ethtool_rx_flow_spec *fs)
{
	u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	struct ethhdr *h_ether = &fs->h_u.ether_spec;
	struct ethhdr *m_ether = &fs->m_u.ether_spec;
	struct bnxt_l2_filter *fltr;
	struct bnxt_l2_key key;
	u16 vnic_id;
	u8 flags;
	int rc;

	if
	    (BNXT_CHIP_P5_PLUS(bp))
		return -EOPNOTSUPP;	/* l2 rules not supported on P5+ */

	/* only exact-match on the destination MAC is supported */
	if (!is_broadcast_ether_addr(m_ether->h_dest))
		return -EINVAL;
	ether_addr_copy(key.dst_mac_addr, h_ether->h_dest);
	key.vlan = 0;
	if (fs->flow_type & FLOW_EXT) {
		struct ethtool_flow_ext *m_ext = &fs->m_ext;
		struct ethtool_flow_ext *h_ext = &fs->h_ext;

		/* VLAN, if given, must be fully masked and non-zero */
		if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci)
			return -EINVAL;
		key.vlan = ntohs(h_ext->vlan_tci);
	}

	if (vf) {
		flags = BNXT_ACT_FUNC_DST;
		vnic_id = 0xffff;
		vf--;	/* ring cookie encodes VF index + 1 */
	} else {
		flags = BNXT_ACT_RING_DST;
		vnic_id = bp->vnic_info[ring + 1].fw_vnic_id;
	}
	fltr = bnxt_alloc_new_l2_filter(bp, &key, flags);
	if (IS_ERR(fltr))
		return PTR_ERR(fltr);

	fltr->base.fw_vnic_id = vnic_id;
	fltr->base.rxq = ring;
	fltr->base.vf_idx = vf;
	rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
	if (rc)
		bnxt_del_l2_filter(bp, fltr);
	else
		fs->location = fltr->base.sw_id;
	return rc;
}

/* Validate an IP_USER_FLOW spec: no l4/tos matching, IPv4 only, and the
 * protocol field either unmasked or fully masked with proto == ICMP.
 */
static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
					struct ethtool_usrip4_spec *ip_mask)
{
	u8 mproto = ip_mask->proto;
	u8 sproto = ip_spec->proto;

	if (ip_mask->l4_4_bytes || ip_mask->tos ||
	    ip_spec->ip_ver != ETH_RX_NFC_IP4 ||
	    (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMP)))
		return false;
	return true;
}

/* IPv6 counterpart of bnxt_verify_ntuple_ip4_flow() (ICMPv6 allowed). */
static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
					struct ethtool_usrip6_spec *ip_mask)
{
	u8 mproto = ip_mask->l4_proto;
	u8 sproto = ip_spec->l4_proto;

	if (ip_mask->l4_4_bytes || ip_mask->tclass ||
	    (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMPV6)))
		return false;
	return true;
}

/* Install an ntuple (TCP/UDP/IP v4/v6) classification rule from ethtool.
 * The new filter pins the default VNIC's l2 filter via its refcount; the
 * reference is dropped again on any error path.
 */
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
				    struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	struct bnxt_ntuple_filter *new_fltr, *fltr;
	u32 flow_type = fs->flow_type & 0xff;
	struct bnxt_l2_filter *l2_fltr;
	struct bnxt_flow_masks *fmasks;
	struct flow_keys *fkeys;
	u32 idx;
	int rc;

	if (!bp->vnic_info)
		return -EAGAIN;

	if (fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT))
		return -EOPNOTSUPP;

	/* VF destinations are only valid for l2 rules */
	if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
	    ethtool_get_flow_spec_ring_vf(fs->ring_cookie))
		return -EOPNOTSUPP;

	if (flow_type == IP_USER_FLOW) {
		if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec,
						 &fs->m_u.usr_ip4_spec))
			return -EOPNOTSUPP;
	}

	if (flow_type == IPV6_USER_FLOW) {
		if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec,
						 &fs->m_u.usr_ip6_spec))
			return -EOPNOTSUPP;
	}

	new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
	if (!new_fltr)
		return -ENOMEM;

	l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
	atomic_inc(&l2_fltr->refcnt);
	new_fltr->l2_fltr = l2_fltr;
	fmasks = &new_fltr->fmasks;
	fkeys = &new_fltr->fkeys;

	rc = -EOPNOTSUPP;
	switch (flow_type) {
	case IP_USER_FLOW: {
		struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec;
		struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec;

		/* unmasked proto means match any IP protocol */
		fkeys->basic.ip_proto = ip_mask->proto ? ip_spec->proto
						       : BNXT_IP_PROTO_WILDCARD;
		fkeys->basic.n_proto = htons(ETH_P_IP);
		fkeys->addrs.v4addrs.src = ip_spec->ip4src;
		fmasks->addrs.v4addrs.src = ip_mask->ip4src;
		fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
		fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
		break;
	}
	case TCP_V4_FLOW:
	case UDP_V4_FLOW: {
		struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
		struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec;

		fkeys->basic.ip_proto = IPPROTO_TCP;
		if (flow_type == UDP_V4_FLOW)
			fkeys->basic.ip_proto = IPPROTO_UDP;
		fkeys->basic.n_proto = htons(ETH_P_IP);
		fkeys->addrs.v4addrs.src = ip_spec->ip4src;
		fmasks->addrs.v4addrs.src = ip_mask->ip4src;
		fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
		fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
		fkeys->ports.src = ip_spec->psrc;
		fmasks->ports.src = ip_mask->psrc;
		fkeys->ports.dst = ip_spec->pdst;
		fmasks->ports.dst = ip_mask->pdst;
		break;
	}
	case IPV6_USER_FLOW: {
		struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec;
		struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec;

		fkeys->basic.ip_proto = ip_mask->l4_proto ? ip_spec->l4_proto
							  : BNXT_IP_PROTO_WILDCARD;
		fkeys->basic.n_proto = htons(ETH_P_IPV6);
		fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
		fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
		fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
		fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
		break;
	}
	case TCP_V6_FLOW:
	case UDP_V6_FLOW: {
		struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec;
		struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec;

		fkeys->basic.ip_proto = IPPROTO_TCP;
		if (flow_type == UDP_V6_FLOW)
			fkeys->basic.ip_proto = IPPROTO_UDP;
		fkeys->basic.n_proto = htons(ETH_P_IPV6);

		fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
		fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
		fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
		fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
		fkeys->ports.src = ip_spec->psrc;
		fmasks->ports.src = ip_mask->psrc;
		fkeys->ports.dst = ip_spec->pdst;
		fmasks->ports.dst = ip_mask->pdst;
		break;
	}
	default:
		rc = -EOPNOTSUPP;
		goto ntuple_err;
	}
	/* reject rules that match nothing at all */
	if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks)))
		goto ntuple_err;

	idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
	rcu_read_lock();
	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
	if (fltr) {
		rcu_read_unlock();
		rc = -EEXIST;
		goto ntuple_err;
	}
	rcu_read_unlock();

	new_fltr->base.flags = BNXT_ACT_NO_AGING;
	if (fs->flow_type & FLOW_RSS) {
		struct bnxt_rss_ctx *rss_ctx;

		new_fltr->base.fw_vnic_id = 0;
		new_fltr->base.flags |= BNXT_ACT_RSS_CTX;
		rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context);
		if (rss_ctx) {
			new_fltr->base.fw_vnic_id = rss_ctx->index;
		} else {
			rc = -EINVAL;
			goto ntuple_err;
		}
	}
	if (fs->ring_cookie == RX_CLS_FLOW_DISC)
		new_fltr->base.flags |= BNXT_ACT_DROP;
	else
		new_fltr->base.rxq = ethtool_get_flow_spec_ring(fs->ring_cookie);
	__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
	if (!rc) {
		rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr);
		if (rc) {
			/* bnxt_del_ntp_filter() drops the l2 ref and frees */
			bnxt_del_ntp_filter(bp, new_fltr);
			return rc;
		}
		fs->location = new_fltr->base.sw_id;
		return 0;
	}

ntuple_err:
	atomic_dec(&l2_fltr->refcnt);
	kfree(new_fltr);
	return rc;
}

/* ETHTOOL_SRXCLSRLINS: validate and dispatch a new classification rule
 * to the l2 or ntuple insert path.
 */
static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	u32 ring, flow_type;
	int rc;
	u8 vf;

	if (!netif_running(bp->dev))
		return -EAGAIN;
	if (!(bp->flags & BNXT_FLAG_RFS))
		return -EPERM;
	/* rule location is always firmware-assigned */
	if (fs->location != RX_CLS_LOC_ANY)
		return -EINVAL;

	flow_type = fs->flow_type;
	if ((flow_type == IP_USER_FLOW ||
	     flow_type == IPV6_USER_FLOW) &&
	    !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
		return -EOPNOTSUPP;
	if (flow_type & FLOW_MAC_EXT)
		return -EINVAL;
	flow_type &= ~FLOW_EXT;

	/* drop rules (except l2) skip the ring/VF validation below */
	if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
		return bnxt_add_ntuple_cls_rule(bp, cmd);

	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	if (BNXT_VF(bp) && vf)
		return -EINVAL;
	if (BNXT_PF(bp) && vf > bp->pf.active_vfs)
		return -EINVAL;
	if (!vf && ring >= bp->rx_nr_rings)
		return -EINVAL;

	if (flow_type == ETHER_FLOW)
		rc = bnxt_add_l2_cls_rule(bp, fs);
	else
		rc = bnxt_add_ntuple_cls_rule(bp, cmd);
	return rc;
}

/* ETHTOOL_SRXCLSRLDEL: delete the rule at fs->location. */
static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct
	       ethtool_rx_flow_spec *fs = &cmd->fs;
	struct bnxt_filter_base *fltr_base;
	struct bnxt_ntuple_filter *fltr;
	u32 id = fs->location;

	rcu_read_lock();
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE, id);
	if (fltr_base) {
		struct bnxt_l2_filter *l2_fltr;

		l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
		rcu_read_unlock();
		bnxt_hwrm_l2_filter_free(bp, l2_fltr);
		bnxt_del_l2_filter(bp, l2_fltr);
		return 0;
	}
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
					  BNXT_NTP_FLTR_HASH_SIZE, id);
	if (!fltr_base) {
		rcu_read_unlock();
		return -ENOENT;
	}

	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
	/* aging (aRFS) filters are owned by the driver, not user space */
	if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();
	bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr);
	bnxt_del_ntp_filter(bp, fltr);
	return 0;
}

/* Report RXH bits implied by the IPv4 2-tuple hash configuration. */
static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

/* Report RXH bits implied by the IPv6 2-tuple / flow-label hash config. */
static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
		return RXH_IP_SRC | RXH_IP_DST;
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL)
		return RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL;
	return 0;
}

/* ethtool get_rxfh_fields: translate the firmware RSS hash-type bitmap
 * into RXH_* flags for the queried flow type.
 */
static int bnxt_get_rxfh_fields(struct net_device *dev,
				struct ethtool_rxfh_fields *cmd)
{
	struct bnxt *bp = netdev_priv(dev);

	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case AH_ESP_V4_FLOW:
		if (bp->rss_hash_cfg &
		    (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
		     VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4))
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case AH_ESP_V6_FLOW:
		if (bp->rss_hash_cfg &
		    (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
		     VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6))
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}

#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)

/* ethtool set_rxfh_fields: map the requested RXH_* flags (only exact
 * 4-tuple, 2-tuple, 2-tuple + IPv6 flow label, or none are accepted)
 * onto the firmware hash-type bitmap, then restart the NIC if changed.
 */
static int bnxt_set_rxfh_fields(struct net_device *dev,
				const struct ethtool_rxfh_fields *cmd,
				struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	int tuple, rc = 0;
	u32 rss_hash_cfg;

	rss_hash_cfg = bp->rss_hash_cfg;

	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE ||
		 cmd->data == (RXH_2TUPLE | RXH_IP6_FL))
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	if (cmd->data & RXH_IP6_FL &&
	    !(bp->rss_cap & BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP))
		return -EINVAL;

	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (cmd->flow_type == AH_ESP_V4_FLOW) {
		if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) ||
				   !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP)))
			return -EINVAL;
		rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
				  VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4);
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
					VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4;
	} else if (cmd->flow_type == AH_ESP_V6_FLOW) {
		if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) ||
				   !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP)))
			return -EINVAL;
		rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
				  VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6);
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
					VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6;
	} else if (tuple == 4) {
		/* 4-tuple hashing is only valid for the flow types above */
		return -EINVAL;
	}

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
				  VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL);
		if (!tuple)
			break;
		if (cmd->data & RXH_IP6_FL)
			rss_hash_cfg |=
				VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL;
		else if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
		bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
	bp->rss_hash_cfg = rss_hash_cfg;
	/* half-close/open is enough to push the new hash config to FW */
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}

/* ethtool callback: number of RX rings currently configured. */
static u32 bnxt_get_rx_ring_count(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->rx_nr_rings;
}

/* ethtool get_rxnfc dispatcher for the classification-rule queries. */
static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->ntp_fltr_count;
		cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL;
		break;

	case ETHTOOL_GRXCLSRLALL:
		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
		break;

	case ETHTOOL_GRXCLSRULE:
		rc = bnxt_grxclsrule(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}

static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 1810 { 1811 struct bnxt *bp = netdev_priv(dev); 1812 int rc; 1813 1814 switch (cmd->cmd) { 1815 case ETHTOOL_SRXCLSRLINS: 1816 rc = bnxt_srxclsrlins(bp, cmd); 1817 break; 1818 1819 case ETHTOOL_SRXCLSRLDEL: 1820 rc = bnxt_srxclsrldel(bp, cmd); 1821 break; 1822 1823 default: 1824 rc = -EOPNOTSUPP; 1825 break; 1826 } 1827 return rc; 1828 } 1829 1830 u32 bnxt_get_rxfh_indir_size(struct net_device *dev) 1831 { 1832 struct bnxt *bp = netdev_priv(dev); 1833 1834 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1835 return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) * 1836 BNXT_RSS_TABLE_ENTRIES_P5; 1837 return HW_HASH_INDEX_SIZE; 1838 } 1839 1840 static u32 bnxt_get_rxfh_key_size(struct net_device *dev) 1841 { 1842 return HW_HASH_KEY_SIZE; 1843 } 1844 1845 static int bnxt_get_rxfh(struct net_device *dev, 1846 struct ethtool_rxfh_param *rxfh) 1847 { 1848 struct bnxt_rss_ctx *rss_ctx = NULL; 1849 struct bnxt *bp = netdev_priv(dev); 1850 u32 *indir_tbl = bp->rss_indir_tbl; 1851 struct bnxt_vnic_info *vnic; 1852 u32 i, tbl_size; 1853 1854 rxfh->hfunc = ETH_RSS_HASH_TOP; 1855 1856 if (!bp->vnic_info) 1857 return 0; 1858 1859 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 1860 if (rxfh->rss_context) { 1861 struct ethtool_rxfh_context *ctx; 1862 1863 ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context); 1864 if (!ctx) 1865 return -EINVAL; 1866 indir_tbl = ethtool_rxfh_context_indir(ctx); 1867 rss_ctx = ethtool_rxfh_context_priv(ctx); 1868 vnic = &rss_ctx->vnic; 1869 } 1870 1871 if (rxfh->indir && indir_tbl) { 1872 tbl_size = bnxt_get_rxfh_indir_size(dev); 1873 for (i = 0; i < tbl_size; i++) 1874 rxfh->indir[i] = indir_tbl[i]; 1875 } 1876 1877 if (rxfh->key && vnic->rss_hash_key) 1878 memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE); 1879 1880 return 0; 1881 } 1882 1883 static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx, 1884 struct bnxt_rss_ctx *rss_ctx, 1885 const struct 
ethtool_rxfh_param *rxfh) 1886 { 1887 if (rxfh->key) { 1888 if (rss_ctx) { 1889 memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key, 1890 HW_HASH_KEY_SIZE); 1891 } else { 1892 memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE); 1893 bp->rss_hash_key_updated = true; 1894 } 1895 } 1896 if (rxfh->indir) { 1897 u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 1898 u32 *indir_tbl = bp->rss_indir_tbl; 1899 1900 if (rss_ctx) 1901 indir_tbl = ethtool_rxfh_context_indir(ctx); 1902 for (i = 0; i < tbl_size; i++) 1903 indir_tbl[i] = rxfh->indir[i]; 1904 pad = bp->rss_indir_tbl_entries - tbl_size; 1905 if (pad) 1906 memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl)); 1907 } 1908 } 1909 1910 static int bnxt_rxfh_context_check(struct bnxt *bp, 1911 const struct ethtool_rxfh_param *rxfh, 1912 struct netlink_ext_ack *extack) 1913 { 1914 if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) { 1915 NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported"); 1916 return -EOPNOTSUPP; 1917 } 1918 1919 if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) { 1920 NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported"); 1921 return -EOPNOTSUPP; 1922 } 1923 1924 if (!netif_running(bp->dev)) { 1925 NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down"); 1926 return -EAGAIN; 1927 } 1928 1929 return 0; 1930 } 1931 1932 static int bnxt_create_rxfh_context(struct net_device *dev, 1933 struct ethtool_rxfh_context *ctx, 1934 const struct ethtool_rxfh_param *rxfh, 1935 struct netlink_ext_ack *extack) 1936 { 1937 struct bnxt *bp = netdev_priv(dev); 1938 struct bnxt_rss_ctx *rss_ctx; 1939 struct bnxt_vnic_info *vnic; 1940 int rc; 1941 1942 rc = bnxt_rxfh_context_check(bp, rxfh, extack); 1943 if (rc) 1944 return rc; 1945 1946 if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) { 1947 NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u", 1948 BNXT_MAX_ETH_RSS_CTX); 1949 return -EINVAL; 1950 } 1951 1952 if (!bnxt_rfs_capable(bp, true)) { 1953 NL_SET_ERR_MSG_MOD(extack, "Out 
hardware resources"); 1954 return -ENOMEM; 1955 } 1956 1957 rss_ctx = ethtool_rxfh_context_priv(ctx); 1958 1959 bp->num_rss_ctx++; 1960 1961 vnic = &rss_ctx->vnic; 1962 vnic->rss_ctx = ctx; 1963 vnic->flags |= BNXT_VNIC_RSSCTX_FLAG; 1964 vnic->vnic_id = BNXT_VNIC_ID_INVALID; 1965 rc = bnxt_alloc_vnic_rss_table(bp, vnic); 1966 if (rc) 1967 goto out; 1968 1969 /* Populate defaults in the context */ 1970 bnxt_set_dflt_rss_indir_tbl(bp, ctx); 1971 ctx->hfunc = ETH_RSS_HASH_TOP; 1972 memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE); 1973 memcpy(ethtool_rxfh_context_key(ctx), 1974 bp->rss_hash_key, HW_HASH_KEY_SIZE); 1975 1976 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings); 1977 if (rc) { 1978 NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC"); 1979 goto out; 1980 } 1981 1982 rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA); 1983 if (rc) { 1984 NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); 1985 goto out; 1986 } 1987 bnxt_modify_rss(bp, ctx, rss_ctx, rxfh); 1988 1989 rc = __bnxt_setup_vnic_p5(bp, vnic); 1990 if (rc) { 1991 NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); 1992 goto out; 1993 } 1994 1995 rss_ctx->index = rxfh->rss_context; 1996 return 0; 1997 out: 1998 bnxt_del_one_rss_ctx(bp, rss_ctx, true); 1999 return rc; 2000 } 2001 2002 static int bnxt_modify_rxfh_context(struct net_device *dev, 2003 struct ethtool_rxfh_context *ctx, 2004 const struct ethtool_rxfh_param *rxfh, 2005 struct netlink_ext_ack *extack) 2006 { 2007 struct bnxt *bp = netdev_priv(dev); 2008 struct bnxt_rss_ctx *rss_ctx; 2009 int rc; 2010 2011 rc = bnxt_rxfh_context_check(bp, rxfh, extack); 2012 if (rc) 2013 return rc; 2014 2015 rss_ctx = ethtool_rxfh_context_priv(ctx); 2016 2017 bnxt_modify_rss(bp, ctx, rss_ctx, rxfh); 2018 2019 return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic); 2020 } 2021 2022 static int bnxt_remove_rxfh_context(struct net_device *dev, 2023 struct ethtool_rxfh_context *ctx, 2024 u32 rss_context, 2025 struct netlink_ext_ack 
					*extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_rss_ctx *rss_ctx;

	rss_ctx = ethtool_rxfh_context_priv(ctx);

	bnxt_del_one_rss_ctx(bp, rss_ctx, true);
	return 0;
}

/* ethtool .set_rxfh: apply a new RSS key/indirection table/hash function.
 * Only the Toeplitz hash (ETH_RSS_HASH_TOP) is accepted.  When the
 * interface is running, the NIC is closed and re-opened so the new RSS
 * configuration takes effect.
 */
static int bnxt_set_rxfh(struct net_device *dev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	bnxt_modify_rss(bp, NULL, NULL, rxfh);

	/* restart the NIC so the updated RSS settings are programmed */
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}

/* ethtool .get_drvinfo: report driver name, firmware version string and
 * PCI bus location, plus the stats and selftest counts.
 */
static void bnxt_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct bnxt *bp = netdev_priv(dev);

	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
	strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	info->n_stats = bnxt_get_num_stats(bp);
	info->testinfo_len = bp->num_tests;
	/* TODO CHIMP_FW: eeprom dump details */
	info->eedump_len = 0;
	/* TODO CHIMP FW: reg dump details */
	info->regdump_len = 0;
}

/* ethtool .get_regs_len: the register dump is the PXP register block
 * followed by the PCIe statistics block.  Only available on the PF.
 */
static int bnxt_get_regs_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!BNXT_PF(bp))
		return -EOPNOTSUPP;

	return BNXT_PXP_REG_LEN + bp->pcie_stat_len;
}

/* Send HWRM_PCIE_QSTATS with a DMA slice sized for the v2 stats struct.
 * Returns a pointer to the DMA'ed stats on success or NULL on failure.
 * The buffer is carved out of the request, so it stays valid until the
 * caller drops the request.
 */
static void *
__bnxt_hwrm_pcie_qstats(struct bnxt *bp, struct hwrm_pcie_qstats_input *req)
{
	struct pcie_ctx_hw_stats_v2 *hw_pcie_stats;
	dma_addr_t hw_pcie_stats_addr;
	int rc;

	hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
					   &hw_pcie_stats_addr);
	if (!hw_pcie_stats)
		return NULL;

	req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
	req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
	rc = hwrm_req_send(bp, req);

	return rc ? NULL : hw_pcie_stats;
}

/* Byte ranges inside struct pcie_ctx_hw_stats_v2 that hold 32-bit
 * counters; everything outside these ranges is treated as 64-bit.
 */
#define BNXT_PCIE_32B_ENTRY(start, end)			\
	{ offsetof(struct pcie_ctx_hw_stats_v2, start),\
	  offsetof(struct pcie_ctx_hw_stats_v2, end) }

static const struct {
	u16 start;
	u16 end;
} bnxt_pcie_32b_entries[] = {
	BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
	BNXT_PCIE_32B_ENTRY(pcie_tl_credit_nph_histogram[0], unused_1),
	BNXT_PCIE_32B_ENTRY(pcie_rd_latency_histogram[0], unused_2),
};

/* ethtool .get_regs: dump the PXP registers (unless firmware restricts
 * register access) followed by the PCIe statistics.  Each little-endian
 * counter is converted to host order: 32-bit within the table ranges
 * above, 64-bit elsewhere.  regs->version records which stats layout
 * the firmware returned (1 = v1, 2 = truncated v2, 3 = full v2).
 */
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *_p)
{
	struct hwrm_pcie_qstats_output *resp;
	struct hwrm_pcie_qstats_input *req;
	struct bnxt *bp = netdev_priv(dev);
	u8 *src;

	regs->version = 0;
	if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED))
		bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);

	if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
		return;

	if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
		return;

	resp = hwrm_req_hold(bp, req);
	src = __bnxt_hwrm_pcie_qstats(bp, req);
	if (src) {
		u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
		int i, j, len;

		/* never copy more than the firmware actually returned */
		len = min(bp->pcie_stat_len, le16_to_cpu(resp->pcie_stat_size));
		if (len <= sizeof(struct pcie_ctx_hw_stats))
			regs->version = 1;
		else if (len < sizeof(struct pcie_ctx_hw_stats_v2))
			regs->version = 2;
		else
			regs->version = 3;

		for (i = 0, j = 0; i < len; ) {
			if (i >= bnxt_pcie_32b_entries[j].start &&
			    i <= bnxt_pcie_32b_entries[j].end) {
				u32 *dst32 = (u32 *)(dst + i);

				*dst32 = le32_to_cpu(*(__le32 *)(src + i));
				i += 4;
				/* advance to the next 32-bit range once this
				 * one is exhausted, staying within the table
				 */
				if (i > bnxt_pcie_32b_entries[j].end &&
				    j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
					j++;
			} else {
				u64 *dst64 = (u64 *)(dst + i);

				*dst64 = le64_to_cpu(*(__le64 *)(src + i));
				i += 8;
			}
		}
	}
	hwrm_req_drop(bp, req);
}

/* ethtool .get_wol: only magic-packet wake is ever reported */
static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnxt *bp = netdev_priv(dev);

	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
	if (bp->flags & BNXT_FLAG_WOL_CAP) {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
	}
}

/* ethtool .set_wol: allocate or free the firmware magic-packet WoL
 * filter so that bp->wol tracks the requested state.
 */
static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnxt *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (!(bp->flags & BNXT_FLAG_WOL_CAP))
			return -EINVAL;
		if (!bp->wol) {
			if (bnxt_hwrm_alloc_wol_fltr(bp))
				return -EBUSY;
			bp->wol = 1;
		}
	} else {
		if (bp->wol) {
			if (bnxt_hwrm_free_wol_fltr(bp))
				return -EBUSY;
			bp->wol = 0;
		}
	}
	return 0;
}

/* TODO: support 25GB, 40GB, 50GB with different cable type */
/* Translate a legacy firmware speed-support mask into ethtool link-mode
 * bits in @mode.  Only the speeds listed here are representable in the
 * legacy mask.
 */
void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds)
{
	linkmode_zero(mode);

	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
		linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode);
	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode);
}

/* Broad physical-media classes used to select the ethtool link mode for
 * a given speed/signalling combination.  Zero (UNKNOWN) doubles as the
 * "no entry" value in the lookup tables below.
 */
enum bnxt_media_type {
	BNXT_MEDIA_UNKNOWN = 0,
	BNXT_MEDIA_TP,
	BNXT_MEDIA_CR,
	BNXT_MEDIA_SR,
	BNXT_MEDIA_LR_ER_FR,
	BNXT_MEDIA_KR,
	BNXT_MEDIA_KX,
	BNXT_MEDIA_X,
	__BNXT_MEDIA_END,
};

/* Map a firmware PHY type to a media class; PHY types not listed here
 * default to BNXT_MEDIA_UNKNOWN (0).
 */
static const enum bnxt_media_type bnxt_phy_types[] = {
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
};

/* Classify the link's media.  The firmware media type (TP/DAC) takes
 * precedence; otherwise fall back to the PHY-type lookup table.
 */
static enum bnxt_media_type
bnxt_get_media(struct bnxt_link_info *link_info)
{
	switch (link_info->media_type) {
	case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP:
		return BNXT_MEDIA_TP;
	case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC:
		return BNXT_MEDIA_CR;
	default:
		if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types))
			return bnxt_phy_types[link_info->phy_type];
		return BNXT_MEDIA_UNKNOWN;
	}
}

/* Compact speed indices used to index the link-mode and speed-mask
 * tables; zero means "unknown speed".
 */
enum bnxt_link_speed_indices {
	BNXT_LINK_SPEED_UNKNOWN = 0,
	BNXT_LINK_SPEED_100MB_IDX,
	BNXT_LINK_SPEED_1GB_IDX,
	BNXT_LINK_SPEED_10GB_IDX,
	BNXT_LINK_SPEED_25GB_IDX,
	BNXT_LINK_SPEED_40GB_IDX,
	BNXT_LINK_SPEED_50GB_IDX,
	BNXT_LINK_SPEED_100GB_IDX,
	BNXT_LINK_SPEED_200GB_IDX,
	BNXT_LINK_SPEED_400GB_IDX,
	__BNXT_LINK_SPEED_END
};

/* Collapse a firmware link-speed value (NRZ and PAM4 variants alike)
 * into its speed index.
 */
static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed)
{
	switch (speed) {
	case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX;
	case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX;
	case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX;
	case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX;
	case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX;
	case BNXT_LINK_SPEED_50GB:
	case BNXT_LINK_SPEED_50GB_PAM4:
		return BNXT_LINK_SPEED_50GB_IDX;
	case BNXT_LINK_SPEED_100GB:
	case BNXT_LINK_SPEED_100GB_PAM4:
	case BNXT_LINK_SPEED_100GB_PAM4_112:
		return BNXT_LINK_SPEED_100GB_IDX;
	case BNXT_LINK_SPEED_200GB:
	case BNXT_LINK_SPEED_200GB_PAM4:
	case BNXT_LINK_SPEED_200GB_PAM4_112:
		return BNXT_LINK_SPEED_200GB_IDX;
	case BNXT_LINK_SPEED_400GB:
	case BNXT_LINK_SPEED_400GB_PAM4:
	case BNXT_LINK_SPEED_400GB_PAM4_112:
		return BNXT_LINK_SPEED_400GB_IDX;
	default: return BNXT_LINK_SPEED_UNKNOWN;
	}
}

/* speed x signalling-mode x media -> ethtool link-mode bit.  A zero
 * entry means "no such mode" (see the note in bnxt_get_link_mode()).
 */
static const enum ethtool_link_mode_bit_indices
bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
	[BNXT_LINK_SPEED_100MB_IDX] = {
		{
			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_1GB_IDX] = {
		{
			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			/* historically baseT, but DAC is more
correctly baseX */
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			[BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
			[BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_10GB_IDX] = {
		{
			[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
			[BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_25GB_IDX] = {
		{
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_40GB_IDX] = {
		{
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_50GB_IDX] = {
		[BNXT_SIG_MODE_NRZ] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_100GB_IDX] = {
		[BNXT_SIG_MODE_NRZ] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4_112] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_200GB_IDX] = {
		[BNXT_SIG_MODE_PAM4] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4_112] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
		},
	},
	[BNXT_LINK_SPEED_400GB_IDX] = {
		[BNXT_SIG_MODE_PAM4] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
		},
		[BNXT_SIG_MODE_PAM4_112] = {
			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
		},
	},
};

#define BNXT_LINK_MODE_UNKNOWN -1

/* Return the single ethtool link-mode bit describing the current (or,
 * in forced mode, the requested) link, or BNXT_LINK_MODE_UNKNOWN when
 * there is no link or no table entry for the combination.
 */
static enum ethtool_link_mode_bit_indices
bnxt_get_link_mode(struct bnxt_link_info *link_info)
{
	enum ethtool_link_mode_bit_indices link_mode;
	enum bnxt_link_speed_indices speed;
	enum bnxt_media_type media;
	u8 sig_mode;

	if (link_info->phy_link_status != BNXT_LINK_LINK)
		return BNXT_LINK_MODE_UNKNOWN;

	media = bnxt_get_media(link_info);
	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
		/* autoneg: use the negotiated speed and signalling mode */
		speed = bnxt_fw_speed_idx(link_info->link_speed);
		sig_mode = link_info->active_fec_sig_mode &
			   PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
	} else {
		/* forced: use the requested speed and signalling mode */
		speed = bnxt_fw_speed_idx(link_info->req_link_speed);
		sig_mode = link_info->req_signal_mode;
	}
	if (sig_mode >= BNXT_SIG_MODE_MAX)
		return BNXT_LINK_MODE_UNKNOWN;

	/* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux
	 * link mode, but since no such devices exist, the zeroes in the
	 * map can be conveniently used to represent unknown link modes.
2480 */ 2481 link_mode = bnxt_link_modes[speed][sig_mode][media]; 2482 if (!link_mode) 2483 return BNXT_LINK_MODE_UNKNOWN; 2484 2485 switch (link_mode) { 2486 case ETHTOOL_LINK_MODE_100baseT_Full_BIT: 2487 if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL) 2488 link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT; 2489 break; 2490 case ETHTOOL_LINK_MODE_1000baseT_Full_BIT: 2491 if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL) 2492 link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT; 2493 break; 2494 default: 2495 break; 2496 } 2497 2498 return link_mode; 2499 } 2500 2501 static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info, 2502 struct ethtool_link_ksettings *lk_ksettings) 2503 { 2504 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2505 2506 if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) { 2507 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 2508 lk_ksettings->link_modes.supported); 2509 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, 2510 lk_ksettings->link_modes.supported); 2511 } 2512 2513 if (link_info->support_auto_speeds || link_info->support_auto_speeds2 || 2514 link_info->support_pam4_auto_speeds) 2515 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 2516 lk_ksettings->link_modes.supported); 2517 2518 if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 2519 return; 2520 2521 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) 2522 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 2523 lk_ksettings->link_modes.advertising); 2524 if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1) 2525 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, 2526 lk_ksettings->link_modes.advertising); 2527 if (link_info->lp_pause & BNXT_LINK_PAUSE_RX) 2528 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 2529 lk_ksettings->link_modes.lp_advertising); 2530 if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1) 2531 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, 2532 lk_ksettings->link_modes.lp_advertising); 2533 } 2534 2535 static 
const u16 bnxt_nrz_speed_masks[] = {
	[BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB,
	[BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB,
	[BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB,
	[BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB,
	[BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB,
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};

/* Legacy PAM4 speed masks, indexed by speed index */
static const u16 bnxt_pam4_speed_masks[] = {
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};

/* speeds2 firmware interface: NRZ speed masks, indexed by speed index */
static const u16 bnxt_nrz_speeds2_masks[] = {
	[BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB,
	[BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB,
	[BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB,
	[BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB,
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB,
	[__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
};

/* speeds2 firmware interface: PAM4-56 speed masks */
static const u16 bnxt_pam4_speeds2_masks[] = {
	[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4,
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4,
	[BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4,
};

/* speeds2 firmware interface: PAM4-112 speed masks */
static const u16 bnxt_pam4_112_speeds2_masks[] = {
	[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112,
	[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112,
	[BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112,
};

/* Reverse-map a single firmware speed-mask bit back to its speed index,
 * selecting the mask table from the signalling mode and whether the
 * device uses the speeds2 firmware interface.
 */
static enum bnxt_link_speed_indices
bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk)
{
	const u16 *speeds;
	int idx, len;

	switch (sig_mode) {
	case BNXT_SIG_MODE_NRZ:
		if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
			speeds = bnxt_nrz_speeds2_masks;
			len = ARRAY_SIZE(bnxt_nrz_speeds2_masks);
		} else {
			speeds = bnxt_nrz_speed_masks;
			len = ARRAY_SIZE(bnxt_nrz_speed_masks);
		}
		break;
	case BNXT_SIG_MODE_PAM4:
		if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
			speeds = bnxt_pam4_speeds2_masks;
			len = ARRAY_SIZE(bnxt_pam4_speeds2_masks);
		} else {
			speeds = bnxt_pam4_speed_masks;
			len = ARRAY_SIZE(bnxt_pam4_speed_masks);
		}
		break;
	case BNXT_SIG_MODE_PAM4_112:
		/* PAM4-112 only exists in the speeds2 interface */
		speeds = bnxt_pam4_112_speeds2_masks;
		len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks);
		break;
	default:
		return BNXT_LINK_SPEED_UNKNOWN;
	}

	for (idx = 0; idx < len; idx++) {
		if (speeds[idx] == speed_msk)
			return idx;
	}

	return BNXT_LINK_SPEED_UNKNOWN;
}

#define BNXT_FW_SPEED_MSK_BITS 16

/* Convert one firmware speed mask (for a fixed signalling mode and a
 * known media class) into ethtool link-mode bits in @et_mask.
 */
static void
__bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
			  u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
{
	enum ethtool_link_mode_bit_indices link_mode;
	enum bnxt_link_speed_indices speed;
	u8 bit;

	for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) {
		speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit);
		if (!speed)
			continue;

		link_mode = bnxt_link_modes[speed][sig_mode][media];
		if (!link_mode)
			continue;

		linkmode_set_bit(link_mode, et_mask);
	}
}

/* As above, but when the media class is unknown, report the modes for
 * every media class.
 */
static void
bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
			u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
{
	if (media) {
		__bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
					  et_mask);
		return;
	}

	/* list speeds for all media if unknown */
	for (media = 1; media < __BNXT_MEDIA_END; media++)
		__bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
					  et_mask);
}

/* Fill the "supported" link-mode bits from the firmware's supported
 * speed masks.  With the speeds2 interface a single mask covers NRZ,
 * PAM4-56 and PAM4-112; otherwise separate legacy masks are used and
 * PAM4-112 is not available.
 */
static void
bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info,
				    enum bnxt_media_type media,
				    struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
	u16 phy_flags = bp->phy_flags;

	if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
		sp_nrz = link_info->support_speeds2;
		sp_pam4 = link_info->support_speeds2;
		sp_pam4_112 = link_info->support_speeds2;
	} else {
		sp_nrz = link_info->support_speeds;
		sp_pam4 = link_info->support_pam4_speeds;
	}
	bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
				lk_ksettings->link_modes.supported);
	bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
				lk_ksettings->link_modes.supported);
	bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
				phy_flags, lk_ksettings->link_modes.supported);
}

/* Fill the "advertising" link-mode bits from the driver's advertised
 * speed masks, using the same speeds2 vs legacy split as above.
 */
static void
bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info,
				enum bnxt_media_type media,
				struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
	u16 phy_flags = bp->phy_flags;

	sp_nrz = link_info->advertising;
	if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
		sp_pam4 = link_info->advertising;
		sp_pam4_112 = link_info->advertising;
	} else {
		sp_pam4 = link_info->advertising_pam4;
	}
	bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
				lk_ksettings->link_modes.advertising);
	bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
				lk_ksettings->link_modes.advertising);
	bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
				phy_flags, lk_ksettings->link_modes.advertising);
}

/* Fill the link-partner "lp_advertising" bits from the firmware's
 * link-partner speed masks (NRZ and PAM4 only).
 */
static void
bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info,
			       enum bnxt_media_type media,
			       struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 phy_flags = bp->phy_flags;

	bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media,
				BNXT_SIG_MODE_NRZ, phy_flags,
				lk_ksettings->link_modes.lp_advertising);
	bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media,
				BNXT_SIG_MODE_PAM4, phy_flags,
				lk_ksettings->link_modes.lp_advertising);
}

/* Toggle one firmware speed-mask bit in *speeds to follow whether the
 * corresponding ethtool @mode bit is requested in @et_mask.  *delta
 * records which fw bits were already changed so that the many-to-one
 * ethtool-mode -> fw-bit mapping flips each fw bit at most once; a
 * request for the currently-installed media always wins.
 */
static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
			      u16 speed_msk, const unsigned long *et_mask,
			      enum ethtool_link_mode_bit_indices mode)
{
	bool mode_desired = linkmode_test_bit(mode, et_mask);

	/* mode == 0 means "no table entry", not 10baseT_Half */
	if (!mode)
		return;

	/* enabled speeds for installed media should override */
	if (installed_media && mode_desired) {
		*speeds |= speed_msk;
		*delta |= speed_msk;
		return;
	}

	/* many to one mapping, only allow one change per fw_speed bit */
	if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) {
		*speeds ^= speed_msk;
		*delta |= speed_msk;
	}
}

/* Convert the user's advertised ethtool link-mode mask into the
 * driver's firmware advertising masks (NRZ / PAM4 / PAM4-112),
 * accepting modes for any legal media class.
 */
static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info,
				    const unsigned long *et_mask)
{
	struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
	u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks;
	enum bnxt_media_type media = bnxt_get_media(link_info);
	u16 *adv, *adv_pam4, *adv_pam4_112 = NULL;
	u32 delta_pam4_112 = 0;
	u32 delta_pam4 = 0;
	u32 delta_nrz = 0;
	int i,
m;

	adv = &link_info->advertising;
	if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
		/* speeds2: one advertising word holds all signalling modes */
		adv_pam4 = &link_info->advertising;
		adv_pam4_112 = &link_info->advertising;
		sp_msks = bnxt_nrz_speeds2_masks;
		sp_pam4_msks = bnxt_pam4_speeds2_masks;
		sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks;
	} else {
		adv_pam4 = &link_info->advertising_pam4;
		sp_msks = bnxt_nrz_speed_masks;
		sp_pam4_msks = bnxt_pam4_speed_masks;
	}
	for (i = 1; i < __BNXT_LINK_SPEED_END; i++) {
		/* accept any legal media from user */
		for (m = 1; m < __BNXT_MEDIA_END; m++) {
			bnxt_update_speed(&delta_nrz, m == media,
					  adv, sp_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]);
			bnxt_update_speed(&delta_pam4, m == media,
					  adv_pam4, sp_pam4_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]);
			if (!adv_pam4_112)
				continue;

			bnxt_update_speed(&delta_pam4_112, m == media,
					  adv_pam4_112, sp_pam4_112_msks[i], et_mask,
					  bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]);
		}
	}
}

/* Translate the driver's FEC configuration into advertised ethtool FEC
 * link-mode bits.  No-FEC or no FEC autoneg advertises FEC_NONE.
 */
static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
					      struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fec_cfg = link_info->fec_cfg;

	if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 lk_ksettings->link_modes.advertising);
		return;
	}
	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 lk_ksettings->link_modes.advertising);
	if (fec_cfg & BNXT_FEC_ENC_RS)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 lk_ksettings->link_modes.advertising);
	if (fec_cfg & BNXT_FEC_ENC_LLRS)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 lk_ksettings->link_modes.advertising);
}

/* Translate the FEC capability bits into supported ethtool FEC
 * link-mode bits.
 */
static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
					   struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fec_cfg = link_info->fec_cfg;

	if (fec_cfg & BNXT_FEC_NONE) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 lk_ksettings->link_modes.supported);
		return;
	}
	if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 lk_ksettings->link_modes.supported);
	if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 lk_ksettings->link_modes.supported);
	if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 lk_ksettings->link_modes.supported);
}

/* Convert a firmware link-speed value to an ethtool SPEED_* value.
 * All PAM4 variants of a speed map to the same ethtool speed.
 */
u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
{
	switch (fw_link_speed) {
	case BNXT_LINK_SPEED_100MB:
		return SPEED_100;
	case BNXT_LINK_SPEED_1GB:
		return SPEED_1000;
	case BNXT_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case BNXT_LINK_SPEED_10GB:
		return SPEED_10000;
	case BNXT_LINK_SPEED_20GB:
		return SPEED_20000;
	case BNXT_LINK_SPEED_25GB:
		return SPEED_25000;
	case BNXT_LINK_SPEED_40GB:
		return SPEED_40000;
	case BNXT_LINK_SPEED_50GB:
	case BNXT_LINK_SPEED_50GB_PAM4:
		return SPEED_50000;
	case BNXT_LINK_SPEED_100GB:
	case BNXT_LINK_SPEED_100GB_PAM4:
	case BNXT_LINK_SPEED_100GB_PAM4_112:
		return SPEED_100000;
	case BNXT_LINK_SPEED_200GB:
	case BNXT_LINK_SPEED_200GB_PAM4:
	case BNXT_LINK_SPEED_200GB_PAM4_112:
		return SPEED_200000;
	case BNXT_LINK_SPEED_400GB:
	case BNXT_LINK_SPEED_400GB_PAM4:
	case BNXT_LINK_SPEED_400GB_PAM4_112:
		return SPEED_400000;
	default:
		return SPEED_UNKNOWN;
	}
}

/* Fill base speed/duplex/lanes when no single link mode was identified:
 * use the live link values when the link is up, or the forced request
 * values when autoneg is off.
 */
static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
				    struct bnxt_link_info *link_info)
{
	struct ethtool_link_settings *base = &lk_ksettings->base;

	if (link_info->link_state == BNXT_LINK_STATE_UP) {
		base->speed =
bnxt_fw_to_ethtool_speed(link_info->link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
		lk_ksettings->lanes = link_info->active_lanes;
	} else if (!link_info->autoneg) {
		base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
	}
}

/* ethtool .get_link_ksettings: report supported/advertised/link-partner
 * modes, FEC, autoneg state, speed/duplex and port type, all under
 * bp->link_lock to keep the snapshot consistent.
 */
static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct ethtool_link_settings *base = &lk_ksettings->base;
	enum ethtool_link_mode_bit_indices link_mode;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	enum bnxt_media_type media;

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising);
	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	base->duplex = DUPLEX_UNKNOWN;
	base->speed = SPEED_UNKNOWN;
	link_info = &bp->link_info;

	mutex_lock(&bp->link_lock);
	bnxt_get_ethtool_modes(link_info, lk_ksettings);
	media = bnxt_get_media(link_info);
	bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings);
	bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
	link_mode = bnxt_get_link_mode(link_info);
	if (link_mode != BNXT_LINK_MODE_UNKNOWN)
		ethtool_params_from_link_mode(lk_ksettings, link_mode);
	else
		bnxt_get_default_speeds(lk_ksettings, link_info);

	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				 lk_ksettings->link_modes.advertising);
		base->autoneg = AUTONEG_ENABLE;
		bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings);
		/* link-partner modes are only meaningful with link up */
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			bnxt_get_all_ethtool_lp_speeds(link_info, media,
						       lk_ksettings);
	} else {
		base->autoneg = AUTONEG_DISABLE;
	}

	/* derive the reported port type from the media class */
	base->port = PORT_NONE;
	if (media == BNXT_MEDIA_TP) {
		base->port = PORT_TP;
		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
				 lk_ksettings->link_modes.advertising);
	} else if (media == BNXT_MEDIA_KR) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
				 lk_ksettings->link_modes.advertising);
	} else {
		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
				 lk_ksettings->link_modes.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
				 lk_ksettings->link_modes.advertising);

		if (media == BNXT_MEDIA_CR)
			base->port = PORT_DA;
		else
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;
	mutex_unlock(&bp->link_lock);

	return 0;
}

/* Select a forced firmware speed (and signalling mode / lane count) for
 * the requested ethtool speed, preferring NRZ, then PAM4-56, then
 * PAM4-112 variants that match the supported masks and the requested
 * lane count.  Returns -EINVAL if unsupported, -EALREADY if the forced
 * speed is already configured.
 */
static int
bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 support_pam4_spds = link_info->support_pam4_speeds;
	u16 support_spds2 = link_info->support_speeds2;
	u16 support_spds = link_info->support_speeds;
	u8 sig_mode = BNXT_SIG_MODE_NRZ;
	u32 lanes_needed = 1;
	u16 fw_speed = 0;

	switch (ethtool_speed) {
	case SPEED_100:
		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
		break;
	case SPEED_1000:
		if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) ||
		    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB))
			fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
		break;
	case SPEED_2500:
		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
			fw_speed =
PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB; 2979 break; 2980 case SPEED_10000: 2981 if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) || 2982 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB)) 2983 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; 2984 break; 2985 case SPEED_20000: 2986 if (support_spds & BNXT_LINK_SPEED_MSK_20GB) { 2987 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB; 2988 lanes_needed = 2; 2989 } 2990 break; 2991 case SPEED_25000: 2992 if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) || 2993 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB)) 2994 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; 2995 break; 2996 case SPEED_40000: 2997 if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) || 2998 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) { 2999 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; 3000 lanes_needed = 4; 3001 } 3002 break; 3003 case SPEED_50000: 3004 if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) || 3005 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) && 3006 lanes != 1) { 3007 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; 3008 lanes_needed = 2; 3009 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) { 3010 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB; 3011 sig_mode = BNXT_SIG_MODE_PAM4; 3012 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) { 3013 fw_speed = BNXT_LINK_SPEED_50GB_PAM4; 3014 sig_mode = BNXT_SIG_MODE_PAM4; 3015 } 3016 break; 3017 case SPEED_100000: 3018 if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) || 3019 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) && 3020 lanes != 2 && lanes != 1) { 3021 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB; 3022 lanes_needed = 4; 3023 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) { 3024 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB; 3025 sig_mode = BNXT_SIG_MODE_PAM4; 3026 lanes_needed = 2; 3027 } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) && 3028 lanes != 1) { 3029 fw_speed = BNXT_LINK_SPEED_100GB_PAM4; 3030 sig_mode = 
BNXT_SIG_MODE_PAM4; 3031 lanes_needed = 2; 3032 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) { 3033 fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112; 3034 sig_mode = BNXT_SIG_MODE_PAM4_112; 3035 } 3036 break; 3037 case SPEED_200000: 3038 if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) { 3039 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB; 3040 sig_mode = BNXT_SIG_MODE_PAM4; 3041 lanes_needed = 4; 3042 } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) && 3043 lanes != 2) { 3044 fw_speed = BNXT_LINK_SPEED_200GB_PAM4; 3045 sig_mode = BNXT_SIG_MODE_PAM4; 3046 lanes_needed = 4; 3047 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) { 3048 fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112; 3049 sig_mode = BNXT_SIG_MODE_PAM4_112; 3050 lanes_needed = 2; 3051 } 3052 break; 3053 case SPEED_400000: 3054 if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) && 3055 lanes != 4) { 3056 fw_speed = BNXT_LINK_SPEED_400GB_PAM4; 3057 sig_mode = BNXT_SIG_MODE_PAM4; 3058 lanes_needed = 8; 3059 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) { 3060 fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112; 3061 sig_mode = BNXT_SIG_MODE_PAM4_112; 3062 lanes_needed = 4; 3063 } 3064 break; 3065 } 3066 3067 if (!fw_speed) { 3068 netdev_err(dev, "unsupported speed!\n"); 3069 return -EINVAL; 3070 } 3071 3072 if (lanes && lanes != lanes_needed) { 3073 netdev_err(dev, "unsupported number of lanes for speed\n"); 3074 return -EINVAL; 3075 } 3076 3077 if (link_info->req_link_speed == fw_speed && 3078 link_info->req_signal_mode == sig_mode && 3079 link_info->autoneg == 0) 3080 return -EALREADY; 3081 3082 link_info->req_link_speed = fw_speed; 3083 link_info->req_signal_mode = sig_mode; 3084 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; 3085 link_info->autoneg = 0; 3086 link_info->advertising = 0; 3087 link_info->advertising_pam4 = 0; 3088 3089 return 0; 3090 } 3091 3092 u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode) 3093 { 3094 u16 
fw_speed_mask = 0;

	/* Only a subset of modes map to the legacy fw mask; half-duplex
	 * modes fold into the same bit as full duplex here.
	 */
	if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) ||
	    linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode))
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) ||
	    linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode))
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode))
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;

	return fw_speed_mask;
}

/* ethtool ->set_link_ksettings: apply autoneg advertisement or a forced
 * speed to the PHY.  Serialized against link updates by bp->link_lock.
 * Settings are pushed to firmware only if the interface is running.
 */
static int bnxt_set_link_ksettings(struct net_device *dev,
				   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u32 speed, lanes = 0;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (base->autoneg == AUTONEG_ENABLE) {
		bnxt_set_ethtool_speeds(link_info,
					lk_ksettings->link_modes.advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		/* If the user advertised nothing usable, advertise
		 * everything the hardware supports.
		 */
		if (!link_info->advertising && !link_info->advertising_pam4) {
			link_info->advertising = link_info->support_auto_speeds;
			link_info->advertising_pam4 =
				link_info->support_pam4_auto_speeds;
		}
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
			set_pause = true;
	} else {
		u8 phy_type = link_info->phy_type;

		/* BASE-T PHYs cannot be forced to a fixed speed */
		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type ==
		    PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		lanes = lk_ksettings->lanes;
		rc = bnxt_force_link_speed(dev, speed, lanes);
		if (rc) {
			/* -EALREADY: requested setting already in effect */
			if (rc == -EALREADY)
				rc = 0;
			goto set_setting_exit;
		}
	}

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}

/* ethtool ->get_fecparam: report configured and active FEC modes from
 * the cached link_info state.
 */
static int bnxt_get_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fec)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	u8 active_fec;
	u16 fec_cfg;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	active_fec = link_info->active_fec_sig_mode &
		     PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
	if (fec_cfg & BNXT_FEC_NONE) {
		/* Device does not support FEC at all */
		fec->fec = ETHTOOL_FEC_NONE;
		fec->active_fec = ETHTOOL_FEC_NONE;
		return 0;
	}
	if (fec_cfg & BNXT_FEC_AUTONEG)
		fec->fec |= ETHTOOL_FEC_AUTO;
	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
		fec->fec |= ETHTOOL_FEC_BASER;
	if (fec_cfg & BNXT_FEC_ENC_RS)
		fec->fec |= ETHTOOL_FEC_RS;
	if (fec_cfg & BNXT_FEC_ENC_LLRS)
		fec->fec |= ETHTOOL_FEC_LLRS;

	/* Map the firmware's active FEC encoding to the ethtool bits */
	switch (active_fec) {
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_BASER;
		break;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_RS;
		break;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
	case
PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_LLRS;
		break;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_OFF;
		break;
	}
	return 0;
}

/* One histogram bucket per codeword-error count 0..15, matching the 16
 * accumulated_codewords_err_s[] counters returned by HWRM_PORT_PHY_FDRSTAT.
 * NOTE(review): the trailing { 0, 0 } entry looks like a terminator for
 * the ranges table — confirm against the ethtool_fec_hist consumer.
 */
static const struct ethtool_fec_hist_range bnxt_fec_ranges[] = {
	{ 0, 0},
	{ 1, 1},
	{ 2, 2},
	{ 3, 3},
	{ 4, 4},
	{ 5, 5},
	{ 6, 6},
	{ 7, 7},
	{ 8, 8},
	{ 9, 9},
	{ 10, 10},
	{ 11, 11},
	{ 12, 12},
	{ 13, 13},
	{ 14, 14},
	{ 15, 15},
	{ 0, 0},
};

/* Query per-bucket FEC codeword-error counters from firmware and fill
 * in the ethtool FEC histogram.  No-op (hist left untouched) when the
 * device lacks FDR stats support or the query fails.
 */
static void bnxt_hwrm_port_phy_fdrstat(struct bnxt *bp,
				       struct ethtool_fec_hist *hist)
{
	struct ethtool_fec_hist_value *values = hist->values;
	struct hwrm_port_phy_fdrstat_output *resp;
	struct hwrm_port_phy_fdrstat_input *req;
	int rc, i;

	if (!(bp->phy_flags & BNXT_PHY_FL_FDRSTATS))
		return;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_FDRSTAT);
	if (rc)
		return;

	req->port_id = cpu_to_le16(bp->pf.port_id);
	req->ops = cpu_to_le16(PORT_PHY_FDRSTAT_REQ_OPS_COUNTER);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		hist->ranges = bnxt_fec_ranges;
		/* Copy the 16 per-error-count accumulators */
		for (i = 0; i <= 15; i++) {
			__le64 sum = resp->accumulated_codewords_err_s[i];

			values[i].sum = le64_to_cpu(sum);
		}
	}
	hwrm_req_drop(bp, req);
}

/* ethtool ->get_fec_stats: report FEC counters from the extended port
 * stats (PF only), plus the FDR histogram when available.
 */
static void bnxt_get_fec_stats(struct net_device *dev,
			       struct ethtool_fec_stats *fec_stats,
			       struct ethtool_fec_hist *hist)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return;

	rx = bp->rx_port_stats_ext.sw_stats;
	fec_stats->corrected_bits.total =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));

	/* Block counters only exist beyond the legacy stats area */
	if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY)
		return;

	fec_stats->corrected_blocks.total =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
	fec_stats->uncorrectable_blocks.total =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
	bnxt_hwrm_port_phy_fdrstat(bp, hist);
}

/* Translate a single forced ethtool FEC mode into the PORT_PHY_CFG
 * flags value (autoneg disabled).  Only one mode is honored, in
 * BASER > RS > LLRS priority order.
 */
static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
					 u32 fec)
{
	u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;

	if (fec & ETHTOOL_FEC_BASER)
		fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
	else if (fec & ETHTOOL_FEC_RS)
		fw_fec |= BNXT_FEC_RS_ON(link_info);
	else if (fec & ETHTOOL_FEC_LLRS)
		fw_fec |= BNXT_FEC_LLRS_ON;
	return fw_fec;
}

/* ethtool ->set_fecparam: validate the requested FEC modes against the
 * device capabilities and push the new configuration to firmware with a
 * PHY reset.  On success, re-reads the link state.
 */
static int bnxt_set_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fecparam)
{
	struct hwrm_port_phy_cfg_input *req;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	u32 new_cfg, fec = fecparam->fec;
	u16 fec_cfg;
	int rc;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	if (fec_cfg & BNXT_FEC_NONE)
		return -EOPNOTSUPP;

	if (fec & ETHTOOL_FEC_OFF) {
		/* Turn off all FEC and FEC autoneg */
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
			  BNXT_FEC_ALL_OFF(link_info);
		goto apply_fec;
	}
	/* Reject any requested mode the hardware cannot do */
	if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
	    ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
	    ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
	    ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
		return -EINVAL;

	if (fec & ETHTOOL_FEC_AUTO) {
		/* FEC autoneg requires link autoneg to be on */
		if (!link_info->autoneg)
			return -EINVAL;
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
	} else {
		new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
	}

apply_fec:
	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;
	req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc =
hwrm_req_send(bp, req);
	/* update current settings */
	if (!rc) {
		mutex_lock(&bp->link_lock);
		bnxt_update_link(bp, false);
		mutex_unlock(&bp->link_lock);
	}
	return rc;
}

/* ethtool ->get_pauseparam: report the requested flow-control settings
 * from cached link state.  PF only; VFs report nothing.
 */
static void bnxt_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (BNXT_VF(bp))
		return;
	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
	epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
	epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
}

/* ethtool ->get_pause_stats: report rx/tx pause-frame counters from
 * the port stats block (PF with port stats only).
 */
static void bnxt_get_pause_stats(struct net_device *dev,
				 struct ethtool_pause_stats *epstat)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
	epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
}

/* ethtool ->set_pauseparam: configure flow control, either autonegotiated
 * (requires speed autoneg) or forced.  Applies immediately if running.
 */
static int bnxt_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	int rc = 0;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (epause->autoneg) {
		/* Pause autoneg is only meaningful with speed autoneg */
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
			rc = -EINVAL;
			goto pause_exit;
		}

		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	} else {
		/* when transition from auto pause to force pause,
		 * force a link change
		 */
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->force_link_chng = true;
		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	}
	if (epause->rx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;

	if (epause->tx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_pause(bp);

pause_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}

/* ethtool ->get_link: report current cached link state */
static u32 bnxt_get_link(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	/* TODO: handle MF, VF, driver close case */
	return BNXT_LINK_IS_UP(bp);
}

/* ethtool ->get_link_ext_state: map the firmware's link-down reason bits
 * to ethtool extended link states.  Returns -ENODATA when the link is up
 * or no known reason bit is set.
 */
static int bnxt_get_link_ext_state(struct net_device *dev,
				   struct ethtool_link_ext_state_info *info)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 reason;

	if (BNXT_LINK_IS_UP(bp))
		return -ENODATA;

	reason = bp->link_info.link_down_reason;
	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF) {
		info->link_ext_state = ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE;
		info->link_training = ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT;
		return 0;
	}
	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED) {
		info->link_ext_state = ETHTOOL_LINK_EXT_STATE_NO_CABLE;
		return 0;
	}
	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION) {
		info->link_ext_state = ETHTOOL_LINK_EXT_STATE_OTP_SPEED_VIOLATION;
		return 0;
	}
	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT) {
		info->link_ext_state = ETHTOOL_LINK_EXT_STATE_MODULE;
		return 0;
	}
	if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST) {
		info->link_ext_state = ETHTOOL_LINK_EXT_STATE_BMC_REQUEST_DOWN;
		return 0;
	}
	return -ENODATA;
}

/* Query NVM device info from firmware into *nvm_dev_info (PF only) */
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
			       struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
{
	struct hwrm_nvm_get_dev_info_output *resp;
struct hwrm_nvm_get_dev_info_input *req;
	int rc;

	if (BNXT_VF(bp))
		return -EOPNOTSUPP;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		memcpy(nvm_dev_info, resp, sizeof(*resp));
	hwrm_req_drop(bp, req);
	return rc;
}

/* Log the standard message for firmware rejecting a flash/reset op
 * because the function lacks admin privilege (-EACCES).
 */
static void bnxt_print_admin_err(struct bnxt *bp)
{
	netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}

int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
			 u16 ext, u16 *index, u32 *item_length,
			 u32 *data_length);

/* Write (or, with data == NULL, create/resize) an NVRAM directory entry
 * via HWRM_NVM_WRITE.  Payload, if any, is staged in a DMA slice of the
 * request.  Returns the HWRM status; logs on -EACCES.
 */
int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
		     u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
		     u32 dir_item_len, const u8 *data,
		     size_t data_len)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_write_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
	if (rc)
		return rc;

	if (data_len && data) {
		dma_addr_t dma_handle;
		u8 *kmem;

		kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
		if (!kmem) {
			hwrm_req_drop(bp, req);
			return -ENOMEM;
		}

		req->dir_data_length = cpu_to_le32(data_len);

		memcpy(kmem, data, data_len);
		req->host_src_addr = cpu_to_le64(dma_handle);
	}

	/* NVM writes can be slow; use the maximum command timeout */
	hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
	req->dir_type = cpu_to_le16(dir_type);
	req->dir_ordinal = cpu_to_le16(dir_ordinal);
	req->dir_ext = cpu_to_le16(dir_ext);
	req->dir_attr = cpu_to_le16(dir_attr);
	req->dir_item_length = cpu_to_le32(dir_item_len);
	rc = hwrm_req_send(bp, req);

	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}

/* Issue HWRM_FW_RESET for the given embedded processor.  AP resets are
 * sent silently (expected to fail on some devices); others log -EACCES.
 */
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
			     u8 self_reset, u8 flags)
{
	struct bnxt *bp =
		netdev_priv(dev);
	struct hwrm_fw_reset_input *req;
	int rc;

	if (!bnxt_hwrm_reset_permitted(bp)) {
		netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver");
		return -EPERM;
	}

	rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
	if (rc)
		return rc;

	req->embedded_proc_type = proc_type;
	req->selfrst_status = self_reset;
	req->flags = flags;

	if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
		rc = hwrm_req_send_silent(bp, req);
	} else {
		rc = hwrm_req_send(bp, req);
		if (rc == -EACCES)
			bnxt_print_admin_err(bp);
	}
	return rc;
}

/* After flashing a firmware image, reset the embedded processor that
 * runs it.  The processor and self-reset policy are derived from the
 * NVM directory type that was just written.
 */
static int bnxt_firmware_reset(struct net_device *dev,
			       enum bnxt_nvm_directory_type dir_type)
{
	u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
	u8 proc_type, flags = 0;

	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
	/* (e.g. when firmware isn't already running) */
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		/* Self-reset APE upon next PCIe reset: */
		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
		break;
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
		break;
	default:
		return -EINVAL;
	}

	return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
}

static int
bnxt_firmware_reset_chip(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 flags = 0;

	/* Use a graceful reset when firmware supports hot reset */
	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
		flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;

	return bnxt_hwrm_firmware_reset(dev,
					FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
					FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
					flags);
}

/* Reset only the application processor, no self-reset */
static int bnxt_firmware_reset_ap(struct net_device *dev)
{
	return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
					FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
					0);
}

/* Validate a bnxt_fw_header-prefixed firmware image (signature, code
 * type, device family, trailing CRC32), flash it to the given NVM
 * directory type, and reset the corresponding embedded processor.
 */
static int bnxt_flash_firmware(struct net_device *dev,
			       u16 dir_type,
			       const u8 *fw_data,
			       size_t fw_size)
{
	int rc = 0;
	u16 code_type;
	u32 stored_crc;
	u32 calculated_crc;
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	/* Expected code type in the header, derived from directory type */
	switch (dir_type) {
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_CHIMP_PATCH:
		code_type = CODE_CHIMP_PATCH;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	case BNX_DIR_TYPE_APE_PATCH:
		code_type = CODE_APE_PATCH;
		break;
	case BNX_DIR_TYPE_KONG_FW:
		code_type = CODE_KONG_FW;
		break;
	case BNX_DIR_TYPE_KONG_PATCH:
		code_type = CODE_KONG_PATCH;
		break;
	case BNX_DIR_TYPE_BONO_FW:
		code_type = CODE_BONO_FW;
		break;
	case BNX_DIR_TYPE_BONO_PATCH:
		code_type = CODE_BONO_PATCH;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
		return -EINVAL;
	}
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
		return -EINVAL;
	}
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
		return -EINVAL;
	}
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
		return -EINVAL;
	}
	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);
	if (rc == 0)	/* Firmware update successful */
		rc = bnxt_firmware_reset(dev, dir_type);

	return rc;
}

/* Validate a microcode image carrying a bnxt_ucode_trailer (signature,
 * directory type, trailer length, trailing CRC32) and flash it.  Unlike
 * bnxt_flash_firmware(), no processor reset is issued afterwards.
 */
static int bnxt_flash_microcode(struct net_device *dev,
				u16 dir_type,
				const u8 *fw_data,
				size_t fw_size)
{
	struct bnxt_ucode_trailer *trailer;
	u32 calculated_crc;
	u32 stored_crc;
	int rc = 0;

	if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	/* Metadata lives in a trailer at the end of the image */
	trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
						sizeof(*trailer)));
	if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
		netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
			   le32_to_cpu(trailer->sig));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->dir_type) != dir_type) {
		netdev_err(dev, "Expected microcode type: %d, read: %d\n",
			   dir_type, le16_to_cpu(trailer->dir_type));
		return
			-EINVAL;
	}
	if (le16_to_cpu(trailer->trailer_length) <
	    sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode trailer length: %d\n",
			   le16_to_cpu(trailer->trailer_length));
		return -EINVAL;
	}

	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev,
			   "CRC32 (%08lX) does not match calculated: %08lX\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);

	return rc;
}

/* Directory types whose images carry a bnxt_fw_header (APE binary
 * format) and need validation by bnxt_flash_firmware().
 */
static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		return true;
	}

	return false;
}

/* Directory types whose images are microcode with a trailer, handled
 * by bnxt_flash_microcode().
 */
static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_AVS:
	case BNX_DIR_TYPE_EXP_ROM_MBA:
	case BNX_DIR_TYPE_PCIE:
	case BNX_DIR_TYPE_TSCF_UCODE:
	case BNX_DIR_TYPE_EXT_PHY:
	case BNX_DIR_TYPE_CCM:
	case BNX_DIR_TYPE_ISCSI_BOOT:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
		return true;
	}

	return false;
}

/* True for any directory type that contains executable code */
static bool bnxt_dir_type_is_executable(u16 dir_type)
{
	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
	       bnxt_dir_type_is_other_exec_format(dir_type);
}

/* Load a firmware file via request_firmware() and flash it using the
 * validator appropriate for its directory type.
 */
static int bnxt_flash_firmware_from_file(struct net_device *dev,
					 u16
dir_type,
					 const char *filename)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "Error %d requesting firmware file: %s\n",
			   rc, filename);
		return rc;
	}
	/* Dispatch on image format: APE binary, microcode, or raw data */
	if (bnxt_dir_type_is_ape_bin_format(dir_type))
		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
	else if (bnxt_dir_type_is_other_exec_format(dir_type))
		rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
	else
		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
				      0, 0, 0, fw->data, fw->size);
	release_firmware(fw);
	return rc;
}

/* Error strings reported via extack/log by BNXT_NVM_ERR_MSG() below */
#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
#define MSG_INVALID_PKG "PKG install error : Invalid package"
#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
#define MSG_INVALID_DEV "PKG install error : Invalid device"
#define MSG_INTERNAL_ERR "PKG install error : Internal error"
#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
#define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"

/* Map an NVM_INSTALL_UPDATE result code to a standard errno and report
 * a human-readable message via extack (when given) and the kernel log.
 */
static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
				    struct netlink_ext_ack *extack)
{
	switch (result) {
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
		return -EINVAL;
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
		return -ENOPKG;
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
		return -EPERM;
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
		return -EOPNOTSUPP;
	default:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
		return -EIO;
	}
}

#define BNXT_PKG_DMA_SIZE	0x40000
#define BNXT_NVM_MORE_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
#define BNXT_NVM_LAST_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))

/* Ask firmware to defragment NVRAM free space (used before retrying a
 * flash that failed with -ENOSPC).
 */
static int bnxt_hwrm_nvm_defrag(struct bnxt *bp)
{
	struct hwrm_nvm_defrag_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_DEFRAG);
	if (rc)
		return rc;
	req->flags = cpu_to_le32(NVM_DEFRAG_REQ_FLAGS_DEFRAG);
	hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);

	return hwrm_req_send(bp, req);
}

/* Ensure the NVM UPDATE directory entry is at least fw_size bytes.
 * If too small, grow it (rounded up to 4K); on -ENOSPC, defragment
 * NVRAM once and retry.
 */
static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
				    struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	bool retry = false;
	u32 item_len;
	int rc;

	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
				  &item_len, NULL);
	if (rc) {
		BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
		return rc;
	}

	if (fw_size > item_len) {
		do {
			rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
					      BNX_DIR_ORDINAL_FIRST, 0, 1,
					      round_up(fw_size, 4096), NULL,
					      0);

			if (rc == -ENOSPC) {
				/* Defrag at most once, then give up */
				if (retry || bnxt_hwrm_nvm_defrag(bp))
					break;
				retry = true;
			}
		} while (rc == -ENOSPC);

		if (rc) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
			return rc;
		}
	}
	return 0;
}

/* Flash a full firmware package: stream it into the NVM UPDATE area in
 * DMA-sized chunks via HWRM_NVM_MODIFY, then trigger
 * HWRM_NVM_INSTALL_UPDATE.  On fragmentation errors the firmware may be
 * asked to defrag and the whole sequence is retried once.
 */
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
				   u32 install_type, struct netlink_ext_ack *extack)
{
	struct hwrm_nvm_install_update_input *install;
	struct hwrm_nvm_install_update_output *resp;
	struct hwrm_nvm_modify_input *modify;
	struct bnxt *bp = netdev_priv(dev);
	bool defrag_attempted = false;
	dma_addr_t dma_handle;
	u8 *kmem = NULL;
	u32 modify_len;
	u32 item_len;
	u8 cmd_err;
	u16 index;
	int rc;

	/* resize before flashing larger image than available space */
	rc = bnxt_resize_update_entry(dev, fw->size, extack);
	if (rc)
		return rc;

	bnxt_hwrm_fw_set_time(bp);

	rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
	if (rc)
		return rc;

	/* Try allocating a large DMA buffer first. Older fw will
	 * cause excessive NVRAM erases when using small blocks.
3967 */ 3968 modify_len = roundup_pow_of_two(fw->size); 3969 modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE); 3970 while (1) { 3971 kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle); 3972 if (!kmem && modify_len > PAGE_SIZE) 3973 modify_len /= 2; 3974 else 3975 break; 3976 } 3977 if (!kmem) { 3978 hwrm_req_drop(bp, modify); 3979 return -ENOMEM; 3980 } 3981 3982 rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE); 3983 if (rc) { 3984 hwrm_req_drop(bp, modify); 3985 return rc; 3986 } 3987 3988 hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout); 3989 hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout); 3990 3991 hwrm_req_hold(bp, modify); 3992 modify->host_src_addr = cpu_to_le64(dma_handle); 3993 3994 resp = hwrm_req_hold(bp, install); 3995 if ((install_type & 0xffff) == 0) 3996 install_type >>= 16; 3997 install->install_type = cpu_to_le32(install_type); 3998 3999 do { 4000 u32 copied = 0, len = modify_len; 4001 4002 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, 4003 BNX_DIR_ORDINAL_FIRST, 4004 BNX_DIR_EXT_NONE, 4005 &index, &item_len, NULL); 4006 if (rc) { 4007 BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR); 4008 break; 4009 } 4010 if (fw->size > item_len) { 4011 BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR); 4012 rc = -EFBIG; 4013 break; 4014 } 4015 4016 modify->dir_idx = cpu_to_le16(index); 4017 4018 if (fw->size > modify_len) 4019 modify->flags = BNXT_NVM_MORE_FLAG; 4020 while (copied < fw->size) { 4021 u32 balance = fw->size - copied; 4022 4023 if (balance <= modify_len) { 4024 len = balance; 4025 if (copied) 4026 modify->flags |= BNXT_NVM_LAST_FLAG; 4027 } 4028 memcpy(kmem, fw->data + copied, len); 4029 modify->len = cpu_to_le32(len); 4030 modify->offset = cpu_to_le32(copied); 4031 rc = hwrm_req_send(bp, modify); 4032 if (rc) 4033 goto pkg_abort; 4034 copied += len; 4035 } 4036 4037 rc = hwrm_req_send_silent(bp, install); 4038 if (!rc) 4039 break; 4040 4041 if (defrag_attempted) { 4042 /* We have tried to 
defragment already in the previous 4043 * iteration. Return with the result for INSTALL_UPDATE 4044 */ 4045 break; 4046 } 4047 4048 cmd_err = ((struct hwrm_err_output *)resp)->cmd_err; 4049 4050 switch (cmd_err) { 4051 case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK: 4052 BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR); 4053 rc = -EALREADY; 4054 break; 4055 case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR: 4056 install->flags = 4057 cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); 4058 4059 rc = hwrm_req_send_silent(bp, install); 4060 if (!rc) 4061 break; 4062 4063 cmd_err = ((struct hwrm_err_output *)resp)->cmd_err; 4064 4065 if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) { 4066 /* FW has cleared NVM area, driver will create 4067 * UPDATE directory and try the flash again 4068 */ 4069 defrag_attempted = true; 4070 install->flags = 0; 4071 rc = bnxt_flash_nvram(bp->dev, 4072 BNX_DIR_TYPE_UPDATE, 4073 BNX_DIR_ORDINAL_FIRST, 4074 0, 0, item_len, NULL, 0); 4075 if (!rc) 4076 break; 4077 } 4078 fallthrough; 4079 default: 4080 BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR); 4081 } 4082 } while (defrag_attempted && !rc); 4083 4084 pkg_abort: 4085 hwrm_req_drop(bp, modify); 4086 hwrm_req_drop(bp, install); 4087 4088 if (resp->result) { 4089 netdev_err(dev, "PKG install error = %d, problem_item = %d\n", 4090 (s8)resp->result, (int)resp->problem_item); 4091 rc = nvm_update_err_to_stderr(dev, resp->result, extack); 4092 } 4093 if (rc == -EACCES) 4094 bnxt_print_admin_err(bp); 4095 return rc; 4096 } 4097 4098 static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, 4099 u32 install_type, struct netlink_ext_ack *extack) 4100 { 4101 const struct firmware *fw; 4102 int rc; 4103 4104 rc = request_firmware(&fw, filename, &dev->dev); 4105 if (rc != 0) { 4106 netdev_err(dev, "PKG error %d requesting file: %s\n", 4107 rc, filename); 4108 return rc; 4109 } 4110 4111 rc = bnxt_flash_package_from_fw_obj(dev, fw, 
				       install_type, extack);

	release_firmware(fw);

	return rc;
}

/* ethtool -f entry point.  A region of ETHTOOL_FLASH_ALL_REGIONS or any
 * value above 0xffff selects whole-package flashing; otherwise the region
 * is treated as a single firmware directory type.  PF only.
 */
static int bnxt_flash_device(struct net_device *dev,
			     struct ethtool_flash *flash)
{
	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
		netdev_err(dev, "flashdev not supported from a virtual function\n");
		return -EINVAL;
	}

	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
	    flash->region > 0xffff)
		return bnxt_flash_package_from_file(dev, flash->data,
						    flash->region, NULL);

	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}

/* Query NVM directory geometry from firmware: number of directory entries
 * and the size of each entry, via HWRM_NVM_GET_DIR_INFO.
 */
static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
{
	struct hwrm_nvm_get_dir_info_output *output;
	struct hwrm_nvm_get_dir_info_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
	if (rc)
		return rc;

	/* hold so the response buffer stays valid until we copy out of it */
	output = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		*entries = le32_to_cpu(output->entries);
		*length = le32_to_cpu(output->entry_length);
	}
	hwrm_req_drop(bp, req);
	return rc;
}

static int bnxt_get_eeprom_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	/* VFs have no NVM access; report a zero-length EEPROM */
	if (BNXT_VF(bp))
		return 0;

	/* The -1 return value allows the entire 32-bit range of offsets to be
	 * passed via the ethtool command-line utility.
	 */
	return -1;
}

/* Copy the NVM directory into @data for the ethtool -e "offset 0" pseudo
 * read.  Layout: byte 0 = entry count, byte 1 = entry size (both truncated
 * to u8), followed by up to @len - 2 bytes of raw directory entries; any
 * tail not covered by the directory is filled with 0xff.
 */
static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u32 dir_entries;
	u32 entry_length;
	u8 *buf;
	size_t buflen;
	dma_addr_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input *req;

	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	if (!dir_entries || !entry_length)
		return -EIO;

	/* Insert 2 bytes of directory info (count and size of entries) */
	if (len < 2)
		return -EINVAL;

	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	memset(data, 0xff, len);

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
	if (rc)
		return rc;

	/* 64-bit product avoids u32 overflow for large directories */
	buflen = mul_u32_u32(dir_entries, entry_length);
	buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}
	req->host_dest_addr = cpu_to_le64(dma_handle);

	hwrm_req_hold(bp, req); /* hold the slice */
	rc = hwrm_req_send(bp, req);
	if (rc == 0)
		memcpy(data, buf, len > buflen ?
		       buflen : len);
	hwrm_req_drop(bp, req);
	return rc;
}

/* Read @length bytes of NVM item @index starting at @offset into @data,
 * using an HWRM_NVM_READ with a driver-owned DMA bounce buffer.
 */
int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
			u32 length, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u8 *buf;
	dma_addr_t dma_handle;
	struct hwrm_nvm_read_input *req;

	if (!length)
		return -EINVAL;

	rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
	if (rc)
		return rc;

	buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}

	req->host_dest_addr = cpu_to_le64(dma_handle);
	req->dir_idx = cpu_to_le16(index);
	req->offset = cpu_to_le32(offset);
	req->len = cpu_to_le32(length);

	hwrm_req_hold(bp, req); /* hold the slice */
	rc = hwrm_req_send(bp, req);
	if (rc == 0)
		memcpy(data, buf, length);
	hwrm_req_drop(bp, req);
	return rc;
}

/* Look up an NVM directory entry by (type, ordinal, ext).  Any of the
 * output pointers may be NULL if the caller does not need that field.
 * Uses the silent send variant since "not found" is an expected outcome.
 */
int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
			 u16 ext, u16 *index, u32 *item_length,
			 u32 *data_length)
{
	struct hwrm_nvm_find_dir_entry_output *output;
	struct hwrm_nvm_find_dir_entry_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);
	if (rc)
		return rc;

	req->enables = 0;
	req->dir_idx = 0;
	req->dir_type = cpu_to_le16(type);
	req->dir_ordinal = cpu_to_le16(ordinal);
	req->dir_ext = cpu_to_le16(ext);
	req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
	output = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (rc == 0) {
		if (index)
			*index = le16_to_cpu(output->dir_idx);
		if (item_length)
			*item_length = le32_to_cpu(output->dir_item_length);
		if (data_length)
			*data_length = le32_to_cpu(output->dir_data_length);
	}
	hwrm_req_drop(bp, req);
	return rc;
}

static char
 *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
{
	/* Parse the tab-separated, newline-terminated package log in @data
	 * (modified in place: separators are overwritten with NULs) and
	 * return a pointer into @data at field number @desired_field of the
	 * last line that contains it, or NULL if never found.
	 */
	char *retval = NULL;
	char *p;
	char *value;
	int field = 0;

	if (datalen < 1)
		return NULL;
	/* null-terminate the log data (removing last '\n'): */
	data[datalen - 1] = 0;
	for (p = data; *p != 0; p++) {
		field = 0;
		retval = NULL;
		while (*p != 0 && *p != '\n') {
			value = p;
			while (*p != 0 && *p != '\t' && *p != '\n')
				p++;
			if (field == desired_field)
				retval = value;
			if (*p != '\t')
				break;
			*p = 0;
			field++;
			p++;
		}
		if (*p == 0)
			break;
		*p = 0;
	}
	return retval;
}

/* Read the firmware package version string from the NVM package log into
 * @ver (at most @size bytes, NUL-terminated by strscpy).  Returns 0 on
 * success, -ENOENT if the version field is absent or not numeric, or a
 * negative errno from the NVM accessors.
 */
int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
{
	struct bnxt *bp = netdev_priv(dev);
	u16 index = 0;
	char *pkgver;
	u32 pkglen;
	u8 *pkgbuf;
	int rc;

	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				  &index, NULL, &pkglen);
	if (rc)
		return rc;

	pkgbuf = kzalloc(pkglen, GFP_KERNEL);
	if (!pkgbuf) {
		dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
			pkglen);
		return -ENOMEM;
	}

	rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
	if (rc)
		goto err;

	pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
				   pkglen);
	/* require a leading digit so garbage fields are not reported */
	if (pkgver && *pkgver != 0 && isdigit(*pkgver))
		strscpy(ver, pkgver, size);
	else
		rc = -ENOENT;

err:
	kfree(pkgbuf);

	return rc;
}

/* Append "/pkg <version>" to bp->fw_ver_str if a package version can be
 * read from NVM; silently does nothing on any failure.
 */
static void bnxt_get_pkgver(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	char buf[FW_VER_STR_LEN - 5];
	int len;

	if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
		len = strlen(bp->fw_ver_str);
		snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len,
			 "/pkg %s", buf);
	}
}

/* ethtool -e: offset 0 dumps the NVM directory; otherwise the top byte of
 * the offset encodes (directory index + 1) and the low 24 bits the byte
 * offset within that item.
 */
static int
 bnxt_get_eeprom(struct net_device *dev,
		struct ethtool_eeprom *eeprom,
		u8 *data)
{
	u32 index;
	u32 offset;

	if (eeprom->offset == 0) /* special offset value to get directory */
		return bnxt_get_nvram_directory(dev, eeprom->len, data);

	index = eeprom->offset >> 24;
	offset = eeprom->offset & 0xffffff;

	if (index == 0) {
		netdev_err(dev, "unsupported index value: %d\n", index);
		return -EINVAL;
	}

	return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
}

/* Erase NVM directory entry @index via HWRM_NVM_ERASE_DIR_ENTRY. */
static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
{
	struct hwrm_nvm_erase_dir_entry_input *req;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY);
	if (rc)
		return rc;

	req->dir_idx = cpu_to_le16(index);
	return hwrm_req_send(bp, req);
}

/* ethtool -E: magic 0xffffXXXX selects directory operations (currently
 * only erase, dir_op 0x0e, with offset == ~magic as a confirmation);
 * any other magic encodes type/ext and the offset encodes ordinal/attr
 * for creating or rewriting a non-executable NVM item.  PF only.
 */
static int bnxt_set_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 index, dir_op;
	u16 type, ext, ordinal, attr;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "NVM write not supported from a virtual function\n");
		return -EINVAL;
	}

	type = eeprom->magic >> 16;

	if (type == 0xffff) { /* special value for directory operations */
		index = eeprom->magic & 0xff;
		dir_op = eeprom->magic >> 8;
		if (index == 0)
			return -EINVAL;
		switch (dir_op) {
		case 0x0e: /* erase */
			if (eeprom->offset != ~eeprom->magic)
				return -EINVAL;
			return bnxt_erase_nvram_directory(dev, index - 1);
		default:
			return -EINVAL;
		}
	}

	/* Create or re-write an NVM item: */
	if (bnxt_dir_type_is_executable(type))
		return -EOPNOTSUPP;
	ext = eeprom->magic & 0xffff;
	ordinal = eeprom->offset >> 16;
	attr = eeprom->offset & 0xffff;

	return bnxt_flash_nvram(dev,
				type, ordinal, ext, attr, 0, data,
				eeprom->len);
}

/* ethtool --set-eee: validate and cache the requested EEE settings in
 * bp->eee under link_lock, then push them to firmware if the interface
 * is up.  EEE requires speed autoneg, and the advertised set must be a
 * subset of the autoneg-advertised speeds.
 */
static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
	struct bnxt *bp = netdev_priv(dev);
	struct ethtool_keee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	_bnxt_fw_to_linkmode(advertising, link_info->advertising);
	if (!edata->eee_enabled)
		goto eee_ok;

	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		netdev_warn(dev, "EEE requires autoneg\n");
		rc = -EINVAL;
		goto eee_exit;
	}
	if (edata->tx_lpi_enabled) {
		/* range-check only when fw advertised a valid LPI window */
		if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
				       edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
			netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
				    bp->lpi_tmr_lo, bp->lpi_tmr_hi);
			rc = -EINVAL;
			goto eee_exit;
		} else if (!bp->lpi_tmr_hi) {
			edata->tx_lpi_timer = eee->tx_lpi_timer;
		}
	}
	if (linkmode_empty(edata->advertised)) {
		/* empty request => advertise everything supported */
		linkmode_and(edata->advertised, advertising, eee->supported);
	} else if (linkmode_andnot(tmp, edata->advertised, advertising)) {
		netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n");
		rc = -EINVAL;
		goto eee_exit;
	}

	linkmode_copy(eee->advertised, edata->advertised);
	eee->tx_lpi_enabled = edata->tx_lpi_enabled;
	eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
	eee->eee_enabled = edata->eee_enabled;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, false, true);

eee_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}

static int bnxt_get_eee(struct net_device
 *dev, struct ethtool_keee *edata)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
		return -EOPNOTSUPP;

	*edata = bp->eee;
	if (!bp->eee.eee_enabled) {
		/* Preserve tx_lpi_timer so that the last value will be used
		 * by default when it is re-enabled.
		 */
		linkmode_zero(edata->advertised);
		edata->tx_lpi_enabled = 0;
	}

	if (!bp->eee.eee_active)
		linkmode_zero(edata->lp_advertised);

	return 0;
}

/* Query the PFC watchdog (storm prevention) timeout from firmware. */
static int bnxt_hwrm_pfcwd_qcfg(struct bnxt *bp, u16 *val)
{
	struct hwrm_queue_pfcwd_timeout_qcfg_output *resp;
	struct hwrm_queue_pfcwd_timeout_qcfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCFG);
	if (rc)
		return rc;
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		*val = le16_to_cpu(resp->pfcwd_timeout_value);
	hwrm_req_drop(bp, req);
	return rc;
}

/* Program the PFC watchdog (storm prevention) timeout in firmware. */
static int bnxt_hwrm_pfcwd_cfg(struct bnxt *bp, u16 val)
{
	struct hwrm_queue_pfcwd_timeout_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_CFG);
	if (rc)
		return rc;
	req->pfcwd_timeout_value = cpu_to_le16(val);
	rc = hwrm_req_send(bp, req);
	return rc;
}

/* ethtool --set-tunable: rx-copybreak (only while the device is down)
 * and the PFC storm-prevention timeout (PF only, when fw reports a max).
 */
static int bnxt_set_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna,
			    const void *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u32 rx_copybreak, val;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		rx_copybreak = *(u32 *)data;
		if (rx_copybreak > BNXT_MAX_RX_COPYBREAK)
			return -ERANGE;
		if (rx_copybreak != bp->rx_copybreak) {
			if (netif_running(dev))
				return -EBUSY;
			bp->rx_copybreak = rx_copybreak;
		}
		return 0;
	case ETHTOOL_PFC_PREVENTION_TOUT:
		if (BNXT_VF(bp) || !bp->max_pfcwd_tmo_ms)
			return -EOPNOTSUPP;

		/* ethtool core passes this tunable as a u16 */
		val =
 *(u16 *)data;
		if (val > bp->max_pfcwd_tmo_ms &&
		    val != PFC_STORM_PREVENTION_AUTO)
			return -EINVAL;
		return bnxt_hwrm_pfcwd_cfg(bp, val);
	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool --get-tunable counterpart of bnxt_set_tunable(). */
static int bnxt_get_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna, void *data)
{
	struct bnxt *bp = netdev_priv(dev);

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = bp->rx_copybreak;
		break;
	case ETHTOOL_PFC_PREVENTION_TOUT:
		if (!bp->max_pfcwd_tmo_ms)
			return -EOPNOTSUPP;
		return bnxt_hwrm_pfcwd_qcfg(bp, data);
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Read @data_length bytes from a transceiver module EEPROM over firmware
 * I2C (HWRM_PORT_PHY_I2C_READ), in chunks of at most
 * BNXT_MAX_PHY_I2C_RESP_SIZE.  page_offset/bank enables are set only when
 * a nonzero offset/bank is actually used.
 */
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
					    u16 page_number, u8 bank,
					    u16 start_addr, u16 data_length,
					    u8 *buf)
{
	struct hwrm_port_phy_i2c_read_output *output;
	struct hwrm_port_phy_i2c_read_input *req;
	int rc, byte_offset = 0;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
	if (rc)
		return rc;

	/* hold so the same request can be re-sent for each chunk */
	output = hwrm_req_hold(bp, req);
	req->i2c_slave_addr = i2c_addr;
	req->page_number = cpu_to_le16(page_number);
	req->port_id = cpu_to_le16(bp->pf.port_id);
	do {
		u16 xfer_size;

		xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
		data_length -= xfer_size;
		req->page_offset = cpu_to_le16(start_addr + byte_offset);
		req->data_length = xfer_size;
		req->enables =
			cpu_to_le32((start_addr + byte_offset ?
				     PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET :
				     0) |
				    (bank ?
				     PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER :
				     0));
		rc = hwrm_req_send(bp, req);
		if (!rc)
			memcpy(buf + byte_offset, output->data, xfer_size);
		byte_offset += xfer_size;
	} while (!rc && data_length > 0);
	hwrm_req_drop(bp, req);

	return rc;
}

/* ethtool -m: report the module EEPROM layout (SFF-8472/8436/8636) based
 * on the module identifier byte and its diagnostics-support bit.
 */
static int bnxt_get_module_info(struct net_device *dev,
				struct ethtool_modinfo *modinfo)
{
	u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
		return -EPERM;

	/* No point in going further if phy status indicates
	 * module is not inserted or if it is powered down or
	 * if it is of type 10GBase-T
	 */
	if (bp->link_info.module_status >
	    PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
		return -EOPNOTSUPP;

	/* This feature is not supported in older firmware versions */
	if (bp->hwrm_spec_code < 0x10202)
		return -EOPNOTSUPP;

	rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0,
					      SFF_DIAG_SUPPORT_OFFSET + 1,
					      data);
	if (!rc) {
		u8 module_id = data[0];
		u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];

		switch (module_id) {
		case SFF_MODULE_ID_SFP:
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
			/* without DDM support only the A0 page exists */
			if (!diag_supported)
				modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP:
		case SFF_MODULE_ID_QSFP_PLUS:
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP28:
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
			break;
		default:
			rc = -EOPNOTSUPP;
			break;
		}
	}
	return rc;
}

static int bnxt_get_module_eeprom(struct net_device *dev,
				  struct ethtool_eeprom *eeprom,
				  u8 *data)
{
	struct bnxt *bp =
 netdev_priv(dev);
	u16 start = eeprom->offset, length = eeprom->len;
	int rc = 0;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
		return -EPERM;

	memset(data, 0, eeprom->len);

	/* Read A0 portion of the EEPROM */
	if (start < ETH_MODULE_SFF_8436_LEN) {
		if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
			length = ETH_MODULE_SFF_8436_LEN - start;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
						      start, length, data);
		if (rc)
			return rc;
		start += length;
		data += length;
		length = eeprom->len - length;
	}

	/* Read A2 portion of the EEPROM */
	if (length) {
		start -= ETH_MODULE_SFF_8436_LEN;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0,
						      start, length, data);
	}
	return rc;
}

/* Translate the PHY module status into an extack error for the by-page
 * EEPROM operations.  Returns 0 when the module is usable, -EOPNOTSUPP
 * for Base-T PHYs (no module EEPROM), -EINVAL otherwise.
 */
static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack)
{
	if (bp->link_info.module_status <=
	    PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
		return 0;

	if (bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
	    bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE){
		NL_SET_ERR_MSG_MOD(extack, "Operation not supported as PHY type is Base-T");
		return -EOPNOTSUPP;
	}
	switch (bp->link_info.module_status) {
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
		NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
		break;
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED:
		NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted");
		break;
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT:
		NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault");
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown error");
		break;
	}
	return -EINVAL;
}

/* Common preconditions for by-page module EEPROM read/write: trusted
 * function, usable module, new-enough firmware, and bank-select support
 * when a nonzero bank is requested.
 */
static int
bnxt_mod_eeprom_by_page_precheck(struct bnxt *bp,
				 const struct
 ethtool_module_eeprom *page_data,
				 struct netlink_ext_ack *extack)
{
	int rc;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Module read/write not permitted on untrusted VF");
		return -EPERM;
	}

	rc = bnxt_get_module_status(bp, extack);
	if (rc)
		return rc;

	if (bp->hwrm_spec_code < 0x10202) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware version too old");
		return -EINVAL;
	}

	if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
		return -EINVAL;
	}
	return 0;
}

/* ethtool netlink by-page module EEPROM read.  On success returns the
 * number of bytes read (page_data->length) per the ethtool_ops contract.
 */
static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
					  const struct ethtool_module_eeprom *page_data,
					  struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
	if (rc)
		return rc;

	/* fw expects the 8-bit i2c address form, hence the << 1 */
	rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
					      page_data->page, page_data->bank,
					      page_data->offset,
					      page_data->length,
					      page_data->data);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom read failed");
		return rc;
	}
	return page_data->length;
}

/* Write a module EEPROM page over firmware I2C in chunks of at most
 * BNXT_MAX_PHY_I2C_RESP_SIZE bytes, stopping on the first failure.
 */
static int bnxt_write_sfp_module_eeprom_info(struct bnxt *bp,
					     const struct ethtool_module_eeprom *page)
{
	struct hwrm_port_phy_i2c_write_input *req;
	int bytes_written = 0;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_WRITE);
	if (rc)
		return rc;

	/* hold so the same request can be re-sent for each chunk */
	hwrm_req_hold(bp, req);
	req->i2c_slave_addr = page->i2c_address << 1;
	req->page_number = cpu_to_le16(page->page);
	req->bank_number = page->bank;
	req->port_id = cpu_to_le16(bp->pf.port_id);
	req->enables = cpu_to_le32(PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET |
				   PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER);

	while (bytes_written < page->length) {
		u16 xfer_size;

		xfer_size = min_t(u16, page->length - bytes_written,
				  BNXT_MAX_PHY_I2C_RESP_SIZE);
		req->page_offset = cpu_to_le16(page->offset + bytes_written);
		req->data_length = xfer_size;
		memcpy(req->data, page->data + bytes_written, xfer_size);
		rc = hwrm_req_send(bp, req);
		if (rc)
			break;
		bytes_written += xfer_size;
	}

	hwrm_req_drop(bp, req);
	return rc;
}

/* ethtool netlink by-page module EEPROM write; returns bytes written. */
static int bnxt_set_module_eeprom_by_page(struct net_device *dev,
					  const struct ethtool_module_eeprom *page_data,
					  struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
	if (rc)
		return rc;

	rc = bnxt_write_sfp_module_eeprom_info(bp, page_data);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom write failed");
		return rc;
	}
	return page_data->length;
}

/* ethtool -r: restart autonegotiation.  Only valid when autoneg is on;
 * a no-op success when the interface is down.
 */
static int bnxt_nway_reset(struct net_device *dev)
{
	int rc = 0;

	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
		return -EINVAL;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, true, false);

	return rc;
}

/* ethtool -p: blink (ACTIVE) or restore (INACTIVE) all port LEDs via
 * HWRM_PORT_LED_CFG.  PF only, and only when firmware reported LEDs.
 */
static int bnxt_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct hwrm_port_led_cfg_input *req;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_pf_info *pf = &bp->pf;
	struct bnxt_led_cfg *led_cfg;
	u8 led_state;
	__le16 duration;
	int rc, i;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	if (state == ETHTOOL_ID_ACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
		duration = cpu_to_le16(500);
	} else
 if (state == ETHTOOL_ID_INACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
		duration = cpu_to_le16(0);
	} else {
		return -EINVAL;
	}
	rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
	if (rc)
		return rc;

	req->port_id = cpu_to_le16(pf->port_id);
	req->num_leds = bp->num_leds;
	/* the per-LED config fields form an array starting at led0_id */
	led_cfg = (struct bnxt_led_cfg *)&req->led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req->enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}
	return hwrm_req_send(bp, req);
}

/* Ask firmware to fire a test interrupt on completion ring @cmpl_ring. */
static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
{
	struct hwrm_selftest_irq_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ);
	if (rc)
		return rc;

	req->cmpl_ring = cpu_to_le16(cmpl_ring);
	return hwrm_req_send(bp, req);
}

/* Run the IRQ self-test on every completion ring; first failure wins. */
static int bnxt_test_irq(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
		int rc;

		rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
		if (rc)
			return rc;
	}
	return 0;
}

/* Enable or disable MAC-level local loopback for the loopback self-test. */
static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
{
	struct hwrm_port_mac_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
	if (rc)
		return rc;

	req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
	if (enable)
		req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
	else
		req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
	return hwrm_req_send(bp, req);
}

/* Query the bitmap of speeds the PHY supports in forced mode. */
static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
{
	struct hwrm_port_phy_qcaps_output *resp;
	struct hwrm_port_phy_qcaps_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		*force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);

	hwrm_req_drop(bp, req);
	return rc;
}

/* For PHY loopback tests on links using autoneg: force a fixed speed
 * (the current link speed if up, else the highest supported force speed)
 * so loopback can come up without a link partner.  The caller's @req is
 * reused; flags and force_link_speed are cleared again before returning
 * so the caller can continue building on the same request.
 */
static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
				    struct hwrm_port_phy_cfg_input *req)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 fw_advertising;
	u16 fw_speed;
	int rc;

	if (!link_info->autoneg ||
	    (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
		return 0;

	rc = bnxt_query_force_speeds(bp, &fw_advertising);
	if (rc)
		return rc;

	fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
	if (BNXT_LINK_IS_UP(bp))
		fw_speed = bp->link_info.link_speed;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;

	req->force_link_speed = cpu_to_le16(fw_speed);
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
				  PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_req_send(bp, req);
	req->flags = 0;
	req->force_link_speed = cpu_to_le16(0);
	return rc;
}

/* Enable local or external PHY loopback, or disable loopback entirely. */
static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
{
	struct hwrm_port_phy_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;

	/* prevent bnxt_disable_an_for_lpbk() from consuming the request */
	hwrm_req_hold(bp, req);

	if (enable) {
		bnxt_disable_an_for_lpbk(bp,
 req);
		if (ext)
			req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
		else
			req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
	} else {
		req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
	}
	req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
	rc = hwrm_req_send(bp, req);
	hwrm_req_drop(bp, req);
	return rc;
}

/* Validate the looped-back test frame identified by the RX completion at
 * @raw_cons: length must equal @pkt_size, the source MAC (bytes 6..11)
 * must be our own address, and the payload must match the i & 0xff
 * pattern written by bnxt_run_loopback().  Returns 0 on match, -EIO not.
 */
static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    u32 raw_cons, int pkt_size)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct rx_cmp *rxcmp;
	u16 cp_cons, cons;
	u8 *data;
	u32 len;
	int i;

	rxr = bnapi->rx_ring;
	cp_cons = RING_CMP(raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data_ptr;
	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	if (len != pkt_size)
		return -EIO;
	i = ETH_ALEN;
	if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
		return -EIO;
	i += ETH_ALEN;
	for ( ; i < pkt_size; i++) {
		if (data[i] != (u8)(i & 0xff))
			return -EIO;
	}
	return 0;
}

/* Busy-poll the completion ring (up to ~200 x 5us) for the RX completion
 * of the loopback frame and verify it; updates cp_raw_cons past the
 * consumed entries.  -EIO on timeout or verification failure.
 */
static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			      int pkt_size)
{
	struct tx_cmp *txcmp;
	int rc = -EIO;
	u32 raw_cons;
	u32 cons;
	int i;

	raw_cons = cpr->cp_raw_cons;
	for (i = 0; i < 200; i++) {
		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons)) {
			udelay(5);
			continue;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP ||
		    TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) {
			rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
			/* consume the RX completion (two slots) */
			raw_cons = NEXT_RAW_CMP(raw_cons);
			raw_cons = NEXT_RAW_CMP(raw_cons);
			break;
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
	}
	cpr->cp_raw_cons = raw_cons;
	return rc;
}

/* Transmit one self-addressed test frame on TX ring 0 and verify it is
 * received back on RX ring 0 while a MAC or PHY loopback is active.
 * Returns 0 on success, negative errno on alloc/DMA/verification failure.
 */
static int bnxt_run_loopback(struct bnxt *bp)
{
	struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
	struct bnxt_cp_ring_info *cpr;
	int pkt_size, i = 0;
	struct sk_buff *skb;
	dma_addr_t map;
	u8 *data;
	int rc;

	cpr = &rxr->bnapi->cp_ring;
	/* P5+ chips use a dedicated RX completion ring */
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		cpr = rxr->rx_cpr;
	pkt_size = min(bp->dev->mtu + ETH_HLEN, max(BNXT_DEFAULT_RX_COPYBREAK,
						    bp->rx_copybreak));
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	data = skb_put(skb, pkt_size);
	/* dest MAC, then src MAC, then the i & 0xff fill pattern that
	 * bnxt_rx_loopback() checks on the receive side
	 */
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	for ( ; i < pkt_size; i++)
		data[i] = (u8)(i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);

	/* Sync BD data before updating doorbell */
	wmb();

	bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	rc = bnxt_poll_loopback(bp, cpr, pkt_size);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
	dev_kfree_skb(skb);
	return rc;
}

/* Run the firmware self-tests selected by @test_mask and return the
 * per-test pass bits in @test_results.
 */
static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
{
	struct hwrm_selftest_exec_output *resp;
	struct hwrm_selftest_exec_input *req;
	int rc;

	rc = hwrm_req_init(bp, req,
 HWRM_SELFTEST_EXEC);
	if (rc)
		return rc;

	hwrm_req_timeout(bp, req, bp->test_info->timeout);
	req->flags = test_mask;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	*test_results = resp->test_success;
	hwrm_req_drop(bp, req);
	return rc;
}

/* The last BNXT_DRV_TESTS entries of the result array are driver-run
 * tests (MAC/PHY/external loopback and IRQ); the rest are firmware tests.
 */
#define BNXT_DRV_TESTS			4
#define BNXT_MACLPBK_TEST_IDX		(bp->num_tests - BNXT_DRV_TESTS)
#define BNXT_PHYLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 1)
#define BNXT_EXTLPBK_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 2)
#define BNXT_IRQ_TEST_IDX		(BNXT_MACLPBK_TEST_IDX + 3)

/* ethtool -t: run firmware self-tests plus the driver loopback and IRQ
 * tests.  Offline tests require exclusive ownership (no RoCE driver, no
 * active VFs, single PF) and briefly take the interface through a
 * half-open state.  buf[i] is set to 1 for each failed test and
 * ETH_TEST_FL_FAILED is set on any failure.
 */
static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
			   u64 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	bool do_ext_lpbk = false;
	bool offline = false;
	u8 test_results = 0;
	u8 test_mask = 0;
	int rc = 0, i;

	if (!bp->num_tests || !BNXT_PF(bp))
		return;

	memset(buf, 0, sizeof(u64) * bp->num_tests);
	if (etest->flags & ETH_TEST_FL_OFFLINE &&
	    bnxt_ulp_registered(bp->edev)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		netdev_warn(dev, "Offline tests cannot be run with RoCE driver loaded\n");
		return;
	}

	if (!netif_running(dev)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
	    (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
		do_ext_lpbk = true;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
			return;
		}
		offline = true;
	}

	/* online-capable fw tests always run; offline-only ones run only
	 * when the user requested an offline test
	 */
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if (!(bp->test_info->offline_mask & bit_val))
			test_mask |= bit_val;
		else if (offline)
			test_mask |= bit_val;
	}
	if (!offline) {
		bnxt_run_fw_tests(bp, test_mask, &test_results);
	} else {
		bnxt_close_nic(bp, true, false);
		bnxt_run_fw_tests(bp, test_mask, &test_results);

		/* half-open: rings are up but the stack stays detached */
		rc = bnxt_half_open_nic(bp);
		if (rc) {
			etest->flags |= ETH_TEST_FL_FAILED;
			return;
		}
		/* mark each loopback test failed up front; clear on pass */
		buf[BNXT_MACLPBK_TEST_IDX] = 1;
		if (bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK)
			goto skip_mac_loopback;

		bnxt_hwrm_mac_loopback(bp, true);
		msleep(250);
		if (bnxt_run_loopback(bp))
			etest->flags |= ETH_TEST_FL_FAILED;
		else
			buf[BNXT_MACLPBK_TEST_IDX] = 0;

		bnxt_hwrm_mac_loopback(bp, false);
skip_mac_loopback:
		buf[BNXT_PHYLPBK_TEST_IDX] = 1;
		if (bp->phy_flags & BNXT_PHY_FL_NO_PHY_LPBK)
			goto skip_phy_loopback;

		bnxt_hwrm_phy_loopback(bp, true, false);
		msleep(1000);
		if (bnxt_run_loopback(bp))
			etest->flags |= ETH_TEST_FL_FAILED;
		else
			buf[BNXT_PHYLPBK_TEST_IDX] = 0;
skip_phy_loopback:
		buf[BNXT_EXTLPBK_TEST_IDX] = 1;
		if (do_ext_lpbk) {
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
			bnxt_hwrm_phy_loopback(bp, true, true);
			msleep(1000);
			if (bnxt_run_loopback(bp))
				etest->flags |= ETH_TEST_FL_FAILED;
			else
				buf[BNXT_EXTLPBK_TEST_IDX] = 0;
		}
		bnxt_hwrm_phy_loopback(bp, false, false);
		bnxt_half_close_nic(bp);
		rc = bnxt_open_nic(bp, true, true);
	}
	if (rc || bnxt_test_irq(bp)) {
		buf[BNXT_IRQ_TEST_IDX] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if ((test_mask & bit_val) && !(test_results & bit_val)) {
			buf[i] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}
}

/* ethtool --reset: handle chip and AP (application processor) reset
 * requests.  Each accomplished component is cleared from *flags; PF only,
 * and refused while VFs are assigned to VMs unless hot reset is
 * supported.
 */
static int bnxt_reset(struct net_device *dev, u32 *flags)
{
	struct bnxt *bp = netdev_priv(dev);
	bool reload = false;
	u32 req = *flags;

	if (!req)
		return -EINVAL;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "Reset is not supported from a VF\n");
		return -EOPNOTSUPP;
	}

	if (pci_vfs_assigned(bp->pdev) &&
	    !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
		netdev_err(dev,
			   "Reset not allowed when VFs are assigned to VMs\n");
		return -EBUSY;
	}

	if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code >= 0x10803) {
			if (!bnxt_firmware_reset_chip(dev)) {
				netdev_info(dev, "Firmware reset request successful.\n");
				if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
					reload = true;
				*flags &= ~BNXT_FW_RESET_CHIP;
			}
		} else if (req == BNXT_FW_RESET_CHIP) {
			return -EOPNOTSUPP; /* only request, fail hard */
		}
	}

	if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code >= 0x10803) {
			if (!bnxt_firmware_reset_ap(dev)) {
				netdev_info(dev, "Reset application processor successful.\n");
				reload = true;
				*flags &= ~BNXT_FW_RESET_AP;
			}
		} else if (req == BNXT_FW_RESET_AP) {
			return -EOPNOTSUPP; /* only request, fail hard */
		}
	}

	if (reload)
		netdev_info(dev, "Reload driver to complete reset\n");

	return 0;
}

static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
{
	struct bnxt *bp = netdev_priv(dev);

	if (dump->flag > BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE) {
		netdev_info(dev,
			    "Supports only Live(0), Crash(1), Driver(2), Live with cached context(3) dumps.\n");
		return -EINVAL;
	}

	if (dump->flag == BNXT_DUMP_CRASH) {
		if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR &&
		    (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) {
			netdev_info(dev,
				    "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
			return -EOPNOTSUPP;
		} else if
(!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) { 5381 netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n"); 5382 return -EOPNOTSUPP; 5383 } 5384 } 5385 5386 bp->dump_flag = dump->flag; 5387 return 0; 5388 } 5389 5390 static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) 5391 { 5392 struct bnxt *bp = netdev_priv(dev); 5393 5394 if (bp->hwrm_spec_code < 0x10801) 5395 return -EOPNOTSUPP; 5396 5397 dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 | 5398 bp->ver_resp.hwrm_fw_min_8b << 16 | 5399 bp->ver_resp.hwrm_fw_bld_8b << 8 | 5400 bp->ver_resp.hwrm_fw_rsvd_8b; 5401 5402 dump->flag = bp->dump_flag; 5403 dump->len = bnxt_get_coredump_length(bp, bp->dump_flag); 5404 return 0; 5405 } 5406 5407 static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, 5408 void *buf) 5409 { 5410 struct bnxt *bp = netdev_priv(dev); 5411 5412 if (bp->hwrm_spec_code < 0x10801) 5413 return -EOPNOTSUPP; 5414 5415 memset(buf, 0, dump->len); 5416 5417 dump->flag = bp->dump_flag; 5418 return bnxt_get_coredump(bp, dump->flag, buf, &dump->len); 5419 } 5420 5421 static int bnxt_get_ts_info(struct net_device *dev, 5422 struct kernel_ethtool_ts_info *info) 5423 { 5424 struct bnxt *bp = netdev_priv(dev); 5425 struct bnxt_ptp_cfg *ptp; 5426 5427 ptp = bp->ptp_cfg; 5428 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE; 5429 5430 if (!ptp) 5431 return 0; 5432 5433 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | 5434 SOF_TIMESTAMPING_RX_HARDWARE | 5435 SOF_TIMESTAMPING_RAW_HARDWARE; 5436 if (ptp->ptp_clock) 5437 info->phc_index = ptp_clock_index(ptp->ptp_clock); 5438 5439 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 5440 5441 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 5442 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 5443 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 5444 5445 if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) 5446 info->rx_filters |= (1 << 
HWTSTAMP_FILTER_ALL); 5447 return 0; 5448 } 5449 5450 static void bnxt_hwrm_pcie_qstats(struct bnxt *bp) 5451 { 5452 struct hwrm_pcie_qstats_output *resp; 5453 struct hwrm_pcie_qstats_input *req; 5454 5455 bp->pcie_stat_len = 0; 5456 if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) 5457 return; 5458 5459 if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS)) 5460 return; 5461 5462 resp = hwrm_req_hold(bp, req); 5463 if (__bnxt_hwrm_pcie_qstats(bp, req)) 5464 bp->pcie_stat_len = min_t(u16, 5465 le16_to_cpu(resp->pcie_stat_size), 5466 sizeof(struct pcie_ctx_hw_stats_v2)); 5467 hwrm_req_drop(bp, req); 5468 } 5469 5470 void bnxt_ethtool_init(struct bnxt *bp) 5471 { 5472 struct hwrm_selftest_qlist_output *resp; 5473 struct hwrm_selftest_qlist_input *req; 5474 struct bnxt_test_info *test_info; 5475 struct net_device *dev = bp->dev; 5476 int i, rc; 5477 5478 bnxt_hwrm_pcie_qstats(bp); 5479 if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER)) 5480 bnxt_get_pkgver(dev); 5481 5482 bp->num_tests = 0; 5483 if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp)) 5484 return; 5485 5486 test_info = bp->test_info; 5487 if (!test_info) { 5488 test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL); 5489 if (!test_info) 5490 return; 5491 bp->test_info = test_info; 5492 } 5493 5494 if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST)) 5495 return; 5496 5497 resp = hwrm_req_hold(bp, req); 5498 rc = hwrm_req_send_silent(bp, req); 5499 if (rc) 5500 goto ethtool_init_exit; 5501 5502 bp->num_tests = resp->num_tests + BNXT_DRV_TESTS; 5503 if (bp->num_tests > BNXT_MAX_TEST) 5504 bp->num_tests = BNXT_MAX_TEST; 5505 5506 test_info->offline_mask = resp->offline_tests; 5507 test_info->timeout = le16_to_cpu(resp->test_timeout); 5508 if (!test_info->timeout) 5509 test_info->timeout = HWRM_CMD_TIMEOUT; 5510 for (i = 0; i < bp->num_tests; i++) { 5511 char *str = test_info->string[i]; 5512 char *fw_str = resp->test_name[i]; 5513 5514 if (i == BNXT_MACLPBK_TEST_IDX) { 5515 strcpy(str, "Mac loopback test (offline)"); 5516 } 
else if (i == BNXT_PHYLPBK_TEST_IDX) { 5517 strcpy(str, "Phy loopback test (offline)"); 5518 } else if (i == BNXT_EXTLPBK_TEST_IDX) { 5519 strcpy(str, "Ext loopback test (offline)"); 5520 } else if (i == BNXT_IRQ_TEST_IDX) { 5521 strcpy(str, "Interrupt_test (offline)"); 5522 } else { 5523 snprintf(str, ETH_GSTRING_LEN, "%s test (%s)", 5524 fw_str, test_info->offline_mask & (1 << i) ? 5525 "offline" : "online"); 5526 } 5527 } 5528 5529 ethtool_init_exit: 5530 hwrm_req_drop(bp, req); 5531 } 5532 5533 static void bnxt_get_eth_phy_stats(struct net_device *dev, 5534 struct ethtool_eth_phy_stats *phy_stats) 5535 { 5536 struct bnxt *bp = netdev_priv(dev); 5537 u64 *rx; 5538 5539 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 5540 return; 5541 5542 rx = bp->rx_port_stats_ext.sw_stats; 5543 phy_stats->SymbolErrorDuringCarrier = 5544 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err)); 5545 } 5546 5547 static void bnxt_get_eth_mac_stats(struct net_device *dev, 5548 struct ethtool_eth_mac_stats *mac_stats) 5549 { 5550 struct bnxt *bp = netdev_priv(dev); 5551 u64 *rx, *tx; 5552 5553 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 5554 return; 5555 5556 rx = bp->port_stats.sw_stats; 5557 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 5558 5559 mac_stats->FramesReceivedOK = 5560 BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames); 5561 mac_stats->FramesTransmittedOK = 5562 BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames); 5563 mac_stats->FrameCheckSequenceErrors = 5564 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 5565 mac_stats->AlignmentErrors = 5566 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 5567 mac_stats->OutOfRangeLengthField = 5568 BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames); 5569 } 5570 5571 static void bnxt_get_eth_ctrl_stats(struct net_device *dev, 5572 struct ethtool_eth_ctrl_stats *ctrl_stats) 5573 { 5574 struct bnxt *bp = netdev_priv(dev); 5575 u64 *rx; 5576 5577 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 
5578 return; 5579 5580 rx = bp->port_stats.sw_stats; 5581 ctrl_stats->MACControlFramesReceived = 5582 BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames); 5583 } 5584 5585 static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = { 5586 { 0, 64 }, 5587 { 65, 127 }, 5588 { 128, 255 }, 5589 { 256, 511 }, 5590 { 512, 1023 }, 5591 { 1024, 1518 }, 5592 { 1519, 2047 }, 5593 { 2048, 4095 }, 5594 { 4096, 9216 }, 5595 { 9217, 16383 }, 5596 {} 5597 }; 5598 5599 static void bnxt_get_rmon_stats(struct net_device *dev, 5600 struct ethtool_rmon_stats *rmon_stats, 5601 const struct ethtool_rmon_hist_range **ranges) 5602 { 5603 struct bnxt *bp = netdev_priv(dev); 5604 u64 *rx, *tx; 5605 5606 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 5607 return; 5608 5609 rx = bp->port_stats.sw_stats; 5610 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 5611 5612 rmon_stats->jabbers = 5613 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 5614 rmon_stats->oversize_pkts = 5615 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames); 5616 rmon_stats->undersize_pkts = 5617 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames); 5618 5619 rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames); 5620 rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames); 5621 rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames); 5622 rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames); 5623 rmon_stats->hist[4] = 5624 BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames); 5625 rmon_stats->hist[5] = 5626 BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames); 5627 rmon_stats->hist[6] = 5628 BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames); 5629 rmon_stats->hist[7] = 5630 BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames); 5631 rmon_stats->hist[8] = 5632 BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames); 5633 rmon_stats->hist[9] = 5634 BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames); 5635 5636 rmon_stats->hist_tx[0] = 5637 
BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames); 5638 rmon_stats->hist_tx[1] = 5639 BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames); 5640 rmon_stats->hist_tx[2] = 5641 BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames); 5642 rmon_stats->hist_tx[3] = 5643 BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames); 5644 rmon_stats->hist_tx[4] = 5645 BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames); 5646 rmon_stats->hist_tx[5] = 5647 BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames); 5648 rmon_stats->hist_tx[6] = 5649 BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames); 5650 rmon_stats->hist_tx[7] = 5651 BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames); 5652 rmon_stats->hist_tx[8] = 5653 BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames); 5654 rmon_stats->hist_tx[9] = 5655 BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames); 5656 5657 *ranges = bnxt_rmon_ranges; 5658 } 5659 5660 static void bnxt_get_ptp_stats(struct net_device *dev, 5661 struct ethtool_ts_stats *ts_stats) 5662 { 5663 struct bnxt *bp = netdev_priv(dev); 5664 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 5665 5666 if (ptp) { 5667 ts_stats->pkts = ptp->stats.ts_pkts; 5668 ts_stats->lost = ptp->stats.ts_lost; 5669 ts_stats->err = atomic64_read(&ptp->stats.ts_err); 5670 } 5671 } 5672 5673 static void bnxt_get_link_ext_stats(struct net_device *dev, 5674 struct ethtool_link_ext_stats *stats) 5675 { 5676 struct bnxt *bp = netdev_priv(dev); 5677 u64 *rx; 5678 5679 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 5680 return; 5681 5682 rx = bp->rx_port_stats_ext.sw_stats; 5683 stats->link_down_events = 5684 *(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events)); 5685 } 5686 5687 void bnxt_ethtool_free(struct bnxt *bp) 5688 { 5689 kfree(bp->test_info); 5690 bp->test_info = NULL; 5691 } 5692 5693 const struct ethtool_ops bnxt_ethtool_ops = { 5694 .cap_link_lanes_supported = 1, 5695 .rxfh_per_ctx_key = 1, 5696 .rxfh_max_num_contexts = BNXT_MAX_ETH_RSS_CTX + 1, 5697 .rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5, 
5698 .rxfh_priv_size = sizeof(struct bnxt_rss_ctx), 5699 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 5700 ETHTOOL_COALESCE_MAX_FRAMES | 5701 ETHTOOL_COALESCE_USECS_IRQ | 5702 ETHTOOL_COALESCE_MAX_FRAMES_IRQ | 5703 ETHTOOL_COALESCE_STATS_BLOCK_USECS | 5704 ETHTOOL_COALESCE_USE_ADAPTIVE_RX | 5705 ETHTOOL_COALESCE_USE_CQE, 5706 .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT | 5707 ETHTOOL_RING_USE_HDS_THRS, 5708 .get_link_ksettings = bnxt_get_link_ksettings, 5709 .set_link_ksettings = bnxt_set_link_ksettings, 5710 .get_fec_stats = bnxt_get_fec_stats, 5711 .get_fecparam = bnxt_get_fecparam, 5712 .set_fecparam = bnxt_set_fecparam, 5713 .get_pause_stats = bnxt_get_pause_stats, 5714 .get_pauseparam = bnxt_get_pauseparam, 5715 .set_pauseparam = bnxt_set_pauseparam, 5716 .get_drvinfo = bnxt_get_drvinfo, 5717 .get_regs_len = bnxt_get_regs_len, 5718 .get_regs = bnxt_get_regs, 5719 .get_wol = bnxt_get_wol, 5720 .set_wol = bnxt_set_wol, 5721 .get_coalesce = bnxt_get_coalesce, 5722 .set_coalesce = bnxt_set_coalesce, 5723 .get_msglevel = bnxt_get_msglevel, 5724 .set_msglevel = bnxt_set_msglevel, 5725 .get_sset_count = bnxt_get_sset_count, 5726 .get_strings = bnxt_get_strings, 5727 .get_ethtool_stats = bnxt_get_ethtool_stats, 5728 .set_ringparam = bnxt_set_ringparam, 5729 .get_ringparam = bnxt_get_ringparam, 5730 .get_channels = bnxt_get_channels, 5731 .set_channels = bnxt_set_channels, 5732 .get_rxnfc = bnxt_get_rxnfc, 5733 .set_rxnfc = bnxt_set_rxnfc, 5734 .get_rx_ring_count = bnxt_get_rx_ring_count, 5735 .get_rxfh_indir_size = bnxt_get_rxfh_indir_size, 5736 .get_rxfh_key_size = bnxt_get_rxfh_key_size, 5737 .get_rxfh = bnxt_get_rxfh, 5738 .set_rxfh = bnxt_set_rxfh, 5739 .get_rxfh_fields = bnxt_get_rxfh_fields, 5740 .set_rxfh_fields = bnxt_set_rxfh_fields, 5741 .create_rxfh_context = bnxt_create_rxfh_context, 5742 .modify_rxfh_context = bnxt_modify_rxfh_context, 5743 .remove_rxfh_context = bnxt_remove_rxfh_context, 5744 .flash_device = bnxt_flash_device, 5745 
.get_eeprom_len = bnxt_get_eeprom_len, 5746 .get_eeprom = bnxt_get_eeprom, 5747 .set_eeprom = bnxt_set_eeprom, 5748 .get_link = bnxt_get_link, 5749 .get_link_ext_state = bnxt_get_link_ext_state, 5750 .get_link_ext_stats = bnxt_get_link_ext_stats, 5751 .get_eee = bnxt_get_eee, 5752 .set_eee = bnxt_set_eee, 5753 .get_tunable = bnxt_get_tunable, 5754 .set_tunable = bnxt_set_tunable, 5755 .get_module_info = bnxt_get_module_info, 5756 .get_module_eeprom = bnxt_get_module_eeprom, 5757 .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page, 5758 .set_module_eeprom_by_page = bnxt_set_module_eeprom_by_page, 5759 .nway_reset = bnxt_nway_reset, 5760 .set_phys_id = bnxt_set_phys_id, 5761 .self_test = bnxt_self_test, 5762 .get_ts_info = bnxt_get_ts_info, 5763 .reset = bnxt_reset, 5764 .set_dump = bnxt_set_dump, 5765 .get_dump_flag = bnxt_get_dump_flag, 5766 .get_dump_data = bnxt_get_dump_data, 5767 .get_eth_phy_stats = bnxt_get_eth_phy_stats, 5768 .get_eth_mac_stats = bnxt_get_eth_mac_stats, 5769 .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats, 5770 .get_rmon_stats = bnxt_get_rmon_stats, 5771 .get_ts_stats = bnxt_get_ptp_stats, 5772 }; 5773