1 /* Broadcom NetXtreme-C/E network driver. 2 * 3 * Copyright (c) 2014-2016 Broadcom Corporation 4 * Copyright (c) 2016-2017 Broadcom Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation. 9 */ 10 11 #include <linux/bitops.h> 12 #include <linux/ctype.h> 13 #include <linux/stringify.h> 14 #include <linux/ethtool.h> 15 #include <linux/ethtool_netlink.h> 16 #include <linux/linkmode.h> 17 #include <linux/interrupt.h> 18 #include <linux/pci.h> 19 #include <linux/etherdevice.h> 20 #include <linux/crc32.h> 21 #include <linux/firmware.h> 22 #include <linux/utsname.h> 23 #include <linux/time.h> 24 #include <linux/ptp_clock_kernel.h> 25 #include <linux/net_tstamp.h> 26 #include <linux/timecounter.h> 27 #include <net/netdev_queues.h> 28 #include <net/netlink.h> 29 #include <linux/bnxt/hsi.h> 30 #include <linux/bnxt/ulp.h> 31 #include "bnxt.h" 32 #include "bnxt_hwrm.h" 33 #include "bnxt_xdp.h" 34 #include "bnxt_ptp.h" 35 #include "bnxt_ethtool.h" 36 #include "bnxt_gso.h" 37 #include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */ 38 #include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */ 39 #include "bnxt_coredump.h" 40 41 #define BNXT_NVM_ERR_MSG(dev, extack, msg) \ 42 do { \ 43 if (extack) \ 44 NL_SET_ERR_MSG_MOD(extack, msg); \ 45 netdev_err(dev, "%s\n", msg); \ 46 } while (0) 47 48 static u32 bnxt_get_msglevel(struct net_device *dev) 49 { 50 struct bnxt *bp = netdev_priv(dev); 51 52 return bp->msg_enable; 53 } 54 55 static void bnxt_set_msglevel(struct net_device *dev, u32 value) 56 { 57 struct bnxt *bp = netdev_priv(dev); 58 59 bp->msg_enable = value; 60 } 61 62 static int bnxt_get_coalesce(struct net_device *dev, 63 struct ethtool_coalesce *coal, 64 struct kernel_ethtool_coalesce *kernel_coal, 65 struct netlink_ext_ack *extack) 66 { 67 struct bnxt *bp = netdev_priv(dev); 68 struct bnxt_coal *hw_coal; 69 u16 mult; 70 71 memset(coal, 0, sizeof(*coal)); 72 73 coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM; 74 75 hw_coal = &bp->rx_coal; 76 mult = hw_coal->bufs_per_record; 77 coal->rx_coalesce_usecs = hw_coal->coal_ticks; 78 coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult; 79 coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; 80 coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; 81 if (hw_coal->flags & 82 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET) 83 kernel_coal->use_cqe_mode_rx = true; 84 85 hw_coal = &bp->tx_coal; 86 mult = hw_coal->bufs_per_record; 87 coal->tx_coalesce_usecs = hw_coal->coal_ticks; 88 coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult; 89 coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; 90 coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; 91 if (hw_coal->flags & 92 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET) 93 kernel_coal->use_cqe_mode_tx = true; 94 95 coal->stats_block_coalesce_usecs = bp->stats_coal_ticks; 96 97 return 0; 98 } 99 100 static int bnxt_set_coalesce(struct net_device *dev, 101 struct ethtool_coalesce *coal, 102 struct kernel_ethtool_coalesce *kernel_coal, 103 struct netlink_ext_ack *extack) 104 { 105 struct bnxt *bp = netdev_priv(dev); 106 bool update_stats = false; 107 struct bnxt_coal *hw_coal; 108 int rc = 0; 109 u16 mult; 110 111 if (coal->use_adaptive_rx_coalesce) { 112 bp->flags |= BNXT_FLAG_DIM; 113 } else { 114 if (bp->flags & BNXT_FLAG_DIM) { 115 bp->flags &= ~(BNXT_FLAG_DIM); 116 goto reset_coalesce; 117 } 118 } 119 120 if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) && 121 !(bp->coal_cap.cmpl_params & 122 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)) 123 return -EOPNOTSUPP; 124 125 hw_coal = &bp->rx_coal; 126 mult = hw_coal->bufs_per_record; 127 hw_coal->coal_ticks = coal->rx_coalesce_usecs; 128 hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult; 129 hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq; 130 hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult; 131 hw_coal->flags &= 132 ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 133 if (kernel_coal->use_cqe_mode_rx) 134 hw_coal->flags |= 135 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 136 137 hw_coal = &bp->tx_coal; 138 mult = hw_coal->bufs_per_record; 139 hw_coal->coal_ticks = coal->tx_coalesce_usecs; 140 hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult; 141 hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq; 142 hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult; 143 hw_coal->flags &= 144 ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 145 if (kernel_coal->use_cqe_mode_tx) 146 hw_coal->flags |= 147 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 148 149 if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) { 150 u32 stats_ticks = coal->stats_block_coalesce_usecs; 151 152 /* Allow 0, which means disable. */ 153 if (stats_ticks) 154 stats_ticks = clamp_t(u32, stats_ticks, 155 BNXT_MIN_STATS_COAL_TICKS, 156 BNXT_MAX_STATS_COAL_TICKS); 157 stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS); 158 bp->stats_coal_ticks = stats_ticks; 159 if (bp->stats_coal_ticks) 160 bp->current_interval = 161 bp->stats_coal_ticks * HZ / 1000000; 162 else 163 bp->current_interval = BNXT_TIMER_INTERVAL; 164 update_stats = true; 165 } 166 167 reset_coalesce: 168 if (test_bit(BNXT_STATE_OPEN, &bp->state)) { 169 if (update_stats) { 170 bnxt_close_nic(bp, true, false); 171 rc = bnxt_open_nic(bp, true, false); 172 } else { 173 rc = bnxt_hwrm_set_coal(bp); 174 } 175 } 176 177 return rc; 178 } 179 180 static const char * const bnxt_ring_rx_stats_str[] = { 181 "rx_ucast_packets", 182 "rx_mcast_packets", 183 "rx_bcast_packets", 184 "rx_discards", 185 "rx_errors", 186 "rx_ucast_bytes", 187 "rx_mcast_bytes", 188 "rx_bcast_bytes", 189 }; 190 191 static const char * const bnxt_ring_tx_stats_str[] = { 192 "tx_ucast_packets", 193 "tx_mcast_packets", 194 "tx_bcast_packets", 195 "tx_errors", 196 "tx_discards", 197 "tx_ucast_bytes", 198 "tx_mcast_bytes", 199 "tx_bcast_bytes", 200 }; 201 202 static const char * const bnxt_ring_tpa_stats_str[] = { 203 "tpa_packets", 204 "tpa_bytes", 205 "tpa_events", 206 "tpa_aborts", 207 }; 208 209 static const char * const bnxt_ring_tpa2_stats_str[] = { 210 "rx_tpa_eligible_pkt", 211 "rx_tpa_eligible_bytes", 212 "rx_tpa_pkt", 213 "rx_tpa_bytes", 214 "rx_tpa_errors", 215 "rx_tpa_events", 216 }; 217 218 static const char * const bnxt_rx_sw_stats_str[] = { 219 "rx_l4_csum_errors", 220 "rx_resets", 221 "rx_buf_errors", 222 }; 223 224 static const char * const bnxt_cmn_sw_stats_str[] = { 225 "missed_irqs", 226 }; 227 228 #define BNXT_RX_STATS_ENTRY(counter) \ 229 { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) } 230 231 #define BNXT_TX_STATS_ENTRY(counter) \ 232 { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) } 233 234 #define BNXT_RX_STATS_EXT_ENTRY(counter) \ 235 { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) } 236 237 #define BNXT_TX_STATS_EXT_ENTRY(counter) \ 238 { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) } 239 240 #define BNXT_RX_STATS_EXT_PFC_ENTRY(n) \ 241 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us), \ 242 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions) 243 244 #define BNXT_TX_STATS_EXT_PFC_ENTRY(n) \ 245 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us), \ 246 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions) 247 248 #define BNXT_RX_STATS_EXT_PFC_ENTRIES \ 249 BNXT_RX_STATS_EXT_PFC_ENTRY(0), \ 250 BNXT_RX_STATS_EXT_PFC_ENTRY(1), \ 251 BNXT_RX_STATS_EXT_PFC_ENTRY(2), \ 252 BNXT_RX_STATS_EXT_PFC_ENTRY(3), \ 253 BNXT_RX_STATS_EXT_PFC_ENTRY(4), \ 254 BNXT_RX_STATS_EXT_PFC_ENTRY(5), \ 255 BNXT_RX_STATS_EXT_PFC_ENTRY(6), \ 256 BNXT_RX_STATS_EXT_PFC_ENTRY(7) 257 258 #define BNXT_TX_STATS_EXT_PFC_ENTRIES \ 259 BNXT_TX_STATS_EXT_PFC_ENTRY(0), \ 260 BNXT_TX_STATS_EXT_PFC_ENTRY(1), \ 261 BNXT_TX_STATS_EXT_PFC_ENTRY(2), \ 262 BNXT_TX_STATS_EXT_PFC_ENTRY(3), \ 263 BNXT_TX_STATS_EXT_PFC_ENTRY(4), \ 264 BNXT_TX_STATS_EXT_PFC_ENTRY(5), \ 265 BNXT_TX_STATS_EXT_PFC_ENTRY(6), \ 266 BNXT_TX_STATS_EXT_PFC_ENTRY(7) 267 268 #define BNXT_RX_STATS_EXT_COS_ENTRY(n) \ 269 BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n), \ 270 BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n) 271 272 #define BNXT_TX_STATS_EXT_COS_ENTRY(n) \ 273 BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n), \ 274 BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n) 275 276 #define BNXT_RX_STATS_EXT_COS_ENTRIES \ 277 BNXT_RX_STATS_EXT_COS_ENTRY(0), \ 278 BNXT_RX_STATS_EXT_COS_ENTRY(1), \ 279 BNXT_RX_STATS_EXT_COS_ENTRY(2), \ 280 BNXT_RX_STATS_EXT_COS_ENTRY(3), \ 281 BNXT_RX_STATS_EXT_COS_ENTRY(4), \ 282 BNXT_RX_STATS_EXT_COS_ENTRY(5), \ 283 BNXT_RX_STATS_EXT_COS_ENTRY(6), \ 284 BNXT_RX_STATS_EXT_COS_ENTRY(7) \ 285 286 #define BNXT_TX_STATS_EXT_COS_ENTRIES \ 287 BNXT_TX_STATS_EXT_COS_ENTRY(0), \ 288 BNXT_TX_STATS_EXT_COS_ENTRY(1), \ 289 BNXT_TX_STATS_EXT_COS_ENTRY(2), \ 290 BNXT_TX_STATS_EXT_COS_ENTRY(3), \ 291 BNXT_TX_STATS_EXT_COS_ENTRY(4), \ 292 BNXT_TX_STATS_EXT_COS_ENTRY(5), \ 293 BNXT_TX_STATS_EXT_COS_ENTRY(6), \ 294 BNXT_TX_STATS_EXT_COS_ENTRY(7) \ 295 296 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \ 297 BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \ 298 BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n) 299 300 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \ 301 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \ 302 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \ 303 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \ 304 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \ 305 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \ 306 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \ 307 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \ 308 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7) 309 310 #define BNXT_RX_STATS_PRI_ENTRY(counter, n) \ 311 { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \ 312 __stringify(counter##_pri##n) } 313 314 #define BNXT_TX_STATS_PRI_ENTRY(counter, n) \ 315 { BNXT_TX_STATS_EXT_OFFSET(counter##_cos0), \ 316 __stringify(counter##_pri##n) } 317 318 #define BNXT_RX_STATS_PRI_ENTRIES(counter) \ 319 BNXT_RX_STATS_PRI_ENTRY(counter, 0), \ 320 BNXT_RX_STATS_PRI_ENTRY(counter, 1), \ 321 BNXT_RX_STATS_PRI_ENTRY(counter, 2), \ 322 BNXT_RX_STATS_PRI_ENTRY(counter, 3), \ 323 BNXT_RX_STATS_PRI_ENTRY(counter, 4), \ 324 BNXT_RX_STATS_PRI_ENTRY(counter, 5), \ 325 BNXT_RX_STATS_PRI_ENTRY(counter, 6), \ 326 BNXT_RX_STATS_PRI_ENTRY(counter, 7) 327 328 #define BNXT_TX_STATS_PRI_ENTRIES(counter) \ 329 BNXT_TX_STATS_PRI_ENTRY(counter, 0), \ 330 BNXT_TX_STATS_PRI_ENTRY(counter, 1), \ 331 BNXT_TX_STATS_PRI_ENTRY(counter, 2), \ 332 BNXT_TX_STATS_PRI_ENTRY(counter, 3), \ 333 BNXT_TX_STATS_PRI_ENTRY(counter, 4), \ 334 BNXT_TX_STATS_PRI_ENTRY(counter, 5), \ 335 BNXT_TX_STATS_PRI_ENTRY(counter, 6), \ 336 BNXT_TX_STATS_PRI_ENTRY(counter, 7) 337 338 enum { 339 RX_TOTAL_DISCARDS, 340 TX_TOTAL_DISCARDS, 341 RX_NETPOLL_DISCARDS, 342 }; 343 344 static const char *const bnxt_ring_drv_stats_arr[] = { 345 "rx_total_l4_csum_errors", 346 "rx_total_resets", 347 "rx_total_buf_errors", 348 "rx_total_oom_discards", 349 "rx_total_netpoll_discards", 350 "rx_total_ring_discards", 351 "tx_total_resets", 352 "tx_total_ring_discards", 353 "total_missed_irqs", 354 }; 355 356 #define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str) 357 #define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str) 358 #define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str) 359 #define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str) 360 361 static const struct { 362 long offset; 363 char string[ETH_GSTRING_LEN]; 364 } bnxt_port_stats_arr[] = { 365 BNXT_RX_STATS_ENTRY(rx_64b_frames), 366 BNXT_RX_STATS_ENTRY(rx_65b_127b_frames), 367 BNXT_RX_STATS_ENTRY(rx_128b_255b_frames), 368 BNXT_RX_STATS_ENTRY(rx_256b_511b_frames), 369 BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames), 370 BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames), 371 BNXT_RX_STATS_ENTRY(rx_good_vlan_frames), 372 BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames), 373 BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames), 374 BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames), 375 BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames), 376 BNXT_RX_STATS_ENTRY(rx_total_frames), 377 BNXT_RX_STATS_ENTRY(rx_ucast_frames), 378 BNXT_RX_STATS_ENTRY(rx_mcast_frames), 379 BNXT_RX_STATS_ENTRY(rx_bcast_frames), 380 BNXT_RX_STATS_ENTRY(rx_fcs_err_frames), 381 BNXT_RX_STATS_ENTRY(rx_ctrl_frames), 382 BNXT_RX_STATS_ENTRY(rx_pause_frames), 383 BNXT_RX_STATS_ENTRY(rx_pfc_frames), 384 BNXT_RX_STATS_ENTRY(rx_align_err_frames), 385 BNXT_RX_STATS_ENTRY(rx_ovrsz_frames), 386 BNXT_RX_STATS_ENTRY(rx_jbr_frames), 387 BNXT_RX_STATS_ENTRY(rx_mtu_err_frames), 388 BNXT_RX_STATS_ENTRY(rx_tagged_frames), 389 BNXT_RX_STATS_ENTRY(rx_double_tagged_frames), 390 BNXT_RX_STATS_ENTRY(rx_good_frames), 391 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0), 392 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1), 393 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2), 394 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3), 395 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4), 396 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5), 397 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6), 398 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7), 399 BNXT_RX_STATS_ENTRY(rx_undrsz_frames), 400 BNXT_RX_STATS_ENTRY(rx_eee_lpi_events), 401 BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration), 402 BNXT_RX_STATS_ENTRY(rx_bytes), 403 BNXT_RX_STATS_ENTRY(rx_runt_bytes), 404 BNXT_RX_STATS_ENTRY(rx_runt_frames), 405 BNXT_RX_STATS_ENTRY(rx_stat_discard), 406 BNXT_RX_STATS_ENTRY(rx_stat_err), 407 408 BNXT_TX_STATS_ENTRY(tx_64b_frames), 409 BNXT_TX_STATS_ENTRY(tx_65b_127b_frames), 410 BNXT_TX_STATS_ENTRY(tx_128b_255b_frames), 411 BNXT_TX_STATS_ENTRY(tx_256b_511b_frames), 412 BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames), 413 BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames), 414 BNXT_TX_STATS_ENTRY(tx_good_vlan_frames), 415 BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames), 416 BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames), 417 BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames), 418 BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames), 419 BNXT_TX_STATS_ENTRY(tx_good_frames), 420 BNXT_TX_STATS_ENTRY(tx_total_frames), 421 BNXT_TX_STATS_ENTRY(tx_ucast_frames), 422 BNXT_TX_STATS_ENTRY(tx_mcast_frames), 423 BNXT_TX_STATS_ENTRY(tx_bcast_frames), 424 BNXT_TX_STATS_ENTRY(tx_pause_frames), 425 BNXT_TX_STATS_ENTRY(tx_pfc_frames), 426 BNXT_TX_STATS_ENTRY(tx_jabber_frames), 427 BNXT_TX_STATS_ENTRY(tx_fcs_err_frames), 428 BNXT_TX_STATS_ENTRY(tx_err), 429 BNXT_TX_STATS_ENTRY(tx_fifo_underruns), 430 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0), 431 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1), 432 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2), 433 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3), 434 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4), 435 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5), 436 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6), 437 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7), 438 BNXT_TX_STATS_ENTRY(tx_eee_lpi_events), 439 BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration), 440 BNXT_TX_STATS_ENTRY(tx_total_collisions), 441 BNXT_TX_STATS_ENTRY(tx_bytes), 442 BNXT_TX_STATS_ENTRY(tx_xthol_frames), 443 BNXT_TX_STATS_ENTRY(tx_stat_discard), 444 BNXT_TX_STATS_ENTRY(tx_stat_error), 445 }; 446 447 static const struct { 448 long offset; 449 char string[ETH_GSTRING_LEN]; 450 } bnxt_port_stats_ext_arr[] = { 451 BNXT_RX_STATS_EXT_ENTRY(link_down_events), 452 BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events), 453 BNXT_RX_STATS_EXT_ENTRY(resume_pause_events), 454 BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events), 455 BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events), 456 BNXT_RX_STATS_EXT_COS_ENTRIES, 457 BNXT_RX_STATS_EXT_PFC_ENTRIES, 458 BNXT_RX_STATS_EXT_ENTRY(rx_bits), 459 BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold), 460 BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err), 461 BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits), 462 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES, 463 BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks), 464 BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks), 465 BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss), 466 }; 467 468 static const struct { 469 long offset; 470 char string[ETH_GSTRING_LEN]; 471 } bnxt_tx_port_stats_ext_arr[] = { 472 BNXT_TX_STATS_EXT_COS_ENTRIES, 473 BNXT_TX_STATS_EXT_PFC_ENTRIES, 474 }; 475 476 static const struct { 477 long base_off; 478 char string[ETH_GSTRING_LEN]; 479 } bnxt_rx_bytes_pri_arr[] = { 480 BNXT_RX_STATS_PRI_ENTRIES(rx_bytes), 481 }; 482 483 static const struct { 484 long base_off; 485 char string[ETH_GSTRING_LEN]; 486 } bnxt_rx_pkts_pri_arr[] = { 487 BNXT_RX_STATS_PRI_ENTRIES(rx_packets), 488 }; 489 490 static const struct { 491 long base_off; 492 char string[ETH_GSTRING_LEN]; 493 } bnxt_tx_bytes_pri_arr[] = { 494 BNXT_TX_STATS_PRI_ENTRIES(tx_bytes), 495 }; 496 497 static const struct { 498 long base_off; 499 char string[ETH_GSTRING_LEN]; 500 } bnxt_tx_pkts_pri_arr[] = { 501 BNXT_TX_STATS_PRI_ENTRIES(tx_packets), 502 }; 503 504 #define BNXT_NUM_RING_DRV_STATS ARRAY_SIZE(bnxt_ring_drv_stats_arr) 505 #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) 506 #define BNXT_NUM_STATS_PRI \ 507 (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \ 508 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \ 509 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \ 510 ARRAY_SIZE(bnxt_tx_pkts_pri_arr)) 511 512 static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp) 513 { 514 if (BNXT_SUPPORTS_TPA(bp)) { 515 if (bp->max_tpa_v2) { 516 if (BNXT_CHIP_P5(bp)) 517 return BNXT_NUM_TPA_RING_STATS_P5; 518 return BNXT_NUM_TPA_RING_STATS_P7; 519 } 520 return BNXT_NUM_TPA_RING_STATS; 521 } 522 return 0; 523 } 524 525 static int bnxt_get_num_ring_stats(struct bnxt *bp) 526 { 527 int rx, tx, cmn; 528 529 rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS + 530 bnxt_get_num_tpa_ring_stats(bp); 531 tx = NUM_RING_TX_HW_STATS; 532 cmn = NUM_RING_CMN_SW_STATS; 533 return rx * bp->rx_nr_rings + 534 tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) + 535 cmn * bp->cp_nr_rings; 536 } 537 538 static int bnxt_get_num_stats(struct bnxt *bp) 539 { 540 int num_stats = bnxt_get_num_ring_stats(bp); 541 int len; 542 543 num_stats += BNXT_NUM_RING_DRV_STATS; 544 545 if (bp->flags & BNXT_FLAG_PORT_STATS) 546 num_stats += BNXT_NUM_PORT_STATS; 547 548 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 549 len = min_t(int, bp->fw_rx_stats_ext_size, 550 ARRAY_SIZE(bnxt_port_stats_ext_arr)); 551 num_stats += len; 552 len = min_t(int, bp->fw_tx_stats_ext_size, 553 ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); 554 num_stats += len; 555 if (bp->pri2cos_valid) 556 num_stats += BNXT_NUM_STATS_PRI; 557 } 558 559 return num_stats; 560 } 561 562 static int bnxt_get_sset_count(struct net_device *dev, int sset) 563 { 564 struct bnxt *bp = netdev_priv(dev); 565 566 switch (sset) { 567 case ETH_SS_STATS: 568 return bnxt_get_num_stats(bp); 569 case ETH_SS_TEST: 570 if (!bp->num_tests) 571 return -EOPNOTSUPP; 572 return bp->num_tests; 573 default: 574 return -EOPNOTSUPP; 575 } 576 } 577 578 static bool is_rx_ring(struct bnxt *bp, int ring_num) 579 { 580 return ring_num < bp->rx_nr_rings; 581 } 582 583 static bool is_tx_ring(struct bnxt *bp, int ring_num) 584 { 585 int tx_base = 0; 586 587 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 588 tx_base = bp->rx_nr_rings; 589 590 if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings)) 591 return true; 592 return false; 593 } 594 595 static void bnxt_get_ethtool_stats(struct net_device *dev, 596 struct ethtool_stats *stats, u64 *buf) 597 { 598 struct bnxt_total_ring_drv_stats ring_drv_stats = {0}; 599 struct bnxt *bp = netdev_priv(dev); 600 u64 *curr, *prev; 601 u32 tpa_stats; 602 u32 i, j = 0; 603 604 if (!bp->bnapi) { 605 j += bnxt_get_num_ring_stats(bp); 606 goto skip_ring_stats; 607 } 608 609 tpa_stats = bnxt_get_num_tpa_ring_stats(bp); 610 for (i = 0; i < bp->cp_nr_rings; i++) { 611 struct bnxt_napi *bnapi = bp->bnapi[i]; 612 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 613 u64 *sw_stats = cpr->stats.sw_stats; 614 u64 *sw; 615 int k; 616 617 if (is_rx_ring(bp, i)) { 618 for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++) 619 buf[j] = sw_stats[k]; 620 } 621 if (is_tx_ring(bp, i)) { 622 k = NUM_RING_RX_HW_STATS; 623 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; 624 j++, k++) 625 buf[j] = sw_stats[k]; 626 } 627 if (!tpa_stats || !is_rx_ring(bp, i)) 628 goto skip_tpa_ring_stats; 629 630 k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; 631 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS + 632 tpa_stats; j++, k++) 633 buf[j] = sw_stats[k]; 634 635 skip_tpa_ring_stats: 636 sw = (u64 *)&cpr->sw_stats->rx; 637 if (is_rx_ring(bp, i)) { 638 for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++) 639 buf[j] = sw[k]; 640 } 641 642 sw = (u64 *)&cpr->sw_stats->cmn; 643 for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++) 644 buf[j] = sw[k]; 645 } 646 647 bnxt_get_ring_drv_stats(bp, &ring_drv_stats); 648 649 skip_ring_stats: 650 curr = &ring_drv_stats.rx_total_l4_csum_errors; 651 prev = &bp->ring_drv_stats_prev.rx_total_l4_csum_errors; 652 for (i = 0; i < BNXT_NUM_RING_DRV_STATS; i++, j++, curr++, prev++) 653 buf[j] = *curr + *prev; 654 655 if (bp->flags & BNXT_FLAG_PORT_STATS) { 656 u64 *port_stats = bp->port_stats.sw_stats; 657 658 for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) 659 buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset); 660 } 661 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 662 u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats; 663 u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats; 664 u32 len; 665 666 len = min_t(u32, bp->fw_rx_stats_ext_size, 667 ARRAY_SIZE(bnxt_port_stats_ext_arr)); 668 for (i = 0; i < len; i++, j++) { 669 buf[j] = *(rx_port_stats_ext + 670 bnxt_port_stats_ext_arr[i].offset); 671 } 672 len = min_t(u32, bp->fw_tx_stats_ext_size, 673 ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); 674 for (i = 0; i < len; i++, j++) { 675 buf[j] = *(tx_port_stats_ext + 676 bnxt_tx_port_stats_ext_arr[i].offset); 677 } 678 if (bp->pri2cos_valid) { 679 for (i = 0; i < 8; i++, j++) { 680 long n = bnxt_rx_bytes_pri_arr[i].base_off + 681 bp->pri2cos_idx[i]; 682 683 buf[j] = *(rx_port_stats_ext + n); 684 } 685 for (i = 0; i < 8; i++, j++) { 686 long n = bnxt_rx_pkts_pri_arr[i].base_off + 687 bp->pri2cos_idx[i]; 688 689 buf[j] = *(rx_port_stats_ext + n); 690 } 691 for (i = 0; i < 8; i++, j++) { 692 u8 cos_idx = bp->pri2cos_idx[i]; 693 long n; 694 695 n = bnxt_tx_bytes_pri_arr[i].base_off + cos_idx; 696 buf[j] = *(tx_port_stats_ext + n); 697 if (bp->cos0_cos1_shared && !cos_idx) 698 buf[j] += *(tx_port_stats_ext + n + 1); 699 } 700 for (i = 0; i < 8; i++, j++) { 701 u8 cos_idx = bp->pri2cos_idx[i]; 702 long n; 703 704 n = bnxt_tx_pkts_pri_arr[i].base_off + cos_idx; 705 buf[j] = *(tx_port_stats_ext + n); 706 if (bp->cos0_cos1_shared && !cos_idx) 707 buf[j] += *(tx_port_stats_ext + n + 1); 708 } 709 } 710 } 711 } 712 713 static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 714 { 715 struct bnxt *bp = netdev_priv(dev); 716 u32 i, j, num_str; 717 const char *str; 718 719 switch (stringset) { 720 case ETH_SS_STATS: 721 for (i = 0; i < bp->cp_nr_rings; i++) { 722 if (is_rx_ring(bp, i)) 723 for (j = 0; j < NUM_RING_RX_HW_STATS; j++) { 724 str = bnxt_ring_rx_stats_str[j]; 725 ethtool_sprintf(&buf, "[%d]: %s", i, 726 str); 727 } 728 if (is_tx_ring(bp, i)) 729 for (j = 0; j < NUM_RING_TX_HW_STATS; j++) { 730 str = bnxt_ring_tx_stats_str[j]; 731 ethtool_sprintf(&buf, "[%d]: %s", i, 732 str); 733 } 734 num_str = bnxt_get_num_tpa_ring_stats(bp); 735 if (!num_str || !is_rx_ring(bp, i)) 736 goto skip_tpa_stats; 737 738 if (bp->max_tpa_v2) 739 for (j = 0; j < num_str; j++) { 740 str = bnxt_ring_tpa2_stats_str[j]; 741 ethtool_sprintf(&buf, "[%d]: %s", i, 742 str); 743 } 744 else 745 for (j = 0; j < num_str; j++) { 746 str = bnxt_ring_tpa_stats_str[j]; 747 ethtool_sprintf(&buf, "[%d]: %s", i, 748 str); 749 } 750 skip_tpa_stats: 751 if (is_rx_ring(bp, i)) 752 for (j = 0; j < NUM_RING_RX_SW_STATS; j++) { 753 str = bnxt_rx_sw_stats_str[j]; 754 ethtool_sprintf(&buf, "[%d]: %s", i, 755 str); 756 } 757 for (j = 0; j < NUM_RING_CMN_SW_STATS; j++) { 758 str = bnxt_cmn_sw_stats_str[j]; 759 ethtool_sprintf(&buf, "[%d]: %s", i, str); 760 } 761 } 762 for (i = 0; i < BNXT_NUM_RING_DRV_STATS; i++) 763 ethtool_puts(&buf, bnxt_ring_drv_stats_arr[i]); 764 765 if (bp->flags & BNXT_FLAG_PORT_STATS) 766 for (i = 0; i < BNXT_NUM_PORT_STATS; i++) { 767 str = bnxt_port_stats_arr[i].string; 768 ethtool_puts(&buf, str); 769 } 770 771 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 772 u32 len; 773 774 len = min_t(u32, bp->fw_rx_stats_ext_size, 775 ARRAY_SIZE(bnxt_port_stats_ext_arr)); 776 for (i = 0; i < len; i++) { 777 str = bnxt_port_stats_ext_arr[i].string; 778 ethtool_puts(&buf, str); 779 } 780 781 len = min_t(u32, bp->fw_tx_stats_ext_size, 782 ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); 783 for (i = 0; i < len; i++) { 784 str = bnxt_tx_port_stats_ext_arr[i].string; 785 ethtool_puts(&buf, str); 786 } 787 788 if (bp->pri2cos_valid) { 789 for (i = 0; i < 8; i++) { 790 str = bnxt_rx_bytes_pri_arr[i].string; 791 ethtool_puts(&buf, str); 792 } 793 794 for (i = 0; i < 8; i++) { 795 str = bnxt_rx_pkts_pri_arr[i].string; 796 ethtool_puts(&buf, str); 797 } 798 799 for (i = 0; i < 8; i++) { 800 str = bnxt_tx_bytes_pri_arr[i].string; 801 ethtool_puts(&buf, str); 802 } 803 804 for (i = 0; i < 8; i++) { 805 str = bnxt_tx_pkts_pri_arr[i].string; 806 ethtool_puts(&buf, str); 807 } 808 } 809 } 810 break; 811 case ETH_SS_TEST: 812 if (bp->num_tests) 813 for (i = 0; i < bp->num_tests; i++) 814 ethtool_puts(&buf, bp->test_info->string[i]); 815 break; 816 default: 817 netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n", 818 stringset); 819 break; 820 } 821 } 822 823 static void bnxt_get_ringparam(struct net_device *dev, 824 struct ethtool_ringparam *ering, 825 struct kernel_ethtool_ringparam *kernel_ering, 826 struct netlink_ext_ack *extack) 827 { 828 struct bnxt *bp = netdev_priv(dev); 829 830 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 831 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA; 832 ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT; 833 kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED; 834 } else { 835 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT; 836 ering->rx_jumbo_max_pending = 0; 837 kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED; 838 } 839 ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT; 840 841 ering->rx_pending = bp->rx_ring_size; 842 ering->rx_jumbo_pending = bp->rx_agg_ring_size; 843 ering->tx_pending = bp->tx_ring_size; 844 845 kernel_ering->hds_thresh_max = BNXT_HDS_THRESHOLD_MAX; 846 } 847 848 static int bnxt_set_ringparam(struct net_device *dev, 849 struct ethtool_ringparam *ering, 850 struct kernel_ethtool_ringparam *kernel_ering, 851 struct netlink_ext_ack *extack) 852 { 853 u8 tcp_data_split = kernel_ering->tcp_data_split; 854 struct bnxt *bp = netdev_priv(dev); 855 u8 hds_config_mod; 856 int rc; 857 858 if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) || 859 (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) || 860 (ering->tx_pending < BNXT_MIN_TX_DESC_CNT)) 861 return -EINVAL; 862 863 if ((dev->features & NETIF_F_GSO_UDP_L4) && 864 !(bp->flags & BNXT_FLAG_UDP_GSO_CAP) && 865 ering->tx_pending < 2 * BNXT_SW_USO_MAX_DESCS) 866 return -EINVAL; 867 868 hds_config_mod = tcp_data_split != dev->cfg->hds_config; 869 if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && hds_config_mod) 870 return -EINVAL; 871 872 if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED && 873 hds_config_mod && BNXT_RX_PAGE_MODE(bp)) { 874 NL_SET_ERR_MSG_MOD(extack, "tcp-data-split is disallowed when XDP is attached"); 875 return -EINVAL; 876 } 877 878 if (netif_running(dev)) 879 bnxt_close_nic(bp, false, false); 880 881 if (hds_config_mod) { 882 if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED) 883 bp->flags |= BNXT_FLAG_HDS; 884 else if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN) 885 bp->flags &= ~BNXT_FLAG_HDS; 886 } 887 888 bp->rx_ring_size = ering->rx_pending; 889 bp->tx_ring_size = ering->tx_pending; 890 bnxt_set_ring_params(bp); 891 892 if (netif_running(dev)) { 893 rc = bnxt_open_nic(bp, false, false); 894 if (rc) 895 return rc; 896 } 897 898 /* ring size changes may affect features (SW USO requires a minimum 899 * ring size), so recalculate features to ensure the correct features 900 * are blocked/available. 901 */ 902 netdev_update_features(dev); 903 return 0; 904 } 905 906 static void bnxt_get_channels(struct net_device *dev, 907 struct ethtool_channels *channel) 908 { 909 struct bnxt *bp = netdev_priv(dev); 910 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 911 int max_rx_rings, max_tx_rings, tcs; 912 int max_tx_sch_inputs, tx_grps; 913 914 /* Get the most up-to-date max_tx_sch_inputs. */ 915 if (netif_running(dev) && BNXT_NEW_RM(bp)) 916 bnxt_hwrm_func_resc_qcaps(bp, false); 917 max_tx_sch_inputs = hw_resc->max_tx_sch_inputs; 918 919 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true); 920 if (max_tx_sch_inputs) 921 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs); 922 923 tcs = bp->num_tc; 924 tx_grps = max(tcs, 1); 925 if (bp->tx_nr_rings_xdp) 926 tx_grps++; 927 max_tx_rings /= tx_grps; 928 channel->max_combined = min_t(int, max_rx_rings, max_tx_rings); 929 930 if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) { 931 max_rx_rings = 0; 932 max_tx_rings = 0; 933 } 934 if (max_tx_sch_inputs) 935 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs); 936 937 if (tcs > 1) 938 max_tx_rings /= tcs; 939 940 channel->max_rx = max_rx_rings; 941 channel->max_tx = max_tx_rings; 942 channel->max_other = 0; 943 if (bp->flags & BNXT_FLAG_SHARED_RINGS) { 944 channel->combined_count = bp->rx_nr_rings; 945 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 946 channel->combined_count--; 947 } else { 948 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) { 949 channel->rx_count = bp->rx_nr_rings; 950 channel->tx_count = bp->tx_nr_rings_per_tc; 951 } 952 } 953 } 954 955 static int bnxt_set_channels(struct net_device *dev, 956 struct ethtool_channels *channel) 957 { 958 struct bnxt *bp = netdev_priv(dev); 959 int req_tx_rings, req_rx_rings, tcs; 960 u32 new_tbl_size = 0, old_tbl_size; 961 bool sh = false; 962 int tx_xdp = 0; 963 int rc = 0; 964 965 if (channel->other_count) 966 return -EINVAL; 967 968 if (!channel->combined_count && 969 (!channel->rx_count || !channel->tx_count)) 970 return -EINVAL; 971 972 if (channel->combined_count && 973 (channel->rx_count || channel->tx_count)) 974 return -EINVAL; 975 976 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count || 977 channel->tx_count)) 978 return -EINVAL; 979 980 if (channel->combined_count) 981 sh = true; 982 983 tcs = bp->num_tc; 984 985 req_tx_rings = sh ? channel->combined_count : channel->tx_count; 986 req_rx_rings = sh ? channel->combined_count : channel->rx_count; 987 if (bp->tx_nr_rings_xdp) { 988 if (!sh) { 989 netdev_err(dev, "Only combined mode supported when XDP is enabled.\n"); 990 return -EINVAL; 991 } 992 tx_xdp = req_rx_rings; 993 } 994 995 rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp); 996 if (rc) { 997 netdev_warn(dev, "Unable to allocate the requested rings\n"); 998 return rc; 999 } 1000 1001 /* RSS table size only changes on P5 chips with older firmware; 1002 * newer firmware always uses the largest table size. 1003 */ 1004 if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) != 1005 bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings)) { 1006 new_tbl_size = bnxt_get_nr_rss_ctxs(bp, req_rx_rings) * 1007 BNXT_RSS_TABLE_ENTRIES_P5; 1008 old_tbl_size = bnxt_get_rxfh_indir_size(dev); 1009 1010 if (!ethtool_rxfh_indir_can_resize(dev, bp->rss_indir_tbl, 1011 old_tbl_size, 1012 new_tbl_size)) { 1013 netdev_warn(dev, "RSS table resize not possible\n"); 1014 return -EINVAL; 1015 } 1016 1017 rc = ethtool_rxfh_ctxs_can_resize(dev, new_tbl_size); 1018 if (rc) 1019 return rc; 1020 } 1021 1022 if (netif_running(dev)) { 1023 if (BNXT_PF(bp)) { 1024 /* TODO CHIMP_FW: Send message to all VF's 1025 * before PF unload 1026 */ 1027 } 1028 bnxt_close_nic(bp, true, false); 1029 } 1030 1031 if (new_tbl_size) { 1032 ethtool_rxfh_indir_resize(dev, bp->rss_indir_tbl, 1033 old_tbl_size, new_tbl_size); 1034 ethtool_rxfh_ctxs_resize(dev, new_tbl_size); 1035 } 1036 1037 if (sh) { 1038 bp->flags |= BNXT_FLAG_SHARED_RINGS; 1039 bp->rx_nr_rings = channel->combined_count; 1040 bp->tx_nr_rings_per_tc = channel->combined_count; 1041 } else { 1042 bp->flags &= ~BNXT_FLAG_SHARED_RINGS; 1043 bp->rx_nr_rings = channel->rx_count; 1044 bp->tx_nr_rings_per_tc = channel->tx_count; 1045 } 1046 bp->tx_nr_rings_xdp = tx_xdp; 1047 bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp; 1048 if (tcs > 1) 1049 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp; 1050 1051 bnxt_set_cp_rings(bp, sh); 1052 1053 /* After changing number of rx channels, update NTUPLE feature. */ 1054 netdev_update_features(dev); 1055 if (netif_running(dev)) { 1056 rc = bnxt_open_nic(bp, true, false); 1057 if ((!rc) && BNXT_PF(bp)) { 1058 /* TODO CHIMP_FW: Send message to all VF's 1059 * to renable 1060 */ 1061 } 1062 } else { 1063 rc = bnxt_reserve_rings(bp, true); 1064 } 1065 1066 return rc; 1067 } 1068 1069 static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[], 1070 int tbl_size, u32 *ids, u32 start, 1071 u32 id_cnt) 1072 { 1073 int i, j = start; 1074 1075 if (j >= id_cnt) 1076 return j; 1077 for (i = 0; i < tbl_size; i++) { 1078 struct hlist_head *head; 1079 struct bnxt_filter_base *fltr; 1080 1081 head = &tbl[i]; 1082 hlist_for_each_entry_rcu(fltr, head, hash) { 1083 if (!fltr->flags || 1084 test_bit(BNXT_FLTR_FW_DELETED, &fltr->state)) 1085 continue; 1086 ids[j++] = fltr->sw_id; 1087 if (j == id_cnt) 1088 return j; 1089 } 1090 } 1091 return j; 1092 } 1093 1094 static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp, 1095 struct hlist_head tbl[], 1096 int tbl_size, u32 id) 1097 { 1098 int i; 1099 1100 for (i = 0; i < tbl_size; i++) { 1101 struct hlist_head *head; 1102 struct bnxt_filter_base *fltr; 1103 1104 head = &tbl[i]; 1105 hlist_for_each_entry_rcu(fltr, head, hash) { 1106 if (fltr->flags && fltr->sw_id == id) 1107 return fltr; 1108 } 1109 } 1110 return NULL; 1111 } 1112 1113 static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd, 1114 u32 *rule_locs) 1115 { 1116 u32 count; 1117 1118 cmd->data = bp->ntp_fltr_count; 1119 rcu_read_lock(); 1120 count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl, 1121 BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0, 1122 cmd->rule_cnt); 1123 cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl, 1124 BNXT_NTP_FLTR_HASH_SIZE, 1125 rule_locs, count, 1126 cmd->rule_cnt); 1127 rcu_read_unlock(); 1128 1129 return 0; 1130 } 1131 1132 static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) 1133 { 1134 struct ethtool_rx_flow_spec *fs = 1135 (struct ethtool_rx_flow_spec *)&cmd->fs; 1136 struct bnxt_filter_base *fltr_base; 1137 struct bnxt_ntuple_filter *fltr; 1138 struct bnxt_flow_masks *fmasks; 1139 struct flow_keys *fkeys; 1140 int rc = -EINVAL; 1141 1142 if (fs->location >= bp->max_fltr) 1143 return rc; 1144 1145 rcu_read_lock(); 1146 fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl, 1147 BNXT_L2_FLTR_HASH_SIZE, 1148 fs->location); 1149 if (fltr_base) { 1150 struct ethhdr *h_ether = &fs->h_u.ether_spec; 1151 struct ethhdr *m_ether = &fs->m_u.ether_spec; 1152 struct bnxt_l2_filter *l2_fltr; 1153 struct bnxt_l2_key *l2_key; 1154 1155 l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base); 1156 l2_key = &l2_fltr->l2_key; 1157 fs->flow_type = ETHER_FLOW; 1158 ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr); 1159 eth_broadcast_addr(m_ether->h_dest); 1160 if (l2_key->vlan) { 1161 struct ethtool_flow_ext *m_ext = &fs->m_ext; 1162 struct ethtool_flow_ext *h_ext = &fs->h_ext; 1163 1164 fs->flow_type |= FLOW_EXT; 1165 m_ext->vlan_tci = htons(0xfff); 1166 h_ext->vlan_tci = htons(l2_key->vlan); 1167 } 1168 if (fltr_base->flags & BNXT_ACT_RING_DST) 1169 fs->ring_cookie = fltr_base->rxq; 1170 if (fltr_base->flags & BNXT_ACT_FUNC_DST) 1171 fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) << 1172 ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; 1173 rcu_read_unlock(); 1174 return 0; 1175 } 1176 fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl, 1177 BNXT_NTP_FLTR_HASH_SIZE, 1178 fs->location); 1179 if (!fltr_base) { 1180 rcu_read_unlock(); 1181 return rc; 1182 } 1183 fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base); 1184 1185 fkeys = &fltr->fkeys; 1186 fmasks = &fltr->fmasks; 1187 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 1188 if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) { 1189 fs->flow_type = IP_USER_FLOW; 1190 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 1191 fs->h_u.usr_ip4_spec.proto = BNXT_IP_PROTO_WILDCARD; 1192 fs->m_u.usr_ip4_spec.proto = 0; 1193 } else if (fkeys->basic.ip_proto == IPPROTO_ICMP) { 1194 fs->flow_type = IP_USER_FLOW; 1195 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 1196 fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP; 1197 fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK; 1198 } else if (fkeys->basic.ip_proto == IPPROTO_TCP) { 1199 fs->flow_type = TCP_V4_FLOW; 1200 } else if (fkeys->basic.ip_proto == IPPROTO_UDP) { 1201 fs->flow_type = UDP_V4_FLOW; 1202 } else { 1203 goto fltr_err; 1204 } 1205 1206 fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; 1207 fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src; 1208 fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; 1209 fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst; 1210 if (fs->flow_type == TCP_V4_FLOW || 1211 fs->flow_type == UDP_V4_FLOW) { 1212 fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src; 1213 fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src; 1214 fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst; 1215 fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst; 1216 } 1217 } else { 1218 if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) { 1219 fs->flow_type = IPV6_USER_FLOW; 1220 fs->h_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_WILDCARD; 1221 fs->m_u.usr_ip6_spec.l4_proto = 0; 1222 } else if (fkeys->basic.ip_proto == IPPROTO_ICMPV6) { 1223 fs->flow_type = IPV6_USER_FLOW; 1224 fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6; 1225 fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK; 1226 } else if (fkeys->basic.ip_proto == IPPROTO_TCP) { 1227 fs->flow_type = TCP_V6_FLOW; 1228 } else if (fkeys->basic.ip_proto == IPPROTO_UDP) { 1229 fs->flow_type = UDP_V6_FLOW; 1230 } else { 1231 goto fltr_err; 1232 } 1233 1234 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] = 1235 fkeys->addrs.v6addrs.src; 1236 *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] = 1237 fmasks->addrs.v6addrs.src; 1238 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] = 1239 fkeys->addrs.v6addrs.dst; 1240 *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] = 1241 fmasks->addrs.v6addrs.dst; 1242 if (fs->flow_type == TCP_V6_FLOW || 1243 fs->flow_type == UDP_V6_FLOW) { 1244 fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src; 1245 fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src; 1246 fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst; 1247 fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst; 1248 } 1249 } 1250 1251 if (fltr->base.flags & BNXT_ACT_DROP) { 1252 fs->ring_cookie = RX_CLS_FLOW_DISC; 1253 } else if (fltr->base.flags & BNXT_ACT_RSS_CTX) { 1254 fs->flow_type |= FLOW_RSS; 1255 cmd->rss_context = fltr->base.fw_vnic_id; 1256 } else { 1257 fs->ring_cookie = fltr->base.rxq; 1258 } 1259 rc = 0; 1260 1261 fltr_err: 1262 rcu_read_unlock(); 1263 1264 return rc; 1265 } 1266 1267 static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp, 1268 u32 index) 1269 { 1270 struct ethtool_rxfh_context *ctx; 1271 1272 ctx = xa_load(&bp->dev->ethtool->rss_ctx, index); 1273 if (!ctx) 1274 return NULL; 1275 return ethtool_rxfh_context_priv(ctx); 1276 } 1277 1278 static int bnxt_alloc_vnic_rss_table(struct bnxt *bp, 1279 struct bnxt_vnic_info *vnic) 1280 { 1281 int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); 1282 1283 vnic->rss_table_size = size + HW_HASH_KEY_SIZE; 1284 vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev, 1285 vnic->rss_table_size, 1286 &vnic->rss_table_dma_addr, 1287 GFP_KERNEL); 1288 if (!vnic->rss_table) 1289 return -ENOMEM; 1290 1291 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 1292 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 1293 return 0; 1294 } 1295 1296 static int bnxt_add_l2_cls_rule(struct bnxt *bp, 1297 struct ethtool_rx_flow_spec *fs) 1298 { 1299 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); 1300 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); 1301 struct ethhdr *h_ether = &fs->h_u.ether_spec; 1302 struct ethhdr *m_ether = &fs->m_u.ether_spec; 1303 struct bnxt_l2_filter *fltr; 1304 struct bnxt_l2_key key; 1305 u16 vnic_id; 1306 u8 flags; 1307 int rc; 1308 1309 if (BNXT_CHIP_P5_PLUS(bp)) 1310 return -EOPNOTSUPP; 1311 1312 if (!is_broadcast_ether_addr(m_ether->h_dest)) 1313 return -EINVAL; 1314 ether_addr_copy(key.dst_mac_addr, h_ether->h_dest); 1315 key.vlan = 0; 1316 if (fs->flow_type & FLOW_EXT) { 1317 struct ethtool_flow_ext *m_ext = &fs->m_ext; 1318 struct ethtool_flow_ext *h_ext = &fs->h_ext; 1319 1320 if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci) 1321 return -EINVAL; 1322 key.vlan = ntohs(h_ext->vlan_tci); 1323 } 1324 1325 if (vf) { 1326 flags = BNXT_ACT_FUNC_DST; 1327 vnic_id = 0xffff; 1328 vf--; 1329 } else { 1330 flags = BNXT_ACT_RING_DST; 1331 vnic_id = bp->vnic_info[ring + 1].fw_vnic_id; 1332 } 1333 fltr = bnxt_alloc_new_l2_filter(bp, &key, flags); 1334 if (IS_ERR(fltr)) 1335 return PTR_ERR(fltr); 1336 1337 fltr->base.fw_vnic_id = vnic_id; 1338 fltr->base.rxq = ring; 1339 fltr->base.vf_idx = vf; 1340 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr); 1341 if (rc) 1342 bnxt_del_l2_filter(bp, fltr); 1343 else 1344 fs->location = fltr->base.sw_id; 1345 return rc; 1346 } 1347 1348 static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec, 1349 struct ethtool_usrip4_spec *ip_mask) 1350 { 1351 u8 mproto = ip_mask->proto; 1352 u8 sproto = ip_spec->proto; 1353 1354 if (ip_mask->l4_4_bytes || ip_mask->tos || 1355 ip_spec->ip_ver != ETH_RX_NFC_IP4 || 1356 (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMP))) 1357 return false; 1358 return true; 1359 } 1360 1361 static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec, 1362 struct ethtool_usrip6_spec *ip_mask) 1363 { 1364 u8 mproto = ip_mask->l4_proto; 1365 u8 sproto = ip_spec->l4_proto; 1366 1367 if (ip_mask->l4_4_bytes || ip_mask->tclass || 1368 (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMPV6))) 1369 return false; 1370 return true; 1371 } 1372 1373 static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, 1374 struct ethtool_rxnfc *cmd) 1375 { 1376 struct ethtool_rx_flow_spec *fs = &cmd->fs; 1377 struct bnxt_ntuple_filter *new_fltr, *fltr; 1378 u32 flow_type = fs->flow_type & 0xff; 1379 struct bnxt_l2_filter *l2_fltr; 1380 struct bnxt_flow_masks *fmasks; 1381 struct flow_keys *fkeys; 1382 u32 idx; 1383 int rc; 1384 1385 if (!bp->vnic_info) 1386 return -EAGAIN; 1387 1388 if (fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) 1389 return -EOPNOTSUPP; 1390 1391 if (fs->ring_cookie != RX_CLS_FLOW_DISC && 1392 ethtool_get_flow_spec_ring_vf(fs->ring_cookie)) 1393 return -EOPNOTSUPP; 1394 1395 if (flow_type == IP_USER_FLOW) { 1396 if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec, 1397 &fs->m_u.usr_ip4_spec)) 1398 return -EOPNOTSUPP; 1399 } 1400 1401 if (flow_type == IPV6_USER_FLOW) { 1402 if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec, 1403 &fs->m_u.usr_ip6_spec)) 1404 return -EOPNOTSUPP; 1405 } 1406 1407 new_fltr = kzalloc_obj(*new_fltr); 1408 if (!new_fltr) 1409 return -ENOMEM; 1410 1411 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; 1412 atomic_inc(&l2_fltr->refcnt); 1413 new_fltr->l2_fltr = l2_fltr; 1414 fmasks = &new_fltr->fmasks; 1415 fkeys = &new_fltr->fkeys; 1416 1417 rc = -EOPNOTSUPP; 1418 switch (flow_type) { 1419 case IP_USER_FLOW: { 1420 struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec; 1421 struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec; 1422 1423 fkeys->basic.ip_proto = ip_mask->proto ? ip_spec->proto 1424 : BNXT_IP_PROTO_WILDCARD; 1425 fkeys->basic.n_proto = htons(ETH_P_IP); 1426 fkeys->addrs.v4addrs.src = ip_spec->ip4src; 1427 fmasks->addrs.v4addrs.src = ip_mask->ip4src; 1428 fkeys->addrs.v4addrs.dst = ip_spec->ip4dst; 1429 fmasks->addrs.v4addrs.dst = ip_mask->ip4dst; 1430 break; 1431 } 1432 case TCP_V4_FLOW: 1433 case UDP_V4_FLOW: { 1434 struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec; 1435 struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec; 1436 1437 fkeys->basic.ip_proto = IPPROTO_TCP; 1438 if (flow_type == UDP_V4_FLOW) 1439 fkeys->basic.ip_proto = IPPROTO_UDP; 1440 fkeys->basic.n_proto = htons(ETH_P_IP); 1441 fkeys->addrs.v4addrs.src = ip_spec->ip4src; 1442 fmasks->addrs.v4addrs.src = ip_mask->ip4src; 1443 fkeys->addrs.v4addrs.dst = ip_spec->ip4dst; 1444 fmasks->addrs.v4addrs.dst = ip_mask->ip4dst; 1445 fkeys->ports.src = ip_spec->psrc; 1446 fmasks->ports.src = ip_mask->psrc; 1447 fkeys->ports.dst = ip_spec->pdst; 1448 fmasks->ports.dst = ip_mask->pdst; 1449 break; 1450 } 1451 case IPV6_USER_FLOW: { 1452 struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec; 1453 struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec; 1454 1455 fkeys->basic.ip_proto = ip_mask->l4_proto ? ip_spec->l4_proto 1456 : BNXT_IP_PROTO_WILDCARD; 1457 fkeys->basic.n_proto = htons(ETH_P_IPV6); 1458 fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src; 1459 fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src; 1460 fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst; 1461 fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst; 1462 break; 1463 } 1464 case TCP_V6_FLOW: 1465 case UDP_V6_FLOW: { 1466 struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec; 1467 struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec; 1468 1469 fkeys->basic.ip_proto = IPPROTO_TCP; 1470 if (flow_type == UDP_V6_FLOW) 1471 fkeys->basic.ip_proto = IPPROTO_UDP; 1472 fkeys->basic.n_proto = htons(ETH_P_IPV6); 1473 1474 fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src; 1475 fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src; 1476 fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst; 1477 fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst; 1478 fkeys->ports.src = ip_spec->psrc; 1479 fmasks->ports.src = ip_mask->psrc; 1480 fkeys->ports.dst = ip_spec->pdst; 1481 fmasks->ports.dst = ip_mask->pdst; 1482 break; 1483 } 1484 default: 1485 rc = -EOPNOTSUPP; 1486 goto ntuple_err; 1487 } 1488 if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks))) 1489 goto ntuple_err; 1490 1491 idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL); 1492 rcu_read_lock(); 1493 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx); 1494 if (fltr) { 1495 rcu_read_unlock(); 1496 rc = -EEXIST; 1497 goto ntuple_err; 1498 } 1499 rcu_read_unlock(); 1500 1501 new_fltr->base.flags = BNXT_ACT_NO_AGING; 1502 if (fs->flow_type & FLOW_RSS) { 1503 struct bnxt_rss_ctx *rss_ctx; 1504 1505 new_fltr->base.fw_vnic_id = 0; 1506 new_fltr->base.flags |= BNXT_ACT_RSS_CTX; 1507 rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context); 1508 if (rss_ctx) { 1509 new_fltr->base.fw_vnic_id = rss_ctx->index; 1510 } else { 1511 rc = -EINVAL; 1512 goto ntuple_err; 1513 } 1514 } 1515 if (fs->ring_cookie == RX_CLS_FLOW_DISC) 1516 new_fltr->base.flags |= BNXT_ACT_DROP; 1517 else 1518 new_fltr->base.rxq = ethtool_get_flow_spec_ring(fs->ring_cookie); 1519 __set_bit(BNXT_FLTR_VALID, &new_fltr->base.state); 1520 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx); 1521 if (!rc) { 1522 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr); 1523 if (rc) { 1524 bnxt_del_ntp_filter(bp, new_fltr); 1525 return rc; 1526 } 1527 fs->location = new_fltr->base.sw_id; 1528 return 0; 1529 } 1530 1531 ntuple_err: 1532 atomic_dec(&l2_fltr->refcnt); 1533 kfree(new_fltr); 1534 return rc; 1535 } 1536 1537 static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd) 1538 { 1539 struct ethtool_rx_flow_spec *fs = &cmd->fs; 1540 u32 ring, flow_type; 1541 int rc; 1542 u8 vf; 1543 1544 if (!netif_running(bp->dev)) 1545 return -EAGAIN; 1546 if (!(bp->flags & BNXT_FLAG_RFS)) 1547 return -EPERM; 1548 if (fs->location != RX_CLS_LOC_ANY) 1549 return -EINVAL; 1550 1551 flow_type = fs->flow_type; 1552 if ((flow_type == IP_USER_FLOW || 1553 flow_type == IPV6_USER_FLOW) && 1554 !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO)) 1555 return -EOPNOTSUPP; 1556 if (flow_type & FLOW_MAC_EXT) 1557 return -EINVAL; 1558 flow_type &= ~FLOW_EXT; 1559 1560 if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW) 1561 return bnxt_add_ntuple_cls_rule(bp, cmd); 1562 1563 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); 1564 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); 1565 if (BNXT_VF(bp) && vf) 1566 return -EINVAL; 1567 if (BNXT_PF(bp) && vf > bp->pf.active_vfs) 1568 return -EINVAL; 1569 if (!vf && ring >= bp->rx_nr_rings) 1570 return -EINVAL; 1571 1572 if (flow_type == ETHER_FLOW) 1573 rc = bnxt_add_l2_cls_rule(bp, fs); 1574 else 1575 rc = bnxt_add_ntuple_cls_rule(bp, cmd); 1576 return rc; 1577 } 1578 1579 static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd) 1580 { 1581 struct ethtool_rx_flow_spec *fs = &cmd->fs; 1582 struct bnxt_filter_base *fltr_base; 1583 struct bnxt_ntuple_filter *fltr; 1584 u32 id = fs->location; 1585 1586 rcu_read_lock(); 1587 fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl, 1588 BNXT_L2_FLTR_HASH_SIZE, id); 1589 if (fltr_base) { 1590 struct bnxt_l2_filter *l2_fltr; 1591 1592 l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base); 1593 rcu_read_unlock(); 1594 bnxt_hwrm_l2_filter_free(bp, l2_fltr); 1595 bnxt_del_l2_filter(bp, l2_fltr); 1596 return 0; 1597 } 1598 fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl, 1599 BNXT_NTP_FLTR_HASH_SIZE, id); 1600 if (!fltr_base) { 1601 rcu_read_unlock(); 1602 return -ENOENT; 1603 } 1604 1605 fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base); 1606 if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) { 1607 rcu_read_unlock(); 1608 return -EINVAL; 1609 } 1610 rcu_read_unlock(); 1611 bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr); 1612 bnxt_del_ntp_filter(bp, fltr); 1613 return 0; 1614 } 1615 1616 static u64 get_ethtool_ipv4_rss(struct bnxt *bp) 1617 { 1618 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4) 1619 return RXH_IP_SRC | RXH_IP_DST; 1620 return 0; 1621 } 1622 1623 static u64 get_ethtool_ipv6_rss(struct bnxt *bp) 1624 { 1625 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6) 1626 return RXH_IP_SRC | RXH_IP_DST; 1627 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL) 1628 return RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL; 1629 return 0; 1630 } 1631 1632 static int bnxt_get_rxfh_fields(struct net_device *dev, 1633 struct ethtool_rxfh_fields *cmd) 1634 { 1635 struct bnxt *bp = netdev_priv(dev); 1636 1637 cmd->data = 0; 1638 switch (cmd->flow_type) { 1639 case TCP_V4_FLOW: 1640 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) 1641 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1642 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1643 cmd->data |= get_ethtool_ipv4_rss(bp); 1644 break; 1645 case UDP_V4_FLOW: 1646 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4) 1647 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1648 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1649 fallthrough; 1650 case AH_ESP_V4_FLOW: 1651 if (bp->rss_hash_cfg & 1652 (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 | 1653 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4)) 1654 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1655 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1656 fallthrough; 1657 case SCTP_V4_FLOW: 1658 case AH_V4_FLOW: 1659 case ESP_V4_FLOW: 1660 case IPV4_FLOW: 1661 cmd->data |= get_ethtool_ipv4_rss(bp); 1662 break; 1663 1664 case TCP_V6_FLOW: 1665 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) 1666 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1667 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1668 cmd->data |= get_ethtool_ipv6_rss(bp); 1669 break; 1670 case UDP_V6_FLOW: 1671 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6) 1672 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1673 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1674 fallthrough; 1675 case AH_ESP_V6_FLOW: 1676 if (bp->rss_hash_cfg & 1677 (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 | 1678 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6)) 1679 cmd->data |= RXH_IP_SRC | RXH_IP_DST | 1680 RXH_L4_B_0_1 | RXH_L4_B_2_3; 1681 fallthrough; 1682 case SCTP_V6_FLOW: 1683 case AH_V6_FLOW: 1684 case ESP_V6_FLOW: 1685 case IPV6_FLOW: 1686 cmd->data |= get_ethtool_ipv6_rss(bp); 1687 break; 1688 } 1689 return 0; 1690 } 1691 1692 #define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3) 1693 #define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST) 1694 1695 static int bnxt_set_rxfh_fields(struct net_device *dev, 1696 const struct ethtool_rxfh_fields *cmd, 1697 struct netlink_ext_ack *extack) 1698 { 1699 struct bnxt *bp = netdev_priv(dev); 1700 int tuple, rc = 0; 1701 u32 rss_hash_cfg; 1702 1703 rss_hash_cfg = bp->rss_hash_cfg; 1704 1705 if (cmd->data == RXH_4TUPLE) 1706 tuple = 4; 1707 else if (cmd->data == RXH_2TUPLE || 1708 cmd->data == (RXH_2TUPLE | RXH_IP6_FL)) 1709 tuple = 2; 1710 else if (!cmd->data) 1711 tuple = 0; 1712 else 1713 return -EINVAL; 1714 1715 if (cmd->data & RXH_IP6_FL && 1716 !(bp->rss_cap & BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP)) 1717 return -EINVAL; 1718 1719 if (cmd->flow_type == TCP_V4_FLOW) { 1720 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4; 1721 if (tuple == 4) 1722 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4; 1723 } else if (cmd->flow_type == UDP_V4_FLOW) { 1724 if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP)) 1725 return -EINVAL; 1726 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4; 1727 if (tuple == 4) 1728 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4; 1729 } else if (cmd->flow_type == TCP_V6_FLOW) { 1730 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 1731 if (tuple == 4) 1732 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 1733 } else if (cmd->flow_type == UDP_V6_FLOW) { 1734 if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP)) 1735 return -EINVAL; 1736 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 1737 if (tuple == 4) 1738 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 1739 } else if (cmd->flow_type == AH_ESP_V4_FLOW) { 1740 if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) || 1741 !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP))) 1742 return -EINVAL; 1743 rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 | 1744 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4); 1745 if (tuple == 4) 1746 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 | 1747 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4; 1748 } else if (cmd->flow_type == AH_ESP_V6_FLOW) { 1749 if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) || 1750 !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP))) 1751 return -EINVAL; 1752 rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 | 1753 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6); 1754 if (tuple == 4) 1755 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 | 1756 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6; 1757 } else if (tuple == 4) { 1758 return -EINVAL; 1759 } 1760 1761 switch (cmd->flow_type) { 1762 case TCP_V4_FLOW: 1763 case UDP_V4_FLOW: 1764 case SCTP_V4_FLOW: 1765 case AH_ESP_V4_FLOW: 1766 case AH_V4_FLOW: 1767 case ESP_V4_FLOW: 1768 case IPV4_FLOW: 1769 if (tuple == 2) 1770 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4; 1771 else if (!tuple) 1772 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4; 1773 break; 1774 1775 case TCP_V6_FLOW: 1776 case UDP_V6_FLOW: 1777 case SCTP_V6_FLOW: 1778 case AH_ESP_V6_FLOW: 1779 case AH_V6_FLOW: 1780 case ESP_V6_FLOW: 1781 case IPV6_FLOW: 1782 rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 1783 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL); 1784 if (!tuple) 1785 break; 1786 if (cmd->data & RXH_IP6_FL) 1787 rss_hash_cfg |= 1788 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL; 1789 else if (tuple == 2) 1790 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6; 1791 break; 1792 } 1793 1794 if (bp->rss_hash_cfg == rss_hash_cfg) 1795 return 0; 1796 1797 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) 1798 bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg; 1799 bp->rss_hash_cfg = rss_hash_cfg; 1800 if (netif_running(bp->dev)) { 1801 bnxt_close_nic(bp, false, false); 1802 rc = bnxt_open_nic(bp, false, false); 1803 } 1804 return rc; 1805 } 1806 1807 static u32 bnxt_get_rx_ring_count(struct net_device *dev) 1808 { 1809 struct bnxt *bp = netdev_priv(dev); 1810 1811 return bp->rx_nr_rings; 1812 } 1813 1814 static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 1815 u32 *rule_locs) 1816 { 1817 struct bnxt *bp = netdev_priv(dev); 1818 int rc = 0; 1819 1820 switch (cmd->cmd) { 1821 case ETHTOOL_GRXCLSRLCNT: 1822 cmd->rule_cnt = bp->ntp_fltr_count; 1823 cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL; 1824 break; 1825 1826 case ETHTOOL_GRXCLSRLALL: 1827 rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs); 1828 break; 1829 1830 case ETHTOOL_GRXCLSRULE: 1831 rc = bnxt_grxclsrule(bp, cmd); 1832 break; 1833 1834 default: 1835 rc = -EOPNOTSUPP; 1836 break; 1837 } 1838 1839 return rc; 1840 } 1841 1842 static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 1843 { 1844 struct bnxt *bp = netdev_priv(dev); 1845 int rc; 1846 1847 switch (cmd->cmd) { 1848 case ETHTOOL_SRXCLSRLINS: 1849 rc = bnxt_srxclsrlins(bp, cmd); 1850 break; 1851 1852 case ETHTOOL_SRXCLSRLDEL: 1853 rc = bnxt_srxclsrldel(bp, cmd); 1854 break; 1855 1856 default: 1857 rc = -EOPNOTSUPP; 1858 break; 1859 } 1860 return rc; 1861 } 1862 1863 u32 bnxt_get_rxfh_indir_size(struct net_device *dev) 1864 { 1865 struct bnxt *bp = netdev_priv(dev); 1866 1867 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1868 return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) * 1869 BNXT_RSS_TABLE_ENTRIES_P5; 1870 return HW_HASH_INDEX_SIZE; 1871 } 1872 1873 static u32 bnxt_get_rxfh_key_size(struct net_device *dev) 1874 { 1875 return HW_HASH_KEY_SIZE; 1876 } 1877 1878 static int bnxt_get_rxfh(struct net_device *dev, 1879 struct ethtool_rxfh_param *rxfh) 1880 { 1881 struct bnxt_rss_ctx *rss_ctx = NULL; 1882 struct bnxt *bp = netdev_priv(dev); 1883 u32 *indir_tbl = bp->rss_indir_tbl; 1884 struct bnxt_vnic_info *vnic; 1885 u32 i, tbl_size; 1886 1887 rxfh->hfunc = ETH_RSS_HASH_TOP; 1888 1889 if (!bp->vnic_info) 1890 return 0; 1891 1892 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 1893 if (rxfh->rss_context) { 1894 struct ethtool_rxfh_context *ctx; 1895 1896 ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context); 1897 if (!ctx) 1898 return -EINVAL; 1899 indir_tbl = ethtool_rxfh_context_indir(ctx); 1900 rss_ctx = ethtool_rxfh_context_priv(ctx); 1901 vnic = &rss_ctx->vnic; 1902 } 1903 1904 if (rxfh->indir && indir_tbl) { 1905 tbl_size = bnxt_get_rxfh_indir_size(dev); 1906 for (i = 0; i < tbl_size; i++) 1907 rxfh->indir[i] = indir_tbl[i]; 1908 } 1909 1910 if (rxfh->key && vnic->rss_hash_key) 1911 memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE); 1912 1913 return 0; 1914 } 1915 1916 static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx, 1917 struct bnxt_rss_ctx *rss_ctx, 1918 const struct ethtool_rxfh_param *rxfh) 1919 { 1920 if (rxfh->key) { 1921 if (rss_ctx) { 1922 memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key, 1923 HW_HASH_KEY_SIZE); 1924 } else { 1925 memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE); 1926 bp->rss_hash_key_updated = true; 1927 } 1928 } 1929 if (rxfh->indir) { 1930 u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 1931 u32 *indir_tbl = bp->rss_indir_tbl; 1932 1933 if (rss_ctx) 1934 indir_tbl = ethtool_rxfh_context_indir(ctx); 1935 for (i = 0; i < tbl_size; i++) 1936 indir_tbl[i] = rxfh->indir[i]; 1937 pad = bp->rss_indir_tbl_entries - tbl_size; 1938 if (pad) 1939 memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl)); 1940 } 1941 } 1942 1943 static int bnxt_rxfh_context_check(struct bnxt *bp, 1944 const struct ethtool_rxfh_param *rxfh, 1945 struct netlink_ext_ack *extack) 1946 { 1947 if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) { 1948 NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported"); 1949 return -EOPNOTSUPP; 1950 } 1951 1952 if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) { 1953 NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported"); 1954 return -EOPNOTSUPP; 1955 } 1956 1957 if (!netif_running(bp->dev)) { 1958 NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down"); 1959 return -EAGAIN; 1960 } 1961 1962 return 0; 1963 } 1964 1965 static int bnxt_create_rxfh_context(struct net_device *dev, 1966 struct ethtool_rxfh_context *ctx, 1967 const struct ethtool_rxfh_param *rxfh, 1968 struct netlink_ext_ack *extack) 1969 { 1970 struct bnxt *bp = netdev_priv(dev); 1971 struct bnxt_rss_ctx *rss_ctx; 1972 struct bnxt_vnic_info *vnic; 1973 int rc; 1974 1975 rc = bnxt_rxfh_context_check(bp, rxfh, extack); 1976 if (rc) 1977 return rc; 1978 1979 if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) { 1980 NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u", 1981 BNXT_MAX_ETH_RSS_CTX); 1982 return -EINVAL; 1983 } 1984 1985 if (!bnxt_rfs_capable(bp, true)) { 1986 NL_SET_ERR_MSG_MOD(extack, "Out hardware resources"); 1987 return -ENOMEM; 1988 } 1989 1990 rss_ctx = ethtool_rxfh_context_priv(ctx); 1991 1992 bp->num_rss_ctx++; 1993 1994 vnic = &rss_ctx->vnic; 1995 vnic->rss_ctx = ctx; 1996 vnic->flags |= BNXT_VNIC_RSSCTX_FLAG; 1997 vnic->vnic_id = BNXT_VNIC_ID_INVALID; 1998 rc = bnxt_alloc_vnic_rss_table(bp, vnic); 1999 if (rc) 2000 goto out; 2001 2002 /* Populate defaults in the context */ 2003 bnxt_set_dflt_rss_indir_tbl(bp, ctx); 2004 ctx->hfunc = ETH_RSS_HASH_TOP; 2005 memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE); 2006 memcpy(ethtool_rxfh_context_key(ctx), 2007 bp->rss_hash_key, HW_HASH_KEY_SIZE); 2008 2009 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings); 2010 if (rc) { 2011 NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC"); 2012 goto out; 2013 } 2014 2015 rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA); 2016 if (rc) { 2017 NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); 2018 goto out; 2019 } 2020 bnxt_modify_rss(bp, ctx, rss_ctx, rxfh); 2021 2022 rc = __bnxt_setup_vnic_p5(bp, vnic); 2023 if (rc) { 2024 NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); 2025 goto out; 2026 } 2027 2028 rss_ctx->index = rxfh->rss_context; 2029 return 0; 2030 out: 2031 bnxt_del_one_rss_ctx(bp, rss_ctx, true); 2032 return rc; 2033 } 2034 2035 static int bnxt_modify_rxfh_context(struct net_device *dev, 2036 struct ethtool_rxfh_context *ctx, 2037 const struct ethtool_rxfh_param *rxfh, 2038 struct netlink_ext_ack *extack) 2039 { 2040 struct bnxt *bp = netdev_priv(dev); 2041 struct bnxt_rss_ctx *rss_ctx; 2042 int rc; 2043 2044 rc = bnxt_rxfh_context_check(bp, rxfh, extack); 2045 if (rc) 2046 return rc; 2047 2048 rss_ctx = ethtool_rxfh_context_priv(ctx); 2049 2050 bnxt_modify_rss(bp, ctx, rss_ctx, rxfh); 2051 2052 return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic); 2053 } 2054 2055 static int bnxt_remove_rxfh_context(struct net_device *dev, 2056 struct ethtool_rxfh_context *ctx, 2057 u32 rss_context, 2058 struct netlink_ext_ack *extack) 2059 { 2060 struct bnxt *bp = netdev_priv(dev); 2061 struct bnxt_rss_ctx *rss_ctx; 2062 2063 rss_ctx = ethtool_rxfh_context_priv(ctx); 2064 2065 bnxt_del_one_rss_ctx(bp, rss_ctx, true); 2066 return 0; 2067 } 2068 2069 static int bnxt_set_rxfh(struct net_device *dev, 2070 struct ethtool_rxfh_param *rxfh, 2071 struct netlink_ext_ack *extack) 2072 { 2073 struct bnxt *bp = netdev_priv(dev); 2074 int rc = 0; 2075 2076 if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) 2077 return -EOPNOTSUPP; 2078 2079 bnxt_modify_rss(bp, NULL, NULL, rxfh); 2080 2081 if (netif_running(bp->dev)) { 2082 bnxt_close_nic(bp, false, false); 2083 rc = bnxt_open_nic(bp, false, false); 2084 } 2085 return rc; 2086 } 2087 2088 static void bnxt_get_drvinfo(struct net_device *dev, 2089 struct ethtool_drvinfo *info) 2090 { 2091 struct bnxt *bp = netdev_priv(dev); 2092 2093 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 2094 strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); 2095 strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); 2096 info->n_stats = bnxt_get_num_stats(bp); 2097 info->testinfo_len = bp->num_tests; 2098 /* TODO CHIMP_FW: eeprom dump details */ 2099 info->eedump_len = 0; 2100 /* TODO CHIMP FW: reg dump details */ 2101 info->regdump_len = 0; 2102 } 2103 2104 static int bnxt_get_regs_len(struct net_device *dev) 2105 { 2106 struct bnxt *bp = netdev_priv(dev); 2107 2108 if (!BNXT_PF(bp)) 2109 return -EOPNOTSUPP; 2110 2111 return BNXT_PXP_REG_LEN + bp->pcie_stat_len; 2112 } 2113 2114 static void * 2115 __bnxt_hwrm_pcie_qstats(struct bnxt *bp, struct hwrm_pcie_qstats_input *req) 2116 { 2117 struct pcie_ctx_hw_stats_v2 *hw_pcie_stats; 2118 dma_addr_t hw_pcie_stats_addr; 2119 int rc; 2120 2121 hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats), 2122 &hw_pcie_stats_addr); 2123 if (!hw_pcie_stats) 2124 return NULL; 2125 2126 req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats)); 2127 req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); 2128 rc = hwrm_req_send(bp, req); 2129 2130 return rc ? NULL : hw_pcie_stats; 2131 } 2132 2133 #define BNXT_PCIE_32B_ENTRY(start, end) \ 2134 { offsetof(struct pcie_ctx_hw_stats_v2, start),\ 2135 offsetof(struct pcie_ctx_hw_stats_v2, end) } 2136 2137 static const struct { 2138 u16 start; 2139 u16 end; 2140 } bnxt_pcie_32b_entries[] = { 2141 BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]), 2142 BNXT_PCIE_32B_ENTRY(pcie_tl_credit_nph_histogram[0], unused_1), 2143 BNXT_PCIE_32B_ENTRY(pcie_rd_latency_histogram[0], unused_2), 2144 }; 2145 2146 static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, 2147 void *_p) 2148 { 2149 struct hwrm_pcie_qstats_output *resp; 2150 struct hwrm_pcie_qstats_input *req; 2151 struct bnxt *bp = netdev_priv(dev); 2152 u8 *src; 2153 2154 regs->version = 0; 2155 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED)) 2156 bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p); 2157 2158 if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) 2159 return; 2160 2161 if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS)) 2162 return; 2163 2164 resp = hwrm_req_hold(bp, req); 2165 src = __bnxt_hwrm_pcie_qstats(bp, req); 2166 if (src) { 2167 u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN); 2168 int i, j, len; 2169 2170 len = min(bp->pcie_stat_len, le16_to_cpu(resp->pcie_stat_size)); 2171 if (len <= sizeof(struct pcie_ctx_hw_stats)) 2172 regs->version = 1; 2173 else if (len < sizeof(struct pcie_ctx_hw_stats_v2)) 2174 regs->version = 2; 2175 else 2176 regs->version = 3; 2177 2178 for (i = 0, j = 0; i < len; ) { 2179 if (i >= bnxt_pcie_32b_entries[j].start && 2180 i <= bnxt_pcie_32b_entries[j].end) { 2181 u32 *dst32 = (u32 *)(dst + i); 2182 2183 *dst32 = le32_to_cpu(*(__le32 *)(src + i)); 2184 i += 4; 2185 if (i > bnxt_pcie_32b_entries[j].end && 2186 j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1) 2187 j++; 2188 } else { 2189 u64 *dst64 = (u64 *)(dst + i); 2190 2191 *dst64 = le64_to_cpu(*(__le64 *)(src + i)); 2192 i += 8; 2193 } 2194 } 2195 } 2196 hwrm_req_drop(bp, req); 2197 } 2198 2199 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2200 { 2201 struct bnxt *bp = netdev_priv(dev); 2202 2203 wol->supported = 0; 2204 wol->wolopts = 0; 2205 memset(&wol->sopass, 0, sizeof(wol->sopass)); 2206 if (bp->flags & BNXT_FLAG_WOL_CAP) { 2207 wol->supported = WAKE_MAGIC; 2208 if (bp->wol) 2209 wol->wolopts = WAKE_MAGIC; 2210 } 2211 } 2212 2213 static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2214 { 2215 struct bnxt *bp = netdev_priv(dev); 2216 2217 if (wol->wolopts & ~WAKE_MAGIC) 2218 return -EINVAL; 2219 2220 if (wol->wolopts & WAKE_MAGIC) { 2221 if (!(bp->flags & BNXT_FLAG_WOL_CAP)) 2222 return -EINVAL; 2223 if (!bp->wol) { 2224 if (bnxt_hwrm_alloc_wol_fltr(bp)) 2225 return -EBUSY; 2226 bp->wol = 1; 2227 } 2228 } else { 2229 if (bp->wol) { 2230 if (bnxt_hwrm_free_wol_fltr(bp)) 2231 return -EBUSY; 2232 bp->wol = 0; 2233 } 2234 } 2235 return 0; 2236 } 2237 2238 /* TODO: support 25GB, 40GB, 50GB with different cable type */ 2239 void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds) 2240 { 2241 linkmode_zero(mode); 2242 2243 if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB) 2244 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode); 2245 if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB) 2246 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode); 2247 if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB) 2248 linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode); 2249 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) 2250 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode); 2251 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) 2252 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode); 2253 } 2254 2255 enum bnxt_media_type { 2256 BNXT_MEDIA_UNKNOWN = 0, 2257 BNXT_MEDIA_TP, 2258 BNXT_MEDIA_CR, 2259 BNXT_MEDIA_SR, 2260 BNXT_MEDIA_LR_ER_FR, 2261 BNXT_MEDIA_KR, 2262 BNXT_MEDIA_KX, 2263 BNXT_MEDIA_X, 2264 __BNXT_MEDIA_END, 2265 }; 2266 2267 static const enum bnxt_media_type bnxt_phy_types[] = { 2268 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR, 2269 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] = BNXT_MEDIA_KR, 2270 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR, 2271 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR, 2272 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR, 2273 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX, 2274 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR, 2275 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP, 2276 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP, 2277 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR, 2278 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR, 2279 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR, 2280 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR, 2281 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR, 2282 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR, 2283 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR, 2284 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR, 2285 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR, 2286 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR, 2287 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR, 2288 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR, 2289 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR, 2290 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR, 2291 [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP, 2292 [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X, 2293 [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X, 2294 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR, 2295 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR, 2296 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR, 2297 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR, 2298 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR, 2299 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR, 2300 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR, 2301 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR, 2302 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR, 2303 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR, 2304 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR, 2305 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR, 2306 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR, 2307 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR, 2308 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR, 2309 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR, 2310 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR, 2311 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR, 2312 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR, 2313 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR, 2314 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR, 2315 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR, 2316 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR, 2317 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR, 2318 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR, 2319 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR, 2320 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR, 2321 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR, 2322 }; 2323 2324 static enum bnxt_media_type 2325 bnxt_get_media(struct bnxt_link_info *link_info) 2326 { 2327 switch (link_info->media_type) { 2328 case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP: 2329 return BNXT_MEDIA_TP; 2330 case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC: 2331 return BNXT_MEDIA_CR; 2332 default: 2333 if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types)) 2334 return bnxt_phy_types[link_info->phy_type]; 2335 return BNXT_MEDIA_UNKNOWN; 2336 } 2337 } 2338 2339 enum bnxt_link_speed_indices { 2340 BNXT_LINK_SPEED_UNKNOWN = 0, 2341 BNXT_LINK_SPEED_100MB_IDX, 2342 BNXT_LINK_SPEED_1GB_IDX, 2343 BNXT_LINK_SPEED_10GB_IDX, 2344 BNXT_LINK_SPEED_25GB_IDX, 2345 BNXT_LINK_SPEED_40GB_IDX, 2346 BNXT_LINK_SPEED_50GB_IDX, 2347 BNXT_LINK_SPEED_100GB_IDX, 2348 BNXT_LINK_SPEED_200GB_IDX, 2349 BNXT_LINK_SPEED_400GB_IDX, 2350 __BNXT_LINK_SPEED_END 2351 }; 2352 2353 static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed) 2354 { 2355 switch (speed) { 2356 case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX; 2357 case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX; 2358 case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX; 2359 case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX; 2360 case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX; 2361 case BNXT_LINK_SPEED_50GB: 2362 case BNXT_LINK_SPEED_50GB_PAM4: 2363 return BNXT_LINK_SPEED_50GB_IDX; 2364 case BNXT_LINK_SPEED_100GB: 2365 case BNXT_LINK_SPEED_100GB_PAM4: 2366 case BNXT_LINK_SPEED_100GB_PAM4_112: 2367 return BNXT_LINK_SPEED_100GB_IDX; 2368 case BNXT_LINK_SPEED_200GB: 2369 case BNXT_LINK_SPEED_200GB_PAM4: 2370 case BNXT_LINK_SPEED_200GB_PAM4_112: 2371 return BNXT_LINK_SPEED_200GB_IDX; 2372 case BNXT_LINK_SPEED_400GB: 2373 case BNXT_LINK_SPEED_400GB_PAM4: 2374 case BNXT_LINK_SPEED_400GB_PAM4_112: 2375 return BNXT_LINK_SPEED_400GB_IDX; 2376 default: return BNXT_LINK_SPEED_UNKNOWN; 2377 } 2378 } 2379 2380 static const enum ethtool_link_mode_bit_indices 2381 bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = { 2382 [BNXT_LINK_SPEED_100MB_IDX] = { 2383 { 2384 [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT, 2385 }, 2386 }, 2387 [BNXT_LINK_SPEED_1GB_IDX] = { 2388 { 2389 [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 2390 /* historically baseT, but DAC is more correctly baseX */ 2391 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 2392 [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2393 [BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 2394 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 2395 }, 2396 }, 2397 [BNXT_LINK_SPEED_10GB_IDX] = { 2398 { 2399 [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 2400 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, 2401 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 2402 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 2403 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 2404 [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 2405 }, 2406 }, 2407 [BNXT_LINK_SPEED_25GB_IDX] = { 2408 { 2409 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 2410 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 2411 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 2412 }, 2413 }, 2414 [BNXT_LINK_SPEED_40GB_IDX] = { 2415 { 2416 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 2417 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 2418 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 2419 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 2420 }, 2421 }, 2422 [BNXT_LINK_SPEED_50GB_IDX] = { 2423 [BNXT_SIG_MODE_NRZ] = { 2424 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 2425 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 2426 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 2427 }, 2428 [BNXT_SIG_MODE_PAM4] = { 2429 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, 2430 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, 2431 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, 2432 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, 2433 }, 2434 }, 2435 [BNXT_LINK_SPEED_100GB_IDX] = { 2436 [BNXT_SIG_MODE_NRZ] = { 2437 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 2438 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 2439 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 2440 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 2441 }, 2442 [BNXT_SIG_MODE_PAM4] = { 2443 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, 2444 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, 2445 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, 2446 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, 2447 }, 2448 [BNXT_SIG_MODE_PAM4_112] = { 2449 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, 2450 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, 2451 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, 2452 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT, 2453 }, 2454 }, 2455 [BNXT_LINK_SPEED_200GB_IDX] = { 2456 [BNXT_SIG_MODE_PAM4] = { 2457 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, 2458 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, 2459 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, 2460 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, 2461 }, 2462 [BNXT_SIG_MODE_PAM4_112] = { 2463 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, 2464 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, 2465 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, 2466 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT, 2467 }, 2468 }, 2469 [BNXT_LINK_SPEED_400GB_IDX] = { 2470 [BNXT_SIG_MODE_PAM4] = { 2471 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, 2472 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, 2473 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, 2474 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT, 2475 }, 2476 [BNXT_SIG_MODE_PAM4_112] = { 2477 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, 2478 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, 2479 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, 2480 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT, 2481 }, 2482 }, 2483 }; 2484 2485 #define BNXT_LINK_MODE_UNKNOWN -1 2486 2487 static enum ethtool_link_mode_bit_indices 2488 bnxt_get_link_mode(struct bnxt_link_info *link_info) 2489 { 2490 enum ethtool_link_mode_bit_indices link_mode; 2491 enum bnxt_link_speed_indices speed; 2492 enum bnxt_media_type media; 2493 u8 sig_mode; 2494 2495 if (link_info->phy_link_status != BNXT_LINK_LINK) 2496 return BNXT_LINK_MODE_UNKNOWN; 2497 2498 media = bnxt_get_media(link_info); 2499 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 2500 speed = bnxt_fw_speed_idx(link_info->link_speed); 2501 sig_mode = link_info->active_fec_sig_mode & 2502 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; 2503 } else { 2504 speed = bnxt_fw_speed_idx(link_info->req_link_speed); 2505 sig_mode = link_info->req_signal_mode; 2506 } 2507 if (sig_mode >= BNXT_SIG_MODE_MAX) 2508 return BNXT_LINK_MODE_UNKNOWN; 2509 2510 /* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux 2511 * link mode, but since no such devices exist, the zeroes in the 2512 * map can be conveniently used to represent unknown link modes. 2513 */ 2514 link_mode = bnxt_link_modes[speed][sig_mode][media]; 2515 if (!link_mode) 2516 return BNXT_LINK_MODE_UNKNOWN; 2517 2518 switch (link_mode) { 2519 case ETHTOOL_LINK_MODE_100baseT_Full_BIT: 2520 if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL) 2521 link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT; 2522 break; 2523 case ETHTOOL_LINK_MODE_1000baseT_Full_BIT: 2524 if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL) 2525 link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT; 2526 break; 2527 default: 2528 break; 2529 } 2530 2531 return link_mode; 2532 } 2533 2534 static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info, 2535 struct ethtool_link_ksettings *lk_ksettings) 2536 { 2537 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2538 2539 if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) { 2540 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 2541 lk_ksettings->link_modes.supported); 2542 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, 2543 lk_ksettings->link_modes.supported); 2544 } 2545 2546 if (link_info->support_auto_speeds || link_info->support_auto_speeds2 || 2547 link_info->support_pam4_auto_speeds) 2548 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 2549 lk_ksettings->link_modes.supported); 2550 2551 if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 2552 return; 2553 2554 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) 2555 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 2556 lk_ksettings->link_modes.advertising); 2557 if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1) 2558 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, 2559 lk_ksettings->link_modes.advertising); 2560 if (link_info->lp_pause & BNXT_LINK_PAUSE_RX) 2561 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, 2562 lk_ksettings->link_modes.lp_advertising); 2563 if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1) 2564 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, 2565 lk_ksettings->link_modes.lp_advertising); 2566 } 2567 2568 static const u16 bnxt_nrz_speed_masks[] = { 2569 [BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB, 2570 [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB, 2571 [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB, 2572 [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB, 2573 [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB, 2574 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB, 2575 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB, 2576 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */ 2577 }; 2578 2579 static const u16 bnxt_pam4_speed_masks[] = { 2580 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB, 2581 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB, 2582 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB, 2583 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */ 2584 }; 2585 2586 static const u16 bnxt_nrz_speeds2_masks[] = { 2587 [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB, 2588 [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB, 2589 [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB, 2590 [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB, 2591 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB, 2592 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB, 2593 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */ 2594 }; 2595 2596 static const u16 bnxt_pam4_speeds2_masks[] = { 2597 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4, 2598 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4, 2599 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4, 2600 [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4, 2601 }; 2602 2603 static const u16 bnxt_pam4_112_speeds2_masks[] = { 2604 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112, 2605 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112, 2606 [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112, 2607 }; 2608 2609 static enum bnxt_link_speed_indices 2610 bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk) 2611 { 2612 const u16 *speeds; 2613 int idx, len; 2614 2615 switch (sig_mode) { 2616 case BNXT_SIG_MODE_NRZ: 2617 if (phy_flags & BNXT_PHY_FL_SPEEDS2) { 2618 speeds = bnxt_nrz_speeds2_masks; 2619 len = ARRAY_SIZE(bnxt_nrz_speeds2_masks); 2620 } else { 2621 speeds = bnxt_nrz_speed_masks; 2622 len = ARRAY_SIZE(bnxt_nrz_speed_masks); 2623 } 2624 break; 2625 case BNXT_SIG_MODE_PAM4: 2626 if (phy_flags & BNXT_PHY_FL_SPEEDS2) { 2627 speeds = bnxt_pam4_speeds2_masks; 2628 len = ARRAY_SIZE(bnxt_pam4_speeds2_masks); 2629 } else { 2630 speeds = bnxt_pam4_speed_masks; 2631 len = ARRAY_SIZE(bnxt_pam4_speed_masks); 2632 } 2633 break; 2634 case BNXT_SIG_MODE_PAM4_112: 2635 speeds = bnxt_pam4_112_speeds2_masks; 2636 len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks); 2637 break; 2638 default: 2639 return BNXT_LINK_SPEED_UNKNOWN; 2640 } 2641 2642 for (idx = 0; idx < len; idx++) { 2643 if (speeds[idx] == speed_msk) 2644 return idx; 2645 } 2646 2647 return BNXT_LINK_SPEED_UNKNOWN; 2648 } 2649 2650 #define BNXT_FW_SPEED_MSK_BITS 16 2651 2652 static void 2653 __bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media, 2654 u8 sig_mode, u16 phy_flags, unsigned long *et_mask) 2655 { 2656 enum ethtool_link_mode_bit_indices link_mode; 2657 enum bnxt_link_speed_indices speed; 2658 u8 bit; 2659 2660 for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) { 2661 speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit); 2662 if (!speed) 2663 continue; 2664 2665 link_mode = bnxt_link_modes[speed][sig_mode][media]; 2666 if (!link_mode) 2667 continue; 2668 2669 linkmode_set_bit(link_mode, et_mask); 2670 } 2671 } 2672 2673 static void 2674 bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media, 2675 u8 sig_mode, u16 phy_flags, unsigned long *et_mask) 2676 { 2677 if (media) { 2678 __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags, 2679 et_mask); 2680 return; 2681 } 2682 2683 /* list speeds for all media if unknown */ 2684 for (media = 1; media < __BNXT_MEDIA_END; media++) 2685 __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags, 2686 et_mask); 2687 } 2688 2689 static void 2690 bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info, 2691 enum bnxt_media_type media, 2692 struct ethtool_link_ksettings *lk_ksettings) 2693 { 2694 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2695 u16 sp_nrz, sp_pam4, sp_pam4_112 = 0; 2696 u16 phy_flags = bp->phy_flags; 2697 2698 if (phy_flags & BNXT_PHY_FL_SPEEDS2) { 2699 sp_nrz = link_info->support_speeds2; 2700 sp_pam4 = link_info->support_speeds2; 2701 sp_pam4_112 = link_info->support_speeds2; 2702 } else { 2703 sp_nrz = link_info->support_speeds; 2704 sp_pam4 = link_info->support_pam4_speeds; 2705 } 2706 bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags, 2707 lk_ksettings->link_modes.supported); 2708 bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags, 2709 lk_ksettings->link_modes.supported); 2710 bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112, 2711 phy_flags, lk_ksettings->link_modes.supported); 2712 } 2713 2714 static void 2715 bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info, 2716 enum bnxt_media_type media, 2717 struct ethtool_link_ksettings *lk_ksettings) 2718 { 2719 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2720 u16 sp_nrz, sp_pam4, sp_pam4_112 = 0; 2721 u16 phy_flags = bp->phy_flags; 2722 2723 sp_nrz = link_info->advertising; 2724 if (phy_flags & BNXT_PHY_FL_SPEEDS2) { 2725 sp_pam4 = link_info->advertising; 2726 sp_pam4_112 = link_info->advertising; 2727 } else { 2728 sp_pam4 = link_info->advertising_pam4; 2729 } 2730 bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags, 2731 lk_ksettings->link_modes.advertising); 2732 bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags, 2733 lk_ksettings->link_modes.advertising); 2734 bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112, 2735 phy_flags, lk_ksettings->link_modes.advertising); 2736 } 2737 2738 static void 2739 bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info, 2740 enum bnxt_media_type media, 2741 struct ethtool_link_ksettings *lk_ksettings) 2742 { 2743 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2744 u16 phy_flags = bp->phy_flags; 2745 2746 bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media, 2747 BNXT_SIG_MODE_NRZ, phy_flags, 2748 lk_ksettings->link_modes.lp_advertising); 2749 bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media, 2750 BNXT_SIG_MODE_PAM4, phy_flags, 2751 lk_ksettings->link_modes.lp_advertising); 2752 } 2753 2754 static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds, 2755 u16 speed_msk, const unsigned long *et_mask, 2756 enum ethtool_link_mode_bit_indices mode) 2757 { 2758 bool mode_desired = linkmode_test_bit(mode, et_mask); 2759 2760 if (!mode) 2761 return; 2762 2763 /* enabled speeds for installed media should override */ 2764 if (installed_media && mode_desired) { 2765 *speeds |= speed_msk; 2766 *delta |= speed_msk; 2767 return; 2768 } 2769 2770 /* many to one mapping, only allow one change per fw_speed bit */ 2771 if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) { 2772 *speeds ^= speed_msk; 2773 *delta |= speed_msk; 2774 } 2775 } 2776 2777 static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info, 2778 const unsigned long *et_mask) 2779 { 2780 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2781 u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks; 2782 enum bnxt_media_type media = bnxt_get_media(link_info); 2783 u16 *adv, *adv_pam4, *adv_pam4_112 = NULL; 2784 u32 delta_pam4_112 = 0; 2785 u32 delta_pam4 = 0; 2786 u32 delta_nrz = 0; 2787 int i, m; 2788 2789 adv = &link_info->advertising; 2790 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2791 adv_pam4 = &link_info->advertising; 2792 adv_pam4_112 = &link_info->advertising; 2793 sp_msks = bnxt_nrz_speeds2_masks; 2794 sp_pam4_msks = bnxt_pam4_speeds2_masks; 2795 sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks; 2796 } else { 2797 adv_pam4 = &link_info->advertising_pam4; 2798 sp_msks = bnxt_nrz_speed_masks; 2799 sp_pam4_msks = bnxt_pam4_speed_masks; 2800 } 2801 for (i = 1; i < __BNXT_LINK_SPEED_END; i++) { 2802 /* accept any legal media from user */ 2803 for (m = 1; m < __BNXT_MEDIA_END; m++) { 2804 bnxt_update_speed(&delta_nrz, m == media, 2805 adv, sp_msks[i], et_mask, 2806 bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]); 2807 bnxt_update_speed(&delta_pam4, m == media, 2808 adv_pam4, sp_pam4_msks[i], et_mask, 2809 bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]); 2810 if (!adv_pam4_112) 2811 continue; 2812 2813 bnxt_update_speed(&delta_pam4_112, m == media, 2814 adv_pam4_112, sp_pam4_112_msks[i], et_mask, 2815 bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]); 2816 } 2817 } 2818 } 2819 2820 static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info, 2821 struct ethtool_link_ksettings *lk_ksettings) 2822 { 2823 u16 fec_cfg = link_info->fec_cfg; 2824 2825 if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) { 2826 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 2827 lk_ksettings->link_modes.advertising); 2828 return; 2829 } 2830 if (fec_cfg & BNXT_FEC_ENC_BASE_R) 2831 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 2832 lk_ksettings->link_modes.advertising); 2833 if (fec_cfg & BNXT_FEC_ENC_RS) 2834 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 2835 lk_ksettings->link_modes.advertising); 2836 if (fec_cfg & BNXT_FEC_ENC_LLRS) 2837 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, 2838 lk_ksettings->link_modes.advertising); 2839 } 2840 2841 static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info, 2842 struct ethtool_link_ksettings *lk_ksettings) 2843 { 2844 u16 fec_cfg = link_info->fec_cfg; 2845 2846 if (fec_cfg & BNXT_FEC_NONE) { 2847 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 2848 lk_ksettings->link_modes.supported); 2849 return; 2850 } 2851 if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP) 2852 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 2853 lk_ksettings->link_modes.supported); 2854 if (fec_cfg & BNXT_FEC_ENC_RS_CAP) 2855 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 2856 lk_ksettings->link_modes.supported); 2857 if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP) 2858 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, 2859 lk_ksettings->link_modes.supported); 2860 } 2861 2862 u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) 2863 { 2864 switch (fw_link_speed) { 2865 case BNXT_LINK_SPEED_100MB: 2866 return SPEED_100; 2867 case BNXT_LINK_SPEED_1GB: 2868 return SPEED_1000; 2869 case BNXT_LINK_SPEED_2_5GB: 2870 return SPEED_2500; 2871 case BNXT_LINK_SPEED_10GB: 2872 return SPEED_10000; 2873 case BNXT_LINK_SPEED_20GB: 2874 return SPEED_20000; 2875 case BNXT_LINK_SPEED_25GB: 2876 return SPEED_25000; 2877 case BNXT_LINK_SPEED_40GB: 2878 return SPEED_40000; 2879 case BNXT_LINK_SPEED_50GB: 2880 case BNXT_LINK_SPEED_50GB_PAM4: 2881 return SPEED_50000; 2882 case BNXT_LINK_SPEED_100GB: 2883 case BNXT_LINK_SPEED_100GB_PAM4: 2884 case BNXT_LINK_SPEED_100GB_PAM4_112: 2885 return SPEED_100000; 2886 case BNXT_LINK_SPEED_200GB: 2887 case BNXT_LINK_SPEED_200GB_PAM4: 2888 case BNXT_LINK_SPEED_200GB_PAM4_112: 2889 return SPEED_200000; 2890 case BNXT_LINK_SPEED_400GB: 2891 case BNXT_LINK_SPEED_400GB_PAM4: 2892 case BNXT_LINK_SPEED_400GB_PAM4_112: 2893 return SPEED_400000; 2894 default: 2895 return SPEED_UNKNOWN; 2896 } 2897 } 2898 2899 static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings, 2900 struct bnxt_link_info *link_info) 2901 { 2902 struct ethtool_link_settings *base = &lk_ksettings->base; 2903 2904 if (link_info->link_state == BNXT_LINK_STATE_UP) { 2905 base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); 2906 base->duplex = DUPLEX_HALF; 2907 if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) 2908 base->duplex = DUPLEX_FULL; 2909 lk_ksettings->lanes = link_info->active_lanes; 2910 } else if (!link_info->autoneg) { 2911 base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed); 2912 base->duplex = DUPLEX_HALF; 2913 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) 2914 base->duplex = DUPLEX_FULL; 2915 } 2916 } 2917 2918 static int bnxt_get_link_ksettings(struct net_device *dev, 2919 struct ethtool_link_ksettings *lk_ksettings) 2920 { 2921 struct ethtool_link_settings *base = &lk_ksettings->base; 2922 enum ethtool_link_mode_bit_indices link_mode; 2923 struct bnxt *bp = netdev_priv(dev); 2924 struct bnxt_link_info *link_info; 2925 enum bnxt_media_type media; 2926 2927 ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising); 2928 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); 2929 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); 2930 base->duplex = DUPLEX_UNKNOWN; 2931 base->speed = SPEED_UNKNOWN; 2932 link_info = &bp->link_info; 2933 2934 mutex_lock(&bp->link_lock); 2935 bnxt_get_ethtool_modes(link_info, lk_ksettings); 2936 media = bnxt_get_media(link_info); 2937 bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings); 2938 bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings); 2939 link_mode = bnxt_get_link_mode(link_info); 2940 if (link_mode != BNXT_LINK_MODE_UNKNOWN) 2941 ethtool_params_from_link_mode(lk_ksettings, link_mode); 2942 else 2943 bnxt_get_default_speeds(lk_ksettings, link_info); 2944 2945 if (link_info->autoneg) { 2946 bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings); 2947 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 2948 lk_ksettings->link_modes.advertising); 2949 base->autoneg = AUTONEG_ENABLE; 2950 bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings); 2951 if (link_info->phy_link_status == BNXT_LINK_LINK) 2952 bnxt_get_all_ethtool_lp_speeds(link_info, media, 2953 lk_ksettings); 2954 } else { 2955 base->autoneg = AUTONEG_DISABLE; 2956 } 2957 2958 base->port = PORT_NONE; 2959 if (media == BNXT_MEDIA_TP) { 2960 base->port = PORT_TP; 2961 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, 2962 lk_ksettings->link_modes.supported); 2963 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, 2964 lk_ksettings->link_modes.advertising); 2965 } else if (media == BNXT_MEDIA_KR) { 2966 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, 2967 lk_ksettings->link_modes.supported); 2968 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, 2969 lk_ksettings->link_modes.advertising); 2970 } else { 2971 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, 2972 lk_ksettings->link_modes.supported); 2973 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, 2974 lk_ksettings->link_modes.advertising); 2975 2976 if (media == BNXT_MEDIA_CR) 2977 base->port = PORT_DA; 2978 else 2979 base->port = PORT_FIBRE; 2980 } 2981 base->phy_address = link_info->phy_addr; 2982 mutex_unlock(&bp->link_lock); 2983 2984 return 0; 2985 } 2986 2987 static int 2988 bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes) 2989 { 2990 struct bnxt *bp = netdev_priv(dev); 2991 struct bnxt_link_info *link_info = &bp->link_info; 2992 u16 support_pam4_spds = link_info->support_pam4_speeds; 2993 u16 support_spds2 = link_info->support_speeds2; 2994 u16 support_spds = link_info->support_speeds; 2995 u8 sig_mode = BNXT_SIG_MODE_NRZ; 2996 u32 lanes_needed = 1; 2997 u16 fw_speed = 0; 2998 2999 switch (ethtool_speed) { 3000 case SPEED_100: 3001 if (support_spds & BNXT_LINK_SPEED_MSK_100MB) 3002 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB; 3003 break; 3004 case SPEED_1000: 3005 if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) || 3006 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB)) 3007 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; 3008 break; 3009 case SPEED_2500: 3010 if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB) 3011 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB; 3012 break; 3013 case SPEED_10000: 3014 if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) || 3015 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB)) 3016 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; 3017 break; 3018 case SPEED_20000: 3019 if (support_spds & BNXT_LINK_SPEED_MSK_20GB) { 3020 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB; 3021 lanes_needed = 2; 3022 } 3023 break; 3024 case SPEED_25000: 3025 if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) || 3026 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB)) 3027 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; 3028 break; 3029 case SPEED_40000: 3030 if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) || 3031 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) { 3032 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; 3033 lanes_needed = 4; 3034 } 3035 break; 3036 case SPEED_50000: 3037 if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) || 3038 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) && 3039 lanes != 1) { 3040 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; 3041 lanes_needed = 2; 3042 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) { 3043 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB; 3044 sig_mode = BNXT_SIG_MODE_PAM4; 3045 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) { 3046 fw_speed = BNXT_LINK_SPEED_50GB_PAM4; 3047 sig_mode = BNXT_SIG_MODE_PAM4; 3048 } 3049 break; 3050 case SPEED_100000: 3051 if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) || 3052 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) && 3053 lanes != 2 && lanes != 1) { 3054 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB; 3055 lanes_needed = 4; 3056 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) { 3057 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB; 3058 sig_mode = BNXT_SIG_MODE_PAM4; 3059 lanes_needed = 2; 3060 } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) && 3061 lanes != 1) { 3062 fw_speed = BNXT_LINK_SPEED_100GB_PAM4; 3063 sig_mode = BNXT_SIG_MODE_PAM4; 3064 lanes_needed = 2; 3065 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) { 3066 fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112; 3067 sig_mode = BNXT_SIG_MODE_PAM4_112; 3068 } 3069 break; 3070 case SPEED_200000: 3071 if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) { 3072 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB; 3073 sig_mode = BNXT_SIG_MODE_PAM4; 3074 lanes_needed = 4; 3075 } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) && 3076 lanes != 2) { 3077 fw_speed = BNXT_LINK_SPEED_200GB_PAM4; 3078 sig_mode = BNXT_SIG_MODE_PAM4; 3079 lanes_needed = 4; 3080 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) { 3081 fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112; 3082 sig_mode = BNXT_SIG_MODE_PAM4_112; 3083 lanes_needed = 2; 3084 } 3085 break; 3086 case SPEED_400000: 3087 if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) && 3088 lanes != 4) { 3089 fw_speed = BNXT_LINK_SPEED_400GB_PAM4; 3090 sig_mode = BNXT_SIG_MODE_PAM4; 3091 lanes_needed = 8; 3092 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) { 3093 fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112; 3094 sig_mode = BNXT_SIG_MODE_PAM4_112; 3095 lanes_needed = 4; 3096 } 3097 break; 3098 } 3099 3100 if (!fw_speed) { 3101 netdev_err(dev, "unsupported speed!\n"); 3102 return -EINVAL; 3103 } 3104 3105 if (lanes && lanes != lanes_needed) { 3106 netdev_err(dev, "unsupported number of lanes for speed\n"); 3107 return -EINVAL; 3108 } 3109 3110 if (link_info->req_link_speed == fw_speed && 3111 link_info->req_signal_mode == sig_mode && 3112 link_info->autoneg == 0) 3113 return -EALREADY; 3114 3115 link_info->req_link_speed = fw_speed; 3116 link_info->req_signal_mode = sig_mode; 3117 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; 3118 link_info->autoneg = 0; 3119 link_info->advertising = 0; 3120 link_info->advertising_pam4 = 0; 3121 3122 return 0; 3123 } 3124 3125 u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode) 3126 { 3127 u16 fw_speed_mask = 0; 3128 3129 if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) || 3130 linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode)) 3131 fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB; 3132 3133 if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) || 3134 linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode)) 3135 fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB; 3136 3137 if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode)) 3138 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB; 3139 3140 if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode)) 3141 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB; 3142 3143 return fw_speed_mask; 3144 } 3145 3146 static int bnxt_set_link_ksettings(struct net_device *dev, 3147 const struct ethtool_link_ksettings *lk_ksettings) 3148 { 3149 struct bnxt *bp = netdev_priv(dev); 3150 struct bnxt_link_info *link_info = &bp->link_info; 3151 const struct ethtool_link_settings *base = &lk_ksettings->base; 3152 bool set_pause = false; 3153 u32 speed, lanes = 0; 3154 int rc = 0; 3155 3156 if (!BNXT_PHY_CFG_ABLE(bp)) 3157 return -EOPNOTSUPP; 3158 3159 mutex_lock(&bp->link_lock); 3160 if (base->autoneg == AUTONEG_ENABLE) { 3161 bnxt_set_ethtool_speeds(link_info, 3162 lk_ksettings->link_modes.advertising); 3163 link_info->autoneg |= BNXT_AUTONEG_SPEED; 3164 if (!link_info->advertising && !link_info->advertising_pam4) { 3165 link_info->advertising = link_info->support_auto_speeds; 3166 link_info->advertising_pam4 = 3167 link_info->support_pam4_auto_speeds; 3168 } 3169 /* any change to autoneg will cause link change, therefore the 3170 * driver should put back the original pause setting in autoneg 3171 */ 3172 if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) 3173 set_pause = true; 3174 } else { 3175 u8 phy_type = link_info->phy_type; 3176 3177 if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET || 3178 phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE || 3179 link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { 3180 netdev_err(dev, "10GBase-T devices must autoneg\n"); 3181 rc = -EINVAL; 3182 goto set_setting_exit; 3183 } 3184 if (base->duplex == DUPLEX_HALF) { 3185 netdev_err(dev, "HALF DUPLEX is not supported!\n"); 3186 rc = -EINVAL; 3187 goto set_setting_exit; 3188 } 3189 speed = base->speed; 3190 lanes = lk_ksettings->lanes; 3191 rc = bnxt_force_link_speed(dev, speed, lanes); 3192 if (rc) { 3193 if (rc == -EALREADY) 3194 rc = 0; 3195 goto set_setting_exit; 3196 } 3197 } 3198 3199 if (netif_running(dev)) 3200 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); 3201 3202 set_setting_exit: 3203 mutex_unlock(&bp->link_lock); 3204 return rc; 3205 } 3206 3207 static int bnxt_get_fecparam(struct net_device *dev, 3208 struct ethtool_fecparam *fec) 3209 { 3210 struct bnxt *bp = netdev_priv(dev); 3211 struct bnxt_link_info *link_info; 3212 u8 active_fec; 3213 u16 fec_cfg; 3214 3215 link_info = &bp->link_info; 3216 fec_cfg = link_info->fec_cfg; 3217 active_fec = link_info->active_fec_sig_mode & 3218 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; 3219 if (fec_cfg & BNXT_FEC_NONE) { 3220 fec->fec = ETHTOOL_FEC_NONE; 3221 fec->active_fec = ETHTOOL_FEC_NONE; 3222 return 0; 3223 } 3224 if (fec_cfg & BNXT_FEC_AUTONEG) 3225 fec->fec |= ETHTOOL_FEC_AUTO; 3226 if (fec_cfg & BNXT_FEC_ENC_BASE_R) 3227 fec->fec |= ETHTOOL_FEC_BASER; 3228 if (fec_cfg & BNXT_FEC_ENC_RS) 3229 fec->fec |= ETHTOOL_FEC_RS; 3230 if (fec_cfg & BNXT_FEC_ENC_LLRS) 3231 fec->fec |= ETHTOOL_FEC_LLRS; 3232 3233 switch (active_fec) { 3234 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: 3235 fec->active_fec |= ETHTOOL_FEC_BASER; 3236 break; 3237 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: 3238 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: 3239 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: 3240 fec->active_fec |= ETHTOOL_FEC_RS; 3241 break; 3242 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: 3243 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: 3244 fec->active_fec |= ETHTOOL_FEC_LLRS; 3245 break; 3246 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: 3247 fec->active_fec |= ETHTOOL_FEC_OFF; 3248 break; 3249 } 3250 return 0; 3251 } 3252 3253 static const struct ethtool_fec_hist_range bnxt_fec_ranges[] = { 3254 { 0, 0}, 3255 { 1, 1}, 3256 { 2, 2}, 3257 { 3, 3}, 3258 { 4, 4}, 3259 { 5, 5}, 3260 { 6, 6}, 3261 { 7, 7}, 3262 { 8, 8}, 3263 { 9, 9}, 3264 { 10, 10}, 3265 { 11, 11}, 3266 { 12, 12}, 3267 { 13, 13}, 3268 { 14, 14}, 3269 { 15, 15}, 3270 { 0, 0}, 3271 }; 3272 3273 static void bnxt_hwrm_port_phy_fdrstat(struct bnxt *bp, 3274 struct ethtool_fec_hist *hist) 3275 { 3276 struct ethtool_fec_hist_value *values = hist->values; 3277 struct hwrm_port_phy_fdrstat_output *resp; 3278 struct hwrm_port_phy_fdrstat_input *req; 3279 int rc, i; 3280 3281 if (!(bp->phy_flags & BNXT_PHY_FL_FDRSTATS)) 3282 return; 3283 3284 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_FDRSTAT); 3285 if (rc) 3286 return; 3287 3288 req->port_id = cpu_to_le16(bp->pf.port_id); 3289 req->ops = cpu_to_le16(PORT_PHY_FDRSTAT_REQ_OPS_COUNTER); 3290 resp = hwrm_req_hold(bp, req); 3291 rc = hwrm_req_send(bp, req); 3292 if (!rc) { 3293 hist->ranges = bnxt_fec_ranges; 3294 for (i = 0; i <= 15; i++) { 3295 __le64 sum = resp->accumulated_codewords_err_s[i]; 3296 3297 values[i].sum = le64_to_cpu(sum); 3298 } 3299 } 3300 hwrm_req_drop(bp, req); 3301 } 3302 3303 static void bnxt_get_fec_stats(struct net_device *dev, 3304 struct ethtool_fec_stats *fec_stats, 3305 struct ethtool_fec_hist *hist) 3306 { 3307 struct bnxt *bp = netdev_priv(dev); 3308 u64 *rx; 3309 3310 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 3311 return; 3312 3313 rx = bp->rx_port_stats_ext.sw_stats; 3314 fec_stats->corrected_bits.total = 3315 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits)); 3316 3317 if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY) 3318 return; 3319 3320 fec_stats->corrected_blocks.total = 3321 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks)); 3322 fec_stats->uncorrectable_blocks.total = 3323 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks)); 3324 bnxt_hwrm_port_phy_fdrstat(bp, hist); 3325 } 3326 3327 static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info, 3328 u32 fec) 3329 { 3330 u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE; 3331 3332 if (fec & ETHTOOL_FEC_BASER) 3333 fw_fec |= BNXT_FEC_BASE_R_ON(link_info); 3334 else if (fec & ETHTOOL_FEC_RS) 3335 fw_fec |= BNXT_FEC_RS_ON(link_info); 3336 else if (fec & ETHTOOL_FEC_LLRS) 3337 fw_fec |= BNXT_FEC_LLRS_ON; 3338 return fw_fec; 3339 } 3340 3341 static int bnxt_set_fecparam(struct net_device *dev, 3342 struct ethtool_fecparam *fecparam) 3343 { 3344 struct hwrm_port_phy_cfg_input *req; 3345 struct bnxt *bp = netdev_priv(dev); 3346 struct bnxt_link_info *link_info; 3347 u32 new_cfg, fec = fecparam->fec; 3348 u16 fec_cfg; 3349 int rc; 3350 3351 link_info = &bp->link_info; 3352 fec_cfg = link_info->fec_cfg; 3353 if (fec_cfg & BNXT_FEC_NONE) 3354 return -EOPNOTSUPP; 3355 3356 if (fec & ETHTOOL_FEC_OFF) { 3357 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE | 3358 BNXT_FEC_ALL_OFF(link_info); 3359 goto apply_fec; 3360 } 3361 if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) || 3362 ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) || 3363 ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) || 3364 ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP))) 3365 return -EINVAL; 3366 3367 if (fec & ETHTOOL_FEC_AUTO) { 3368 if (!link_info->autoneg) 3369 return -EINVAL; 3370 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE; 3371 } else { 3372 new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec); 3373 } 3374 3375 apply_fec: 3376 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 3377 if (rc) 3378 return rc; 3379 req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 3380 rc = hwrm_req_send(bp, req); 3381 /* update current settings */ 3382 if (!rc) { 3383 mutex_lock(&bp->link_lock); 3384 bnxt_update_link(bp, false); 3385 mutex_unlock(&bp->link_lock); 3386 } 3387 return rc; 3388 } 3389 3390 static void bnxt_get_pauseparam(struct net_device *dev, 3391 struct ethtool_pauseparam *epause) 3392 { 3393 struct bnxt *bp = netdev_priv(dev); 3394 struct bnxt_link_info *link_info = &bp->link_info; 3395 3396 if (BNXT_VF(bp)) 3397 return; 3398 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL); 3399 epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX); 3400 epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX); 3401 } 3402 3403 static void bnxt_get_pause_stats(struct net_device *dev, 3404 struct ethtool_pause_stats *epstat) 3405 { 3406 struct bnxt *bp = netdev_priv(dev); 3407 u64 *rx, *tx; 3408 3409 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 3410 return; 3411 3412 rx = bp->port_stats.sw_stats; 3413 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 3414 3415 epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames); 3416 epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames); 3417 } 3418 3419 static int bnxt_set_pauseparam(struct net_device *dev, 3420 struct ethtool_pauseparam *epause) 3421 { 3422 int rc = 0; 3423 struct bnxt *bp = netdev_priv(dev); 3424 struct bnxt_link_info *link_info = &bp->link_info; 3425 3426 if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) 3427 return -EOPNOTSUPP; 3428 3429 mutex_lock(&bp->link_lock); 3430 if (epause->autoneg) { 3431 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 3432 rc = -EINVAL; 3433 goto pause_exit; 3434 } 3435 3436 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 3437 link_info->req_flow_ctrl = 0; 3438 } else { 3439 /* when transition from auto pause to force pause, 3440 * force a link change 3441 */ 3442 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 3443 link_info->force_link_chng = true; 3444 link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL; 3445 link_info->req_flow_ctrl = 0; 3446 } 3447 if (epause->rx_pause) 3448 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX; 3449 3450 if (epause->tx_pause) 3451 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; 3452 3453 if (netif_running(dev)) 3454 rc = bnxt_hwrm_set_pause(bp); 3455 3456 pause_exit: 3457 mutex_unlock(&bp->link_lock); 3458 return rc; 3459 } 3460 3461 static u32 bnxt_get_link(struct net_device *dev) 3462 { 3463 struct bnxt *bp = netdev_priv(dev); 3464 3465 /* TODO: handle MF, VF, driver close case */ 3466 return BNXT_LINK_IS_UP(bp); 3467 } 3468 3469 static int bnxt_get_link_ext_state(struct net_device *dev, 3470 struct ethtool_link_ext_state_info *info) 3471 { 3472 struct bnxt *bp = netdev_priv(dev); 3473 u8 reason; 3474 3475 if (BNXT_LINK_IS_UP(bp)) 3476 return -ENODATA; 3477 3478 reason = bp->link_info.link_down_reason; 3479 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF) { 3480 info->link_ext_state = ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE; 3481 info->link_training = ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT; 3482 return 0; 3483 } 3484 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED) { 3485 info->link_ext_state = ETHTOOL_LINK_EXT_STATE_NO_CABLE; 3486 return 0; 3487 } 3488 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION) { 3489 info->link_ext_state = ETHTOOL_LINK_EXT_STATE_OTP_SPEED_VIOLATION; 3490 return 0; 3491 } 3492 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT) { 3493 info->link_ext_state = ETHTOOL_LINK_EXT_STATE_MODULE; 3494 return 0; 3495 } 3496 if (reason & PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST) { 3497 info->link_ext_state = ETHTOOL_LINK_EXT_STATE_BMC_REQUEST_DOWN; 3498 return 0; 3499 } 3500 return -ENODATA; 3501 } 3502 3503 int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp, 3504 struct hwrm_nvm_get_dev_info_output *nvm_dev_info) 3505 { 3506 struct hwrm_nvm_get_dev_info_output *resp; 3507 struct hwrm_nvm_get_dev_info_input *req; 3508 int rc; 3509 3510 if (BNXT_VF(bp)) 3511 return -EOPNOTSUPP; 3512 3513 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO); 3514 if (rc) 3515 return rc; 3516 3517 resp = hwrm_req_hold(bp, req); 3518 rc = hwrm_req_send(bp, req); 3519 if (!rc) 3520 memcpy(nvm_dev_info, resp, sizeof(*resp)); 3521 hwrm_req_drop(bp, req); 3522 return rc; 3523 } 3524 3525 static void bnxt_print_admin_err(struct bnxt *bp) 3526 { 3527 netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n"); 3528 } 3529 3530 int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, 3531 u16 ext, u16 *index, u32 *item_length, 3532 u32 *data_length); 3533 3534 int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, 3535 u16 dir_ordinal, u16 dir_ext, u16 dir_attr, 3536 u32 dir_item_len, const u8 *data, 3537 size_t data_len) 3538 { 3539 struct bnxt *bp = netdev_priv(dev); 3540 struct hwrm_nvm_write_input *req; 3541 int rc; 3542 3543 rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE); 3544 if (rc) 3545 return rc; 3546 3547 if (data_len && data) { 3548 dma_addr_t dma_handle; 3549 u8 *kmem; 3550 3551 kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle); 3552 if (!kmem) { 3553 hwrm_req_drop(bp, req); 3554 return -ENOMEM; 3555 } 3556 3557 req->dir_data_length = cpu_to_le32(data_len); 3558 3559 memcpy(kmem, data, data_len); 3560 req->host_src_addr = cpu_to_le64(dma_handle); 3561 } 3562 3563 hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout); 3564 req->dir_type = cpu_to_le16(dir_type); 3565 req->dir_ordinal = cpu_to_le16(dir_ordinal); 3566 req->dir_ext = cpu_to_le16(dir_ext); 3567 req->dir_attr = cpu_to_le16(dir_attr); 3568 req->dir_item_length = cpu_to_le32(dir_item_len); 3569 rc = hwrm_req_send(bp, req); 3570 3571 if (rc == -EACCES) 3572 bnxt_print_admin_err(bp); 3573 return rc; 3574 } 3575 3576 int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, 3577 u8 self_reset, u8 flags) 3578 { 3579 struct bnxt *bp = netdev_priv(dev); 3580 struct hwrm_fw_reset_input *req; 3581 int rc; 3582 3583 if (!bnxt_hwrm_reset_permitted(bp)) { 3584 netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver"); 3585 return -EPERM; 3586 } 3587 3588 rc = hwrm_req_init(bp, req, HWRM_FW_RESET); 3589 if (rc) 3590 return rc; 3591 3592 req->embedded_proc_type = proc_type; 3593 req->selfrst_status = self_reset; 3594 req->flags = flags; 3595 3596 if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) { 3597 rc = hwrm_req_send_silent(bp, req); 3598 } else { 3599 rc = hwrm_req_send(bp, req); 3600 if (rc == -EACCES) 3601 bnxt_print_admin_err(bp); 3602 } 3603 return rc; 3604 } 3605 3606 static int bnxt_firmware_reset(struct net_device *dev, 3607 enum bnxt_nvm_directory_type dir_type) 3608 { 3609 u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE; 3610 u8 proc_type, flags = 0; 3611 3612 /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */ 3613 /* (e.g. when firmware isn't already running) */ 3614 switch (dir_type) { 3615 case BNX_DIR_TYPE_CHIMP_PATCH: 3616 case BNX_DIR_TYPE_BOOTCODE: 3617 case BNX_DIR_TYPE_BOOTCODE_2: 3618 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT; 3619 /* Self-reset ChiMP upon next PCIe reset: */ 3620 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; 3621 break; 3622 case BNX_DIR_TYPE_APE_FW: 3623 case BNX_DIR_TYPE_APE_PATCH: 3624 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; 3625 /* Self-reset APE upon next PCIe reset: */ 3626 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; 3627 break; 3628 case BNX_DIR_TYPE_KONG_FW: 3629 case BNX_DIR_TYPE_KONG_PATCH: 3630 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL; 3631 break; 3632 case BNX_DIR_TYPE_BONO_FW: 3633 case BNX_DIR_TYPE_BONO_PATCH: 3634 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE; 3635 break; 3636 default: 3637 return -EINVAL; 3638 } 3639 3640 return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags); 3641 } 3642 3643 static int bnxt_firmware_reset_chip(struct net_device *dev) 3644 { 3645 struct bnxt *bp = netdev_priv(dev); 3646 u8 flags = 0; 3647 3648 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 3649 flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 3650 3651 return bnxt_hwrm_firmware_reset(dev, 3652 FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP, 3653 FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP, 3654 flags); 3655 } 3656 3657 static int bnxt_firmware_reset_ap(struct net_device *dev) 3658 { 3659 return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP, 3660 FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE, 3661 0); 3662 } 3663 3664 static int bnxt_flash_firmware(struct net_device *dev, 3665 u16 dir_type, 3666 const u8 *fw_data, 3667 size_t fw_size) 3668 { 3669 int rc = 0; 3670 u16 code_type; 3671 u32 stored_crc; 3672 u32 calculated_crc; 3673 struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data; 3674 3675 switch (dir_type) { 3676 case BNX_DIR_TYPE_BOOTCODE: 3677 case BNX_DIR_TYPE_BOOTCODE_2: 3678 code_type = CODE_BOOT; 3679 break; 3680 case BNX_DIR_TYPE_CHIMP_PATCH: 3681 code_type = CODE_CHIMP_PATCH; 3682 break; 3683 case BNX_DIR_TYPE_APE_FW: 3684 code_type = CODE_MCTP_PASSTHRU; 3685 break; 3686 case BNX_DIR_TYPE_APE_PATCH: 3687 code_type = CODE_APE_PATCH; 3688 break; 3689 case BNX_DIR_TYPE_KONG_FW: 3690 code_type = CODE_KONG_FW; 3691 break; 3692 case BNX_DIR_TYPE_KONG_PATCH: 3693 code_type = CODE_KONG_PATCH; 3694 break; 3695 case BNX_DIR_TYPE_BONO_FW: 3696 code_type = CODE_BONO_FW; 3697 break; 3698 case BNX_DIR_TYPE_BONO_PATCH: 3699 code_type = CODE_BONO_PATCH; 3700 break; 3701 default: 3702 netdev_err(dev, "Unsupported directory entry type: %u\n", 3703 dir_type); 3704 return -EINVAL; 3705 } 3706 if (fw_size < sizeof(struct bnxt_fw_header)) { 3707 netdev_err(dev, "Invalid firmware file size: %u\n", 3708 (unsigned int)fw_size); 3709 return -EINVAL; 3710 } 3711 if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) { 3712 netdev_err(dev, "Invalid firmware signature: %08X\n", 3713 le32_to_cpu(header->signature)); 3714 return -EINVAL; 3715 } 3716 if (header->code_type != code_type) { 3717 netdev_err(dev, "Expected firmware type: %d, read: %d\n", 3718 code_type, header->code_type); 3719 return -EINVAL; 3720 } 3721 if (header->device != DEVICE_CUMULUS_FAMILY) { 3722 netdev_err(dev, "Expected firmware device family %d, read: %d\n", 3723 DEVICE_CUMULUS_FAMILY, header->device); 3724 return -EINVAL; 3725 } 3726 /* Confirm the CRC32 checksum of the file: */ 3727 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size - 3728 sizeof(stored_crc))); 3729 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc)); 3730 if (calculated_crc != stored_crc) { 3731 netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n", 3732 (unsigned long)stored_crc, 3733 (unsigned long)calculated_crc); 3734 return -EINVAL; 3735 } 3736 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, 3737 0, 0, 0, fw_data, fw_size); 3738 if (rc == 0) /* Firmware update successful */ 3739 rc = bnxt_firmware_reset(dev, dir_type); 3740 3741 return rc; 3742 } 3743 3744 static int bnxt_flash_microcode(struct net_device *dev, 3745 u16 dir_type, 3746 const u8 *fw_data, 3747 size_t fw_size) 3748 { 3749 struct bnxt_ucode_trailer *trailer; 3750 u32 calculated_crc; 3751 u32 stored_crc; 3752 int rc = 0; 3753 3754 if (fw_size < sizeof(struct bnxt_ucode_trailer)) { 3755 netdev_err(dev, "Invalid microcode file size: %u\n", 3756 (unsigned int)fw_size); 3757 return -EINVAL; 3758 } 3759 trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size - 3760 sizeof(*trailer))); 3761 if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) { 3762 netdev_err(dev, "Invalid microcode trailer signature: %08X\n", 3763 le32_to_cpu(trailer->sig)); 3764 return -EINVAL; 3765 } 3766 if (le16_to_cpu(trailer->dir_type) != dir_type) { 3767 netdev_err(dev, "Expected microcode type: %d, read: %d\n", 3768 dir_type, le16_to_cpu(trailer->dir_type)); 3769 return -EINVAL; 3770 } 3771 if (le16_to_cpu(trailer->trailer_length) < 3772 sizeof(struct bnxt_ucode_trailer)) { 3773 netdev_err(dev, "Invalid microcode trailer length: %d\n", 3774 le16_to_cpu(trailer->trailer_length)); 3775 return -EINVAL; 3776 } 3777 3778 /* Confirm the CRC32 checksum of the file: */ 3779 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size - 3780 sizeof(stored_crc))); 3781 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc)); 3782 if (calculated_crc != stored_crc) { 3783 netdev_err(dev, 3784 "CRC32 (%08lX) does not match calculated: %08lX\n", 3785 (unsigned long)stored_crc, 3786 (unsigned long)calculated_crc); 3787 return -EINVAL; 3788 } 3789 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, 3790 0, 0, 0, fw_data, fw_size); 3791 3792 return rc; 3793 } 3794 3795 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type) 3796 { 3797 switch (dir_type) { 3798 case BNX_DIR_TYPE_CHIMP_PATCH: 3799 case BNX_DIR_TYPE_BOOTCODE: 3800 case BNX_DIR_TYPE_BOOTCODE_2: 3801 case BNX_DIR_TYPE_APE_FW: 3802 case BNX_DIR_TYPE_APE_PATCH: 3803 case BNX_DIR_TYPE_KONG_FW: 3804 case BNX_DIR_TYPE_KONG_PATCH: 3805 case BNX_DIR_TYPE_BONO_FW: 3806 case BNX_DIR_TYPE_BONO_PATCH: 3807 return true; 3808 } 3809 3810 return false; 3811 } 3812 3813 static bool bnxt_dir_type_is_other_exec_format(u16 dir_type) 3814 { 3815 switch (dir_type) { 3816 case BNX_DIR_TYPE_AVS: 3817 case BNX_DIR_TYPE_EXP_ROM_MBA: 3818 case BNX_DIR_TYPE_PCIE: 3819 case BNX_DIR_TYPE_TSCF_UCODE: 3820 case BNX_DIR_TYPE_EXT_PHY: 3821 case BNX_DIR_TYPE_CCM: 3822 case BNX_DIR_TYPE_ISCSI_BOOT: 3823 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 3824 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 3825 return true; 3826 } 3827 3828 return false; 3829 } 3830 3831 static bool bnxt_dir_type_is_executable(u16 dir_type) 3832 { 3833 return bnxt_dir_type_is_ape_bin_format(dir_type) || 3834 bnxt_dir_type_is_other_exec_format(dir_type); 3835 } 3836 3837 static int bnxt_flash_firmware_from_file(struct net_device *dev, 3838 u16 dir_type, 3839 const char *filename) 3840 { 3841 const struct firmware *fw; 3842 int rc; 3843 3844 rc = request_firmware(&fw, filename, &dev->dev); 3845 if (rc != 0) { 3846 netdev_err(dev, "Error %d requesting firmware file: %s\n", 3847 rc, filename); 3848 return rc; 3849 } 3850 if (bnxt_dir_type_is_ape_bin_format(dir_type)) 3851 rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size); 3852 else if (bnxt_dir_type_is_other_exec_format(dir_type)) 3853 rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size); 3854 else 3855 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, 3856 0, 0, 0, fw->data, fw->size); 3857 release_firmware(fw); 3858 return rc; 3859 } 3860 3861 #define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM" 3862 #define MSG_INVALID_PKG "PKG install error : Invalid package" 3863 #define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error" 3864 #define MSG_INVALID_DEV "PKG install error : Invalid device" 3865 #define MSG_INTERNAL_ERR "PKG install error : Internal error" 3866 #define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram" 3867 #define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram" 3868 #define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error" 3869 #define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected" 3870 #define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure" 3871 3872 static int nvm_update_err_to_stderr(struct net_device *dev, u8 result, 3873 struct netlink_ext_ack *extack) 3874 { 3875 switch (result) { 3876 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER: 3877 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER: 3878 case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR: 3879 case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR: 3880 case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND: 3881 case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED: 3882 BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR); 3883 return -EINVAL; 3884 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE: 3885 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER: 3886 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE: 3887 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM: 3888 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH: 3889 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST: 3890 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER: 3891 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM: 3892 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM: 3893 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH: 3894 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE: 3895 case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM: 3896 case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM: 3897 BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG); 3898 return -ENOPKG; 3899 case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR: 3900 BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR); 3901 return -EPERM; 3902 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV: 3903 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID: 3904 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR: 3905 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID: 3906 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM: 3907 BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV); 3908 return -EOPNOTSUPP; 3909 default: 3910 BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR); 3911 return -EIO; 3912 } 3913 } 3914 3915 #define BNXT_PKG_DMA_SIZE 0x40000 3916 #define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE)) 3917 #define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST)) 3918 3919 static int bnxt_hwrm_nvm_defrag(struct bnxt *bp) 3920 { 3921 struct hwrm_nvm_defrag_input *req; 3922 int rc; 3923 3924 rc = hwrm_req_init(bp, req, HWRM_NVM_DEFRAG); 3925 if (rc) 3926 return rc; 3927 req->flags = cpu_to_le32(NVM_DEFRAG_REQ_FLAGS_DEFRAG); 3928 hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout); 3929 3930 return hwrm_req_send(bp, req); 3931 } 3932 3933 static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size, 3934 struct netlink_ext_ack *extack) 3935 { 3936 struct bnxt *bp = netdev_priv(dev); 3937 bool retry = false; 3938 u32 item_len; 3939 int rc; 3940 3941 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, 3942 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL, 3943 &item_len, NULL); 3944 if (rc) { 3945 BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR); 3946 return rc; 3947 } 3948 3949 if (fw_size > item_len) { 3950 do { 3951 rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE, 3952 BNX_DIR_ORDINAL_FIRST, 0, 1, 3953 round_up(fw_size, 4096), NULL, 3954 0); 3955 3956 if (rc == -ENOSPC) { 3957 if (retry || bnxt_hwrm_nvm_defrag(bp)) 3958 break; 3959 retry = true; 3960 } 3961 } while (rc == -ENOSPC); 3962 3963 if (rc) { 3964 BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR); 3965 return rc; 3966 } 3967 } 3968 return 0; 3969 } 3970 3971 int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw, 3972 u32 install_type, struct netlink_ext_ack *extack) 3973 { 3974 struct hwrm_nvm_install_update_input *install; 3975 struct hwrm_nvm_install_update_output *resp; 3976 struct hwrm_nvm_modify_input *modify; 3977 struct bnxt *bp = netdev_priv(dev); 3978 bool defrag_attempted = false; 3979 dma_addr_t dma_handle; 3980 u8 *kmem = NULL; 3981 u32 modify_len; 3982 u32 item_len; 3983 u8 cmd_err; 3984 u16 index; 3985 int rc; 3986 3987 /* resize before flashing larger image than available space */ 3988 rc = bnxt_resize_update_entry(dev, fw->size, extack); 3989 if (rc) 3990 return rc; 3991 3992 bnxt_hwrm_fw_set_time(bp); 3993 3994 rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY); 3995 if (rc) 3996 return rc; 3997 3998 /* Try allocating a large DMA buffer first. Older fw will 3999 * cause excessive NVRAM erases when using small blocks. 4000 */ 4001 modify_len = roundup_pow_of_two(fw->size); 4002 modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE); 4003 while (1) { 4004 kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle); 4005 if (!kmem && modify_len > PAGE_SIZE) 4006 modify_len /= 2; 4007 else 4008 break; 4009 } 4010 if (!kmem) { 4011 hwrm_req_drop(bp, modify); 4012 return -ENOMEM; 4013 } 4014 4015 rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE); 4016 if (rc) { 4017 hwrm_req_drop(bp, modify); 4018 return rc; 4019 } 4020 4021 hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout); 4022 hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout); 4023 4024 hwrm_req_hold(bp, modify); 4025 modify->host_src_addr = cpu_to_le64(dma_handle); 4026 4027 resp = hwrm_req_hold(bp, install); 4028 if ((install_type & 0xffff) == 0) 4029 install_type >>= 16; 4030 install->install_type = cpu_to_le32(install_type); 4031 4032 do { 4033 u32 copied = 0, len = modify_len; 4034 4035 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, 4036 BNX_DIR_ORDINAL_FIRST, 4037 BNX_DIR_EXT_NONE, 4038 &index, &item_len, NULL); 4039 if (rc) { 4040 BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR); 4041 break; 4042 } 4043 if (fw->size > item_len) { 4044 BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR); 4045 rc = -EFBIG; 4046 break; 4047 } 4048 4049 modify->dir_idx = cpu_to_le16(index); 4050 4051 if (fw->size > modify_len) 4052 modify->flags = BNXT_NVM_MORE_FLAG; 4053 while (copied < fw->size) { 4054 u32 balance = fw->size - copied; 4055 4056 if (balance <= modify_len) { 4057 len = balance; 4058 if (copied) 4059 modify->flags |= BNXT_NVM_LAST_FLAG; 4060 } 4061 memcpy(kmem, fw->data + copied, len); 4062 modify->len = cpu_to_le32(len); 4063 modify->offset = cpu_to_le32(copied); 4064 rc = hwrm_req_send(bp, modify); 4065 if (rc) 4066 goto pkg_abort; 4067 copied += len; 4068 } 4069 4070 rc = hwrm_req_send_silent(bp, install); 4071 if (!rc) 4072 break; 4073 4074 if (defrag_attempted) { 4075 /* We have tried to defragment already in the previous 4076 * iteration. Return with the result for INSTALL_UPDATE 4077 */ 4078 break; 4079 } 4080 4081 cmd_err = ((struct hwrm_err_output *)resp)->cmd_err; 4082 4083 switch (cmd_err) { 4084 case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK: 4085 BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR); 4086 rc = -EALREADY; 4087 break; 4088 case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR: 4089 install->flags = 4090 cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); 4091 4092 rc = hwrm_req_send_silent(bp, install); 4093 if (!rc) 4094 break; 4095 4096 cmd_err = ((struct hwrm_err_output *)resp)->cmd_err; 4097 4098 if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) { 4099 /* FW has cleared NVM area, driver will create 4100 * UPDATE directory and try the flash again 4101 */ 4102 defrag_attempted = true; 4103 install->flags = 0; 4104 rc = bnxt_flash_nvram(bp->dev, 4105 BNX_DIR_TYPE_UPDATE, 4106 BNX_DIR_ORDINAL_FIRST, 4107 0, 0, item_len, NULL, 0); 4108 if (!rc) 4109 break; 4110 } 4111 fallthrough; 4112 default: 4113 BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR); 4114 } 4115 } while (defrag_attempted && !rc); 4116 4117 pkg_abort: 4118 hwrm_req_drop(bp, modify); 4119 hwrm_req_drop(bp, install); 4120 4121 if (resp->result) { 4122 netdev_err(dev, "PKG install error = %d, problem_item = %d\n", 4123 (s8)resp->result, (int)resp->problem_item); 4124 rc = nvm_update_err_to_stderr(dev, resp->result, extack); 4125 } 4126 if (rc == -EACCES) 4127 bnxt_print_admin_err(bp); 4128 return rc; 4129 } 4130 4131 static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, 4132 u32 install_type, struct netlink_ext_ack *extack) 4133 { 4134 const struct firmware *fw; 4135 int rc; 4136 4137 rc = request_firmware(&fw, filename, &dev->dev); 4138 if (rc != 0) { 4139 netdev_err(dev, "PKG error %d requesting file: %s\n", 4140 rc, filename); 4141 return rc; 4142 } 4143 4144 rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack); 4145 4146 release_firmware(fw); 4147 4148 return rc; 4149 } 4150 4151 static int bnxt_flash_device(struct net_device *dev, 4152 struct ethtool_flash *flash) 4153 { 4154 if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) { 4155 netdev_err(dev, "flashdev not supported from a virtual function\n"); 4156 return -EINVAL; 4157 } 4158 4159 if (flash->region == ETHTOOL_FLASH_ALL_REGIONS || 4160 flash->region > 0xffff) 4161 return bnxt_flash_package_from_file(dev, flash->data, 4162 flash->region, NULL); 4163 4164 return bnxt_flash_firmware_from_file(dev, flash->region, flash->data); 4165 } 4166 4167 static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length) 4168 { 4169 struct hwrm_nvm_get_dir_info_output *output; 4170 struct hwrm_nvm_get_dir_info_input *req; 4171 struct bnxt *bp = netdev_priv(dev); 4172 int rc; 4173 4174 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO); 4175 if (rc) 4176 return rc; 4177 4178 output = hwrm_req_hold(bp, req); 4179 rc = hwrm_req_send(bp, req); 4180 if (!rc) { 4181 *entries = le32_to_cpu(output->entries); 4182 *length = le32_to_cpu(output->entry_length); 4183 } 4184 hwrm_req_drop(bp, req); 4185 return rc; 4186 } 4187 4188 static int bnxt_get_eeprom_len(struct net_device *dev) 4189 { 4190 struct bnxt *bp = netdev_priv(dev); 4191 4192 if (BNXT_VF(bp)) 4193 return 0; 4194 4195 /* The -1 return value allows the entire 32-bit range of offsets to be 4196 * passed via the ethtool command-line utility. 4197 */ 4198 return -1; 4199 } 4200 4201 static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) 4202 { 4203 struct bnxt *bp = netdev_priv(dev); 4204 int rc; 4205 u32 dir_entries; 4206 u32 entry_length; 4207 u8 *buf; 4208 size_t buflen; 4209 dma_addr_t dma_handle; 4210 struct hwrm_nvm_get_dir_entries_input *req; 4211 4212 rc = nvm_get_dir_info(dev, &dir_entries, &entry_length); 4213 if (rc != 0) 4214 return rc; 4215 4216 if (!dir_entries || !entry_length) 4217 return -EIO; 4218 4219 /* Insert 2 bytes of directory info (count and size of entries) */ 4220 if (len < 2) 4221 return -EINVAL; 4222 4223 *data++ = dir_entries; 4224 *data++ = entry_length; 4225 len -= 2; 4226 memset(data, 0xff, len); 4227 4228 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES); 4229 if (rc) 4230 return rc; 4231 4232 buflen = mul_u32_u32(dir_entries, entry_length); 4233 buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle); 4234 if (!buf) { 4235 hwrm_req_drop(bp, req); 4236 return -ENOMEM; 4237 } 4238 req->host_dest_addr = cpu_to_le64(dma_handle); 4239 4240 hwrm_req_hold(bp, req); /* hold the slice */ 4241 rc = hwrm_req_send(bp, req); 4242 if (rc == 0) 4243 memcpy(data, buf, len > buflen ? buflen : len); 4244 hwrm_req_drop(bp, req); 4245 return rc; 4246 } 4247 4248 int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, 4249 u32 length, u8 *data) 4250 { 4251 struct bnxt *bp = netdev_priv(dev); 4252 int rc; 4253 u8 *buf; 4254 dma_addr_t dma_handle; 4255 struct hwrm_nvm_read_input *req; 4256 4257 if (!length) 4258 return -EINVAL; 4259 4260 rc = hwrm_req_init(bp, req, HWRM_NVM_READ); 4261 if (rc) 4262 return rc; 4263 4264 buf = hwrm_req_dma_slice(bp, req, length, &dma_handle); 4265 if (!buf) { 4266 hwrm_req_drop(bp, req); 4267 return -ENOMEM; 4268 } 4269 4270 req->host_dest_addr = cpu_to_le64(dma_handle); 4271 req->dir_idx = cpu_to_le16(index); 4272 req->offset = cpu_to_le32(offset); 4273 req->len = cpu_to_le32(length); 4274 4275 hwrm_req_hold(bp, req); /* hold the slice */ 4276 rc = hwrm_req_send(bp, req); 4277 if (rc == 0) 4278 memcpy(data, buf, length); 4279 hwrm_req_drop(bp, req); 4280 return rc; 4281 } 4282 4283 int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, 4284 u16 ext, u16 *index, u32 *item_length, 4285 u32 *data_length) 4286 { 4287 struct hwrm_nvm_find_dir_entry_output *output; 4288 struct hwrm_nvm_find_dir_entry_input *req; 4289 struct bnxt *bp = netdev_priv(dev); 4290 int rc; 4291 4292 rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY); 4293 if (rc) 4294 return rc; 4295 4296 req->enables = 0; 4297 req->dir_idx = 0; 4298 req->dir_type = cpu_to_le16(type); 4299 req->dir_ordinal = cpu_to_le16(ordinal); 4300 req->dir_ext = cpu_to_le16(ext); 4301 req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; 4302 output = hwrm_req_hold(bp, req); 4303 rc = hwrm_req_send_silent(bp, req); 4304 if (rc == 0) { 4305 if (index) 4306 *index = le16_to_cpu(output->dir_idx); 4307 if (item_length) 4308 *item_length = le32_to_cpu(output->dir_item_length); 4309 if (data_length) 4310 *data_length = le32_to_cpu(output->dir_data_length); 4311 } 4312 hwrm_req_drop(bp, req); 4313 return rc; 4314 } 4315 4316 static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen) 4317 { 4318 char *retval = NULL; 4319 char *p; 4320 char *value; 4321 int field = 0; 4322 4323 if (datalen < 1) 4324 return NULL; 4325 /* null-terminate the log data (removing last '\n'): */ 4326 data[datalen - 1] = 0; 4327 for (p = data; *p != 0; p++) { 4328 field = 0; 4329 retval = NULL; 4330 while (*p != 0 && *p != '\n') { 4331 value = p; 4332 while (*p != 0 && *p != '\t' && *p != '\n') 4333 p++; 4334 if (field == desired_field) 4335 retval = value; 4336 if (*p != '\t') 4337 break; 4338 *p = 0; 4339 field++; 4340 p++; 4341 } 4342 if (*p == 0) 4343 break; 4344 *p = 0; 4345 } 4346 return retval; 4347 } 4348 4349 int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size) 4350 { 4351 struct bnxt *bp = netdev_priv(dev); 4352 u16 index = 0; 4353 char *pkgver; 4354 u32 pkglen; 4355 u8 *pkgbuf; 4356 int rc; 4357 4358 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, 4359 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 4360 &index, NULL, &pkglen); 4361 if (rc) 4362 return rc; 4363 4364 pkgbuf = kzalloc(pkglen, GFP_KERNEL); 4365 if (!pkgbuf) { 4366 dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n", 4367 pkglen); 4368 return -ENOMEM; 4369 } 4370 4371 rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf); 4372 if (rc) 4373 goto err; 4374 4375 pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf, 4376 pkglen); 4377 if (pkgver && *pkgver != 0 && isdigit(*pkgver)) 4378 strscpy(ver, pkgver, size); 4379 else 4380 rc = -ENOENT; 4381 4382 err: 4383 kfree(pkgbuf); 4384 4385 return rc; 4386 } 4387 4388 static void bnxt_get_pkgver(struct net_device *dev) 4389 { 4390 struct bnxt *bp = netdev_priv(dev); 4391 char buf[FW_VER_STR_LEN - 5]; 4392 int len; 4393 4394 if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) { 4395 len = strlen(bp->fw_ver_str); 4396 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len, 4397 "/pkg %s", buf); 4398 } 4399 } 4400 4401 static int bnxt_get_eeprom(struct net_device *dev, 4402 struct ethtool_eeprom *eeprom, 4403 u8 *data) 4404 { 4405 u32 index; 4406 u32 offset; 4407 4408 if (eeprom->offset == 0) /* special offset value to get directory */ 4409 return bnxt_get_nvram_directory(dev, eeprom->len, data); 4410 4411 index = eeprom->offset >> 24; 4412 offset = eeprom->offset & 0xffffff; 4413 4414 if (index == 0) { 4415 netdev_err(dev, "unsupported index value: %d\n", index); 4416 return -EINVAL; 4417 } 4418 4419 return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data); 4420 } 4421 4422 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index) 4423 { 4424 struct hwrm_nvm_erase_dir_entry_input *req; 4425 struct bnxt *bp = netdev_priv(dev); 4426 int rc; 4427 4428 rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY); 4429 if (rc) 4430 return rc; 4431 4432 req->dir_idx = cpu_to_le16(index); 4433 return hwrm_req_send(bp, req); 4434 } 4435 4436 static int bnxt_set_eeprom(struct net_device *dev, 4437 struct ethtool_eeprom *eeprom, 4438 u8 *data) 4439 { 4440 struct bnxt *bp = netdev_priv(dev); 4441 u8 index, dir_op; 4442 u16 type, ext, ordinal, attr; 4443 4444 if (!BNXT_PF(bp)) { 4445 netdev_err(dev, "NVM write not supported from a virtual function\n"); 4446 return -EINVAL; 4447 } 4448 4449 type = eeprom->magic >> 16; 4450 4451 if (type == 0xffff) { /* special value for directory operations */ 4452 index = eeprom->magic & 0xff; 4453 dir_op = eeprom->magic >> 8; 4454 if (index == 0) 4455 return -EINVAL; 4456 switch (dir_op) { 4457 case 0x0e: /* erase */ 4458 if (eeprom->offset != ~eeprom->magic) 4459 return -EINVAL; 4460 return bnxt_erase_nvram_directory(dev, index - 1); 4461 default: 4462 return -EINVAL; 4463 } 4464 } 4465 4466 /* Create or re-write an NVM item: */ 4467 if (bnxt_dir_type_is_executable(type)) 4468 return -EOPNOTSUPP; 4469 ext = eeprom->magic & 0xffff; 4470 ordinal = eeprom->offset >> 16; 4471 attr = eeprom->offset & 0xffff; 4472 4473 return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data, 4474 eeprom->len); 4475 } 4476 4477 static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata) 4478 { 4479 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); 4480 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp); 4481 struct bnxt *bp = netdev_priv(dev); 4482 struct ethtool_keee *eee = &bp->eee; 4483 struct bnxt_link_info *link_info = &bp->link_info; 4484 int rc = 0; 4485 4486 if (!BNXT_PHY_CFG_ABLE(bp)) 4487 return -EOPNOTSUPP; 4488 4489 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 4490 return -EOPNOTSUPP; 4491 4492 mutex_lock(&bp->link_lock); 4493 _bnxt_fw_to_linkmode(advertising, link_info->advertising); 4494 if (!edata->eee_enabled) 4495 goto eee_ok; 4496 4497 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 4498 netdev_warn(dev, "EEE requires autoneg\n"); 4499 rc = -EINVAL; 4500 goto eee_exit; 4501 } 4502 if (edata->tx_lpi_enabled) { 4503 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi || 4504 edata->tx_lpi_timer < bp->lpi_tmr_lo)) { 4505 netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n", 4506 bp->lpi_tmr_lo, bp->lpi_tmr_hi); 4507 rc = -EINVAL; 4508 goto eee_exit; 4509 } else if (!bp->lpi_tmr_hi) { 4510 edata->tx_lpi_timer = eee->tx_lpi_timer; 4511 } 4512 } 4513 if (linkmode_empty(edata->advertised)) { 4514 linkmode_and(edata->advertised, advertising, eee->supported); 4515 } else if (linkmode_andnot(tmp, edata->advertised, advertising)) { 4516 netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n"); 4517 rc = -EINVAL; 4518 goto eee_exit; 4519 } 4520 4521 linkmode_copy(eee->advertised, edata->advertised); 4522 eee->tx_lpi_enabled = edata->tx_lpi_enabled; 4523 eee->tx_lpi_timer = edata->tx_lpi_timer; 4524 eee_ok: 4525 eee->eee_enabled = edata->eee_enabled; 4526 4527 if (netif_running(dev)) 4528 rc = bnxt_hwrm_set_link_setting(bp, false, true); 4529 4530 eee_exit: 4531 mutex_unlock(&bp->link_lock); 4532 return rc; 4533 } 4534 4535 static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata) 4536 { 4537 struct bnxt *bp = netdev_priv(dev); 4538 4539 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 4540 return -EOPNOTSUPP; 4541 4542 *edata = bp->eee; 4543 if (!bp->eee.eee_enabled) { 4544 /* Preserve tx_lpi_timer so that the last value will be used 4545 * by default when it is re-enabled. 4546 */ 4547 linkmode_zero(edata->advertised); 4548 edata->tx_lpi_enabled = 0; 4549 } 4550 4551 if (!bp->eee.eee_active) 4552 linkmode_zero(edata->lp_advertised); 4553 4554 return 0; 4555 } 4556 4557 static int bnxt_hwrm_pfcwd_qcfg(struct bnxt *bp, u16 *val) 4558 { 4559 struct hwrm_queue_pfcwd_timeout_qcfg_output *resp; 4560 struct hwrm_queue_pfcwd_timeout_qcfg_input *req; 4561 int rc; 4562 4563 rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCFG); 4564 if (rc) 4565 return rc; 4566 resp = hwrm_req_hold(bp, req); 4567 rc = hwrm_req_send(bp, req); 4568 if (!rc) 4569 *val = le16_to_cpu(resp->pfcwd_timeout_value); 4570 hwrm_req_drop(bp, req); 4571 return rc; 4572 } 4573 4574 static int bnxt_hwrm_pfcwd_cfg(struct bnxt *bp, u16 val) 4575 { 4576 struct hwrm_queue_pfcwd_timeout_cfg_input *req; 4577 int rc; 4578 4579 rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_CFG); 4580 if (rc) 4581 return rc; 4582 req->pfcwd_timeout_value = cpu_to_le16(val); 4583 rc = hwrm_req_send(bp, req); 4584 return rc; 4585 } 4586 4587 static int bnxt_set_tunable(struct net_device *dev, 4588 const struct ethtool_tunable *tuna, 4589 const void *data) 4590 { 4591 struct bnxt *bp = netdev_priv(dev); 4592 u32 rx_copybreak, val; 4593 4594 switch (tuna->id) { 4595 case ETHTOOL_RX_COPYBREAK: 4596 rx_copybreak = *(u32 *)data; 4597 if (rx_copybreak > BNXT_MAX_RX_COPYBREAK) 4598 return -ERANGE; 4599 if (rx_copybreak != bp->rx_copybreak) { 4600 if (netif_running(dev)) 4601 return -EBUSY; 4602 bp->rx_copybreak = rx_copybreak; 4603 } 4604 return 0; 4605 case ETHTOOL_PFC_PREVENTION_TOUT: 4606 if (BNXT_VF(bp) || !bp->max_pfcwd_tmo_ms) 4607 return -EOPNOTSUPP; 4608 4609 val = *(u16 *)data; 4610 if (val > bp->max_pfcwd_tmo_ms && 4611 val != PFC_STORM_PREVENTION_AUTO) 4612 return -EINVAL; 4613 return bnxt_hwrm_pfcwd_cfg(bp, val); 4614 default: 4615 return -EOPNOTSUPP; 4616 } 4617 } 4618 4619 static int bnxt_get_tunable(struct net_device *dev, 4620 const struct ethtool_tunable *tuna, void *data) 4621 { 4622 struct bnxt *bp = netdev_priv(dev); 4623 4624 switch (tuna->id) { 4625 case ETHTOOL_RX_COPYBREAK: 4626 *(u32 *)data = bp->rx_copybreak; 4627 break; 4628 case ETHTOOL_PFC_PREVENTION_TOUT: 4629 if (!bp->max_pfcwd_tmo_ms) 4630 return -EOPNOTSUPP; 4631 return bnxt_hwrm_pfcwd_qcfg(bp, data); 4632 default: 4633 return -EOPNOTSUPP; 4634 } 4635 4636 return 0; 4637 } 4638 4639 static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, 4640 u16 page_number, u8 bank, 4641 u16 start_addr, u16 data_length, 4642 u8 *buf) 4643 { 4644 struct hwrm_port_phy_i2c_read_output *output; 4645 struct hwrm_port_phy_i2c_read_input *req; 4646 int rc, byte_offset = 0; 4647 4648 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ); 4649 if (rc) 4650 return rc; 4651 4652 output = hwrm_req_hold(bp, req); 4653 req->i2c_slave_addr = i2c_addr; 4654 req->page_number = cpu_to_le16(page_number); 4655 req->port_id = cpu_to_le16(bp->pf.port_id); 4656 do { 4657 u16 xfer_size; 4658 4659 xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE); 4660 data_length -= xfer_size; 4661 req->page_offset = cpu_to_le16(start_addr + byte_offset); 4662 req->data_length = xfer_size; 4663 req->enables = 4664 cpu_to_le32((start_addr + byte_offset ? 4665 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 4666 0) | 4667 (bank ? 4668 PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER : 4669 0)); 4670 rc = hwrm_req_send(bp, req); 4671 if (!rc) 4672 memcpy(buf + byte_offset, output->data, xfer_size); 4673 byte_offset += xfer_size; 4674 } while (!rc && data_length > 0); 4675 hwrm_req_drop(bp, req); 4676 4677 return rc; 4678 } 4679 4680 static int bnxt_get_module_info(struct net_device *dev, 4681 struct ethtool_modinfo *modinfo) 4682 { 4683 u8 data[SFF_DIAG_SUPPORT_OFFSET + 1]; 4684 struct bnxt *bp = netdev_priv(dev); 4685 int rc; 4686 4687 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4688 return -EPERM; 4689 4690 /* No point in going further if phy status indicates 4691 * module is not inserted or if it is powered down or 4692 * if it is of type 10GBase-T 4693 */ 4694 if (bp->link_info.module_status > 4695 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG) 4696 return -EOPNOTSUPP; 4697 4698 /* This feature is not supported in older firmware versions */ 4699 if (bp->hwrm_spec_code < 0x10202) 4700 return -EOPNOTSUPP; 4701 4702 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0, 4703 SFF_DIAG_SUPPORT_OFFSET + 1, 4704 data); 4705 if (!rc) { 4706 u8 module_id = data[0]; 4707 u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET]; 4708 4709 switch (module_id) { 4710 case SFF_MODULE_ID_SFP: 4711 modinfo->type = ETH_MODULE_SFF_8472; 4712 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 4713 if (!diag_supported) 4714 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 4715 break; 4716 case SFF_MODULE_ID_QSFP: 4717 case SFF_MODULE_ID_QSFP_PLUS: 4718 modinfo->type = ETH_MODULE_SFF_8436; 4719 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 4720 break; 4721 case SFF_MODULE_ID_QSFP28: 4722 modinfo->type = ETH_MODULE_SFF_8636; 4723 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 4724 break; 4725 default: 4726 rc = -EOPNOTSUPP; 4727 break; 4728 } 4729 } 4730 return rc; 4731 } 4732 4733 static int bnxt_get_module_eeprom(struct net_device *dev, 4734 struct ethtool_eeprom *eeprom, 4735 u8 *data) 4736 { 4737 struct bnxt *bp = netdev_priv(dev); 4738 u16 start = eeprom->offset, length = eeprom->len; 4739 int rc = 0; 4740 4741 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4742 return -EPERM; 4743 4744 memset(data, 0, eeprom->len); 4745 4746 /* Read A0 portion of the EEPROM */ 4747 if (start < ETH_MODULE_SFF_8436_LEN) { 4748 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN) 4749 length = ETH_MODULE_SFF_8436_LEN - start; 4750 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 4751 start, length, data); 4752 if (rc) 4753 return rc; 4754 start += length; 4755 data += length; 4756 length = eeprom->len - length; 4757 } 4758 4759 /* Read A2 portion of the EEPROM */ 4760 if (length) { 4761 start -= ETH_MODULE_SFF_8436_LEN; 4762 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0, 4763 start, length, data); 4764 } 4765 return rc; 4766 } 4767 4768 static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack) 4769 { 4770 if (bp->link_info.module_status <= 4771 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG) 4772 return 0; 4773 4774 if (bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET || 4775 bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE){ 4776 NL_SET_ERR_MSG_MOD(extack, "Operation not supported as PHY type is Base-T"); 4777 return -EOPNOTSUPP; 4778 } 4779 switch (bp->link_info.module_status) { 4780 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 4781 NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down"); 4782 break; 4783 case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED: 4784 NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted"); 4785 break; 4786 case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT: 4787 NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault"); 4788 break; 4789 default: 4790 NL_SET_ERR_MSG_MOD(extack, "Unknown error"); 4791 break; 4792 } 4793 return -EINVAL; 4794 } 4795 4796 static int 4797 bnxt_mod_eeprom_by_page_precheck(struct bnxt *bp, 4798 const struct ethtool_module_eeprom *page_data, 4799 struct netlink_ext_ack *extack) 4800 { 4801 int rc; 4802 4803 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { 4804 NL_SET_ERR_MSG_MOD(extack, 4805 "Module read/write not permitted on untrusted VF"); 4806 return -EPERM; 4807 } 4808 4809 rc = bnxt_get_module_status(bp, extack); 4810 if (rc) 4811 return rc; 4812 4813 if (bp->hwrm_spec_code < 0x10202) { 4814 NL_SET_ERR_MSG_MOD(extack, "Firmware version too old"); 4815 return -EINVAL; 4816 } 4817 4818 if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) { 4819 NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection"); 4820 return -EINVAL; 4821 } 4822 return 0; 4823 } 4824 4825 static int bnxt_get_module_eeprom_by_page(struct net_device *dev, 4826 const struct ethtool_module_eeprom *page_data, 4827 struct netlink_ext_ack *extack) 4828 { 4829 struct bnxt *bp = netdev_priv(dev); 4830 int rc; 4831 4832 rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack); 4833 if (rc) 4834 return rc; 4835 4836 rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1, 4837 page_data->page, page_data->bank, 4838 page_data->offset, 4839 page_data->length, 4840 page_data->data); 4841 if (rc) { 4842 NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom read failed"); 4843 return rc; 4844 } 4845 return page_data->length; 4846 } 4847 4848 static int bnxt_write_sfp_module_eeprom_info(struct bnxt *bp, 4849 const struct ethtool_module_eeprom *page) 4850 { 4851 struct hwrm_port_phy_i2c_write_input *req; 4852 int bytes_written = 0; 4853 int rc; 4854 4855 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_WRITE); 4856 if (rc) 4857 return rc; 4858 4859 hwrm_req_hold(bp, req); 4860 req->i2c_slave_addr = page->i2c_address << 1; 4861 req->page_number = cpu_to_le16(page->page); 4862 req->bank_number = page->bank; 4863 req->port_id = cpu_to_le16(bp->pf.port_id); 4864 req->enables = cpu_to_le32(PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET | 4865 PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER); 4866 4867 while (bytes_written < page->length) { 4868 u16 xfer_size; 4869 4870 xfer_size = min_t(u16, page->length - bytes_written, 4871 BNXT_MAX_PHY_I2C_RESP_SIZE); 4872 req->page_offset = cpu_to_le16(page->offset + bytes_written); 4873 req->data_length = xfer_size; 4874 memcpy(req->data, page->data + bytes_written, xfer_size); 4875 rc = hwrm_req_send(bp, req); 4876 if (rc) 4877 break; 4878 bytes_written += xfer_size; 4879 } 4880 4881 hwrm_req_drop(bp, req); 4882 return rc; 4883 } 4884 4885 static int bnxt_set_module_eeprom_by_page(struct net_device *dev, 4886 const struct ethtool_module_eeprom *page_data, 4887 struct netlink_ext_ack *extack) 4888 { 4889 struct bnxt *bp = netdev_priv(dev); 4890 int rc; 4891 4892 rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack); 4893 if (rc) 4894 return rc; 4895 4896 rc = bnxt_write_sfp_module_eeprom_info(bp, page_data); 4897 if (rc) { 4898 NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom write failed"); 4899 return rc; 4900 } 4901 return page_data->length; 4902 } 4903 4904 static int bnxt_nway_reset(struct net_device *dev) 4905 { 4906 int rc = 0; 4907 4908 struct bnxt *bp = netdev_priv(dev); 4909 struct bnxt_link_info *link_info = &bp->link_info; 4910 4911 if (!BNXT_PHY_CFG_ABLE(bp)) 4912 return -EOPNOTSUPP; 4913 4914 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) 4915 return -EINVAL; 4916 4917 if (netif_running(dev)) 4918 rc = bnxt_hwrm_set_link_setting(bp, true, false); 4919 4920 return rc; 4921 } 4922 4923 static int bnxt_set_phys_id(struct net_device *dev, 4924 enum ethtool_phys_id_state state) 4925 { 4926 struct hwrm_port_led_cfg_input *req; 4927 struct bnxt *bp = netdev_priv(dev); 4928 struct bnxt_pf_info *pf = &bp->pf; 4929 struct bnxt_led_cfg *led_cfg; 4930 u8 led_state; 4931 __le16 duration; 4932 int rc, i; 4933 4934 if (!bp->num_leds || BNXT_VF(bp)) 4935 return -EOPNOTSUPP; 4936 4937 if (state == ETHTOOL_ID_ACTIVE) { 4938 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT; 4939 duration = cpu_to_le16(500); 4940 } else if (state == ETHTOOL_ID_INACTIVE) { 4941 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT; 4942 duration = cpu_to_le16(0); 4943 } else { 4944 return -EINVAL; 4945 } 4946 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG); 4947 if (rc) 4948 return rc; 4949 4950 req->port_id = cpu_to_le16(pf->port_id); 4951 req->num_leds = bp->num_leds; 4952 led_cfg = (struct bnxt_led_cfg *)&req->led0_id; 4953 for (i = 0; i < bp->num_leds; i++, led_cfg++) { 4954 req->enables |= BNXT_LED_DFLT_ENABLES(i); 4955 led_cfg->led_id = bp->leds[i].led_id; 4956 led_cfg->led_state = led_state; 4957 led_cfg->led_blink_on = duration; 4958 led_cfg->led_blink_off = duration; 4959 led_cfg->led_group_id = bp->leds[i].led_group_id; 4960 } 4961 return hwrm_req_send(bp, req); 4962 } 4963 4964 static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring) 4965 { 4966 struct hwrm_selftest_irq_input *req; 4967 int rc; 4968 4969 rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ); 4970 if (rc) 4971 return rc; 4972 4973 req->cmpl_ring = cpu_to_le16(cmpl_ring); 4974 return hwrm_req_send(bp, req); 4975 } 4976 4977 static int bnxt_test_irq(struct bnxt *bp) 4978 { 4979 int i; 4980 4981 for (i = 0; i < bp->cp_nr_rings; i++) { 4982 u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id; 4983 int rc; 4984 4985 rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring); 4986 if (rc) 4987 return rc; 4988 } 4989 return 0; 4990 } 4991 4992 static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable) 4993 { 4994 struct hwrm_port_mac_cfg_input *req; 4995 int rc; 4996 4997 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); 4998 if (rc) 4999 return rc; 5000 5001 req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK); 5002 if (enable) 5003 req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL; 5004 else 5005 req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; 5006 return hwrm_req_send(bp, req); 5007 } 5008 5009 static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds) 5010 { 5011 struct hwrm_port_phy_qcaps_output *resp; 5012 struct hwrm_port_phy_qcaps_input *req; 5013 int rc; 5014 5015 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); 5016 if (rc) 5017 return rc; 5018 5019 resp = hwrm_req_hold(bp, req); 5020 rc = hwrm_req_send(bp, req); 5021 if (!rc) 5022 *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode); 5023 5024 hwrm_req_drop(bp, req); 5025 return rc; 5026 } 5027 5028 static int bnxt_disable_an_for_lpbk(struct bnxt *bp, 5029 struct hwrm_port_phy_cfg_input *req) 5030 { 5031 struct bnxt_link_info *link_info = &bp->link_info; 5032 u16 fw_advertising; 5033 u16 fw_speed; 5034 int rc; 5035 5036 if (!link_info->autoneg || 5037 (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK)) 5038 return 0; 5039 5040 rc = bnxt_query_force_speeds(bp, &fw_advertising); 5041 if (rc) 5042 return rc; 5043 5044 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; 5045 if (BNXT_LINK_IS_UP(bp)) 5046 fw_speed = bp->link_info.link_speed; 5047 else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB) 5048 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; 5049 else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB) 5050 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; 5051 else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB) 5052 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; 5053 else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB) 5054 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; 5055 5056 req->force_link_speed = cpu_to_le16(fw_speed); 5057 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE | 5058 PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 5059 rc = hwrm_req_send(bp, req); 5060 req->flags = 0; 5061 req->force_link_speed = cpu_to_le16(0); 5062 return rc; 5063 } 5064 5065 static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext) 5066 { 5067 struct hwrm_port_phy_cfg_input *req; 5068 int rc; 5069 5070 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 5071 if (rc) 5072 return rc; 5073 5074 /* prevent bnxt_disable_an_for_lpbk() from consuming the request */ 5075 hwrm_req_hold(bp, req); 5076 5077 if (enable) { 5078 bnxt_disable_an_for_lpbk(bp, req); 5079 if (ext) 5080 req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL; 5081 else 5082 req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; 5083 } else { 5084 req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE; 5085 } 5086 req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK); 5087 rc = hwrm_req_send(bp, req); 5088 hwrm_req_drop(bp, req); 5089 return rc; 5090 } 5091 5092 static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 5093 u32 raw_cons, int pkt_size) 5094 { 5095 struct bnxt_napi *bnapi = cpr->bnapi; 5096 struct bnxt_rx_ring_info *rxr; 5097 struct bnxt_sw_rx_bd *rx_buf; 5098 struct rx_cmp *rxcmp; 5099 u16 cp_cons, cons; 5100 u8 *data; 5101 u32 len; 5102 int i; 5103 5104 rxr = bnapi->rx_ring; 5105 cp_cons = RING_CMP(raw_cons); 5106 rxcmp = (struct rx_cmp *) 5107 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 5108 cons = rxcmp->rx_cmp_opaque; 5109 rx_buf = &rxr->rx_buf_ring[cons]; 5110 data = rx_buf->data_ptr; 5111 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; 5112 if (len != pkt_size) 5113 return -EIO; 5114 i = ETH_ALEN; 5115 if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr)) 5116 return -EIO; 5117 i += ETH_ALEN; 5118 for ( ; i < pkt_size; i++) { 5119 if (data[i] != (u8)(i & 0xff)) 5120 return -EIO; 5121 } 5122 return 0; 5123 } 5124 5125 static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 5126 int pkt_size) 5127 { 5128 struct tx_cmp *txcmp; 5129 int rc = -EIO; 5130 u32 raw_cons; 5131 u32 cons; 5132 int i; 5133 5134 raw_cons = cpr->cp_raw_cons; 5135 for (i = 0; i < 200; i++) { 5136 cons = RING_CMP(raw_cons); 5137 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 5138 5139 if (!TX_CMP_VALID(txcmp, raw_cons)) { 5140 udelay(5); 5141 continue; 5142 } 5143 5144 /* The valid test of the entry must be done first before 5145 * reading any further. 5146 */ 5147 dma_rmb(); 5148 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP || 5149 TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) { 5150 rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size); 5151 raw_cons = NEXT_RAW_CMP(raw_cons); 5152 raw_cons = NEXT_RAW_CMP(raw_cons); 5153 break; 5154 } 5155 raw_cons = NEXT_RAW_CMP(raw_cons); 5156 } 5157 cpr->cp_raw_cons = raw_cons; 5158 return rc; 5159 } 5160 5161 static int bnxt_run_loopback(struct bnxt *bp) 5162 { 5163 struct bnxt_tx_ring_info *txr = &bp->tx_ring[0]; 5164 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 5165 struct bnxt_cp_ring_info *cpr; 5166 int pkt_size, i = 0; 5167 struct sk_buff *skb; 5168 dma_addr_t map; 5169 u8 *data; 5170 int rc; 5171 5172 cpr = &rxr->bnapi->cp_ring; 5173 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5174 cpr = rxr->rx_cpr; 5175 pkt_size = min(bp->dev->mtu + ETH_HLEN, max(BNXT_DEFAULT_RX_COPYBREAK, 5176 bp->rx_copybreak)); 5177 skb = netdev_alloc_skb(bp->dev, pkt_size); 5178 if (!skb) 5179 return -ENOMEM; 5180 data = skb_put(skb, pkt_size); 5181 ether_addr_copy(&data[i], bp->dev->dev_addr); 5182 i += ETH_ALEN; 5183 ether_addr_copy(&data[i], bp->dev->dev_addr); 5184 i += ETH_ALEN; 5185 for ( ; i < pkt_size; i++) 5186 data[i] = (u8)(i & 0xff); 5187 5188 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, 5189 DMA_TO_DEVICE); 5190 if (dma_mapping_error(&bp->pdev->dev, map)) { 5191 dev_kfree_skb(skb); 5192 return -EIO; 5193 } 5194 bnxt_xmit_bd(bp, txr, map, pkt_size, NULL); 5195 5196 /* Sync BD data before updating doorbell */ 5197 wmb(); 5198 5199 bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); 5200 rc = bnxt_poll_loopback(bp, cpr, pkt_size); 5201 5202 dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE); 5203 dev_kfree_skb(skb); 5204 return rc; 5205 } 5206 5207 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results) 5208 { 5209 struct hwrm_selftest_exec_output *resp; 5210 struct hwrm_selftest_exec_input *req; 5211 int rc; 5212 5213 rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC); 5214 if (rc) 5215 return rc; 5216 5217 hwrm_req_timeout(bp, req, bp->test_info->timeout); 5218 req->flags = test_mask; 5219 5220 resp = hwrm_req_hold(bp, req); 5221 rc = hwrm_req_send(bp, req); 5222 *test_results = resp->test_success; 5223 hwrm_req_drop(bp, req); 5224 return rc; 5225 } 5226 5227 #define BNXT_DRV_TESTS 4 5228 #define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS) 5229 #define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1) 5230 #define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2) 5231 #define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3) 5232 5233 static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, 5234 u64 *buf) 5235 { 5236 struct bnxt *bp = netdev_priv(dev); 5237 bool do_ext_lpbk = false; 5238 bool offline = false; 5239 u8 test_results = 0; 5240 u8 test_mask = 0; 5241 int rc = 0, i; 5242 5243 if (!bp->num_tests || !BNXT_PF(bp)) 5244 return; 5245 5246 memset(buf, 0, sizeof(u64) * bp->num_tests); 5247 if (etest->flags & ETH_TEST_FL_OFFLINE && 5248 bnxt_ulp_registered(bp->edev[BNXT_AUXDEV_RDMA])) { 5249 etest->flags |= ETH_TEST_FL_FAILED; 5250 netdev_warn(dev, "Offline tests cannot be run with RoCE driver loaded\n"); 5251 return; 5252 } 5253 5254 if (!netif_running(dev)) { 5255 etest->flags |= ETH_TEST_FL_FAILED; 5256 return; 5257 } 5258 5259 if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) && 5260 (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK)) 5261 do_ext_lpbk = true; 5262 5263 if (etest->flags & ETH_TEST_FL_OFFLINE) { 5264 if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) { 5265 etest->flags |= ETH_TEST_FL_FAILED; 5266 netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n"); 5267 return; 5268 } 5269 offline = true; 5270 } 5271 5272 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) { 5273 u8 bit_val = 1 << i; 5274 5275 if (!(bp->test_info->offline_mask & bit_val)) 5276 test_mask |= bit_val; 5277 else if (offline) 5278 test_mask |= bit_val; 5279 } 5280 if (!offline) { 5281 bnxt_run_fw_tests(bp, test_mask, &test_results); 5282 } else { 5283 bnxt_close_nic(bp, true, false); 5284 bnxt_run_fw_tests(bp, test_mask, &test_results); 5285 5286 rc = bnxt_half_open_nic(bp); 5287 if (rc) { 5288 etest->flags |= ETH_TEST_FL_FAILED; 5289 return; 5290 } 5291 buf[BNXT_MACLPBK_TEST_IDX] = 1; 5292 if (bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK) 5293 goto skip_mac_loopback; 5294 5295 bnxt_hwrm_mac_loopback(bp, true); 5296 msleep(250); 5297 if (bnxt_run_loopback(bp)) 5298 etest->flags |= ETH_TEST_FL_FAILED; 5299 else 5300 buf[BNXT_MACLPBK_TEST_IDX] = 0; 5301 5302 bnxt_hwrm_mac_loopback(bp, false); 5303 skip_mac_loopback: 5304 buf[BNXT_PHYLPBK_TEST_IDX] = 1; 5305 if (bp->phy_flags & BNXT_PHY_FL_NO_PHY_LPBK) 5306 goto skip_phy_loopback; 5307 5308 bnxt_hwrm_phy_loopback(bp, true, false); 5309 msleep(1000); 5310 if (bnxt_run_loopback(bp)) 5311 etest->flags |= ETH_TEST_FL_FAILED; 5312 else 5313 buf[BNXT_PHYLPBK_TEST_IDX] = 0; 5314 skip_phy_loopback: 5315 buf[BNXT_EXTLPBK_TEST_IDX] = 1; 5316 if (do_ext_lpbk) { 5317 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 5318 bnxt_hwrm_phy_loopback(bp, true, true); 5319 msleep(1000); 5320 if (bnxt_run_loopback(bp)) 5321 etest->flags |= ETH_TEST_FL_FAILED; 5322 else 5323 buf[BNXT_EXTLPBK_TEST_IDX] = 0; 5324 } 5325 bnxt_hwrm_phy_loopback(bp, false, false); 5326 bnxt_half_close_nic(bp); 5327 rc = bnxt_open_nic(bp, true, true); 5328 } 5329 if (rc || bnxt_test_irq(bp)) { 5330 buf[BNXT_IRQ_TEST_IDX] = 1; 5331 etest->flags |= ETH_TEST_FL_FAILED; 5332 } 5333 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) { 5334 u8 bit_val = 1 << i; 5335 5336 if ((test_mask & bit_val) && !(test_results & bit_val)) { 5337 buf[i] = 1; 5338 etest->flags |= ETH_TEST_FL_FAILED; 5339 } 5340 } 5341 } 5342 5343 static int bnxt_reset(struct net_device *dev, u32 *flags) 5344 { 5345 struct bnxt *bp = netdev_priv(dev); 5346 bool reload = false; 5347 u32 req = *flags; 5348 5349 if (!req) 5350 return -EINVAL; 5351 5352 if (!BNXT_PF(bp)) { 5353 netdev_err(dev, "Reset is not supported from a VF\n"); 5354 return -EOPNOTSUPP; 5355 } 5356 5357 if (pci_vfs_assigned(bp->pdev) && 5358 !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) { 5359 netdev_err(dev, 5360 "Reset not allowed when VFs are assigned to VMs\n"); 5361 return -EBUSY; 5362 } 5363 5364 if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) { 5365 /* This feature is not supported in older firmware versions */ 5366 if (bp->hwrm_spec_code >= 0x10803) { 5367 if (!bnxt_firmware_reset_chip(dev)) { 5368 netdev_info(dev, "Firmware reset request successful.\n"); 5369 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) 5370 reload = true; 5371 *flags &= ~BNXT_FW_RESET_CHIP; 5372 } 5373 } else if (req == BNXT_FW_RESET_CHIP) { 5374 return -EOPNOTSUPP; /* only request, fail hard */ 5375 } 5376 } 5377 5378 if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) { 5379 /* This feature is not supported in older firmware versions */ 5380 if (bp->hwrm_spec_code >= 0x10803) { 5381 if (!bnxt_firmware_reset_ap(dev)) { 5382 netdev_info(dev, "Reset application processor successful.\n"); 5383 reload = true; 5384 *flags &= ~BNXT_FW_RESET_AP; 5385 } 5386 } else if (req == BNXT_FW_RESET_AP) { 5387 return -EOPNOTSUPP; /* only request, fail hard */ 5388 } 5389 } 5390 5391 if (reload) 5392 netdev_info(dev, "Reload driver to complete reset\n"); 5393 5394 return 0; 5395 } 5396 5397 static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump) 5398 { 5399 struct bnxt *bp = netdev_priv(dev); 5400 5401 if (dump->flag > BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE) { 5402 netdev_info(dev, 5403 "Supports only Live(0), Crash(1), Driver(2), Live with cached context(3) dumps.\n"); 5404 return -EINVAL; 5405 } 5406 5407 if (dump->flag == BNXT_DUMP_CRASH) { 5408 if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR && 5409 (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) { 5410 netdev_info(dev, 5411 "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n"); 5412 return -EOPNOTSUPP; 5413 } else if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) { 5414 netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n"); 5415 return -EOPNOTSUPP; 5416 } 5417 } 5418 5419 bp->dump_flag = dump->flag; 5420 return 0; 5421 } 5422 5423 static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) 5424 { 5425 struct bnxt *bp = netdev_priv(dev); 5426 5427 if (bp->hwrm_spec_code < 0x10801) 5428 return -EOPNOTSUPP; 5429 5430 dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 | 5431 bp->ver_resp.hwrm_fw_min_8b << 16 | 5432 bp->ver_resp.hwrm_fw_bld_8b << 8 | 5433 bp->ver_resp.hwrm_fw_rsvd_8b; 5434 5435 dump->flag = bp->dump_flag; 5436 dump->len = bnxt_get_coredump_length(bp, bp->dump_flag); 5437 return 0; 5438 } 5439 5440 static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, 5441 void *buf) 5442 { 5443 struct bnxt *bp = netdev_priv(dev); 5444 5445 if (bp->hwrm_spec_code < 0x10801) 5446 return -EOPNOTSUPP; 5447 5448 memset(buf, 0, dump->len); 5449 5450 dump->flag = bp->dump_flag; 5451 return bnxt_get_coredump(bp, dump->flag, buf, &dump->len); 5452 } 5453 5454 static int bnxt_get_ts_info(struct net_device *dev, 5455 struct kernel_ethtool_ts_info *info) 5456 { 5457 struct bnxt *bp = netdev_priv(dev); 5458 struct bnxt_ptp_cfg *ptp; 5459 5460 ptp = bp->ptp_cfg; 5461 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE; 5462 5463 if (!ptp) 5464 return 0; 5465 5466 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | 5467 SOF_TIMESTAMPING_RX_HARDWARE | 5468 SOF_TIMESTAMPING_RAW_HARDWARE; 5469 if (ptp->ptp_clock) 5470 info->phc_index = ptp_clock_index(ptp->ptp_clock); 5471 5472 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); 5473 5474 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 5475 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 5476 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 5477 5478 if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) 5479 info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL); 5480 return 0; 5481 } 5482 5483 static void bnxt_hwrm_pcie_qstats(struct bnxt *bp) 5484 { 5485 struct hwrm_pcie_qstats_output *resp; 5486 struct hwrm_pcie_qstats_input *req; 5487 5488 bp->pcie_stat_len = 0; 5489 if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) 5490 return; 5491 5492 if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS)) 5493 return; 5494 5495 resp = hwrm_req_hold(bp, req); 5496 if (__bnxt_hwrm_pcie_qstats(bp, req)) 5497 bp->pcie_stat_len = min_t(u16, 5498 le16_to_cpu(resp->pcie_stat_size), 5499 sizeof(struct pcie_ctx_hw_stats_v2)); 5500 hwrm_req_drop(bp, req); 5501 } 5502 5503 void bnxt_ethtool_init(struct bnxt *bp) 5504 { 5505 struct hwrm_selftest_qlist_output *resp; 5506 struct hwrm_selftest_qlist_input *req; 5507 struct bnxt_test_info *test_info; 5508 struct net_device *dev = bp->dev; 5509 int i, rc; 5510 5511 bnxt_hwrm_pcie_qstats(bp); 5512 if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER)) 5513 bnxt_get_pkgver(dev); 5514 5515 bp->num_tests = 0; 5516 if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp)) 5517 return; 5518 5519 test_info = bp->test_info; 5520 if (!test_info) { 5521 test_info = kzalloc_obj(*bp->test_info); 5522 if (!test_info) 5523 return; 5524 bp->test_info = test_info; 5525 } 5526 5527 if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST)) 5528 return; 5529 5530 resp = hwrm_req_hold(bp, req); 5531 rc = hwrm_req_send_silent(bp, req); 5532 if (rc) 5533 goto ethtool_init_exit; 5534 5535 bp->num_tests = resp->num_tests + BNXT_DRV_TESTS; 5536 if (bp->num_tests > BNXT_MAX_TEST) 5537 bp->num_tests = BNXT_MAX_TEST; 5538 5539 test_info->offline_mask = resp->offline_tests; 5540 test_info->timeout = le16_to_cpu(resp->test_timeout); 5541 if (!test_info->timeout) 5542 test_info->timeout = HWRM_CMD_TIMEOUT; 5543 for (i = 0; i < bp->num_tests; i++) { 5544 char *str = test_info->string[i]; 5545 char *fw_str = resp->test_name[i]; 5546 5547 if (i == BNXT_MACLPBK_TEST_IDX) { 5548 strcpy(str, "Mac loopback test (offline)"); 5549 } else if (i == BNXT_PHYLPBK_TEST_IDX) { 5550 strcpy(str, "Phy loopback test (offline)"); 5551 } else if (i == BNXT_EXTLPBK_TEST_IDX) { 5552 strcpy(str, "Ext loopback test (offline)"); 5553 } else if (i == BNXT_IRQ_TEST_IDX) { 5554 strcpy(str, "Interrupt_test (offline)"); 5555 } else { 5556 snprintf(str, ETH_GSTRING_LEN, "%s test (%s)", 5557 fw_str, test_info->offline_mask & (1 << i) ? 5558 "offline" : "online"); 5559 } 5560 } 5561 5562 ethtool_init_exit: 5563 hwrm_req_drop(bp, req); 5564 } 5565 5566 static void bnxt_get_eth_phy_stats(struct net_device *dev, 5567 struct ethtool_eth_phy_stats *phy_stats) 5568 { 5569 struct bnxt *bp = netdev_priv(dev); 5570 u64 *rx; 5571 5572 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 5573 return; 5574 5575 rx = bp->rx_port_stats_ext.sw_stats; 5576 phy_stats->SymbolErrorDuringCarrier = 5577 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err)); 5578 } 5579 5580 static void bnxt_get_eth_mac_stats(struct net_device *dev, 5581 struct ethtool_eth_mac_stats *mac_stats) 5582 { 5583 struct bnxt *bp = netdev_priv(dev); 5584 u64 *rx, *tx; 5585 5586 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 5587 return; 5588 5589 rx = bp->port_stats.sw_stats; 5590 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 5591 5592 mac_stats->FramesReceivedOK = 5593 BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames); 5594 mac_stats->FramesTransmittedOK = 5595 BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames); 5596 mac_stats->FrameCheckSequenceErrors = 5597 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 5598 mac_stats->AlignmentErrors = 5599 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 5600 mac_stats->OutOfRangeLengthField = 5601 BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames); 5602 } 5603 5604 static void bnxt_get_eth_ctrl_stats(struct net_device *dev, 5605 struct ethtool_eth_ctrl_stats *ctrl_stats) 5606 { 5607 struct bnxt *bp = netdev_priv(dev); 5608 u64 *rx; 5609 5610 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 5611 return; 5612 5613 rx = bp->port_stats.sw_stats; 5614 ctrl_stats->MACControlFramesReceived = 5615 BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames); 5616 } 5617 5618 static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = { 5619 { 0, 64 }, 5620 { 65, 127 }, 5621 { 128, 255 }, 5622 { 256, 511 }, 5623 { 512, 1023 }, 5624 { 1024, 1518 }, 5625 { 1519, 2047 }, 5626 { 2048, 4095 }, 5627 { 4096, 9216 }, 5628 { 9217, 16383 }, 5629 {} 5630 }; 5631 5632 static void bnxt_get_rmon_stats(struct net_device *dev, 5633 struct ethtool_rmon_stats *rmon_stats, 5634 const struct ethtool_rmon_hist_range **ranges) 5635 { 5636 struct bnxt *bp = netdev_priv(dev); 5637 u64 *rx, *tx; 5638 5639 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) 5640 return; 5641 5642 rx = bp->port_stats.sw_stats; 5643 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 5644 5645 rmon_stats->jabbers = 5646 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 5647 rmon_stats->oversize_pkts = 5648 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames); 5649 rmon_stats->undersize_pkts = 5650 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames); 5651 5652 rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames); 5653 rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames); 5654 rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames); 5655 rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames); 5656 rmon_stats->hist[4] = 5657 BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames); 5658 rmon_stats->hist[5] = 5659 BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames); 5660 rmon_stats->hist[6] = 5661 BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames); 5662 rmon_stats->hist[7] = 5663 BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames); 5664 rmon_stats->hist[8] = 5665 BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames); 5666 rmon_stats->hist[9] = 5667 BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames); 5668 5669 rmon_stats->hist_tx[0] = 5670 BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames); 5671 rmon_stats->hist_tx[1] = 5672 BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames); 5673 rmon_stats->hist_tx[2] = 5674 BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames); 5675 rmon_stats->hist_tx[3] = 5676 BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames); 5677 rmon_stats->hist_tx[4] = 5678 BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames); 5679 rmon_stats->hist_tx[5] = 5680 BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames); 5681 rmon_stats->hist_tx[6] = 5682 BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames); 5683 rmon_stats->hist_tx[7] = 5684 BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames); 5685 rmon_stats->hist_tx[8] = 5686 BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames); 5687 rmon_stats->hist_tx[9] = 5688 BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames); 5689 5690 *ranges = bnxt_rmon_ranges; 5691 } 5692 5693 static void bnxt_get_ptp_stats(struct net_device *dev, 5694 struct ethtool_ts_stats *ts_stats) 5695 { 5696 struct bnxt *bp = netdev_priv(dev); 5697 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 5698 5699 if (ptp) { 5700 ts_stats->pkts = ptp->stats.ts_pkts; 5701 ts_stats->lost = ptp->stats.ts_lost; 5702 ts_stats->err = atomic64_read(&ptp->stats.ts_err); 5703 } 5704 } 5705 5706 static void bnxt_get_link_ext_stats(struct net_device *dev, 5707 struct ethtool_link_ext_stats *stats) 5708 { 5709 struct bnxt *bp = netdev_priv(dev); 5710 u64 *rx; 5711 5712 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 5713 return; 5714 5715 rx = bp->rx_port_stats_ext.sw_stats; 5716 stats->link_down_events = 5717 *(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events)); 5718 } 5719 5720 void bnxt_ethtool_free(struct bnxt *bp) 5721 { 5722 kfree(bp->test_info); 5723 bp->test_info = NULL; 5724 } 5725 5726 const struct ethtool_ops bnxt_ethtool_ops = { 5727 .cap_link_lanes_supported = 1, 5728 .rxfh_per_ctx_key = 1, 5729 .rxfh_max_num_contexts = BNXT_MAX_ETH_RSS_CTX + 1, 5730 .rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5, 5731 .rxfh_priv_size = sizeof(struct bnxt_rss_ctx), 5732 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 5733 ETHTOOL_COALESCE_MAX_FRAMES | 5734 ETHTOOL_COALESCE_USECS_IRQ | 5735 ETHTOOL_COALESCE_MAX_FRAMES_IRQ | 5736 ETHTOOL_COALESCE_STATS_BLOCK_USECS | 5737 ETHTOOL_COALESCE_USE_ADAPTIVE_RX | 5738 ETHTOOL_COALESCE_USE_CQE, 5739 .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT | 5740 ETHTOOL_RING_USE_HDS_THRS, 5741 .get_link_ksettings = bnxt_get_link_ksettings, 5742 .set_link_ksettings = bnxt_set_link_ksettings, 5743 .get_fec_stats = bnxt_get_fec_stats, 5744 .get_fecparam = bnxt_get_fecparam, 5745 .set_fecparam = bnxt_set_fecparam, 5746 .get_pause_stats = bnxt_get_pause_stats, 5747 .get_pauseparam = bnxt_get_pauseparam, 5748 .set_pauseparam = bnxt_set_pauseparam, 5749 .get_drvinfo = bnxt_get_drvinfo, 5750 .get_regs_len = bnxt_get_regs_len, 5751 .get_regs = bnxt_get_regs, 5752 .get_wol = bnxt_get_wol, 5753 .set_wol = bnxt_set_wol, 5754 .get_coalesce = bnxt_get_coalesce, 5755 .set_coalesce = bnxt_set_coalesce, 5756 .get_msglevel = bnxt_get_msglevel, 5757 .set_msglevel = bnxt_set_msglevel, 5758 .get_sset_count = bnxt_get_sset_count, 5759 .get_strings = bnxt_get_strings, 5760 .get_ethtool_stats = bnxt_get_ethtool_stats, 5761 .set_ringparam = bnxt_set_ringparam, 5762 .get_ringparam = bnxt_get_ringparam, 5763 .get_channels = bnxt_get_channels, 5764 .set_channels = bnxt_set_channels, 5765 .get_rxnfc = bnxt_get_rxnfc, 5766 .set_rxnfc = bnxt_set_rxnfc, 5767 .get_rx_ring_count = bnxt_get_rx_ring_count, 5768 .get_rxfh_indir_size = bnxt_get_rxfh_indir_size, 5769 .get_rxfh_key_size = bnxt_get_rxfh_key_size, 5770 .get_rxfh = bnxt_get_rxfh, 5771 .set_rxfh = bnxt_set_rxfh, 5772 .get_rxfh_fields = bnxt_get_rxfh_fields, 5773 .set_rxfh_fields = bnxt_set_rxfh_fields, 5774 .create_rxfh_context = bnxt_create_rxfh_context, 5775 .modify_rxfh_context = bnxt_modify_rxfh_context, 5776 .remove_rxfh_context = bnxt_remove_rxfh_context, 5777 .flash_device = bnxt_flash_device, 5778 .get_eeprom_len = bnxt_get_eeprom_len, 5779 .get_eeprom = bnxt_get_eeprom, 5780 .set_eeprom = bnxt_set_eeprom, 5781 .get_link = bnxt_get_link, 5782 .get_link_ext_state = bnxt_get_link_ext_state, 5783 .get_link_ext_stats = bnxt_get_link_ext_stats, 5784 .get_eee = bnxt_get_eee, 5785 .set_eee = bnxt_set_eee, 5786 .get_tunable = bnxt_get_tunable, 5787 .set_tunable = bnxt_set_tunable, 5788 .get_module_info = bnxt_get_module_info, 5789 .get_module_eeprom = bnxt_get_module_eeprom, 5790 .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page, 5791 .set_module_eeprom_by_page = bnxt_set_module_eeprom_by_page, 5792 .nway_reset = bnxt_nway_reset, 5793 .set_phys_id = bnxt_set_phys_id, 5794 .self_test = bnxt_self_test, 5795 .get_ts_info = bnxt_get_ts_info, 5796 .reset = bnxt_reset, 5797 .set_dump = bnxt_set_dump, 5798 .get_dump_flag = bnxt_get_dump_flag, 5799 .get_dump_data = bnxt_get_dump_data, 5800 .get_eth_phy_stats = bnxt_get_eth_phy_stats, 5801 .get_eth_mac_stats = bnxt_get_eth_mac_stats, 5802 .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats, 5803 .get_rmon_stats = bnxt_get_rmon_stats, 5804 .get_ts_stats = bnxt_get_ptp_stats, 5805 }; 5806